r"""The torch package contains data structures for multi-dimensionaltensors and defines mathematical operations over these tensors.Additionally, it provides many utilities for efficient serialization ofTensors and arbitrary types, and other useful utilities.It has a CUDA counterpart, that enables you to run your tensor computationson an NVIDIA GPU with compute capability >= 3.0."""importmathimportosimportsysimportplatformimporttextwrapimportctypesimportinspectifsys.version_info<(3,):raiseException("Python 2 has reached end-of-life and is no longer supported by PyTorch.")from._utilsimport_import_dotted_name,classpropertyfrom._utils_internalimportget_file_path,prepare_multiprocessing_environment, \
    USE_RTLD_GLOBAL_WITH_LIBTORCH, USE_GLOBAL_DEPS

# TODO(torch_deploy) figure out how to freeze version.py in fbcode build
if sys.executable == 'torch_deploy':
    __version__ = "torch-deploy-1.8"
else:
    from .torch_version import __version__ as __version__

from typing import Any, Callable, Dict, Optional, Set, Type, TYPE_CHECKING, Union
import builtins

__all__ = [
    'typename', 'is_tensor', 'is_storage', 'set_default_tensor_type', 'set_default_device',
    'set_rng_state', 'get_rng_state', 'manual_seed', 'initial_seed', 'seed',
    'save', 'load', 'set_printoptions', 'chunk', 'split', 'stack', 'matmul',
    'no_grad', 'enable_grad', 'rand', 'randn', 'inference_mode',
    'DoubleStorage', 'FloatStorage', 'LongStorage', 'IntStorage',
    'ShortStorage', 'CharStorage', 'ByteStorage', 'BoolStorage',
    'TypedStorage', 'UntypedStorage',
    'DoubleTensor', 'FloatTensor', 'LongTensor', 'IntTensor',
    'ShortTensor', 'CharTensor', 'ByteTensor', 'BoolTensor', 'Tensor',
    'lobpcg', 'use_deterministic_algorithms',
    'are_deterministic_algorithms_enabled',
    'is_deterministic_algorithms_warn_only_enabled',
    'set_deterministic_debug_mode', 'get_deterministic_debug_mode',
    'set_float32_matmul_precision', 'get_float32_matmul_precision',
    'set_warn_always', 'is_warn_always_enabled',
    'SymInt', 'SymFloat', 'SymBool', 'sym_not',
    'sym_int', 'sym_float', 'sym_max', 'sym_min', 'compile', 'vmap',
]

################################################################################
# Load the extension module
################################################################################

if sys.platform == 'win32':
    pfiles_path = os.getenv('ProgramFiles', 'C:\\Program Files')
    py_dll_path = os.path.join(sys.exec_prefix, 'Library', 'bin')
    th_dll_path = os.path.join(os.path.dirname(__file__), 'lib')

    # When users create a virtualenv that inherits the base environment,
    # we will need to add the corresponding library directory into
    # DLL search directories. Otherwise, it will rely on `PATH` which
    # is dependent on user settings.
    if sys.exec_prefix != sys.base_exec_prefix:
        base_py_dll_path = os.path.join(sys.base_exec_prefix, 'Library', 'bin')
    else:
        base_py_dll_path = ''

    dll_paths = list(filter(os.path.exists, [th_dll_path, py_dll_path, base_py_dll_path]))

    if all([not os.path.exists(os.path.join(p, 'nvToolsExt64_1.dll')) for p in dll_paths]):
        nvtoolsext_dll_path = os.path.join(
            os.getenv('NVTOOLSEXT_PATH', os.path.join(pfiles_path, 'NVIDIA Corporation', 'NvToolsExt')),
            'bin', 'x64')
    else:
        nvtoolsext_dll_path = ''

    from .version import cuda as cuda_version
    import glob
    if cuda_version and all([not glob.glob(os.path.join(p, 'cudart64*.dll')) for p in dll_paths]):
        cuda_version_1 = cuda_version.replace('.', '_')
        cuda_path_var = 'CUDA_PATH_V' + cuda_version_1
        default_path = os.path.join(pfiles_path, 'NVIDIA GPU Computing Toolkit', 'CUDA', 'v' + cuda_version)
        cuda_path = os.path.join(os.getenv(cuda_path_var, default_path), 'bin')
    else:
        cuda_path = ''

    dll_paths.extend(filter(os.path.exists, [nvtoolsext_dll_path, cuda_path]))

    kernel32 = ctypes.WinDLL('kernel32.dll', use_last_error=True)
    with_load_library_flags = hasattr(kernel32, 'AddDllDirectory')
    prev_error_mode = kernel32.SetErrorMode(0x0001)

    kernel32.LoadLibraryW.restype = ctypes.c_void_p
    if with_load_library_flags:
        kernel32.LoadLibraryExW.restype = ctypes.c_void_p

    for dll_path in dll_paths:
        os.add_dll_directory(dll_path)

    try:
        ctypes.CDLL('vcruntime140.dll')
        ctypes.CDLL('msvcp140.dll')
        ctypes.CDLL('vcruntime140_1.dll')
    except OSError:
        print('''Microsoft Visual C++ Redistributable is not installed, this may lead to the DLL load failure.
                 It can be downloaded at https://aka.ms/vs/16/release/vc_redist.x64.exe''')

    dlls = glob.glob(os.path.join(th_dll_path, '*.dll'))
    path_patched = False
    for dll in dlls:
        is_loaded = False
        if with_load_library_flags:
            res = kernel32.LoadLibraryExW(dll, None, 0x00001100)
            last_error = ctypes.get_last_error()
            if res is None and last_error != 126:
                err = ctypes.WinError(last_error)
                err.strerror += f' Error loading "{dll}" or one of its dependencies.'
                raise err
            elif res is not None:
                is_loaded = True
        if not is_loaded:
            if not path_patched:
                os.environ['PATH'] = ';'.join(dll_paths + [os.environ['PATH']])
                path_patched = True
            res = kernel32.LoadLibraryW(dll)
            if res is None:
                err = ctypes.WinError(ctypes.get_last_error())
                err.strerror += f' Error loading "{dll}" or one of its dependencies.'
                raise err

    kernel32.SetErrorMode(prev_error_mode)


def _preload_cuda_deps(lib_folder, lib_name):
    """Preloads cuda deps if they could not be found otherwise."""
    # Should only be called on Linux if default path resolution has failed
    assert platform.system() == 'Linux', 'Should only be called on Linux'
    import glob
    lib_path = None
    for path in sys.path:
        nvidia_path = os.path.join(path, 'nvidia')
        if not os.path.exists(nvidia_path):
            continue
        candidate_lib_paths = glob.glob(os.path.join(nvidia_path, lib_folder, 'lib', lib_name))
        if candidate_lib_paths and not lib_path:
            lib_path = candidate_lib_paths[0]
        if lib_path:
            break
    if not lib_path:
        raise ValueError(f"{lib_name} not found in the system path {sys.path}")
    ctypes.CDLL(lib_path)


# See Note [Global dependencies]
def _load_global_deps():
    if sys.executable == 'torch_deploy' or platform.system() == 'Windows':
        return

    lib_name = 'libtorch_global_deps' + ('.dylib' if platform.system() == 'Darwin' else '.so')
    here = os.path.abspath(__file__)
    lib_path = os.path.join(os.path.dirname(here), 'lib', lib_name)

    try:
        ctypes.CDLL(lib_path, mode=ctypes.RTLD_GLOBAL)
    except OSError as err:
        # Can only happen for wheel with cuda libs as PYPI deps
        # As PyTorch is not purelib, but nvidia-*-cu11 is
        cuda_libs: Dict[str, str] = {
            'cublas': 'libcublas.so.*[0-9]',
            'cudnn': 'libcudnn.so.*[0-9]',
            'cuda_nvrtc': 'libnvrtc.so.*[0-9].*[0-9]',
            'cuda_runtime': 'libcudart.so.*[0-9].*[0-9]',
            'cuda_cupti': 'libcupti.so.*[0-9].*[0-9]',
            'cufft': 'libcufft.so.*[0-9]',
            'curand': 'libcurand.so.*[0-9]',
            'cusolver': 'libcusolver.so.*[0-9]',
            'cusparse': 'libcusparse.so.*[0-9]',
            'nccl': 'libnccl.so.*[0-9]',
            'nvtx': 'libnvToolsExt.so.*[0-9]',
        }
        is_cuda_lib_err = [lib for lib in cuda_libs.values() if lib.split('.')[0] in err.args[0]]
        if not is_cuda_lib_err:
            raise err
        for lib_folder, lib_name in cuda_libs.items():
            _preload_cuda_deps(lib_folder, lib_name)
        ctypes.CDLL(lib_path, mode=ctypes.RTLD_GLOBAL)


if (USE_RTLD_GLOBAL_WITH_LIBTORCH or os.getenv('TORCH_USE_RTLD_GLOBAL')) and \
        (sys.executable == "torch_deploy" or platform.system() != 'Windows'):
    # Do it the hard way.  You might want to load libtorch with RTLD_GLOBAL in a
    # few circumstances:
    #
    #   1. You're in a build environment (e.g., fbcode) where
    #      libtorch_global_deps is not available, but you still need
    #      to get mkl to link in with RTLD_GLOBAL or it will just
    #      not work.
    #
    #   2. You're trying to run PyTorch under UBSAN and you need
    #      to ensure that only one copy of libtorch is loaded, so
    #      vptr checks work properly.
    #
    # If you're using this setting, you must verify that all the libraries
    # you load consistently use the same libstdc++, or you may have
    # mysterious segfaults.
    #
    old_flags = sys.getdlopenflags()
    sys.setdlopenflags(os.RTLD_GLOBAL | os.RTLD_LAZY)
    from torch._C import *  # noqa: F403
    sys.setdlopenflags(old_flags)
    del old_flags

else:
    # Easy way.  You want this most of the time, because it will prevent
    # C++ symbols from libtorch clobbering C++ symbols from other
    # libraries, leading to mysterious segfaults.
    #
    # If building in an environment where libtorch_global_deps isn't available
    # like parts of fbsource, but where RTLD_GLOBAL causes segfaults, you will
    # want USE_RTLD_GLOBAL_WITH_LIBTORCH = False and USE_GLOBAL_DEPS = False
    #
    # See Note [Global dependencies]
    if USE_GLOBAL_DEPS:
        _load_global_deps()
    from torch._C import *  # noqa: F403

# Appease the type checker; ordinarily this binding is inserted by the
# torch._C module initialization code in C
if TYPE_CHECKING:
    import torch._C as _C
class SymInt:
    """
    Like an int (including magic methods), but redirects all operations on the
    wrapped node. This is used in particular to symbolically record operations
    in the symbolic shape workflow.
    """

    def __init__(self, node):
        # This field MUST be named node; C++ binding code assumes that this
        # class has a field named node that stores SymNode
        self.node = node

    def __bool__(self):
        return self.node.bool_()

    def __int__(self):
        return self.node.int_()

    # Magic methods installed by torch.fx.experimental.symbolic_shapes

    def __eq__(self, other: object) -> builtins.bool:
        raise AssertionError("type stub not overridden")

    def __lt__(self, other) -> builtins.bool:
        raise AssertionError("type stub not overridden")

    def __gt__(self, other) -> builtins.bool:
        raise AssertionError("type stub not overridden")

    def __le__(self, other) -> builtins.bool:
        raise AssertionError("type stub not overridden")

    def __ge__(self, other) -> builtins.bool:
        raise AssertionError("type stub not overridden")

    def __sym_max__(self, other):
        raise AssertionError("type stub not overridden")

    def __sym_min__(self, other):
        raise AssertionError("type stub not overridden")

    def __sym_float__(self):
        raise AssertionError("type stub not overridden")

    def __repr__(self):
        return str(self.node)
class SymFloat:
    """
    Like a float (including magic methods), but redirects all operations on the
    wrapped node. This is used in particular to symbolically record operations
    in the symbolic shape workflow.
    """

    def __init__(self, node):
        from torch.fx.experimental.symbolic_shapes import SymNode
        assert isinstance(node, SymNode)
        # This field MUST be named node; C++ binding code assumes that this
        # class has a field named node that stores SymNode
        self.node = node

    def __bool__(self):
        return self.node.bool_()

    # Magic methods installed by torch.fx.experimental.symbolic_shapes

    def __eq__(self, other: object) -> builtins.bool:
        raise AssertionError("type stub not overridden")

    def __lt__(self, other) -> builtins.bool:
        raise AssertionError("type stub not overridden")

    def __gt__(self, other) -> builtins.bool:
        raise AssertionError("type stub not overridden")

    def __le__(self, other) -> builtins.bool:
        raise AssertionError("type stub not overridden")

    def __ge__(self, other) -> builtins.bool:
        raise AssertionError("type stub not overridden")

    def __sym_max__(self, other):
        raise AssertionError("type stub not overridden")

    def __sym_min__(self, other):
        raise AssertionError("type stub not overridden")

    def __sym_int__(self):
        raise AssertionError("type stub not overridden")

    def __repr__(self):
        return self.node.str()
class SymBool:
    """
    Like a bool (including magic methods), but redirects all operations on the
    wrapped node. This is used in particular to symbolically record operations
    in the symbolic shape workflow.

    Unlike regular bools, regular boolean operators will force extra guards
    instead of symbolically evaluating.  Use the bitwise operators instead
    to handle this.
    """

    def __init__(self, node):
        from torch.fx.experimental.symbolic_shapes import SymNode
        assert isinstance(node, SymNode)
        # This field MUST be named node; C++ binding code assumes that this
        # class has a field named node that stores SymNode
        self.node = node

    def __bool__(self):
        return self.node.bool_()

    # Magic methods installed by torch.fx.experimental.symbolic_shapes

    def __and__(self, other) -> "SymBool":
        raise AssertionError("type stub not overridden")

    def __or__(self, other) -> "SymBool":
        raise AssertionError("type stub not overridden")

    # We very carefully define __sym_not__, and not a number of other
    # plausible alternatives:
    #
    #   - We do not override __not__ because this is not a real magic
    #     method; you cannot override the meaning of the not builtin in
    #     Python.  We use the name 'sym_not' to clarify that in user code you
    #     cannot use the builtin not or operator.not_ or operator.__not__ and
    #     hit this magic method; you must use our custom sym_not operator.
    #
    #   - We do not override the __invert__ method because SymBool is
    #     meant to be usable in situations where bool is expected.  However,
    #     bitwise negation ~a does the wrong thing with booleans (because
    #     bool is a subclass of int, so ~1 = -2 which is not falseish.)
    #     This would be a giant footgun, so we get around it by defining
    #     our own operator.  Note that bitwise and/or do the right thing,
    #     so we reuse the conventional operators there for readability.
    #
    def __sym_not__(self) -> "SymBool":
        raise AssertionError("type stub not overridden")

    def __repr__(self):
        return self.node.str()
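
# A short demo (illustrative only; the helper below is hypothetical and not
# part of the original module) of the __invert__ footgun described above:
# since bool subclasses int, bitwise negation of a plain bool yields an int,
# not a logically negated bool.
def _sym_bool_invert_footgun_demo():
    assert ~True == -2           # ~1 == -2, not False
    assert bool(~True) is True   # -2 is truthy, so `not` semantics are lost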
def sym_not(a):
    r""" SymInt-aware utility for logical negation.

    Args:
        a (SymBool or bool): Object to negate
    """
    if hasattr(a, '__sym_not__'):
        return a.__sym_not__()
    return not a
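
# Hedged usage sketch (the helper below is hypothetical, not part of the
# original file): sym_not falls back to the builtin `not` for plain bools,
# and dispatches to __sym_not__ for symbolic bools.
def _sym_not_demo():
    assert sym_not(True) is False   # plain bool: builtin `not` path
    assert sym_not(False) is True
    # For a SymBool produced during tracing, sym_not(b) would instead call
    # b.__sym_not__() and record the negation symbolically.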
def sym_float(a):
    r""" SymInt-aware utility for float casting.

    Args:
        a (SymInt, SymFloat, or object): Object to cast
    """
    if isinstance(a, SymFloat):
        return a
    elif hasattr(a, '__sym_float__'):
        return a.__sym_float__()
    return py_float(a)  # type: ignore[operator]
def sym_int(a):
    r""" SymInt-aware utility for int casting.

    Args:
        a (SymInt, SymFloat, or object): Object to cast
    """
    if isinstance(a, SymInt):
        return a
    elif isinstance(a, SymFloat):
        return math.floor(a) if a >= 0 else math.ceil(a)  # type: ignore[arg-type]
    return py_int(a)  # type: ignore[operator]
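
# Hedged sketch (hypothetical helper, not part of the original file): both
# the SymFloat branch above (floor for a >= 0, ceil otherwise) and the py_int
# fallback truncate toward zero, so sym_int agrees with int() on plain floats.
def _sym_int_demo():
    assert sym_int(3.7) == 3     # plain float: py_int truncates toward zero
    assert sym_int(-3.7) == -3   # matches ceil(-3.7) from the SymFloat branch
    assert sym_int(5) == 5       # plain ints pass through py_int unchanged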
def sym_max(a, b):
    """ SymInt-aware utility for max()."""
    if isinstance(a, (SymInt, SymFloat)):
        return a.__sym_max__(b)
    elif isinstance(b, (SymInt, SymFloat)):
        # NB: If you actually care about preserving output type exactly
        # if you do something like max(0, 0.0), it is NOT sound to treat
        # min/max as commutative
        return b.__sym_max__(a)
    return builtins.max(a, b)  # type: ignore[operator]
def sym_min(a, b):
    """ SymInt-aware utility for min()."""
    if isinstance(a, (SymInt, SymFloat)):
        return a.__sym_min__(b)
    elif isinstance(b, (SymInt, SymFloat)):
        return b.__sym_min__(a)
    return builtins.min(a, b)  # type: ignore[operator]
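
# Hedged sketch (hypothetical helper, not part of the original file): with no
# symbolic operands both helpers fall back to the builtins, so they are safe
# to use unconditionally in shape arithmetic.
def _sym_min_max_demo():
    assert sym_max(2, 5) == 5    # builtins.max fallback
    assert sym_min(2, 5) == 2    # builtins.min fallback
    assert sym_max(0, 0.0) == 0  # NB: output type is not guaranteed here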
# Check to see if we can load C extensions, and if not provide some guidance
# on what the problem might be.
try:
    # _initExtension is chosen (arbitrarily) as a sentinel.
    from torch._C import _initExtension
except ImportError:
    import torch._C as _C_for_compiled_check

    # The __file__ check only works for Python 3.7 and above.
    if _C_for_compiled_check.__file__ is None:
        raise ImportError(textwrap.dedent('''
            Failed to load PyTorch C extensions:
                It appears that PyTorch has loaded the `torch/_C` folder
                of the PyTorch repository rather than the C extensions which
                are expected in the `torch._C` namespace. This can occur when
                using the `install` workflow. e.g.
                    $ python setup.py install && python -c "import torch"

                This error can generally be solved using the `develop` workflow
                    $ python setup.py develop && python -c "import torch"  # This should succeed
                or by running Python from a different directory.
            ''').strip()) from None
    raise  # If __file__ is not None the cause is unknown, so just re-raise.

for name in dir(_C):
    if name[0] != '_' and not name.endswith('Base'):
        __all__.append(name)
        obj = getattr(_C, name)
        if (isinstance(obj, Callable) or inspect.isclass(obj)):  # type: ignore[arg-type]
            if (obj.__module__ != 'torch'):
                # TODO: fix their module from C++ side
                if name not in ['DisableTorchFunctionSubclass', 'DisableTorchFunction', 'Generator']:
                    obj.__module__ = 'torch'

if not TYPE_CHECKING:
    # issue 38137 and python issue 43367. Submodules of a C extension are
    # non-standard, and attributes of those submodules cannot be pickled since
    # pickle expects to be able to import them as "from _C.sub import attr",
    # which fails with "_C is not a package"
    for attr in dir(_C):
        candidate = getattr(_C, attr)
        if type(candidate) is type(_C):
            # submodule
            if f'torch._C.{attr}' not in sys.modules:
                sys.modules[f'torch._C.{attr}'] = candidate


################################################################################
# Define basic utilities
################################################################################


def typename(o):
    if isinstance(o, torch.Tensor):
        return o.type()

    module = ''
    class_name = ''
    if hasattr(o, '__module__') and o.__module__ != 'builtins' \
            and o.__module__ != '__builtin__' and o.__module__ is not None:
        module = o.__module__ + '.'

    if hasattr(o, '__qualname__'):
        class_name = o.__qualname__
    elif hasattr(o, '__name__'):
        class_name = o.__name__
    else:
        class_name = o.__class__.__name__

    return module + class_name
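
# Hedged sketch (hypothetical helper, not part of the original file): typename
# reports the legacy type string for tensors and module-qualified names for
# everything else.
def _typename_demo():
    import torch
    assert torch.typename(torch.tensor([1, 2])) == 'torch.LongTensor'
    assert torch.typename(torch.nn.Linear) == 'torch.nn.modules.linear.Linear'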
def is_tensor(obj):
    r"""Returns True if `obj` is a PyTorch tensor.

    Note that this function is simply doing ``isinstance(obj, Tensor)``.
    Using that ``isinstance`` check is better for typechecking with mypy,
    and more explicit - so it's recommended to use that instead of
    ``is_tensor``.

    Args:
        obj (Object): Object to test
    Example::

        >>> x = torch.tensor([1, 2, 3])
        >>> torch.is_tensor(x)
        True

    """
    return isinstance(obj, torch.Tensor)
def is_storage(obj):
    r"""Returns True if `obj` is a PyTorch storage object.

    Args:
        obj (Object): Object to test
    """
    return type(obj) in _storage_classes
_GLOBAL_DEVICE_CONTEXT = None
def set_default_device(device):
    """Sets the default ``torch.Tensor`` to be allocated on ``device``.  This
    does not affect factory function calls which are called with an explicit
    ``device`` argument.  Factory calls will be performed as if they
    were passed ``device`` as an argument.

    To only temporarily change the default device instead of setting it
    globally, use ``with torch.device(device):`` instead.

    The default device is initially ``cpu``.  If you set the default tensor
    device to another device (e.g., ``cuda``) without a device index, tensors
    will be allocated on whatever the current device for the device type is,
    even after :func:`torch.cuda.set_device` is called.

    .. warning::

        This function imposes a slight performance cost on every Python
        call to the torch API (not just factory functions).  If this
        is causing problems for you, please comment on
        https://github.com/pytorch/pytorch/issues/92701

    Args:
        device (device or string): the device to set as default

    Example::

        >>> # xdoctest: +SKIP("requires cuda, changes global state")
        >>> torch.tensor([1.2, 3]).device
        device(type='cpu')
        >>> torch.set_default_device('cuda')  # current device is 0
        >>> torch.tensor([1.2, 3]).device
        device(type='cuda', index=0)
        >>> torch.set_default_device('cuda:1')
        >>> torch.tensor([1.2, 3]).device
        device(type='cuda', index=1)

    """
    global _GLOBAL_DEVICE_CONTEXT
    if _GLOBAL_DEVICE_CONTEXT is not None:
        _GLOBAL_DEVICE_CONTEXT.__exit__(None, None, None)
    if device is None:
        _GLOBAL_DEVICE_CONTEXT = None
        return
    from torch.utils._device import DeviceContext
    _GLOBAL_DEVICE_CONTEXT = DeviceContext(device)
    _GLOBAL_DEVICE_CONTEXT.__enter__()
def set_default_tensor_type(t):
    r"""Sets the default ``torch.Tensor`` type to floating point tensor type
    ``t``. This type will also be used as default floating point type for
    type inference in :func:`torch.tensor`.

    The default floating point tensor type is initially ``torch.FloatTensor``.

    Args:
        t (type or string): the floating point tensor type or its name

    Example::

        >>> # xdoctest: +SKIP("Other tests may have changed the default type. Can we reset it?")
        >>> torch.tensor([1.2, 3]).dtype    # initial default for floating point is torch.float32
        torch.float32
        >>> torch.set_default_tensor_type(torch.DoubleTensor)
        >>> torch.tensor([1.2, 3]).dtype    # a new floating point tensor
        torch.float64

    """
    if isinstance(t, str):
        t = _import_dotted_name(t)
    _C._set_default_tensor_type(t)
def set_default_dtype(d):
    r"""
    Sets the default floating point dtype to :attr:`d`. Supports torch.float32
    and torch.float64 as inputs. Other dtypes may be accepted without complaint
    but are not supported and are unlikely to work as expected.

    When PyTorch is initialized its default floating point dtype is torch.float32,
    and the intent of set_default_dtype(torch.float64) is to facilitate NumPy-like
    type inference. The default floating point dtype is used to:

    1. Implicitly determine the default complex dtype. When the default floating
       point type is float32 the default complex dtype is complex64, and when the
       default floating point type is float64 the default complex type is complex128.
    2. Infer the dtype for tensors constructed using Python floats or complex Python
       numbers. See examples below.
    3. Determine the result of type promotion between bool and integer tensors and
       Python floats and complex Python numbers.

    Args:
        d (:class:`torch.dtype`): the floating point dtype to make the default.
                                  Either torch.float32 or torch.float64.

    Example:
        >>> # xdoctest: +SKIP("Other tests may have changed the default type. Can we reset it?")
        >>> # initial default for floating point is torch.float32
        >>> # Python floats are interpreted as float32
        >>> torch.tensor([1.2, 3]).dtype
        torch.float32
        >>> # initial default for floating point is torch.complex64
        >>> # Complex Python numbers are interpreted as complex64
        >>> torch.tensor([1.2, 3j]).dtype
        torch.complex64

        >>> torch.set_default_dtype(torch.float64)

        >>> # Python floats are now interpreted as float64
        >>> torch.tensor([1.2, 3]).dtype    # a new floating point tensor
        torch.float64
        >>> # Complex Python numbers are now interpreted as complex128
        >>> torch.tensor([1.2, 3j]).dtype   # a new complex tensor
        torch.complex128

    """
    _C._set_default_dtype(d)
def use_deterministic_algorithms(mode, *, warn_only=False):
    r""" Sets whether PyTorch operations must use "deterministic"
    algorithms. That is, algorithms which, given the same input, and when
    run on the same software and hardware, always produce the same output.
    When enabled, operations will use deterministic algorithms when available,
    and if only nondeterministic algorithms are available they will throw a
    :class:`RuntimeError` when called.

    .. note:: This setting alone is not always enough to make an application
        reproducible. Refer to :ref:`reproducibility` for more information.

    .. note:: :func:`torch.set_deterministic_debug_mode` offers an alternative
        interface for this feature.

    The following normally-nondeterministic operations will act
    deterministically when ``mode=True``:

        * :class:`torch.nn.Conv1d` when called on CUDA tensor
        * :class:`torch.nn.Conv2d` when called on CUDA tensor
        * :class:`torch.nn.Conv3d` when called on CUDA tensor
        * :class:`torch.nn.ConvTranspose1d` when called on CUDA tensor
        * :class:`torch.nn.ConvTranspose2d` when called on CUDA tensor
        * :class:`torch.nn.ConvTranspose3d` when called on CUDA tensor
        * :func:`torch.bmm` when called on sparse-dense CUDA tensors
        * :func:`torch.Tensor.__getitem__` when attempting to differentiate a CPU tensor
          and the index is a list of tensors
        * :func:`torch.Tensor.index_put` with ``accumulate=False``
        * :func:`torch.Tensor.index_put` with ``accumulate=True`` when called on a CPU
          tensor
        * :func:`torch.Tensor.put_` with ``accumulate=True`` when called on a CPU
          tensor
        * :func:`torch.Tensor.scatter_add_` when called on a CUDA tensor
        * :func:`torch.gather` when called on a CUDA tensor that requires grad
        * :func:`torch.index_add` when called on CUDA tensor
        * :func:`torch.index_select` when attempting to differentiate a CUDA tensor
        * :func:`torch.repeat_interleave` when attempting to differentiate a CUDA tensor
        * :func:`torch.Tensor.index_copy` when called on a CPU or CUDA tensor

    The following normally-nondeterministic operations will throw a
    :class:`RuntimeError` when ``mode=True``:

        * :class:`torch.nn.AvgPool3d` when attempting to differentiate a CUDA tensor
        * :class:`torch.nn.AdaptiveAvgPool2d` when attempting to differentiate a CUDA tensor
        * :class:`torch.nn.AdaptiveAvgPool3d` when attempting to differentiate a CUDA tensor
        * :class:`torch.nn.MaxPool3d` when attempting to differentiate a CUDA tensor
        * :class:`torch.nn.AdaptiveMaxPool2d` when attempting to differentiate a CUDA tensor
        * :class:`torch.nn.FractionalMaxPool2d` when attempting to differentiate a CUDA tensor
        * :class:`torch.nn.FractionalMaxPool3d` when attempting to differentiate a CUDA tensor
        * :class:`torch.nn.MaxUnpool1d`
        * :class:`torch.nn.MaxUnpool2d`
        * :class:`torch.nn.MaxUnpool3d`
        * :func:`torch.nn.functional.interpolate` when attempting to differentiate a CUDA tensor
          and one of the following modes is used:

          - ``linear``
          - ``bilinear``
          - ``bicubic``
          - ``trilinear``

        * :class:`torch.nn.ReflectionPad1d` when attempting to differentiate a CUDA tensor
        * :class:`torch.nn.ReflectionPad2d` when attempting to differentiate a CUDA tensor
        * :class:`torch.nn.ReflectionPad3d` when attempting to differentiate a CUDA tensor
        * :class:`torch.nn.ReplicationPad1d` when attempting to differentiate a CUDA tensor
        * :class:`torch.nn.ReplicationPad2d` when attempting to differentiate a CUDA tensor
        * :class:`torch.nn.ReplicationPad3d` when attempting to differentiate a CUDA tensor
        * :class:`torch.nn.NLLLoss` when called on a CUDA tensor
        * :class:`torch.nn.CTCLoss` when attempting to differentiate a
          CUDA tensor
        * :class:`torch.nn.EmbeddingBag` when attempting to differentiate a CUDA tensor
          when ``mode='max'``
        * :func:`torch.Tensor.put_` when ``accumulate=False``
        * :func:`torch.Tensor.put_` when ``accumulate=True`` and called on a CUDA tensor
        * :func:`torch.histc` when called on a CUDA tensor
        * :func:`torch.bincount` when called on a CUDA tensor
        * :func:`torch.kthvalue` when called on a CUDA tensor
        * :func:`torch.median` with indices output when called on a CUDA tensor
        * :func:`torch.nn.functional.grid_sample` when attempting to differentiate a CUDA tensor
        * :func:`torch.cumsum` when called on a CUDA tensor when dtype is floating point or complex

    A handful of CUDA operations are nondeterministic if the CUDA version is
    10.2 or greater, unless the environment variable ``CUBLAS_WORKSPACE_CONFIG=:4096:8``
    or ``CUBLAS_WORKSPACE_CONFIG=:16:8`` is set. See the CUDA documentation for more
    details: `<https://docs.nvidia.com/cuda/cublas/index.html#cublasApi_reproducibility>`_
    If one of these environment variable configurations is not set, a :class:`RuntimeError`
    will be raised from these operations when called with CUDA tensors:

        * :func:`torch.mm`
        * :func:`torch.mv`
        * :func:`torch.bmm`

    Note that deterministic operations tend to have worse performance than
    nondeterministic operations.

    .. note::

        This flag does not detect or prevent nondeterministic behavior caused
        by calling an inplace operation on a tensor with an internal memory overlap
        or by giving such a tensor as the :attr:`out` argument for an operation.
        In these cases, multiple writes of different data may target a single
        memory location, and the order of writes is not guaranteed.

    Args:
        mode (:class:`bool`): If True, makes potentially nondeterministic
            operations switch to a deterministic algorithm or throw a runtime
            error. If False, allows nondeterministic operations.

    Keyword args:
        warn_only (:class:`bool`, optional): If True, operations that do not
            have a deterministic implementation will throw a warning instead of
            an error. Default: ``False``

    Example::

        >>> # xdoctest: +SKIP
        >>> torch.use_deterministic_algorithms(True)

        # Forward mode nondeterministic error
        >>> torch.randn(10, device='cuda').kthvalue(0)
        ...
        RuntimeError: kthvalue CUDA does not have a deterministic implementation...

        # Backward mode nondeterministic error
        >>> torch.nn.AvgPool3d(1)(torch.randn(3, 4, 5, 6, requires_grad=True).cuda()).sum().backward()
        ...
        RuntimeError: avg_pool3d_backward_cuda does not have a deterministic implementation...
    """
    _C._set_deterministic_algorithms(mode, warn_only=warn_only)
def are_deterministic_algorithms_enabled():
    r"""Returns True if the global deterministic flag is turned on. Refer to
    :func:`torch.use_deterministic_algorithms` documentation for more details.
    """
    return _C._get_deterministic_algorithms()
def is_deterministic_algorithms_warn_only_enabled():
    r"""Returns True if the global deterministic flag is set to warn only.
    Refer to :func:`torch.use_deterministic_algorithms` documentation for more
    details.
    """
    return _C._get_deterministic_algorithms_warn_only()
def set_deterministic_debug_mode(debug_mode: Union[builtins.int, str]) -> None:
    r"""Sets the debug mode for deterministic operations.

    .. note:: This is an alternative interface for
        :func:`torch.use_deterministic_algorithms`. Refer to that function's
        documentation for details about affected operations.

    Args:
        debug_mode(str or int): If "default" or 0, don't error or warn on
            nondeterministic operations. If "warn" or 1, warn on
            nondeterministic operations. If "error" or 2, error on
            nondeterministic operations.
    """

    # NOTE: builtins.int is used here because int in this scope resolves
    # to torch.int
    if not isinstance(debug_mode, (builtins.int, str)):
        raise TypeError(f'debug_mode must be str or int, but got {type(debug_mode)}')

    if isinstance(debug_mode, str):
        if debug_mode == 'default':
            debug_mode = 0
        elif debug_mode == 'warn':
            debug_mode = 1
        elif debug_mode == 'error':
            debug_mode = 2
        else:
            raise RuntimeError(
                'invalid value of debug_mode, expected one of `default`, '
                f'`warn`, `error`, but got {debug_mode}')

    if debug_mode == 0:
        _C._set_deterministic_algorithms(False)
    elif debug_mode == 1:
        _C._set_deterministic_algorithms(True, warn_only=True)
    elif debug_mode == 2:
        _C._set_deterministic_algorithms(True)
    else:
        raise RuntimeError(
            'invalid value of debug_mode, expected 0, 1, or 2, '
            f'but got {debug_mode}')
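
# Hedged usage sketch (hypothetical helper, not part of the original file):
# the string and integer forms are interchangeable, and
# get_deterministic_debug_mode always reports the integer form.
def _deterministic_debug_mode_demo():
    import torch
    torch.set_deterministic_debug_mode("warn")
    assert torch.get_deterministic_debug_mode() == 1
    torch.set_deterministic_debug_mode(0)  # same as "default"
    assert torch.get_deterministic_debug_mode() == 0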
def get_deterministic_debug_mode() -> builtins.int:
    r"""Returns the current value of the debug mode for deterministic
    operations. Refer to :func:`torch.set_deterministic_debug_mode`
    documentation for more details.
    """

    if _C._get_deterministic_algorithms():
        if _C._get_deterministic_algorithms_warn_only():
            return 1
        else:
            return 2
    else:
        return 0
def get_float32_matmul_precision() -> builtins.str:
    r"""Returns the current value of float32 matrix multiplication precision.
    Refer to :func:`torch.set_float32_matmul_precision` documentation for more
    details.
    """
    return _C._get_float32_matmul_precision()
def set_float32_matmul_precision(precision):
    r"""Sets the internal precision of float32 matrix multiplications.

    Running float32 matrix multiplications in lower precision may significantly increase
    performance, and in some programs the loss of precision has a negligible impact.

    Supports three settings:

        * "highest", float32 matrix multiplications use the float32 datatype for
          internal computations.
        * "high", float32 matrix multiplications use the TensorFloat32 or bfloat16_3x
          datatypes for internal computations, if fast matrix multiplication algorithms
          using those datatypes internally are available.  Otherwise float32
          matrix multiplications are computed as if the precision is "highest".
        * "medium", float32 matrix multiplications use the bfloat16 datatype for
          internal computations, if a fast matrix multiplication algorithm using that
          datatype internally is available.  Otherwise float32 matrix multiplications
          are computed as if the precision is "high".

    .. note::

        This does not change the output dtype of float32 matrix multiplications,
        it controls how the internal computation of the matrix multiplication is performed.

    .. note::

        This does not change the precision of convolution operations.  Other flags, like
        `torch.backends.cudnn.allow_tf32`, may control the precision of convolution operations.

    .. note::

        This flag currently only affects one native device type: CUDA.
        If "high" or "medium" are set then the TensorFloat32 datatype will be used
        when computing float32 matrix multiplications, equivalent to setting
        `torch.backends.cuda.matmul.allow_tf32 = True`. When "highest" (the default)
        is set then the float32 datatype is used for internal computations, equivalent
        to setting `torch.backends.cuda.matmul.allow_tf32 = False`.

    Args:
        precision(str): can be set to "highest" (default), "high", or "medium" (see above).

    """
    _C._set_float32_matmul_precision(precision)
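
# Hedged usage sketch (hypothetical helper, not part of the original file):
# per the note above, "high"/"medium" enable TF32 matmuls on CUDA, which is
# observable through torch.backends.cuda.matmul.allow_tf32.
def _float32_matmul_precision_demo():
    import torch
    torch.set_float32_matmul_precision("high")
    assert torch.get_float32_matmul_precision() == "high"
    assert torch.backends.cuda.matmul.allow_tf32 is True
    torch.set_float32_matmul_precision("highest")  # restore the default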
def set_warn_always(b):
    r"""When this flag is False (default) then some PyTorch warnings may only
    appear once per process. This helps avoid excessive warning information.
    Setting it to True causes these warnings to always appear, which may be
    helpful when debugging.

    Args:
        b (:class:`bool`): If True, force warnings to always be emitted.
            If False, set to the default behaviour.
    """
    _C._set_warnAlways(b)
def is_warn_always_enabled():
    r"""Returns True if the global warn_always flag is turned on. Refer to
    :func:`torch.set_warn_always` documentation for more details.
    """
    return _C._get_warnAlways()
################################################################################
# Define numeric constants
################################################################################

# For Python Array API (https://data-apis.org/array-api/latest/API_specification/constants.html) and
# NumPy consistency (https://numpy.org/devdocs/reference/constants.html)
from math import e, nan, inf, pi
__all__.extend(['e', 'pi', 'nan', 'inf'])

################################################################################
# Define Storage and Tensor classes
################################################################################

from ._tensor import Tensor
from .storage import _StorageBase, TypedStorage, _LegacyStorage, UntypedStorage, _warn_typed_storage_removal

# NOTE: New <type>Storage classes should never be added. When adding a new
# dtype, use torch.storage.TypedStorage directly.
_storage_classes = {
    UntypedStorage, DoubleStorage, FloatStorage, LongStorage, IntStorage,
    ShortStorage, CharStorage, ByteStorage, HalfStorage, BoolStorage,
    QUInt8Storage, QInt8Storage, QInt32Storage, BFloat16Storage,
    ComplexFloatStorage, ComplexDoubleStorage, QUInt4x2Storage, QUInt2x4Storage,
    TypedStorage
}

# The _tensor_classes set is initialized by the call to _C._initialize_tensor_type_bindings()
_tensor_classes: Set[Type] = set()

# If you edit these imports, please update torch/__init__.py.in as well
from .random import set_rng_state, get_rng_state, manual_seed, initial_seed, seed
from .serialization import save, load
from ._tensor_str import set_printoptions

################################################################################
# Initialize extension
################################################################################


def manager_path():
    if sys.executable == 'torch_deploy' or platform.system() == 'Windows':
        return b""
    path = get_file_path('torch', 'bin', 'torch_shm_manager')
    prepare_multiprocessing_environment(get_file_path('torch'))
    if not os.path.exists(path):
        raise RuntimeError("Unable to find torch_shm_manager at " + path)
    return path.encode('utf-8')


from torch.amp import autocast

# Initializing the extension shadows the built-in python float / int classes;
# store them for later use by SymInt / SymFloat.
py_float = float
py_int = int

# Shared memory manager needs to know the exact location of manager executable
_C._initExtension(manager_path())

del manager_path

# Appease the type checker: it can't deal with direct setting of globals().
# Note that we will see "too many" functions when reexporting this way; there
# is not a good way to fix this problem.  Perhaps, try to redesign VariableFunctions
# so that this import is good enough
if TYPE_CHECKING:
    # Some type signatures pulled in from _VariableFunctions here clash with
    # signatures already imported. For now these clashes are ignored; see
    # PR #43339 for details.
    from torch._C._VariableFunctions import *  # type: ignore[misc] # noqa: F403
    # Fixup segment_reduce visibility
    _segment_reduce = segment_reduce
    del segment_reduce

# Ops not to be exposed in `torch` namespace,
# mostly helper ops.
PRIVATE_OPS = (
    'unique_dim',
)

for name in dir(_C._VariableFunctions):
    if name.startswith('__') or name in PRIVATE_OPS:
        continue
    obj = getattr(_C._VariableFunctions, name)
    obj.__module__ = 'torch'
    # Hide some APIs that should not be public
    if name == "segment_reduce":
        # TODO: Once the undocumented FC window is passed, remove the line below
        globals()[name] = obj
        name = "_" + name
    globals()[name] = obj
    if not name.startswith("_"):
        __all__.append(name)


################################################################################
# Import interface functions defined in Python
################################################################################

# needs to be after the above ATen bindings so we can overwrite from Python side
from .functional import *  # noqa: F403


################################################################################
# Remove unnecessary members
################################################################################

del _StorageBase
del _LegacyStorage

################################################################################
# Define _assert
################################################################################

# needs to be before the submodule imports to avoid circular dependencies
def _assert(condition, message):
    r"""A wrapper around Python's assert which is symbolically traceable.
    """
    from .overrides import has_torch_function, handle_torch_function

    if type(condition) is not torch.Tensor and has_torch_function((condition,)):
        return handle_torch_function(_assert, (condition,), condition, message)
    assert condition, message
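
# Hedged sketch (hypothetical helper, not part of the original file): unlike
# the `assert` statement, torch._assert is a plain function call, so tracers
# such as torch.fx can intercept it via __torch_function__ and preserve the
# check in the traced graph.
def _assert_demo():
    import torch

    def f(x):
        torch._assert(x.shape[0] == 2, "expected a leading dimension of 2")
        return x * 2

    f(torch.ones(2, 3))                  # passes for concrete inputs
    traced = torch.fx.symbolic_trace(f)  # the assert is recorded, not dropped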
################################################################################
# Import most common subpackages
################################################################################

# Use the redundant form so that type checkers know that these are a part of
# the public API. The "regular" import lines are there solely for the runtime
# side effect of adding to the imported module's members for other users.
from torch import cuda as cuda
from torch import cpu as cpu
from torch import autograd as autograd
from torch.autograd import (
    no_grad as no_grad,
    enable_grad as enable_grad,
    set_grad_enabled as set_grad_enabled,
    inference_mode as inference_mode,
)
from torch import fft as fft
from torch import futures as futures
from torch import _awaits as _awaits
from torch import nested as nested
from torch import nn as nn
from torch.signal import windows as windows
from torch import optim as optim
import torch.optim._multi_tensor
from torch import multiprocessing as multiprocessing
from torch import sparse as sparse
from torch import special as special
import torch.utils.backcompat
from torch import onnx as onnx
from torch import jit as jit
from torch import linalg as linalg
from torch import hub as hub
from torch import random as random
from torch import distributions as distributions
from torch import testing as testing
import torch.backends.cuda
import torch.backends.mps
import torch.backends.cudnn
import torch.backends.mkl
import torch.backends.mkldnn
import torch.backends.openmp
import torch.backends.quantized
import torch.utils.data
from torch import __config__ as __config__
from torch import __future__ as __future__
from torch import profiler as profiler

# Quantized, sparse, AO, etc. should be last to get imported, as nothing
# is expected to depend on them.
from torch import ao as ao
# nn.quant* depends on ao -- so should be after those.
import torch.nn.quantizable
import torch.nn.quantized
import torch.nn.qat
import torch.nn.intrinsic

_C._init_names(list(torch._storage_classes))

# attach docstrings to torch and tensor functions
from . import _torch_docs, _tensor_docs, _storage_docs
del _torch_docs, _tensor_docs, _storage_docs
def compiled_with_cxx11_abi():
    r"""Returns whether PyTorch was built with _GLIBCXX_USE_CXX11_ABI=1"""
    return _C._GLIBCXX_USE_CXX11_ABI
# Import the ops "namespace"fromtorch._opsimportopsfromtorch._classesimportclasses# quantization depends on torch.fx# Import quantizationfromtorchimportquantizationasquantization# Import the quasi random samplerfromtorchimportquasirandomasquasirandom# If you are seeing this, it means that this call site was not checked if# the memory format could be preserved, and it was switched to old default# behaviour of contiguouslegacy_contiguous_format=contiguous_format# Register fork handler to initialize OpenMP in child processes (see gh-28389)fromtorch.multiprocessing._atforkimportregister_after_forkregister_after_fork(torch.get_num_threads)delregister_after_fork# Import tools that require fully imported torch (for applying# torch.jit.script as a decorator, for instance):from._lobpcgimportlobpcgaslobpcg# These were previously defined in native_functions.yaml and appeared on the# `torch` namespace, but we moved them to c10 dispatch to facilitate custom# class usage. We add these lines here to preserve backward compatibility.quantized_lstm=torch.ops.aten.quantized_lstmquantized_gru=torch.ops.aten.quantized_grufromtorch.utils.dlpackimportfrom_dlpack,to_dlpack# Import experimental masked operations support. See# [RFC-0016](https://github.com/pytorch/rfcs/pull/27) for more# information.from.importmasked# Import removed ops with error message about removalfrom._linalg_utilsimport(# type: ignore[misc]matrix_rank,eig,solve,lstsq,)from._linalg_utilsimport_symeigassymeig# type: ignore[misc]class_TorchCompileInductorWrapper:compiler_name="inductor"def__init__(self,mode,options,dynamic):self.config=dict()self.dynamic=dynamicself.apply_mode(mode)self.apply_options(options)ifdynamic:# cudagraphs conflicts with dynamic shapesself.config["triton.cudagraphs"]=Falseassert"triton.cudagraphs"notin(optionsor()),"triton.cudagraphs does not support dynamic shapes"def__eq__(self,other):return(isinstance(other,_TorchCompileInductorWrapper)andself.config==other.configandself.dynamic==other.dynamic)defapply_mode(self,mode:Optional[str]):ifmodeisNoneormode=="default":passelifmode=="reduce-overhead":self.apply_options({"triton.cudagraphs":True,"size_asserts":False,})elifmode=="max-autotune":self.apply_options({"epilogue_fusion":True,"max_autotune":True,"triton.cudagraphs":True,})else:raiseRuntimeError(f"Unrecognized mode={mode}, should be one of: default, reduce-overhead, max-autotune")defapply_options(self,options:Optional[Dict[str,Any]]):ifnotoptions:returnfromtorch._inductorimportconfigcurrent_config:Dict[str,Any]=config.to_dict()# type: ignore[attr-defined]forkey,valinoptions.items():attr_name=key.replace("-","_")ifattr_namenotincurrent_config:raiseRuntimeError(f"Unexpected optimization option {key}, known options are {list(current_config.keys())}")iftype(val)isnottype(current_config[attr_name]):val_type_str=type(val).__name__expected_type_str=type(current_config[attr_name]).__name__raiseRuntimeError(f"Unexpected type of attr {key}, got {val_type_str} should be {expected_type_str}")self.config[attr_name]=valdef__call__(self,model_,inputs_):fromtorch._inductor.compile_fximportcompile_fxreturncompile_fx(model_,inputs_,config_patches=self.config)
def compile(model: Optional[Callable] = None, *,
            fullgraph: builtins.bool = False,
            dynamic: builtins.bool = False,
            backend: Union[str, Callable] = "inductor",
            mode: Union[str, None] = None,
            options: Optional[Dict[str, Union[str, builtins.int, builtins.bool]]] = None,
            disable: builtins.bool = False) -> Callable:
    """
    Optimizes given model/function using TorchDynamo and specified backend.

    Args:
       model (Callable): Module/function to optimize
       fullgraph (bool): Whether it is ok to break model into several subgraphs
       dynamic (bool): Use dynamic shape tracing
       backend (str or Callable): backend to be used
       mode (str): Can be either "default", "reduce-overhead" or "max-autotune"
       options (dict): A dictionary of options to pass to the backend.
       disable (bool): Turn torch.compile() into a no-op for testing

    Example::

        @torch.compile(options={"matmul-padding": True}, fullgraph=True)
        def foo(x):
            return torch.sin(x) + torch.cos(x)

    """
    _C._log_api_usage_once("torch.compile")
    # Decorator mode
    if model is None:
        def fn(model: Callable):
            if model is None:
                raise RuntimeError("Model can't be None")
            return compile(model,
                           fullgraph=fullgraph,
                           dynamic=dynamic,
                           backend=backend,
                           mode=mode,
                           options=options,
                           disable=disable)
        return fn

    import torch._dynamo
    if mode is not None and options is not None:
        raise RuntimeError("Either mode or options can be specified, but both can't be specified at the same time.")
    if mode is None and options is None:
        mode = "default"
    if backend == "inductor":
        backend = _TorchCompileInductorWrapper(mode, options, dynamic)
    return torch._dynamo.optimize(backend=backend, nopython=fullgraph, dynamic=dynamic, disable=disable)(model)
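
# Hedged usage sketch (hypothetical helper, not part of the original file):
# torch.compile can be applied directly to a callable, or, because passing
# model=None returns a decorator (see the "Decorator mode" branch above),
# used as @torch.compile(...).
def _compile_usage_demo():
    import torch

    def pointwise(x):
        return torch.sin(x) + torch.cos(x)

    # Direct form: returns an optimized callable without running it.
    compiled = torch.compile(pointwise, mode="reduce-overhead")

    # Decorator form: equivalent, via the model=None path.
    @torch.compile(fullgraph=True)
    def decorated(x):
        return pointwise(x)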
def _register_device_module(device_type, module):
    r"""Register an external runtime module of the specific :attr:`device_type`
    supported by torch.

    After the :attr:`module` is registered correctly, the user can refer to
    the external runtime module as part of torch with attribute torch.xxx.
    """
    # Make sure the device_type represents a supported device type for torch.
    device_type = torch.device(device_type).type
    m = sys.modules[__name__]
    if hasattr(m, device_type):
        raise RuntimeError("The runtime module of '{}' has already "
                           "been registered with '{}'".format(device_type, getattr(m, device_type)))
    setattr(m, device_type, module)
    torch_module_name = '.'.join([__name__, device_type])
    sys.modules[torch_module_name] = module

# expose return_types
from . import return_types
from . import library
if not TYPE_CHECKING:
    from . import _meta_registrations

# Enable CUDA Sanitizer
if 'TORCH_CUDA_SANITIZER' in os.environ:
    import torch.cuda._sanitizer as csan

    csan.enable_cuda_sanitizer()

# Populate magic methods on SymInt and SymFloat
import torch.fx.experimental.symbolic_shapes

from torch import func as func
from torch.func import vmap


# The function _sparse_coo_tensor_unsafe is removed from PyTorch
# Python API (v. 1.13), here we temporarily provide its replacement
# with a deprecation warning.
# TODO: remove the function for PyTorch v 1.15.
def _sparse_coo_tensor_unsafe(*args, **kwargs):
    import warnings
    warnings.warn('torch._sparse_coo_tensor_unsafe is deprecated, '
                  'use torch.sparse_coo_tensor(..., check_invariants=False) instead.')
    kwargs['check_invariants'] = False
    return torch.sparse_coo_tensor(*args, **kwargs)
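
# Hedged sketch of _register_device_module usage (illustrative comments only;
# `my_backend_module` is hypothetical, and the rename step is an assumption
# about how out-of-tree backends claim the reserved privateuse1 device slot):
#
#     torch.utils.rename_privateuse1_backend("my_device")
#     torch._register_device_module("my_device", my_backend_module)
#     # my_backend_module is now reachable as torch.my_device, mirroring
#     # how torch.cuda exposes the CUDA runtime.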