# @generated from torch/_C/_VariableFunctions.pyi.in
# mypy: disable-error-code="type-arg"
import builtins
from typing import (
    Any,
    Callable,
    ContextManager,
    Iterator,
    List,
    Literal,
    NamedTuple,
    Optional,
    overload,
    Sequence,
    Tuple,
    TypeVar,
    Union,
)
import torch
from torch import contiguous_format, Generator, inf, memory_format, strided, SymInt, Tensor
from torch.types import (
    _bool,
    _complex,
    _device,
    _dtype,
    _float,
    _int,
    _layout,
    _qscheme,
    _size,
    Device,
    Number,
)
from torch._prims_common import DeviceLikeType
@overload
def __and__(input: Tensor, other: Tensor) -> Tensor: ...
@overload
def __and__(input: Tensor, other: Union[Number, _complex]) -> Tensor: ...
@overload
def __lshift__(input: Tensor, other: Tensor) -> Tensor: ...
@overload
def __lshift__(input: Tensor, other: Union[Number, _complex]) -> Tensor: ...
@overload
def __or__(input: Tensor, other: Tensor) -> Tensor: ...
@overload
def __or__(input: Tensor, other: Union[Number, _complex]) -> Tensor: ...
@overload
def __rshift__(input: Tensor, other: Tensor) -> Tensor: ...
@overload
def __rshift__(input: Tensor, other: Union[Number, _complex]) -> Tensor: ...
@overload
def __xor__(input: Tensor, other: Tensor) -> Tensor: ...
@overload
def __xor__(input: Tensor, other: Union[Number, _complex]) -> Tensor: ...
def _adaptive_avg_pool2d(input: Tensor, output_size: Union[Union[_int, SymInt], Sequence[Union[_int, SymInt]]]) -> Tensor: ...
def _adaptive_avg_pool3d(input: Tensor, output_size: Union[Union[_int, SymInt], Sequence[Union[_int, SymInt]]]) -> Tensor: ...
def _add_batch_dim(input: Tensor, batch_dim: _int, level: _int) -> Tensor: ...
@overload
def _add_relu(input: Tensor, other: Tensor, *, alpha: Union[Number, _complex] = 1, out: Optional[Tensor] = None) -> Tensor: ...
@overload
def _add_relu(input: Tensor, other: Union[Number, _complex], alpha: Union[Number, _complex] = 1) -> Tensor: ...
@overload
def _add_relu_(input: Tensor, other: Tensor, *, alpha: Union[Number, _complex] = 1) -> Tensor: ...
@overload
def _add_relu_(input: Tensor, other: Union[Number, _complex], alpha: Union[Number, _complex] = 1) -> Tensor: ...
def _addmm_activation(input: Tensor, mat1: Tensor, mat2: Tensor, *, beta: Union[Number, _complex] = 1, alpha: Union[Number, _complex] = 1, use_gelu: _bool = False, out: Optional[Tensor] = None) -> Tensor: ...
@overload
def _aminmax(input: Tensor) -> Tuple[Tensor, Tensor]: ...
@overload
def _aminmax(input: Tensor, dim: _int, keepdim: _bool = False) -> Tuple[Tensor, Tensor]: ...
def _amp_foreach_non_finite_check_and_unscale_(self: Union[Tuple[Tensor, ...], List[Tensor]], found_inf: Tensor, inv_scale: Tensor) -> None: ...
def _amp_update_scale_(input: Tensor, growth_tracker: Tensor, found_inf: Tensor, scale_growth_factor: _float, scale_backoff_factor: _float, growth_interval: _int) -> Tensor: ...
@overload
def _assert_async(input: Tensor) -> None:
    r"""
    _assert_async(tensor) -> void

    Asynchronously assert that the contents of tensor are nonzero. For CPU tensors,
    this is equivalent to ``assert tensor`` or ``assert tensor.is_nonzero()``; for
    CUDA tensors, we DO NOT synchronize and you may only find out the assertion
    failed at a later CUDA kernel launch. Asynchronous assertion can be helpful for
    testing invariants in CUDA tensors without giving up performance. This function
    is NOT intended to be used for regular error checking, as it will trash your
    CUDA context if the assert fails (forcing you to restart your PyTorch process).

    Args:
        tensor (Tensor): a one element tensor to test to see if it is nonzero. Zero
            elements (including False for boolean tensors) cause an assertion failure
            to be raised.
    """
    ...
@overload
def _assert_async(input: Tensor, assert_msg: str) -> None:
    r"""
    _assert_async(tensor, assert_msg) -> void

    Asynchronously assert that the contents of tensor are nonzero, reporting
    ``assert_msg`` on failure. For CPU tensors, this is equivalent to
    ``assert tensor`` or ``assert tensor.is_nonzero()``; for CUDA tensors, we DO NOT
    synchronize and you may only find out the assertion failed at a later CUDA
    kernel launch. Asynchronous assertion can be helpful for testing invariants in
    CUDA tensors without giving up performance. This function is NOT intended to be
    used for regular error checking, as it will trash your CUDA context if the
    assert fails (forcing you to restart your PyTorch process).

    Args:
        tensor (Tensor): a one element tensor to test to see if it is nonzero. Zero
            elements (including False for boolean tensors) cause an assertion failure
            to be raised.
        assert_msg (str): the message to report when the assertion fails.
    """
    ...
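# Illustrative usage sketch for ``_assert_async`` (a comment added for
# exposition; not part of the generated stub). On CPU the check raises
# immediately; on CUDA the failure may only surface at a later kernel launch:
#
#     >>> t = torch.ones(1)
#     >>> torch._assert_async(t)                       # nonzero: passes
#     >>> torch._assert_async(t, "t must be nonzero")  # overload with a message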
def _assert_scalar(self: Union[Number, _complex], assert_msg: str) -> None: ...
def _assert_tensor_metadata(a: Tensor, size: Optional[Sequence[Union[_int, SymInt]]] = None, stride: Optional[Sequence[Union[_int, SymInt]]] = None, dtype: Optional[_dtype] = None) -> None: ...
def _batch_norm_impl_index(input: Tensor, weight: Optional[Tensor], bias: Optional[Tensor], running_mean: Optional[Tensor], running_var: Optional[Tensor], training: _bool, momentum: _float, eps: _float, cudnn_enabled: _bool) -> Tuple[Tensor, Tensor, Tensor, Tensor, _int]: ...
def _cast_Byte(input: Tensor, non_blocking: _bool = False) -> Tensor: ...
def _cast_Char(input: Tensor, non_blocking: _bool = False) -> Tensor: ...
def _cast_Double(input: Tensor, non_blocking: _bool = False) -> Tensor: ...
def _cast_Float(input: Tensor, non_blocking: _bool = False) -> Tensor: ...
def _cast_Half(input: Tensor, non_blocking: _bool = False) -> Tensor: ...
def _cast_Int(input: Tensor, non_blocking: _bool = False) -> Tensor: ...
def _cast_Long(input: Tensor, non_blocking: _bool = False) -> Tensor: ...
def _cast_Short(input: Tensor, non_blocking: _bool = False) -> Tensor: ...
def _choose_qparams_per_tensor(input: Tensor, reduce_range: _bool = False) -> Tuple[_float, _int]: ...
def _chunk_cat(tensors: Union[Tuple[Tensor, ...], List[Tensor]], dim: _int, num_chunks: _int, *, out: Optional[Tensor] = None) -> Tensor: ...
def _coalesce(input: Tensor) -> Tensor: ...
def _compute_linear_combination(input: Tensor, coefficients: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ...
def _conj(input: Tensor) -> Tensor: ...
def _conj_copy(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ...
def _conj_physical(input: Tensor) -> Tensor: ...
def _convert_indices_from_coo_to_csr(input: Tensor, size: _int, *, out_int32: _bool = False, out: Optional[Tensor] = None) -> Tensor: ...
def _convert_indices_from_csr_to_coo(crow_indices: Tensor, col_indices: Tensor, *, out_int32: _bool = False, transpose: _bool = False, out: Optional[Tensor] = None) -> Tensor: ...
def _convert_weight_to_int4pack(input: Tensor, innerKTiles: _int) -> Tensor: ...
@overload
def _convolution(input: Tensor, weight: Tensor, bias: Optional[Tensor], stride: Sequence[Union[_int, SymInt]], padding: Sequence[Union[_int, SymInt]], dilation: Sequence[Union[_int, SymInt]], transposed: _bool, output_padding: _size, groups: Union[_int, SymInt], benchmark: _bool, deterministic: _bool, cudnn_enabled: _bool) -> Tensor: ...
@overload
def _convolution(input: Tensor, weight: Tensor, bias: Optional[Tensor], stride: Sequence[Union[_int, SymInt]], padding: Sequence[Union[_int, SymInt]], dilation: Sequence[Union[_int, SymInt]], transposed: _bool, output_padding: Sequence[Union[_int, SymInt]], groups: Union[_int, SymInt], benchmark: _bool, deterministic: _bool, cudnn_enabled: _bool, allow_tf32: _bool) -> Tensor: ...
def _convolution_mode(input: Tensor, weight: Tensor, bias: Optional[Tensor], stride: Sequence[Union[_int, SymInt]], padding: str, dilation: Sequence[Union[_int, SymInt]], groups: Union[_int, SymInt]) -> Tensor: ...
def _copy_from(input: Tensor, dst: Tensor, non_blocking: _bool = False) -> Tensor: ...
def _copy_from_and_resize(input: Tensor, dst: Tensor) -> Tensor: ...
def _cslt_compress(input: Tensor) -> Tensor: ...
def _cslt_sparse_mm(compressed_A: Tensor, dense_B: Tensor, bias: Optional[Tensor] = None, alpha: Optional[Tensor] = None, out_dtype: Optional[_dtype] = None, transpose_result: _bool = False, alg_id: _int = 0) -> Tensor: ...
def _cslt_sparse_mm_search(compressed_A: Tensor, dense_B: Tensor, bias: Optional[Tensor] = None, alpha: Optional[Tensor] = None, out_dtype: Optional[_dtype] = None, transpose_result: _bool = False) -> _int: ...
@overload
def _ctc_loss(log_probs: Tensor, targets: Tensor, input_lengths: _size, target_lengths: _size, blank: _int = 0, zero_infinity: _bool = False) -> Tuple[Tensor, Tensor]: ...
@overload
def _ctc_loss(log_probs: Tensor, targets: Tensor, input_lengths: Tensor, target_lengths: Tensor, blank: _int = 0, zero_infinity: _bool = False) -> Tuple[Tensor, Tensor]: ...
@overload
def _cudnn_ctc_loss(log_probs: Tensor, targets: Tensor, input_lengths: _size, target_lengths: _size, blank: _int, deterministic: _bool, zero_infinity: _bool) -> Tuple[Tensor, Tensor]: ...
@overload
def _cudnn_ctc_loss(log_probs: Tensor, targets: Tensor, input_lengths: Tensor, target_lengths: Tensor, blank: _int, deterministic: _bool, zero_infinity: _bool) -> Tuple[Tensor, Tensor]: ...
def _cudnn_init_dropout_state(dropout: _float, train: _bool, dropout_seed: _int, *, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: ...
def _cudnn_rnn(input: Tensor, weight: Union[Tuple[Tensor, ...], List[Tensor]], weight_stride0: _int, weight_buf: Optional[Tensor], hx: Tensor, cx: Optional[Tensor], mode: _int, hidden_size: Union[_int, SymInt], proj_size: Union[_int, SymInt], num_layers: _int, batch_first: _bool, dropout: _float, train: _bool, bidirectional: _bool, batch_sizes: Sequence[Union[_int, SymInt]], dropout_state: Optional[Tensor]) -> Tuple[Tensor, Tensor, Tensor, Tensor, Tensor]: ...
def _cudnn_rnn_flatten_weight(weight_arr: Union[Tuple[Tensor, ...], List[Tensor]], weight_stride0: _int, input_size: Union[_int, SymInt], mode: _int, hidden_size: Union[_int, SymInt], proj_size: Union[_int, SymInt], num_layers: _int, batch_first: _bool, bidirectional: _bool) -> Tensor: ...
def _cufft_clear_plan_cache(device_index: _int) -> None: ...
def _cufft_get_plan_cache_max_size(device_index: _int) -> _int: ...
def _cufft_get_plan_cache_size(device_index: _int) -> _int: ...
def _cufft_set_plan_cache_max_size(device_index: _int, max_size: _int) -> None: ...
def _cummax_helper(input: Tensor, values: Tensor, indices: Tensor, dim: _int) -> None: ...
def _cummin_helper(input: Tensor, values: Tensor, indices: Tensor, dim: _int) -> None: ...
def _debug_has_internal_overlap(input: Tensor) -> _int: ...
def _dim_arange(like: Tensor, dim: _int) -> Tensor: ...
def _dirichlet_grad(x: Tensor, alpha: Tensor, total: Tensor) -> Tensor: ...
def _disable_functionalization(): ...
@overload
def _efficientzerotensor(size: Sequence[Union[_int, SymInt]], *, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: ...
@overload
def _efficientzerotensor(*size: _int, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: ...
def _embedding_bag(weight: Tensor, indices: Tensor, offsets: Tensor, scale_grad_by_freq: _bool = False, mode: _int = 0, sparse: _bool = False, per_sample_weights: Optional[Tensor] = None, include_last_offset: _bool = False, padding_idx: _int = -1) -> Tuple[Tensor, Tensor, Tensor, Tensor]: ...
def _embedding_bag_forward_only(weight: Tensor, indices: Tensor, offsets: Tensor, scale_grad_by_freq: _bool = False, mode: _int = 0, sparse: _bool = False, per_sample_weights: Optional[Tensor] = None, include_last_offset: _bool = False, padding_idx: _int = -1) -> Tuple[Tensor, Tensor, Tensor, Tensor]: ...
@overload
def _empty_affine_quantized(size: Sequence[Union[_int, SymInt]], *, scale: _float = 1, zero_point: _int = 0, memory_format: Optional[memory_format] = contiguous_format, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: ...
@overload
def _empty_affine_quantized(*size: _int, scale: _float = 1, zero_point: _int = 0, memory_format: Optional[memory_format] = contiguous_format, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: ...
@overload
def _empty_per_channel_affine_quantized(size: Sequence[Union[_int, SymInt]], *, scales: Tensor, zero_points: Tensor, axis: _int, memory_format: Optional[memory_format] = contiguous_format, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: ...
@overload
def _empty_per_channel_affine_quantized(*size: _int, scales: Tensor, zero_points: Tensor, axis: _int, memory_format: Optional[memory_format] = contiguous_format, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: ...
def _enable_functionalization(*, reapply_views: _bool = False): ...
def _euclidean_dist(x1: Tensor, x2: Tensor) -> Tensor: ...
def _fake_quantize_learnable_per_channel_affine(input: Tensor, scale: Tensor, zero_point: Tensor, axis: _int, quant_min: _int, quant_max: _int, grad_factor: _float = 1.0) -> Tensor: ...
def _fake_quantize_learnable_per_tensor_affine(input: Tensor, scale: Tensor, zero_point: Tensor, quant_min: _int, quant_max: _int, grad_factor: _float = 1.0) -> Tensor: ...
def _fake_quantize_per_tensor_affine_cachemask_tensor_qparams(input: Tensor, scale: Tensor, zero_point: Tensor, fake_quant_enabled: Tensor, quant_min: _int, quant_max: _int) -> torch.return_types._fake_quantize_per_tensor_affine_cachemask_tensor_qparams: ...
def _fft_c2c(input: Tensor, dim: Sequence[Union[_int, SymInt]], normalization: _int, forward: _bool, *, out: Optional[Tensor] = None) -> Tensor: ...
def _fft_c2r(input: Tensor, dim: _size, normalization: _int, last_dim_size: Union[_int, SymInt], *, out: Optional[Tensor] = None) -> Tensor: ...
def _fft_r2c(input: Tensor, dim: _size, normalization: _int, onesided: _bool, *, out: Optional[Tensor] = None) -> Tensor: ...
def _fill_mem_eff_dropout_mask_(input: Tensor, dropout_p: _float, seed: _int, offset: _int) -> Tensor: ...
def _foobar(input: Tensor, arg1: _bool = True, arg2: _bool = True, *, arg3: _bool = True) -> Tensor: ...
def _foreach_abs(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> Tuple[Tensor, ...]:
    r"""
    _foreach_abs(self: List[Tensor]) -> List[Tensor]

    Apply :func:`torch.abs` to each Tensor of the input list.
    """
    ...
def _foreach_abs_(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> None:
    r"""
    _foreach_abs_(self: List[Tensor]) -> None

    Apply :func:`torch.abs` to each Tensor of the input list.
    """
    ...
def _foreach_acos(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> Tuple[Tensor, ...]:
    r"""
    _foreach_acos(self: List[Tensor]) -> List[Tensor]

    Apply :func:`torch.acos` to each Tensor of the input list.
    """
    ...
def _foreach_acos_(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> None:
    r"""
    _foreach_acos_(self: List[Tensor]) -> None

    Apply :func:`torch.acos` to each Tensor of the input list.
    """
    ...
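# Illustrative sketch for the ``_foreach_*`` family (a comment added for
# exposition; not part of the generated stub). Each op takes a list/tuple of
# tensors and applies the base op across the whole list in one fused call; the
# trailing-underscore variants mutate in place:
#
#     >>> xs = [torch.tensor([-1.0, 2.0]), torch.tensor([-3.0])]
#     >>> torch._foreach_abs(xs)   # one new tensor per input: abs of each
#     >>> torch._foreach_abs_(xs)  # in-place variant; returns None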
@overload
def _foreach_add(self: Union[Tuple[Tensor, ...], List[Tensor]], scalars: Sequence[Union[Number, _complex]]) -> Tuple[Tensor, ...]: ...
@overload
def _foreach_add(self: Union[Tuple[Tensor, ...], List[Tensor]], other: Union[Tuple[Tensor, ...], List[Tensor]], *, alpha: Union[Number, _complex] = 1) -> Tuple[Tensor, ...]: ...
@overload
def _foreach_add(self: Union[Tuple[Tensor, ...], List[Tensor]], other: Tensor, *, alpha: Union[Number, _complex] = 1) -> Tuple[Tensor, ...]: ...
@overload
def _foreach_add(self: Union[Tuple[Tensor, ...], List[Tensor]], scalar: Union[Number, _complex]) -> Tuple[Tensor, ...]: ...
@overload
def _foreach_add_(self: Union[Tuple[Tensor, ...], List[Tensor]], scalars: Sequence[Union[Number, _complex]]) -> None: ...
@overload
def _foreach_add_(self: Union[Tuple[Tensor, ...], List[Tensor]], other: Union[Tuple[Tensor, ...], List[Tensor]], *, alpha: Union[Number, _complex] = 1) -> None: ...
@overload
def _foreach_add_(self: Union[Tuple[Tensor, ...], List[Tensor]], other: Tensor, *, alpha: Union[Number, _complex] = 1) -> None: ...
@overload
def _foreach_add_(self: Union[Tuple[Tensor, ...], List[Tensor]], scalar: Union[Number, _complex]) -> None: ...
@overload
def _foreach_addcdiv(self: Union[Tuple[Tensor, ...], List[Tensor]], tensor1: Union[Tuple[Tensor, ...], List[Tensor]], tensor2: Union[Tuple[Tensor, ...], List[Tensor]], scalars: Sequence[Union[Number, _complex]]) -> Tuple[Tensor, ...]: ...
@overload
def _foreach_addcdiv(self: Union[Tuple[Tensor, ...], List[Tensor]], tensor1: Union[Tuple[Tensor, ...], List[Tensor]], tensor2: Union[Tuple[Tensor, ...], List[Tensor]], scalars: Tensor) -> Tuple[Tensor, ...]: ...
@overload
def _foreach_addcdiv(self: Union[Tuple[Tensor, ...], List[Tensor]], tensor1: Union[Tuple[Tensor, ...], List[Tensor]], tensor2: Union[Tuple[Tensor, ...], List[Tensor]], value: Union[Number, _complex] = 1) -> Tuple[Tensor, ...]: ...
@overload
def _foreach_addcdiv_(self: Union[Tuple[Tensor, ...], List[Tensor]], tensor1: Union[Tuple[Tensor, ...], List[Tensor]], tensor2: Union[Tuple[Tensor, ...], List[Tensor]], scalars: Sequence[Union[Number, _complex]]) -> None: ...
@overload
def _foreach_addcdiv_(self: Union[Tuple[Tensor, ...], List[Tensor]], tensor1: Union[Tuple[Tensor, ...], List[Tensor]], tensor2: Union[Tuple[Tensor, ...], List[Tensor]], scalars: Tensor) -> None: ...
@overload
def _foreach_addcdiv_(self: Union[Tuple[Tensor, ...], List[Tensor]], tensor1: Union[Tuple[Tensor, ...], List[Tensor]], tensor2: Union[Tuple[Tensor, ...], List[Tensor]], value: Union[Number, _complex] = 1) -> None: ...
@overload
def _foreach_addcmul(self: Union[Tuple[Tensor, ...], List[Tensor]], tensor1: Union[Tuple[Tensor, ...], List[Tensor]], tensor2: Union[Tuple[Tensor, ...], List[Tensor]], scalars: Sequence[Union[Number, _complex]]) -> Tuple[Tensor, ...]: ...
@overload
def _foreach_addcmul(self: Union[Tuple[Tensor, ...], List[Tensor]], tensor1: Union[Tuple[Tensor, ...], List[Tensor]], tensor2: Union[Tuple[Tensor, ...], List[Tensor]], scalars: Tensor) -> Tuple[Tensor, ...]: ...
@overload
def _foreach_addcmul(self: Union[Tuple[Tensor, ...], List[Tensor]], tensor1: Union[Tuple[Tensor, ...], List[Tensor]], tensor2: Union[Tuple[Tensor, ...], List[Tensor]], value: Union[Number, _complex] = 1) -> Tuple[Tensor, ...]: ...
@overload
def _foreach_addcmul_(self: Union[Tuple[Tensor, ...], List[Tensor]], tensor1: Union[Tuple[Tensor, ...], List[Tensor]], tensor2: Union[Tuple[Tensor, ...], List[Tensor]], scalars: Sequence[Union[Number, _complex]]) -> None: ...
@overload
def _foreach_addcmul_(self: Union[Tuple[Tensor, ...], List[Tensor]], tensor1: Union[Tuple[Tensor, ...], List[Tensor]], tensor2: Union[Tuple[Tensor, ...], List[Tensor]], scalars: Tensor) -> None: ...
@overload
def _foreach_addcmul_(self: Union[Tuple[Tensor, ...], List[Tensor]], tensor1: Union[Tuple[Tensor, ...], List[Tensor]], tensor2: Union[Tuple[Tensor, ...], List[Tensor]], value: Union[Number, _complex] = 1) -> None: ...
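# Illustrative sketch (a comment added for exposition; not part of the
# generated stub): ``_foreach_add_``/``_foreach_addcdiv_``/``_foreach_addcmul_``
# are the fused building blocks behind foreach optimizer steps, updating whole
# parameter lists in one call:
#
#     >>> params = [torch.zeros(2), torch.zeros(3)]
#     >>> avgs = [torch.ones(2), torch.ones(3)]
#     >>> denoms = [torch.full((2,), 2.0), torch.full((3,), 2.0)]
#     >>> torch._foreach_addcdiv_(params, avgs, denoms, value=-0.1)
#     >>> # params[i] += -0.1 * avgs[i] / denoms[i], fused across the list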
def _foreach_asin(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> Tuple[Tensor, ...]:
    r"""
    _foreach_asin(self: List[Tensor]) -> List[Tensor]

    Apply :func:`torch.asin` to each Tensor of the input list.
    """
    ...
def _foreach_asin_(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> None:
    r"""
    _foreach_asin_(self: List[Tensor]) -> None

    Apply :func:`torch.asin` to each Tensor of the input list.
    """
    ...
def _foreach_atan(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> Tuple[Tensor, ...]:
    r"""
    _foreach_atan(self: List[Tensor]) -> List[Tensor]

    Apply :func:`torch.atan` to each Tensor of the input list.
    """
    ...
def _foreach_atan_(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> None:
    r"""
    _foreach_atan_(self: List[Tensor]) -> None

    Apply :func:`torch.atan` to each Tensor of the input list.
    """
    ...
def _foreach_ceil(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> Tuple[Tensor, ...]:
    r"""
    _foreach_ceil(self: List[Tensor]) -> List[Tensor]

    Apply :func:`torch.ceil` to each Tensor of the input list.
    """
    ...
def _foreach_ceil_(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> None:
    r"""
    _foreach_ceil_(self: List[Tensor]) -> None

    Apply :func:`torch.ceil` to each Tensor of the input list.
    """
    ...
@overload
def _foreach_clamp_max(self: Union[Tuple[Tensor, ...], List[Tensor]], scalars: Sequence[Union[Number, _complex]]) -> Tuple[Tensor, ...]: ...
@overload
def _foreach_clamp_max(self: Union[Tuple[Tensor, ...], List[Tensor]], scalar: Union[Number, _complex]) -> Tuple[Tensor, ...]: ...
@overload
def _foreach_clamp_max(self: Union[Tuple[Tensor, ...], List[Tensor]], other: Union[Tuple[Tensor, ...], List[Tensor]]) -> Tuple[Tensor, ...]: ...
@overload
def _foreach_clamp_max_(self: Union[Tuple[Tensor, ...], List[Tensor]], scalars: Sequence[Union[Number, _complex]]) -> None: ...
@overload
def _foreach_clamp_max_(self: Union[Tuple[Tensor, ...], List[Tensor]], scalar: Union[Number, _complex]) -> None: ...
@overload
def _foreach_clamp_max_(self: Union[Tuple[Tensor, ...], List[Tensor]], other: Union[Tuple[Tensor, ...], List[Tensor]]) -> None: ...
@overload
def _foreach_clamp_min(self: Union[Tuple[Tensor, ...], List[Tensor]], scalars: Sequence[Union[Number, _complex]]) -> Tuple[Tensor, ...]: ...
@overload
def _foreach_clamp_min(self: Union[Tuple[Tensor, ...], List[Tensor]], scalar: Union[Number, _complex]) -> Tuple[Tensor, ...]: ...
@overload
def _foreach_clamp_min(self: Union[Tuple[Tensor, ...], List[Tensor]], other: Union[Tuple[Tensor, ...], List[Tensor]]) -> Tuple[Tensor, ...]: ...
@overload
def _foreach_clamp_min_(self: Union[Tuple[Tensor, ...], List[Tensor]], scalars: Sequence[Union[Number, _complex]]) -> None: ...
@overload
def _foreach_clamp_min_(self: Union[Tuple[Tensor, ...], List[Tensor]], scalar: Union[Number, _complex]) -> None: ...
@overload
def _foreach_clamp_min_(self: Union[Tuple[Tensor, ...], List[Tensor]], other: Union[Tuple[Tensor, ...], List[Tensor]]) -> None: ...
def _foreach_copy_(self: Union[Tuple[Tensor, ...], List[Tensor]], src: Union[Tuple[Tensor, ...], List[Tensor]], non_blocking: _bool = False) -> None: ...
def _foreach_cos(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> Tuple[Tensor, ...]:
    r"""
    _foreach_cos(self: List[Tensor]) -> List[Tensor]

    Apply :func:`torch.cos` to each Tensor of the input list.
    """
    ...
def _foreach_cos_(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> None:
    r"""
    _foreach_cos_(self: List[Tensor]) -> None

    Apply :func:`torch.cos` to each Tensor of the input list.
    """
    ...
def _foreach_cosh(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> Tuple[Tensor, ...]:
    r"""
    _foreach_cosh(self: List[Tensor]) -> List[Tensor]

    Apply :func:`torch.cosh` to each Tensor of the input list.
    """
    ...
def _foreach_cosh_(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> None:
    r"""
    _foreach_cosh_(self: List[Tensor]) -> None

    Apply :func:`torch.cosh` to each Tensor of the input list.
    """
    ...
@overload
def _foreach_div(self: Union[Tuple[Tensor, ...], List[Tensor]], scalars: Sequence[Union[Number, _complex]]) -> Tuple[Tensor, ...]: ...
@overload
def _foreach_div(self: Union[Tuple[Tensor, ...], List[Tensor]], other: Tensor) -> Tuple[Tensor, ...]: ...
@overload
def _foreach_div(self: Union[Tuple[Tensor, ...], List[Tensor]], scalar: Union[Number, _complex]) -> Tuple[Tensor, ...]: ...
@overload
def _foreach_div(self: Union[Tuple[Tensor, ...], List[Tensor]], other: Union[Tuple[Tensor, ...], List[Tensor]]) -> Tuple[Tensor, ...]: ...
@overload
def _foreach_div_(self: Union[Tuple[Tensor, ...], List[Tensor]], scalars: Sequence[Union[Number, _complex]]) -> None: ...
@overload
def _foreach_div_(self: Union[Tuple[Tensor, ...], List[Tensor]], other: Tensor) -> None: ...
@overload
def _foreach_div_(self: Union[Tuple[Tensor, ...], List[Tensor]], scalar: Union[Number, _complex]) -> None: ...
@overload
def _foreach_div_(self: Union[Tuple[Tensor, ...], List[Tensor]], other: Union[Tuple[Tensor, ...], List[Tensor]]) -> None: ...
def _foreach_erf(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> Tuple[Tensor, ...]:
    r"""
    _foreach_erf(self: List[Tensor]) -> List[Tensor]

    Apply :func:`torch.erf` to each Tensor of the input list.
    """
    ...
def _foreach_erf_(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> None:
    r"""
    _foreach_erf_(self: List[Tensor]) -> None

    Apply :func:`torch.erf` to each Tensor of the input list.
    """
    ...
def _foreach_erfc(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> Tuple[Tensor, ...]:
    r"""
    _foreach_erfc(self: List[Tensor]) -> List[Tensor]

    Apply :func:`torch.erfc` to each Tensor of the input list.
    """
    ...
def _foreach_erfc_(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> None:
    r"""
    _foreach_erfc_(self: List[Tensor]) -> None

    Apply :func:`torch.erfc` to each Tensor of the input list.
    """
    ...
def _foreach_exp(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> Tuple[Tensor, ...]:
    r"""
    _foreach_exp(self: List[Tensor]) -> List[Tensor]

    Apply :func:`torch.exp` to each Tensor of the input list.
    """
    ...
def _foreach_exp_(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> None:
    r"""
    _foreach_exp_(self: List[Tensor]) -> None

    Apply :func:`torch.exp` to each Tensor of the input list.
    """
    ...
def _foreach_expm1(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> Tuple[Tensor, ...]:
    r"""
    _foreach_expm1(self: List[Tensor]) -> List[Tensor]

    Apply :func:`torch.expm1` to each Tensor of the input list.
    """
    ...
def _foreach_expm1_(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> None:
    r"""
    _foreach_expm1_(self: List[Tensor]) -> None

    Apply :func:`torch.expm1` to each Tensor of the input list.
    """
    ...
def _foreach_floor(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> Tuple[Tensor, ...]:
    r"""
    _foreach_floor(self: List[Tensor]) -> List[Tensor]

    Apply :func:`torch.floor` to each Tensor of the input list.
    """
    ...
def _foreach_floor_(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> None:
    r"""
    _foreach_floor_(self: List[Tensor]) -> None

    Apply :func:`torch.floor` to each Tensor of the input list.
    """
    ...
def _foreach_frac(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> Tuple[Tensor, ...]:
    r"""
    _foreach_frac(self: List[Tensor]) -> List[Tensor]

    Apply :func:`torch.frac` to each Tensor of the input list.
    """
    ...
def _foreach_frac_(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> None:
    r"""
    _foreach_frac_(self: List[Tensor]) -> None

    Apply :func:`torch.frac` to each Tensor of the input list.
    """
    ...
@overload
def _foreach_lerp(self: Union[Tuple[Tensor, ...], List[Tensor]], tensors1: Union[Tuple[Tensor, ...], List[Tensor]], weight: Union[Number, _complex]) -> Tuple[Tensor, ...]: ...
@overload
def _foreach_lerp(self: Union[Tuple[Tensor, ...], List[Tensor]], tensors1: Union[Tuple[Tensor, ...], List[Tensor]], weights: Union[Tuple[Tensor, ...], List[Tensor]]) -> Tuple[Tensor, ...]: ...
@overload
def _foreach_lerp_(self: Union[Tuple[Tensor, ...], List[Tensor]], tensors1: Union[Tuple[Tensor, ...], List[Tensor]], weight: Union[Number, _complex]) -> None: ...
@overload
def _foreach_lerp_(self: Union[Tuple[Tensor, ...], List[Tensor]], tensors1: Union[Tuple[Tensor, ...], List[Tensor]], weights: Union[Tuple[Tensor, ...], List[Tensor]]) -> None: ...
def _foreach_lgamma(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> Tuple[Tensor, ...]:
    r"""
    _foreach_lgamma(self: List[Tensor]) -> List[Tensor]

    Apply :func:`torch.lgamma` to each Tensor of the input list.
    """
    ...
def _foreach_lgamma_(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> None:
    r"""
    _foreach_lgamma_(self: List[Tensor]) -> None

    Apply :func:`torch.lgamma` to each Tensor of the input list.
    """
    ...
def _foreach_log(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> Tuple[Tensor, ...]:
    r"""
    _foreach_log(self: List[Tensor]) -> List[Tensor]

    Apply :func:`torch.log` to each Tensor of the input list.
    """
    ...
def _foreach_log10(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> Tuple[Tensor, ...]:
    r"""
    _foreach_log10(self: List[Tensor]) -> List[Tensor]

    Apply :func:`torch.log10` to each Tensor of the input list.
    """
    ...
def _foreach_log10_(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> None:
    r"""
    _foreach_log10_(self: List[Tensor]) -> None

    Apply :func:`torch.log10` to each Tensor of the input list.
    """
    ...
def _foreach_log1p(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> Tuple[Tensor, ...]:
    r"""
    _foreach_log1p(self: List[Tensor]) -> List[Tensor]

    Apply :func:`torch.log1p` to each Tensor of the input list.
    """
    ...
def _foreach_log1p_(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> None:
    r"""
    _foreach_log1p_(self: List[Tensor]) -> None

    Apply :func:`torch.log1p` to each Tensor of the input list.
    """
    ...
def _foreach_log2(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> Tuple[Tensor, ...]:
    r"""
    _foreach_log2(self: List[Tensor]) -> List[Tensor]

    Apply :func:`torch.log2` to each Tensor of the input list.
    """
    ...
def _foreach_log2_(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> None:
    r"""
    _foreach_log2_(self: List[Tensor]) -> None

    Apply :func:`torch.log2` to each Tensor of the input list.
    """
    ...
def _foreach_log_(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> None:
    r"""
    _foreach_log_(self: List[Tensor]) -> None

    Apply :func:`torch.log` to each Tensor of the input list.
    """
    ...
@overload
def _foreach_maximum(self: Union[Tuple[Tensor, ...], List[Tensor]], scalars: Sequence[Union[Number, _complex]]) -> Tuple[Tensor, ...]: ...
@overload
def _foreach_maximum(self: Union[Tuple[Tensor, ...], List[Tensor]], scalar: Union[Number, _complex]) -> Tuple[Tensor, ...]: ...
@overload
def _foreach_maximum(self: Union[Tuple[Tensor, ...], List[Tensor]], other: Union[Tuple[Tensor, ...], List[Tensor]]) -> Tuple[Tensor, ...]: ...
@overload
def _foreach_maximum_(self: Union[Tuple[Tensor, ...], List[Tensor]], scalars: Sequence[Union[Number, _complex]]) -> None: ...
@overload
def _foreach_maximum_(self: Union[Tuple[Tensor, ...], List[Tensor]], scalar: Union[Number, _complex]) -> None: ...
@overload
def _foreach_maximum_(self: Union[Tuple[Tensor, ...], List[Tensor]], other: Union[Tuple[Tensor, ...], List[Tensor]]) -> None: ...
@overload
def _foreach_minimum(self: Union[Tuple[Tensor, ...], List[Tensor]], scalars: Sequence[Union[Number, _complex]]) -> Tuple[Tensor, ...]: ...
@overload
def _foreach_minimum(self: Union[Tuple[Tensor, ...], List[Tensor]], scalar: Union[Number, _complex]) -> Tuple[Tensor, ...]: ...
@overload
def _foreach_minimum(self: Union[Tuple[Tensor, ...], List[Tensor]], other: Union[Tuple[Tensor, ...], List[Tensor]]) -> Tuple[Tensor, ...]: ...
@overload
def _foreach_minimum_(self: Union[Tuple[Tensor, ...], List[Tensor]], scalars: Sequence[Union[Number, _complex]]) -> None: ...
@overload
def _foreach_minimum_(self: Union[Tuple[Tensor, ...], List[Tensor]], scalar: Union[Number, _complex]) -> None: ...
@overload
def _foreach_minimum_(self: Union[Tuple[Tensor, ...], List[Tensor]], other: Union[Tuple[Tensor, ...], List[Tensor]]) -> None: ...
@overload
def _foreach_mul(self: Union[Tuple[Tensor, ...], List[Tensor]], scalars: Sequence[Union[Number, _complex]]) -> Tuple[Tensor, ...]: ...
@overload
def _foreach_mul(self: Union[Tuple[Tensor, ...], List[Tensor]], other: Tensor) -> Tuple[Tensor, ...]: ...
@overload
def _foreach_mul(self: Union[Tuple[Tensor, ...], List[Tensor]], scalar: Union[Number, _complex]) -> Tuple[Tensor, ...]: ...
@overload
def _foreach_mul(self: Union[Tuple[Tensor, ...], List[Tensor]], other: Union[Tuple[Tensor, ...], List[Tensor]]) -> Tuple[Tensor, ...]: ...
@overload
def _foreach_mul_(self: Union[Tuple[Tensor, ...], List[Tensor]], scalars: Sequence[Union[Number, _complex]]) -> None: ...
@overload
def _foreach_mul_(self: Union[Tuple[Tensor, ...], List[Tensor]], other: Tensor) -> None: ...
@overload
def _foreach_mul_(self: Union[Tuple[Tensor, ...], List[Tensor]], scalar: Union[Number, _complex]) -> None: ...
@overload
def _foreach_mul_(self: Union[Tuple[Tensor, ...], List[Tensor]], other: Union[Tuple[Tensor, ...], List[Tensor]]) -> None: ...
def _foreach_neg(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> Tuple[Tensor, ...]:
    r"""
    _foreach_neg(self: List[Tensor]) -> List[Tensor]

    Apply :func:`torch.neg` to each Tensor of the input list.
    """
    ...
def _foreach_neg_(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> None:
    r"""
    _foreach_neg_(self: List[Tensor]) -> None

    Apply :func:`torch.neg` to each Tensor of the input list.
    """
    ...
def _foreach_norm(self: Union[Tuple[Tensor, ...], List[Tensor]], ord: Union[Number, _complex] = 2) -> Tuple[Tensor, ...]: ...
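# Illustrative sketch (a comment added for exposition; not part of the
# generated stub): ``_foreach_norm`` returns one norm per tensor and is the
# fused core of utilities such as gradient-norm clipping:
#
#     >>> gs = [torch.tensor([3.0, 4.0]), torch.tensor([6.0, 8.0])]
#     >>> torch._foreach_norm(gs, ord=2)  # per-tensor L2 norms: 5.0 and 10.0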
@overload
def _foreach_pow(self: Union[Tuple[Tensor, ...], List[Tensor]], exponent: Sequence[Union[Number, _complex]]) -> Tuple[Tensor, ...]: ...
@overload
def _foreach_pow(self: Union[Tuple[Tensor, ...], List[Tensor]], exponent: Union[Number, _complex]) -> Tuple[Tensor, ...]: ...
@overload
def _foreach_pow(self: Union[Tuple[Tensor, ...], List[Tensor]], exponent: Union[Tuple[Tensor, ...], List[Tensor]]) -> Tuple[Tensor, ...]: ...
@overload
def _foreach_pow(self: Union[Number, _complex], exponent: Union[Tuple[Tensor, ...], List[Tensor]]) -> Tuple[Tensor, ...]: ...
@overload
def _foreach_pow_(self: Union[Tuple[Tensor, ...], List[Tensor]], exponent: Sequence[Union[Number, _complex]]) -> None: ...
@overload
def _foreach_pow_(self: Union[Tuple[Tensor, ...], List[Tensor]], exponent: Union[Number, _complex]) -> None: ...
@overload
def _foreach_pow_(self: Union[Tuple[Tensor, ...], List[Tensor]], exponent: Union[Tuple[Tensor, ...], List[Tensor]]) -> None: ...
def _foreach_reciprocal(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> Tuple[Tensor, ...]:
    r"""
    _foreach_reciprocal(self: List[Tensor]) -> List[Tensor]

    Apply :func:`torch.reciprocal` to each Tensor of the input list.
    """
    ...
def _foreach_reciprocal_(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> None:
    r"""
    _foreach_reciprocal_(self: List[Tensor]) -> None

    Apply :func:`torch.reciprocal` to each Tensor of the input list.
    """
    ...
def _foreach_round(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> Tuple[Tensor, ...]:
    r"""
    _foreach_round(self: List[Tensor]) -> List[Tensor]

    Apply :func:`torch.round` to each Tensor of the input list.
    """
    ...
def _foreach_round_(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> None:
    r"""
    _foreach_round_(self: List[Tensor]) -> None

    Apply :func:`torch.round` to each Tensor of the input list.
    """
    ...
def _foreach_sigmoid(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> Tuple[Tensor, ...]:
    r"""
    _foreach_sigmoid(self: List[Tensor]) -> List[Tensor]

    Apply :func:`torch.sigmoid` to each Tensor of the input list.
    """
    ...
def _foreach_sigmoid_(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> None:
    r"""
    _foreach_sigmoid_(self: List[Tensor]) -> None

    Apply :func:`torch.sigmoid` to each Tensor of the input list.
    """
    ...
def _foreach_sign(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> Tuple[Tensor, ...]: ...
def _foreach_sign_(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> None: ...
def _foreach_sin(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> Tuple[Tensor, ...]:
    r"""
    _foreach_sin(self: List[Tensor]) -> List[Tensor]

    Apply :func:`torch.sin` to each Tensor of the input list.
    """
    ...
def _foreach_sin_(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> None:
    r"""
    _foreach_sin_(self: List[Tensor]) -> None

    Apply :func:`torch.sin` to each Tensor of the input list.
    """
    ...
def _foreach_sinh(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> Tuple[Tensor, ...]:
    r"""
    _foreach_sinh(self: List[Tensor]) -> List[Tensor]

    Apply :func:`torch.sinh` to each Tensor of the input list.
    """
    ...
def _foreach_sinh_(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> None:
    r"""
    _foreach_sinh_(self: List[Tensor]) -> None

    Apply :func:`torch.sinh` to each Tensor of the input list.
    """
    ...
def _foreach_sqrt(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> Tuple[Tensor, ...]:
    r"""
    _foreach_sqrt(self: List[Tensor]) -> List[Tensor]

    Apply :func:`torch.sqrt` to each Tensor of the input list.
    """
    ...
def _foreach_sqrt_(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> None:
    r"""
    _foreach_sqrt_(self: List[Tensor]) -> None

    Apply :func:`torch.sqrt` to each Tensor of the input list.
    """
    ...
@overload
def _foreach_sub(self: Union[Tuple[Tensor, ...], List[Tensor]], scalars: Sequence[Union[Number, _complex]]) -> Tuple[Tensor, ...]: ...
@overload
def _foreach_sub(self: Union[Tuple[Tensor, ...], List[Tensor]], other: Union[Tuple[Tensor, ...], List[Tensor]], *, alpha: Union[Number, _complex] = 1) -> Tuple[Tensor, ...]: ...
@overload
def _foreach_sub(self: Union[Tuple[Tensor, ...], List[Tensor]], scalar: Union[Number, _complex]) -> Tuple[Tensor, ...]: ...
@overload
def _foreach_sub_(self: Union[Tuple[Tensor, ...], List[Tensor]], scalars: Sequence[Union[Number, _complex]]) -> None: ...
@overload
def _foreach_sub_(self: Union[Tuple[Tensor, ...], List[Tensor]], other: Union[Tuple[Tensor, ...], List[Tensor]], *, alpha: Union[Number, _complex] = 1) -> None: ...
@overload
def _foreach_sub_(self: Union[Tuple[Tensor, ...], List[Tensor]], scalar: Union[Number, _complex]) -> None: ...
def _foreach_tan(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> Tuple[Tensor, ...]:
    r"""
    _foreach_tan(self: List[Tensor]) -> List[Tensor]

    Apply :func:`torch.tan` to each Tensor of the input list.
    """
    ...
def _foreach_tan_(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> None:
    r"""
    _foreach_tan_(self: List[Tensor]) -> None

    Apply :func:`torch.tan` to each Tensor of the input list.
    """
    ...
def _foreach_tanh(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> Tuple[Tensor, ...]:
    r"""
    _foreach_tanh(self: List[Tensor]) -> List[Tensor]

    Apply :func:`torch.tanh` to each Tensor of the input list.
    """
    ...
def _foreach_tanh_(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> None:
    r"""
    _foreach_tanh_(self: List[Tensor]) -> None

    Apply :func:`torch.tanh` to each Tensor of the input list.
    """
    ...
def _foreach_trunc(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> Tuple[Tensor, ...]:
    r"""
    _foreach_trunc(self: List[Tensor]) -> List[Tensor]

    Apply :func:`torch.trunc` to each Tensor of the input list.
    """
    ...
def _foreach_trunc_(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> None:
    r"""
    _foreach_trunc_(self: List[Tensor]) -> None

    Apply :func:`torch.trunc` to each Tensor of the input list.
    """
    ...
def _foreach_zero_(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> None:
    r"""
    _foreach_zero_(self: List[Tensor]) -> None

    Apply :func:`torch.zero` to each Tensor of the input list.
    """
    ...
def _from_functional_tensor(t: Tensor) -> Tensor: ...
def _functional_assert_async(input: Tensor, assert_msg: str, dep_token: Tensor) -> Tensor: ...
def _functional_assert_scalar(self: Union[Number, _complex], assert_msg: str, dep_token: Tensor) -> Tensor: ...
def _functional_sym_constrain_range(size: Union[Number, _complex], min: Optional[_int], max: Optional[_int], dep_token: Tensor) -> Tensor: ...
def _functional_sym_constrain_range_for_size(size: Union[Number, _complex], min: Optional[_int], max: Optional[_int], dep_token: Tensor) -> Tensor: ...
def _functionalize_are_all_mutations_hidden_from_autograd(t: Tensor) -> _bool: ...
def _functionalize_are_all_mutations_under_no_grad_or_inference_mode(t: Tensor) -> _bool: ...
def _functionalize_commit_update(t: Tensor) -> None: ...
def _functionalize_mark_mutation_hidden_from_autograd(t: Tensor) -> None: ...
def _functionalize_replace(self_: Tensor, other: Tensor) -> None: ...
def _functionalize_sync(t: Tensor) -> None: ...
@overload
def _fused_adam_(self: Union[Tuple[Tensor, ...], List[Tensor]], grads: Union[Tuple[Tensor, ...], List[Tensor]], exp_avgs: Union[Tuple[Tensor, ...], List[Tensor]], exp_avg_sqs: Union[Tuple[Tensor, ...], List[Tensor]], max_exp_avg_sqs: Union[Tuple[Tensor, ...], List[Tensor]], state_steps: Union[Tuple[Tensor, ...], List[Tensor]], *, lr: Tensor, beta1: _float, beta2: _float, weight_decay: _float, eps: _float, amsgrad: _bool, maximize: _bool, grad_scale: Optional[Tensor] = None, found_inf: Optional[Tensor] = None) -> None: ...
@overload
def _fused_adam_(self: Union[Tuple[Tensor, ...], List[Tensor]], grads: Union[Tuple[Tensor, ...], List[Tensor]], exp_avgs: Union[Tuple[Tensor, ...], List[Tensor]], exp_avg_sqs: Union[Tuple[Tensor, ...], List[Tensor]], max_exp_avg_sqs: Union[Tuple[Tensor, ...], List[Tensor]], state_steps: Union[Tuple[Tensor, ...], List[Tensor]], *, lr: _float, beta1: _float, beta2: _float, weight_decay: _float, eps: _float, amsgrad: _bool, maximize: _bool, grad_scale: Optional[Tensor] = None, found_inf: Optional[Tensor] = None) -> None: ...
@overload
def _fused_adamw_(self: Union[Tuple[Tensor, ...], List[Tensor]], grads: Union[Tuple[Tensor, ...], List[Tensor]], exp_avgs: Union[Tuple[Tensor, ...], List[Tensor]], exp_avg_sqs: Union[Tuple[Tensor, ...], List[Tensor]], max_exp_avg_sqs: Union[Tuple[Tensor, ...], List[Tensor]], state_steps: Union[Tuple[Tensor, ...], List[Tensor]], *, lr: Tensor, beta1: _float, beta2: _float, weight_decay: _float, eps: _float, amsgrad: _bool, maximize: _bool, grad_scale: Optional[Tensor] = None, found_inf: Optional[Tensor] = None) -> None: ...
@overload
def _fused_adamw_(self: Union[Tuple[Tensor, ...], List[Tensor]], grads: Union[Tuple[Tensor, ...], List[Tensor]], exp_avgs: Union[Tuple[Tensor, ...], List[Tensor]], exp_avg_sqs: Union[Tuple[Tensor, ...], List[Tensor]], max_exp_avg_sqs: Union[Tuple[Tensor, ...], List[Tensor]], state_steps: Union[Tuple[Tensor, ...], List[Tensor]], *, lr: _float, beta1: _float, beta2: _float, weight_decay: _float, eps: _float, amsgrad: _bool, maximize: _bool, grad_scale: Optional[Tensor] = None, found_inf: Optional[Tensor] = None) -> None: ...
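# Hedged sketch (a comment added for exposition; not part of the generated
# stub): ``_fused_adam_``/``_fused_adamw_`` are the single-kernel Adam(W)
# steps behind ``torch.optim.Adam(..., fused=True)``. All list arguments must
# line up elementwise, ``state_steps`` holds per-parameter step counts as
# tensors, and with ``amsgrad=False`` the ``max_exp_avg_sqs`` list may be
# empty; fused-kernel support depends on the device and build:
#
#     >>> p = [torch.zeros(4)]
#     >>> torch._fused_adam_(p, grads=[torch.ones(4)],
#     ...                    exp_avgs=[torch.zeros(4)],
#     ...                    exp_avg_sqs=[torch.zeros(4)], max_exp_avg_sqs=[],
#     ...                    state_steps=[torch.tensor(1.0)], lr=1e-3,
#     ...                    beta1=0.9, beta2=0.999, weight_decay=0.0,
#     ...                    eps=1e-8, amsgrad=False, maximize=False)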
def _fused_dropout(input: Tensor, p: _float, generator: Optional[Generator] = None) -> Tuple[Tensor, Tensor]: ...
def _fused_moving_avg_obs_fq_helper(input: Tensor, observer_on: Tensor, fake_quant_on: Tensor, running_min: Tensor, running_max: Tensor, scale: Tensor, zero_point: Tensor, averaging_const: _float, quant_min: _int, quant_max: _int, ch_axis: _int, per_row_fake_quant: _bool = False, symmetric_quant: _bool = False) -> torch.return_types._fused_moving_avg_obs_fq_helper: ...
def _fused_sdp_choice(query: Tensor, key: Tensor, value: Tensor, attn_mask: Optional[Tensor] = None, dropout_p: _float = 0.0, is_causal: _bool = False, *, scale: Optional[_float] = None) -> _int: ...
@overload
def _fused_sgd_(self: Union[Tuple[Tensor, ...], List[Tensor]], grads: Union[Tuple[Tensor, ...], List[Tensor]], momentum_buffer_list: Union[Tuple[Tensor, ...], List[Tensor]], *, weight_decay: _float, momentum: _float, lr: Tensor, dampening: _float, nesterov: _bool, maximize: _bool, is_first_step: _bool, grad_scale: Optional[Tensor] = None, found_inf: Optional[Tensor] = None) -> None: ...
@overload
def _fused_sgd_(self: Union[Tuple[Tensor, ...], List[Tensor]], grads: Union[Tuple[Tensor, ...], List[Tensor]], momentum_buffer_list: Union[Tuple[Tensor, ...], List[Tensor]], *, weight_decay: _float, momentum: _float, lr: _float, dampening: _float, nesterov: _bool, maximize: _bool, is_first_step: _bool, grad_scale: Optional[Tensor] = None, found_inf: Optional[Tensor] = None) -> None: ...
def _fw_primal_copy(input: Tensor, level: _int, *, out: Optional[Tensor] = None) -> Tensor: ...
def _grid_sampler_2d_cpu_fallback(input: Tensor, grid: Tensor, interpolation_mode: _int, padding_mode: _int, align_corners: _bool) -> Tensor: ...
def _has_compatible_shallow_copy_type(input: Tensor, from_: Tensor) -> _bool: ...
def _histogramdd_bin_edges(input: Tensor, bins: _size, *, range: Optional[Sequence[_float]] = None, weight: Optional[Tensor] = None, density: _bool = False) -> Tuple[Tensor, ...]: ...
def _histogramdd_from_bin_cts(input: Tensor, bins: _size, *, range: Optional[Sequence[_float]] = None, weight: Optional[Tensor] = None, density: _bool = False) -> Tensor: ...
def _histogramdd_from_bin_tensors(input: Tensor, bins: Union[Tuple[Tensor, ...], List[Tensor]], *, weight: Optional[Tensor] = None, density: _bool = False) -> Tensor: ...
def _index_put_impl_(input: Tensor, indices: Optional[Union[Tuple[Tensor, ...], List[Tensor]]], values: Tensor, accumulate: _bool = False, unsafe: _bool = False) -> Tensor: ...
def _indices_copy(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ...
def _int_mm(input: Tensor, mat2: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ...
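# Hedged sketch (a comment added for exposition; not part of the generated
# stub): ``_int_mm`` multiplies two int8 matrices and accumulates into int32;
# availability and shape constraints depend on the device and build (the CUDA
# path in particular restricts the allowed dimensions):
#
#     >>> a = torch.randint(-128, 127, (32, 64), dtype=torch.int8, device="cuda")
#     >>> b = torch.randint(-128, 127, (64, 32), dtype=torch.int8, device="cuda")
#     >>> c = torch._int_mm(a, b)  # int32 result of shape (32, 32)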
def _is_all_true(input: Tensor) -> Tensor: ...
def _is_any_true(input: Tensor) -> Tensor: ...
def _is_functional_tensor(t: Tensor) -> _bool: ...
def _is_zerotensor(input: Tensor) -> _bool: ...
def _lazy_clone(input: Tensor) -> Tensor: ...
def _linalg_check_errors(info: Tensor, api_name: str, *, is_matrix: _bool) -> None: ...
def _linalg_det(A: Tensor, *, out: Union[Tensor, Tuple[Tensor, ...], List[Tensor], None] = None) -> torch.return_types._linalg_det: ...
def _linalg_eigh(A: Tensor, UPLO: str = "L", compute_v: _bool = True, *, out: Union[Tensor, Tuple[Tensor, ...], List[Tensor], None] = None) -> torch.return_types._linalg_eigh: ...
def _linalg_slogdet(A: Tensor, *, out: Union[Tensor, Tuple[Tensor, ...], List[Tensor], None] = None) -> torch.return_types._linalg_slogdet: ...
def _linalg_solve_ex(A: Tensor, B: Tensor, *, left: _bool = True, check_errors: _bool = False, out: Union[Tensor, Tuple[Tensor, ...], List[Tensor], None] = None) -> torch.return_types._linalg_solve_ex: ...
def _linalg_svd(A: Tensor, full_matrices: _bool = False, compute_uv: _bool = True, *, driver: Optional[str] = None, out: Union[Tensor, Tuple[Tensor, ...], List[Tensor], None] = None) -> torch.return_types._linalg_svd: ...
def _log_softmax(input: Tensor, dim: _int, half_to_float: _bool, *, out: Optional[Tensor] = None) -> Tensor: ...
def _log_softmax_backward_data(grad_output: Tensor, output: Tensor, dim: _int, input_dtype: _dtype, *, out: Optional[Tensor] = None) -> Tensor: ...
def _logcumsumexp(input: Tensor, dim: _int, *, out: Optional[Tensor] = None) -> Tensor: ...
def _lstm_mps(input: Tensor, hx: Union[Tuple[Tensor, ...], List[Tensor]], params: Union[Tuple[Tensor, ...], List[Tensor]], has_biases: _bool, num_layers: _int, dropout: _float, train: _bool, bidirectional: _bool, batch_first: _bool) -> Tuple[Tensor, Tensor, Tensor, Tensor, Tensor, Tensor]: ...
def _lu_with_info(input: Tensor, pivot: _bool = True, check_errors: _bool = True) -> torch.return_types._lu_with_info: ...
def _make_dep_token(*, memory_format: Optional[memory_format] = None, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: ...
def _make_dual(primal: Tensor, tangent: Tensor, level: _int) -> Tensor: ...
def _make_dual_copy(primal: Tensor, tangent: Tensor, level: _int, *, out: Optional[Tensor] = None) -> Tensor: ...
def _make_per_channel_quantized_tensor(input: Tensor, scale: Tensor, zero_point: Tensor, axis: _int) -> Tensor: ...
def _make_per_tensor_quantized_tensor(input: Tensor, scale: _float, zero_point: _int) -> Tensor: ...
def _masked_scale(input: Tensor, mask: Tensor, scale: _float) -> Tensor: ...
def _masked_softmax(input: Tensor, mask: Tensor, dim: Optional[_int] = None, mask_type: Optional[_int] = None) -> Tensor: ...
def _mixed_dtypes_linear(input: Tensor, weight: Tensor, scale: Tensor, *, bias: Optional[Tensor] = None, activation: Optional[str] = None) -> Tensor: ...
def _mkldnn_reshape(input: Tensor, shape: _size) -> Tensor: ...
def _mkldnn_transpose(input: Tensor, dim0: _int, dim1: _int) -> Tensor: ...
def _mkldnn_transpose_(input: Tensor, dim0: _int, dim1: _int) -> Tensor: ...
def _mps_convolution(input: Tensor, weight: Tensor, bias: Optional[Tensor], padding: Sequence[Union[_int, SymInt]], stride: Sequence[Union[_int, SymInt]], dilation: Sequence[Union[_int, SymInt]], groups: Union[_int, SymInt]) -> Tensor: ...
def _mps_convolution_transpose(input: Tensor, weight: Tensor, padding: Sequence[Union[_int, SymInt]], output_padding: Sequence[Union[_int, SymInt]], stride: Sequence[Union[_int, SymInt]], dilation: Sequence[Union[_int, SymInt]], groups: Union[_int, SymInt]) -> Tensor: ...
@overload
def _native_batch_norm_legit(input: Tensor, weight: Optional[Tensor], bias: Optional[Tensor], running_mean: Tensor, running_var: Tensor, training: _bool, momentum: _float, eps: _float, *, out: Union[Tensor, Tuple[Tensor, ...], List[Tensor], None] = None) -> Tuple[Tensor, Tensor, Tensor]: ...
@overload
def _native_batch_norm_legit(input: Tensor, weight: Optional[Tensor], bias: Optional[Tensor], training: _bool, momentum: _float, eps: _float, *, out: Union[Tensor, Tuple[Tensor, ...], List[Tensor], None] = None) -> Tuple[Tensor, Tensor, Tensor]: ...
def _native_batch_norm_legit_no_training(input: Tensor, weight: Optional[Tensor], bias: Optional[Tensor], running_mean: Tensor, running_var: Tensor, momentum: _float, eps: _float) -> Tuple[Tensor, Tensor, Tensor]: ...
def _native_multi_head_attention(query: Tensor, key: Tensor, value: Tensor, embed_dim: _int, num_head: _int, qkv_weight: Tensor, qkv_bias: Tensor, proj_weight: Tensor, proj_bias: Tensor, mask: Optional[Tensor] = None, need_weights: _bool = True, average_attn_weights: _bool = True, mask_type: Optional[_int] = None) -> Tuple[Tensor, Tensor]: ...
def _neg_view(input: Tensor) -> Tensor: ...
def _neg_view_copy(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ...
def _nested_from_padded(padded: Tensor, cpu_nested_shape_example: Tensor, fuse_transform_0213: _bool = False) -> Tensor: ...
def _nested_from_padded_and_nested_example(padded: Tensor, nt_example: Tensor) -> Tensor: ...
def _nested_get_jagged_dummy(any: Tensor) -> Tensor: ...
def _nested_get_lengths(input: Tensor) -> Tensor: ...
def _nested_get_offsets(input: Tensor) -> Tensor: ...
def _nested_get_ragged_idx(input: Tensor) -> _int: ...
def _nested_get_values(input: Tensor) -> Tensor: ...
def _nested_get_values_copy(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ...
def _nested_tensor_from_mask(t: Tensor, mask: Tensor, mask_check: _bool = True) -> Tensor: ...
def _nested_tensor_from_mask_left_aligned(t: Tensor, mask: Tensor) -> _bool: ...
def _nested_tensor_from_tensor_list(list: Union[Tuple[Tensor, ...], List[Tensor]], dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = None) -> Tensor: ...
def _nested_tensor_softmax_with_shape(input: Tensor, query: Tensor) -> Tensor: ...
def _nested_view_from_buffer(input: Tensor, nested_size: Tensor, nested_strides: Tensor, offsets: Tensor) -> Tensor: ...
def _nested_view_from_buffer_copy(input: Tensor, nested_size: Tensor, nested_strides: Tensor, offsets: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ...
def _nested_view_from_jagged(input: Tensor, offsets: Tensor, dummy: Tensor, lengths: Optional[Tensor] = None, ragged_idx: _int = 1) -> Tensor: ...
def _nested_view_from_jagged_copy(input: Tensor, offsets: Tensor, dummy: Tensor, lengths: Optional[Tensor] = None, ragged_idx: _int = 1, *, out: Optional[Tensor] = None) -> Tensor: ...
def _nnpack_available() -> _bool: ...
def _nnpack_spatial_convolution(input: Tensor, weight: Tensor, bias: Optional[Tensor], padding: Union[Union[_int, SymInt], Sequence[Union[_int, SymInt]]], stride: Union[Union[_int, SymInt], Sequence[Union[_int, SymInt]]] = 1) -> Tensor: ...
def _pack_padded_sequence(input: Tensor, lengths: Tensor, batch_first: _bool) -> Tuple[Tensor, Tensor]: ...
def _pad_packed_sequence(data: Tensor, batch_sizes: Tensor, batch_first: _bool, padding_value: Union[Number, _complex], total_length: _int) -> Tuple[Tensor, Tensor]: ...
def _pin_memory(input: Tensor, device: Optional[Optional[DeviceLikeType]] = None) -> Tensor: ...
def _prelu_kernel(input: Tensor, weight: Tensor) -> Tensor: ...
def _print(s: str) -> None: ...
def _propagate_xla_data(input: Tensor, output: Tensor) -> None: ...
def _remove_batch_dim(input: Tensor, level: _int, batch_size: _int, out_dim: _int) -> Tensor: ...
def _reshape_alias_copy(input: Tensor, size: Sequence[Union[_int, SymInt]], stride: Sequence[Union[_int, SymInt]], *, out: Optional[Tensor] = None) -> Tensor: ...
def _reshape_from_tensor(input: Tensor, shape: Tensor) -> Tensor: ...
def _resize_output_(input: Tensor, size: Sequence[Union[_int, SymInt]], device: Optional[DeviceLikeType]) -> Tensor: ...
def _rowwise_prune(weight: Tensor, mask: Tensor, compressed_indices_dtype: _dtype) -> Tuple[Tensor, Tensor]: ...
def _sample_dirichlet(input: Tensor, generator: Optional[Generator] = None) -> Tensor: ...
def _saturate_weight_to_fp16(weight: Tensor) -> Tensor: ...
def _scaled_dot_product_attention_math(query: Tensor, key: Tensor, value: Tensor, attn_mask: Optional[Tensor] = None, dropout_p: _float = 0.0, is_causal: _bool = False, dropout_mask: Optional[Tensor] = None, *, scale: Optional[_float] = None) -> Tuple[Tensor, Tensor]: ...
def _scaled_dot_product_cudnn_attention(query: Tensor, key: Tensor, value: Tensor, dropout_p: _float = 0.0, is_causal: _bool = False, return_debug_mask: _bool = False, *, scale: Optional[_float] = None) -> torch.return_types._scaled_dot_product_cudnn_attention: ...
def _scaled_dot_product_efficient_attention(query: Tensor, key: Tensor, value: Tensor, attn_bias: Optional[Tensor], compute_log_sumexp: _bool, dropout_p: _float = 0.0, is_causal: _bool = False, *, scale: Optional[_float] = None) -> torch.return_types._scaled_dot_product_efficient_attention: ...
def _scaled_dot_product_flash_attention(query: Tensor, key: Tensor, value: Tensor, dropout_p: _float = 0.0, is_causal: _bool = False, return_debug_mask: _bool = False, *, scale: Optional[_float] = None) -> torch.return_types._scaled_dot_product_flash_attention: ...
def _scaled_dot_product_flash_attention_for_cpu(query: Tensor, key: Tensor, value: Tensor, dropout_p: _float = 0.0, is_causal: _bool = False, *, attn_mask: Optional[Tensor] = None, scale: Optional[_float] = None) -> torch.return_types._scaled_dot_product_flash_attention_for_cpu: ...
def _scaled_mm(input: Tensor, mat2: Tensor, *, bias: Optional[Tensor] = None, out_dtype: Optional[_dtype] = None, scale_a: Optional[Tensor] = None, scale_b: Optional[Tensor] = None, scale_result: Optional[Tensor] = None, use_fast_accum: _bool = False, out: Union[Tensor, Tuple[Tensor, ...], List[Tensor], None] = None) -> Tuple[Tensor, Tensor]: ...
def _shape_as_tensor(input: Tensor) -> Tensor: ...
def _sobol_engine_draw(quasi: Tensor, n: _int, sobolstate: Tensor, dimension: _int, num_generated: _int, dtype: Optional[_dtype]) -> Tuple[Tensor, Tensor]: ...
def _sobol_engine_ff_(input: Tensor, n: _int, sobolstate: Tensor, dimension: _int, num_generated: _int) -> Tensor: ...
def _sobol_engine_initialize_state_(input: Tensor, dimension: _int) -> Tensor: ...
def _sobol_engine_scramble_(input: Tensor, ltm: Tensor, dimension: _int) -> Tensor: ...
def _softmax(input: Tensor, dim: _int, half_to_float: _bool, *, out: Optional[Tensor] = None) -> Tensor: ...
def _softmax_backward_data(grad_output: Tensor, output: Tensor, dim: _int, input_dtype: _dtype, *, grad_input: Optional[Tensor] = None) -> Tensor: ...
def _sparse_broadcast_to(input: Tensor, size: _size) -> Tensor: ...
def _sparse_broadcast_to_copy(input: Tensor, size: _size, *, out: Optional[Tensor] = None) -> Tensor: ...
def _sparse_csr_prod(input: Tensor, dim: Union[_int, _size], keepdim: _bool = False, *, dtype: Optional[_dtype] = None) -> Tensor: ...
def _sparse_csr_sum(input: Tensor, dim: Union[_int, _size], keepdim: _bool = False, *, dtype: Optional[_dtype] = None) -> Tensor: ...
def _sparse_log_softmax_backward_data(grad_output: Tensor, output: Tensor, dim: _int, input: Tensor) -> Tensor: ...
def _sparse_semi_structured_linear(input: Tensor, weight: Tensor, meta: Tensor, *, bias: Optional[Tensor] = None, activation: Optional[str] = None, out_dtype: Optional[_dtype] = None) -> Tensor: ...
def _sparse_softmax_backward_data(grad_output: Tensor, output: Tensor, dim: _int, input: Tensor) -> Tensor: ...
def _sparse_sparse_matmul(input: Tensor, other: Tensor) -> Tensor: ...
@overload
def _sparse_sum(input: Tensor) -> Tensor: ...
@overload
def _sparse_sum(input: Tensor, *, dtype: _dtype) -> Tensor: ...
@overload
def _sparse_sum(input: Tensor, dim: Union[_int, _size]) -> Tensor: ...
@overload
def _sparse_sum(input: Tensor, dim: Union[_int, _size], *, dtype: _dtype) -> Tensor: ...
def _stack(tensors: Union[Tuple[Tensor, ...], List[Tensor]], dim: _int = 0, *, out: Optional[Tensor] = None) -> Tensor: ...
def _standard_gamma(input: Tensor, generator: Optional[Generator] = None) -> Tensor: ...
def _standard_gamma_grad(input: Tensor, output: Tensor) -> Tensor: ...
def _sync(t: Tensor) -> None: ...
@overload
def _test_autograd_multiple_dispatch(input: Tensor) -> Tensor: ...
@overload
def _test_autograd_multiple_dispatch(input: Tensor, b: _bool) -> Tensor: ...
def _test_autograd_multiple_dispatch_view(input: Tensor) -> Tensor: ...
def _test_autograd_multiple_dispatch_view_copy(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ...
def _test_check_tensor(input: Tensor) -> Tensor: ...
def _test_functorch_fallback(input: Tensor, other: Tensor) -> Tensor: ...
def _test_parallel_materialize(input: Tensor, num_parallel: _int, skip_first: _bool = False) -> Tensor: ...
def _test_serialization_subcmul(input: Tensor, other: Tensor, alpha: Union[Number, _complex] = 1) -> Tensor: ...
def _to_cpu(tensors: Union[Tuple[Tensor, ...], List[Tensor]]) -> Tuple[Tensor, ...]: ...
def _to_functional_tensor(t: Tensor) -> Tensor: ...
def _to_sparse_semi_structured(dense: Tensor) -> Tuple[Tensor, Tensor]: ...
def _transform_bias_rescale_qkv(qkv: Tensor, qkv_bias: Tensor, num_heads: _int) -> Tuple[Tensor, Tensor, Tensor]: ...
def _transformer_encoder_layer_fwd(src: Tensor, embed_dim: _int, num_heads: _int, qkv_weight: Tensor, qkv_bias: Tensor, proj_weight: Tensor, proj_bias: Tensor, use_gelu: _bool, norm_first: _bool, eps: _float, norm_weight_1: Tensor, norm_bias_1: Tensor, norm_weight_2: Tensor, norm_bias_2: Tensor, ffn_weight_1: Tensor, ffn_bias_1: Tensor, ffn_weight_2: Tensor, ffn_bias_2: Tensor, mask: Optional[Tensor] = None, mask_type: Optional[_int] = None) -> Tensor: ...
def _trilinear(i1: Tensor, i2: Tensor, i3: Tensor, expand1: _size, expand2: _size, expand3: _size, sumdim: _size, unroll_dim: _int = 1) -> Tensor: ...
def _triton_multi_head_attention(query: Tensor, key: Tensor, value: Tensor, embed_dim: _int, num_head: _int, qkv_weight: Tensor, qkv_bias: Tensor, proj_weight: Tensor, proj_bias: Tensor, mask: Optional[Tensor] = None) -> Tensor: ...
def _triton_scaled_dot_attention(q: Tensor, k: Tensor, v: Tensor, dropout_p: _float = 0.0) -> Tensor: ...
def _unique(input: Tensor, sorted: _bool = True, return_inverse: _bool = False) -> Tuple[Tensor, Tensor]: ...
def _unique2(input: Tensor, sorted: _bool = True, return_inverse: _bool = False, return_counts: _bool = False) -> Tuple[Tensor, Tensor, Tensor]: ...
def _unpack_dual(dual: Tensor, level: _int) -> torch.return_types._unpack_dual: ...
def _unsafe_index(input: Tensor, indices: Optional[Union[Tuple[Tensor, ...], List[Tensor]]]) -> Tensor: ...
def _unsafe_index_put(input: Tensor, indices: Optional[Union[Tuple[Tensor, ...], List[Tensor]]], values: Tensor, accumulate: _bool = False) -> Tensor: ...
@overload
def _use_cudnn_ctc_loss(log_probs: Tensor, targets: Tensor, input_lengths: Tensor, target_lengths: Tensor, blank: _int) -> _bool: ...
@overload
def _use_cudnn_ctc_loss(log_probs: Tensor, targets: Tensor, input_lengths: _size, target_lengths: _size, blank: _int) -> _bool: ...
def _use_cudnn_rnn_flatten_weight() -> _bool: ...
def _validate_compressed_sparse_indices(is_crow: _bool, compressed_idx: Tensor, plain_idx: Tensor, cdim: _int, dim: _int, nnz: _int) -> None: ...
def _validate_sparse_bsc_tensor_args(ccol_indices: Tensor, row_indices: Tensor, values: Tensor, size: _size) -> None: ...
def _validate_sparse_bsr_tensor_args(crow_indices: Tensor, col_indices: Tensor, values: Tensor, size: _size) -> None: ...
def _validate_sparse_compressed_tensor_args(compressed_indices: Tensor, plain_indices: Tensor, values: Tensor, size: _size, layout: _layout) -> None: ...
def _validate_sparse_coo_tensor_args(indices: Tensor, values: Tensor, size: _size, is_coalesced: Optional[_bool] = None) -> None: ...
def _validate_sparse_csc_tensor_args(ccol_indices: Tensor, row_indices: Tensor, values: Tensor, size: _size) -> None: ...
def _validate_sparse_csr_tensor_args(crow_indices: Tensor, col_indices: Tensor, values: Tensor, size: _size) -> None: ...
def _values_copy(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ...
def _weight_int4pack_mm(input: Tensor, mat2: Tensor, qGroupSize: _int, qScaleAndZeros: Tensor) -> Tensor: ...
def _weight_int8pack_mm(input: Tensor, mat2: Tensor, scales: Tensor) -> Tensor: ...
def _weight_norm(v: Tensor, g: Tensor, dim: _int = 0) -> Tensor: ...
def _weight_norm_interface(v: Tensor, g: Tensor, dim: _int = 0) -> Tuple[Tensor, Tensor]: ...
def abs(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
r"""
abs(input, *, out=None) -> Tensor
Computes the absolute value of each element in :attr:`input`.
.. math::
\text{out}_{i} = |\text{input}_{i}|
Args:
input (Tensor): the input tensor.
Keyword args:
out (Tensor, optional): the output tensor.
Example::
>>> torch.abs(torch.tensor([-1, -2, 3]))
tensor([ 1, 2, 3])
"""
...
def abs_(input: Tensor) -> Tensor: ...
def absolute(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
r"""
absolute(input, *, out=None) -> Tensor
Alias for :func:`torch.abs`
"""
...
def acos(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
r"""
acos(input, *, out=None) -> Tensor
Computes the inverse cosine of each element in :attr:`input`.
.. math::
\text{out}_{i} = \cos^{-1}(\text{input}_{i})
Args:
input (Tensor): the input tensor.
Keyword args:
out (Tensor, optional): the output tensor.
Example::
>>> a = torch.randn(4)
>>> a
tensor([ 0.3348, -0.5889, 0.2005, -0.1584])
>>> torch.acos(a)
tensor([ 1.2294, 2.2004, 1.3690, 1.7298])
"""
...
def acos_(input: Tensor) -> Tensor: ...
def acosh(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
r"""
acosh(input, *, out=None) -> Tensor
Returns a new tensor with the inverse hyperbolic cosine of the elements of :attr:`input`.
.. math::
\text{out}_{i} = \cosh^{-1}(\text{input}_{i})
Note:
The domain of the inverse hyperbolic cosine is `[1, inf)` and values outside this range
will be mapped to ``NaN``, except for `+ INF` for which the output is mapped to `+ INF`.
Args:
input (Tensor): the input tensor.
Keyword arguments:
out (Tensor, optional): the output tensor.
Example::
>>> a = torch.randn(4).uniform_(1, 2)
>>> a
tensor([ 1.3192, 1.9915, 1.9674, 1.7151 ])
>>> torch.acosh(a)
tensor([ 0.7791, 1.3120, 1.2979, 1.1341 ])
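As a quick sketch of the domain note above, an input below ``1`` maps to ``NaN``::
>>> torch.acosh(torch.tensor([0.5]))
tensor([nan])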
"""
...
def acosh_(input: Tensor) -> Tensor: ...
def adaptive_avg_pool1d(input: Tensor, output_size: Union[_int, _size]) -> Tensor: ...
def adaptive_max_pool1d(input: Tensor, output_size: Union[_int, _size]) -> Tuple[Tensor, Tensor]: ...
@overload
def add(input: Union[Tensor, Number, _complex], other: Union[Tensor, Number, _complex], *, alpha: Optional[Union[Number, _complex]] = 1, out: Optional[Tensor] = None) -> Tensor:
r"""
add(input, other, *, alpha=1, out=None) -> Tensor
Adds :attr:`other`, scaled by :attr:`alpha`, to :attr:`input`.
.. math::
\text{out}_i = \text{input}_i + \text{alpha} \times \text{other}_i
Supports :ref:`broadcasting to a common shape <broadcasting-semantics>`,
:ref:`type promotion <type-promotion-doc>`, and integer, float, and complex inputs.
Args:
input (Tensor): the input tensor.
other (Tensor or Number): the tensor or number to add to :attr:`input`.
Keyword arguments:
alpha (Number): the multiplier for :attr:`other`.
out (Tensor, optional): the output tensor.
Examples::
>>> a = torch.randn(4)
>>> a
tensor([ 0.0202, 1.0985, 1.3506, -0.6056])
>>> torch.add(a, 20)
tensor([ 20.0202, 21.0985, 21.3506, 19.3944])
>>> b = torch.randn(4)
>>> b
tensor([-0.9732, -0.3497, 0.6245, 0.4022])
>>> c = torch.randn(4, 1)
>>> c
tensor([[ 0.3743],
[-1.7724],
[-0.5811],
[-0.8017]])
>>> torch.add(b, c, alpha=10)
tensor([[ 2.7695, 3.3930, 4.3672, 4.1450],
[-18.6971, -18.0736, -17.0994, -17.3216],
[ -6.7845, -6.1610, -5.1868, -5.4090],
[ -8.9902, -8.3667, -7.3925, -7.6147]])
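As a sketch of the formula above (assuming float inputs), the broadcasted call is equivalent to plain arithmetic::
>>> torch.allclose(torch.add(b, c, alpha=10), b + 10 * c)
True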
"""
...
@overload
def add(self: Tensor, alpha: Union[Number, _complex], other: Tensor) -> Tensor:
r"""
add(input, other, *, alpha=1, out=None) -> Tensor
Adds :attr:`other`, scaled by :attr:`alpha`, to :attr:`input`.
.. math::
\text{out}_i = \text{input}_i + \text{alpha} \times \text{other}_i
Supports :ref:`broadcasting to a common shape <broadcasting-semantics>`,
:ref:`type promotion <type-promotion-doc>`, and integer, float, and complex inputs.
Args:
input (Tensor): the input tensor.
other (Tensor or Number): the tensor or number to add to :attr:`input`.
Keyword arguments:
alpha (Number): the multiplier for :attr:`other`.
out (Tensor, optional): the output tensor.
Examples::
>>> a = torch.randn(4)
>>> a
tensor([ 0.0202, 1.0985, 1.3506, -0.6056])
>>> torch.add(a, 20)
tensor([ 20.0202, 21.0985, 21.3506, 19.3944])
>>> b = torch.randn(4)
>>> b
tensor([-0.9732, -0.3497, 0.6245, 0.4022])
>>> c = torch.randn(4, 1)
>>> c
tensor([[ 0.3743],
[-1.7724],
[-0.5811],
[-0.8017]])
>>> torch.add(b, c, alpha=10)
tensor([[ 2.7695, 3.3930, 4.3672, 4.1450],
[-18.6971, -18.0736, -17.0994, -17.3216],
[ -6.7845, -6.1610, -5.1868, -5.4090],
[ -8.9902, -8.3667, -7.3925, -7.6147]])
"""
...
@overload
def add(self: Tensor, alpha: Union[Number, _complex], other: Tensor, *, out: Tensor) -> Tensor:
r"""
add(input, other, *, alpha=1, out=None) -> Tensor
Adds :attr:`other`, scaled by :attr:`alpha`, to :attr:`input`.
.. math::
\text{out}_i = \text{input}_i + \text{alpha} \times \text{other}_i
Supports :ref:`broadcasting to a common shape <broadcasting-semantics>`,
:ref:`type promotion <type-promotion-doc>`, and integer, float, and complex inputs.
Args:
input (Tensor): the input tensor.
other (Tensor or Number): the tensor or number to add to :attr:`input`.
Keyword arguments:
alpha (Number): the multiplier for :attr:`other`.
out (Tensor, optional): the output tensor.
Examples::
>>> a = torch.randn(4)
>>> a
tensor([ 0.0202, 1.0985, 1.3506, -0.6056])
>>> torch.add(a, 20)
tensor([ 20.0202, 21.0985, 21.3506, 19.3944])
>>> b = torch.randn(4)
>>> b
tensor([-0.9732, -0.3497, 0.6245, 0.4022])
>>> c = torch.randn(4, 1)
>>> c
tensor([[ 0.3743],
[-1.7724],
[-0.5811],
[-0.8017]])
>>> torch.add(b, c, alpha=10)
tensor([[ 2.7695, 3.3930, 4.3672, 4.1450],
[-18.6971, -18.0736, -17.0994, -17.3216],
[ -6.7845, -6.1610, -5.1868, -5.4090],
[ -8.9902, -8.3667, -7.3925, -7.6147]])
"""
...
@overload
def addbmm(beta: Union[Number, _complex], self: Tensor, alpha: Union[Number, _complex], batch1: Tensor, batch2: Tensor) -> Tensor:
r"""
addbmm(input, batch1, batch2, *, beta=1, alpha=1, out=None) -> Tensor
Performs a batch matrix-matrix product of matrices stored
in :attr:`batch1` and :attr:`batch2`,
with a reduced add step (all matrix multiplications get accumulated
along the first dimension).
:attr:`input` is added to the final result.
:attr:`batch1` and :attr:`batch2` must be 3-D tensors each containing the
same number of matrices.
If :attr:`batch1` is a :math:`(b \times n \times m)` tensor, :attr:`batch2` is a
:math:`(b \times m \times p)` tensor, :attr:`input` must be
:ref:`broadcastable <broadcasting-semantics>` with a :math:`(n \times p)` tensor
and :attr:`out` will be a :math:`(n \times p)` tensor.
.. math::
out = \beta\ \text{input} + \alpha\ (\sum_{i=0}^{b-1} \text{batch1}_i \mathbin{@} \text{batch2}_i)
If :attr:`beta` is 0, then :attr:`input` will be ignored, and `nan` and `inf` in
it will not be propagated.
For inputs of type `FloatTensor` or `DoubleTensor`, arguments :attr:`beta` and :attr:`alpha`
must be real numbers, otherwise they should be integers.
This operator supports :ref:`TensorFloat32<tf32_on_ampere>`.
On certain ROCm devices, when using float16 inputs this module will use :ref:`different precision<fp16_on_mi200>` for backward.
Args:
input (Tensor): matrix to be added
batch1 (Tensor): the first batch of matrices to be multiplied
batch2 (Tensor): the second batch of matrices to be multiplied
Keyword args:
beta (Number, optional): multiplier for :attr:`input` (:math:`\beta`)
alpha (Number, optional): multiplier for `batch1 @ batch2` (:math:`\alpha`)
out (Tensor, optional): the output tensor.
Example::
>>> M = torch.randn(3, 5)
>>> batch1 = torch.randn(10, 3, 4)
>>> batch2 = torch.randn(10, 4, 5)
>>> torch.addbmm(M, batch1, batch2)
tensor([[ 6.6311, 0.0503, 6.9768, -12.0362, -2.1653],
[ -4.8185, -1.4255, -6.6760, 8.9453, 2.5743],
[ -3.8202, 4.3691, 1.0943, -1.1109, 5.4730]])
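As a sketch of the reduced add step described above (assuming the default ``beta`` and ``alpha``), the result matches a batched matmul summed over the batch dimension::
>>> torch.allclose(torch.addbmm(M, batch1, batch2), M + (batch1 @ batch2).sum(dim=0))
True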
"""
...
@overload
def addbmm(beta: Union[Number, _complex], self: Tensor, alpha: Union[Number, _complex], batch1: Tensor, batch2: Tensor, *, out: Tensor) -> Tensor:
r"""
addbmm(input, batch1, batch2, *, beta=1, alpha=1, out=None) -> Tensor
Performs a batch matrix-matrix product of matrices stored
in :attr:`batch1` and :attr:`batch2`,
with a reduced add step (all matrix multiplications get accumulated
along the first dimension).
:attr:`input` is added to the final result.
:attr:`batch1` and :attr:`batch2` must be 3-D tensors each containing the
same number of matrices.
If :attr:`batch1` is a :math:`(b \times n \times m)` tensor, :attr:`batch2` is a
:math:`(b \times m \times p)` tensor, :attr:`input` must be
:ref:`broadcastable <broadcasting-semantics>` with a :math:`(n \times p)` tensor
and :attr:`out` will be a :math:`(n \times p)` tensor.
.. math::
out = \beta\ \text{input} + \alpha\ (\sum_{i=0}^{b-1} \text{batch1}_i \mathbin{@} \text{batch2}_i)
If :attr:`beta` is 0, then :attr:`input` will be ignored, and `nan` and `inf` in
it will not be propagated.
For inputs of type `FloatTensor` or `DoubleTensor`, arguments :attr:`beta` and :attr:`alpha`
must be real numbers, otherwise they should be integers.
This operator supports :ref:`TensorFloat32<tf32_on_ampere>`.
On certain ROCm devices, when using float16 inputs this module will use :ref:`different precision<fp16_on_mi200>` for backward.
Args:
input (Tensor): matrix to be added
batch1 (Tensor): the first batch of matrices to be multiplied
batch2 (Tensor): the second batch of matrices to be multiplied
Keyword args:
beta (Number, optional): multiplier for :attr:`input` (:math:`\beta`)
alpha (Number, optional): multiplier for `batch1 @ batch2` (:math:`\alpha`)
out (Tensor, optional): the output tensor.
Example::
>>> M = torch.randn(3, 5)
>>> batch1 = torch.randn(10, 3, 4)
>>> batch2 = torch.randn(10, 4, 5)
>>> torch.addbmm(M, batch1, batch2)
tensor([[ 6.6311, 0.0503, 6.9768, -12.0362, -2.1653],
[ -4.8185, -1.4255, -6.6760, 8.9453, 2.5743],
[ -3.8202, 4.3691, 1.0943, -1.1109, 5.4730]])
"""
...
@overload
def addbmm(input: Tensor, batch1: Tensor, batch2: Tensor, *, beta: Union[Number, _complex] = 1, alpha: Union[Number, _complex] = 1, out: Optional[Tensor] = None) -> Tensor:
r"""
addbmm(input, batch1, batch2, *, beta=1, alpha=1, out=None) -> Tensor
Performs a batch matrix-matrix product of matrices stored
in :attr:`batch1` and :attr:`batch2`,
with a reduced add step (all matrix multiplications get accumulated
along the first dimension).
:attr:`input` is added to the final result.
:attr:`batch1` and :attr:`batch2` must be 3-D tensors each containing the
same number of matrices.
If :attr:`batch1` is a :math:`(b \times n \times m)` tensor, :attr:`batch2` is a
:math:`(b \times m \times p)` tensor, :attr:`input` must be
:ref:`broadcastable <broadcasting-semantics>` with a :math:`(n \times p)` tensor
and :attr:`out` will be a :math:`(n \times p)` tensor.
.. math::
out = \beta\ \text{input} + \alpha\ (\sum_{i=0}^{b-1} \text{batch1}_i \mathbin{@} \text{batch2}_i)
If :attr:`beta` is 0, then :attr:`input` will be ignored, and `nan` and `inf` in
it will not be propagated.
For inputs of type `FloatTensor` or `DoubleTensor`, arguments :attr:`beta` and :attr:`alpha`
must be real numbers, otherwise they should be integers.
This operator supports :ref:`TensorFloat32<tf32_on_ampere>`.
On certain ROCm devices, when using float16 inputs this module will use :ref:`different precision<fp16_on_mi200>` for backward.
Args:
input (Tensor): matrix to be added
batch1 (Tensor): the first batch of matrices to be multiplied
batch2 (Tensor): the second batch of matrices to be multiplied
Keyword args:
beta (Number, optional): multiplier for :attr:`input` (:math:`\beta`)
alpha (Number, optional): multiplier for `batch1 @ batch2` (:math:`\alpha`)
out (Tensor, optional): the output tensor.
Example::
>>> M = torch.randn(3, 5)
>>> batch1 = torch.randn(10, 3, 4)
>>> batch2 = torch.randn(10, 4, 5)
>>> torch.addbmm(M, batch1, batch2)
tensor([[ 6.6311, 0.0503, 6.9768, -12.0362, -2.1653],
[ -4.8185, -1.4255, -6.6760, 8.9453, 2.5743],
[ -3.8202, 4.3691, 1.0943, -1.1109, 5.4730]])
"""
...
@overload
def addbmm(beta: Union[Number, _complex], self: Tensor, batch1: Tensor, batch2: Tensor) -> Tensor:
r"""
addbmm(input, batch1, batch2, *, beta=1, alpha=1, out=None) -> Tensor
Performs a batch matrix-matrix product of matrices stored
in :attr:`batch1` and :attr:`batch2`,
with a reduced add step (all matrix multiplications get accumulated
along the first dimension).
:attr:`input` is added to the final result.
:attr:`batch1` and :attr:`batch2` must be 3-D tensors each containing the
same number of matrices.
If :attr:`batch1` is a :math:`(b \times n \times m)` tensor, :attr:`batch2` is a
:math:`(b \times m \times p)` tensor, :attr:`input` must be
:ref:`broadcastable <broadcasting-semantics>` with a :math:`(n \times p)` tensor
and :attr:`out` will be a :math:`(n \times p)` tensor.
.. math::
out = \beta\ \text{input} + \alpha\ (\sum_{i=0}^{b-1} \text{batch1}_i \mathbin{@} \text{batch2}_i)
If :attr:`beta` is 0, then :attr:`input` will be ignored, and `nan` and `inf` in
it will not be propagated.
For inputs of type `FloatTensor` or `DoubleTensor`, arguments :attr:`beta` and :attr:`alpha`
must be real numbers, otherwise they should be integers.
This operator supports :ref:`TensorFloat32<tf32_on_ampere>`.
On certain ROCm devices, when using float16 inputs this module will use :ref:`different precision<fp16_on_mi200>` for backward.
Args:
input (Tensor): matrix to be added
batch1 (Tensor): the first batch of matrices to be multiplied
batch2 (Tensor): the second batch of matrices to be multiplied
Keyword args:
beta (Number, optional): multiplier for :attr:`input` (:math:`\beta`)
alpha (Number, optional): multiplier for `batch1 @ batch2` (:math:`\alpha`)
out (Tensor, optional): the output tensor.
Example::
>>> M = torch.randn(3, 5)
>>> batch1 = torch.randn(10, 3, 4)
>>> batch2 = torch.randn(10, 4, 5)
>>> torch.addbmm(M, batch1, batch2)
tensor([[ 6.6311, 0.0503, 6.9768, -12.0362, -2.1653],
[ -4.8185, -1.4255, -6.6760, 8.9453, 2.5743],
[ -3.8202, 4.3691, 1.0943, -1.1109, 5.4730]])
"""
...
@overload
def addbmm(beta: Union[Number, _complex], self: Tensor, batch1: Tensor, batch2: Tensor, *, out: Tensor) -> Tensor:
r"""
addbmm(input, batch1, batch2, *, beta=1, alpha=1, out=None) -> Tensor
Performs a batch matrix-matrix product of matrices stored
in :attr:`batch1` and :attr:`batch2`,
with a reduced add step (all matrix multiplications get accumulated
along the first dimension).
:attr:`input` is added to the final result.
:attr:`batch1` and :attr:`batch2` must be 3-D tensors each containing the
same number of matrices.
If :attr:`batch1` is a :math:`(b \times n \times m)` tensor, :attr:`batch2` is a
:math:`(b \times m \times p)` tensor, :attr:`input` must be
:ref:`broadcastable <broadcasting-semantics>` with a :math:`(n \times p)` tensor
and :attr:`out` will be a :math:`(n \times p)` tensor.
.. math::
out = \beta\ \text{input} + \alpha\ (\sum_{i=0}^{b-1} \text{batch1}_i \mathbin{@} \text{batch2}_i)
If :attr:`beta` is 0, then :attr:`input` will be ignored, and `nan` and `inf` in
it will not be propagated.
For inputs of type `FloatTensor` or `DoubleTensor`, arguments :attr:`beta` and :attr:`alpha`
must be real numbers, otherwise they should be integers.
This operator supports :ref:`TensorFloat32<tf32_on_ampere>`.
On certain ROCm devices, when using float16 inputs this module will use :ref:`different precision<fp16_on_mi200>` for backward.
Args:
input (Tensor): matrix to be added
batch1 (Tensor): the first batch of matrices to be multiplied
batch2 (Tensor): the second batch of matrices to be multiplied
Keyword args:
beta (Number, optional): multiplier for :attr:`input` (:math:`\beta`)
alpha (Number, optional): multiplier for `batch1 @ batch2` (:math:`\alpha`)
out (Tensor, optional): the output tensor.
Example::
>>> M = torch.randn(3, 5)
>>> batch1 = torch.randn(10, 3, 4)
>>> batch2 = torch.randn(10, 4, 5)
>>> torch.addbmm(M, batch1, batch2)
tensor([[ 6.6311, 0.0503, 6.9768, -12.0362, -2.1653],
[ -4.8185, -1.4255, -6.6760, 8.9453, 2.5743],
[ -3.8202, 4.3691, 1.0943, -1.1109, 5.4730]])
"""
...
@overload
def addcdiv(self: Tensor, value: Union[Number, _complex], tensor1: Tensor, tensor2: Tensor) -> Tensor:
r"""
addcdiv(input, tensor1, tensor2, *, value=1, out=None) -> Tensor
Performs the element-wise division of :attr:`tensor1` by :attr:`tensor2`,
multiplies the result by the scalar :attr:`value` and adds it to :attr:`input`.
.. warning::
Integer division with addcdiv is no longer supported, and in a future
release addcdiv will perform a true division of tensor1 and tensor2.
The historic addcdiv behavior can be implemented as
(input + value * torch.trunc(tensor1 / tensor2)).to(input.dtype)
for integer inputs and as (input + value * tensor1 / tensor2) for float inputs.
The future addcdiv behavior is just the latter implementation:
(input + value * tensor1 / tensor2), for all dtypes.
.. math::
\text{out}_i = \text{input}_i + \text{value} \times \frac{\text{tensor1}_i}{\text{tensor2}_i}
The shapes of :attr:`input`, :attr:`tensor1`, and :attr:`tensor2` must be
:ref:`broadcastable <broadcasting-semantics>`.
For inputs of type `FloatTensor` or `DoubleTensor`, :attr:`value` must be
a real number, otherwise an integer.
Args:
input (Tensor): the tensor to be added
tensor1 (Tensor): the numerator tensor
tensor2 (Tensor): the denominator tensor
Keyword args:
value (Number, optional): multiplier for :math:`\text{tensor1} / \text{tensor2}`
out (Tensor, optional): the output tensor.
Example::
>>> t = torch.randn(1, 3)
>>> t1 = torch.randn(3, 1)
>>> t2 = torch.randn(1, 3)
>>> torch.addcdiv(t, t1, t2, value=0.1)
tensor([[-0.2312, -3.6496, 0.1312],
[-1.0428, 3.4292, -0.1030],
[-0.5369, -0.9829, 0.0430]])
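As a sketch of the float-input formula from the warning above::
>>> torch.allclose(torch.addcdiv(t, t1, t2, value=0.1), t + 0.1 * t1 / t2)
True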
"""
...
@overload
def addcdiv(self: Tensor, value: Union[Number, _complex], tensor1: Tensor, tensor2: Tensor, *, out: Tensor) -> Tensor:
r"""
addcdiv(input, tensor1, tensor2, *, value=1, out=None) -> Tensor
Performs the element-wise division of :attr:`tensor1` by :attr:`tensor2`,
multiplies the result by the scalar :attr:`value` and adds it to :attr:`input`.
.. warning::
Integer division with addcdiv is no longer supported, and in a future
release addcdiv will perform a true division of tensor1 and tensor2.
The historic addcdiv behavior can be implemented as
(input + value * torch.trunc(tensor1 / tensor2)).to(input.dtype)
for integer inputs and as (input + value * tensor1 / tensor2) for float inputs.
The future addcdiv behavior is just the latter implementation:
(input + value * tensor1 / tensor2), for all dtypes.
.. math::
\text{out}_i = \text{input}_i + \text{value} \times \frac{\text{tensor1}_i}{\text{tensor2}_i}
The shapes of :attr:`input`, :attr:`tensor1`, and :attr:`tensor2` must be
:ref:`broadcastable <broadcasting-semantics>`.
For inputs of type `FloatTensor` or `DoubleTensor`, :attr:`value` must be
a real number, otherwise an integer.
Args:
input (Tensor): the tensor to be added
tensor1 (Tensor): the numerator tensor
tensor2 (Tensor): the denominator tensor
Keyword args:
value (Number, optional): multiplier for :math:`\text{tensor1} / \text{tensor2}`
out (Tensor, optional): the output tensor.
Example::
>>> t = torch.randn(1, 3)
>>> t1 = torch.randn(3, 1)
>>> t2 = torch.randn(1, 3)
>>> torch.addcdiv(t, t1, t2, value=0.1)
tensor([[-0.2312, -3.6496, 0.1312],
[-1.0428, 3.4292, -0.1030],
[-0.5369, -0.9829, 0.0430]])
"""
...
@overload
def addcdiv(input: Tensor, tensor1: Tensor, tensor2: Tensor, *, value: Union[Number, _complex] = 1, out: Optional[Tensor] = None) -> Tensor:
r"""
addcdiv(input, tensor1, tensor2, *, value=1, out=None) -> Tensor
Performs the element-wise division of :attr:`tensor1` by :attr:`tensor2`,
multiplies the result by the scalar :attr:`value` and adds it to :attr:`input`.
.. warning::
Integer division with addcdiv is no longer supported, and in a future
release addcdiv will perform a true division of tensor1 and tensor2.
The historic addcdiv behavior can be implemented as
(input + value * torch.trunc(tensor1 / tensor2)).to(input.dtype)
for integer inputs and as (input + value * tensor1 / tensor2) for float inputs.
The future addcdiv behavior is just the latter implementation:
(input + value * tensor1 / tensor2), for all dtypes.
.. math::
\text{out}_i = \text{input}_i + \text{value} \times \frac{\text{tensor1}_i}{\text{tensor2}_i}
The shapes of :attr:`input`, :attr:`tensor1`, and :attr:`tensor2` must be
:ref:`broadcastable <broadcasting-semantics>`.
For inputs of type `FloatTensor` or `DoubleTensor`, :attr:`value` must be
a real number, otherwise an integer.
Args:
input (Tensor): the tensor to be added
tensor1 (Tensor): the numerator tensor
tensor2 (Tensor): the denominator tensor
Keyword args:
value (Number, optional): multiplier for :math:`\text{tensor1} / \text{tensor2}`
out (Tensor, optional): the output tensor.
Example::
>>> t = torch.randn(1, 3)
>>> t1 = torch.randn(3, 1)
>>> t2 = torch.randn(1, 3)
>>> torch.addcdiv(t, t1, t2, value=0.1)
tensor([[-0.2312, -3.6496, 0.1312],
[-1.0428, 3.4292, -0.1030],
[-0.5369, -0.9829, 0.0430]])
"""
...
@overload
def addcmul(self: Tensor, value: Union[Number, _complex], tensor1: Tensor, tensor2: Tensor) -> Tensor:
r"""
addcmul(input, tensor1, tensor2, *, value=1, out=None) -> Tensor
Performs the element-wise multiplication of :attr:`tensor1`
by :attr:`tensor2`, multiplies the result by the scalar :attr:`value`
and adds it to :attr:`input`.
.. math::
\text{out}_i = \text{input}_i + \text{value} \times \text{tensor1}_i \times \text{tensor2}_i
The shapes of :attr:`input`, :attr:`tensor1`, and :attr:`tensor2` must be
:ref:`broadcastable <broadcasting-semantics>`.
For inputs of type `FloatTensor` or `DoubleTensor`, :attr:`value` must be
a real number, otherwise an integer.
Args:
input (Tensor): the tensor to be added
tensor1 (Tensor): the tensor to be multiplied
tensor2 (Tensor): the tensor to be multiplied
Keyword args:
value (Number, optional): multiplier for :math:`tensor1 .* tensor2`
out (Tensor, optional): the output tensor.
Example::
>>> t = torch.randn(1, 3)
>>> t1 = torch.randn(3, 1)
>>> t2 = torch.randn(1, 3)
>>> torch.addcmul(t, t1, t2, value=0.1)
tensor([[-0.8635, -0.6391, 1.6174],
[-0.7617, -0.5879, 1.7388],
[-0.8353, -0.6249, 1.6511]])
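Equivalently, as a sketch of the formula above for these float inputs::
>>> torch.allclose(torch.addcmul(t, t1, t2, value=0.1), t + 0.1 * t1 * t2)
True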
"""
...
@overload
def addcmul(self: Tensor, value: Union[Number, _complex], tensor1: Tensor, tensor2: Tensor, *, out: Tensor) -> Tensor:
r"""
addcmul(input, tensor1, tensor2, *, value=1, out=None) -> Tensor
Performs the element-wise multiplication of :attr:`tensor1`
by :attr:`tensor2`, multiplies the result by the scalar :attr:`value`
and adds it to :attr:`input`.
.. math::
\text{out}_i = \text{input}_i + \text{value} \times \text{tensor1}_i \times \text{tensor2}_i
The shapes of :attr:`input`, :attr:`tensor1`, and :attr:`tensor2` must be
:ref:`broadcastable <broadcasting-semantics>`.
For inputs of type `FloatTensor` or `DoubleTensor`, :attr:`value` must be
a real number, otherwise an integer.
Args:
input (Tensor): the tensor to be added
tensor1 (Tensor): the tensor to be multiplied
tensor2 (Tensor): the tensor to be multiplied
Keyword args:
value (Number, optional): multiplier for :math:`tensor1 .* tensor2`
out (Tensor, optional): the output tensor.
Example::
>>> t = torch.randn(1, 3)
>>> t1 = torch.randn(3, 1)
>>> t2 = torch.randn(1, 3)
>>> torch.addcmul(t, t1, t2, value=0.1)
tensor([[-0.8635, -0.6391, 1.6174],
[-0.7617, -0.5879, 1.7388],
[-0.8353, -0.6249, 1.6511]])
"""
...
@overload
def addcmul(input: Tensor, tensor1: Tensor, tensor2: Tensor, *, value: Union[Number, _complex] = 1, out: Optional[Tensor] = None) -> Tensor:
r"""
addcmul(input, tensor1, tensor2, *, value=1, out=None) -> Tensor
Performs the element-wise multiplication of :attr:`tensor1`
by :attr:`tensor2`, multiplies the result by the scalar :attr:`value`
and adds it to :attr:`input`.
.. math::
\text{out}_i = \text{input}_i + \text{value} \times \text{tensor1}_i \times \text{tensor2}_i
The shapes of :attr:`input`, :attr:`tensor1`, and :attr:`tensor2` must be
:ref:`broadcastable <broadcasting-semantics>`.
For inputs of type `FloatTensor` or `DoubleTensor`, :attr:`value` must be
a real number, otherwise an integer.
Args:
input (Tensor): the tensor to be added
tensor1 (Tensor): the tensor to be multiplied
tensor2 (Tensor): the tensor to be multiplied
Keyword args:
value (Number, optional): multiplier for :math:`tensor1 .* tensor2`
out (Tensor, optional): the output tensor.
Example::
>>> t = torch.randn(1, 3)
>>> t1 = torch.randn(3, 1)
>>> t2 = torch.randn(1, 3)
>>> torch.addcmul(t, t1, t2, value=0.1)
tensor([[-0.8635, -0.6391, 1.6174],
[-0.7617, -0.5879, 1.7388],
[-0.8353, -0.6249, 1.6511]])
"""
...
@overload
def addmm(beta: Union[Number, _complex], self: Tensor, alpha: Union[Number, _complex], mat1: Tensor, mat2: Tensor) -> Tensor:
r"""
addmm(input, mat1, mat2, *, beta=1, alpha=1, out=None) -> Tensor
Performs a matrix multiplication of the matrices :attr:`mat1` and :attr:`mat2`.
The matrix :attr:`input` is added to the final result.
If :attr:`mat1` is a :math:`(n \times m)` tensor, :attr:`mat2` is a
:math:`(m \times p)` tensor, then :attr:`input` must be
:ref:`broadcastable <broadcasting-semantics>` with a :math:`(n \times p)` tensor
and :attr:`out` will be a :math:`(n \times p)` tensor.
:attr:`alpha` and :attr:`beta` are scaling factors on the matrix-matrix product between
:attr:`mat1` and :attr:`mat2` and the added matrix :attr:`input` respectively.
.. math::
\text{out} = \beta\ \text{input} + \alpha\ (\text{mat1} \mathbin{@} \text{mat2})
If :attr:`beta` is 0, then :attr:`input` will be ignored, and `nan` and `inf` in
it will not be propagated.
For inputs of type `FloatTensor` or `DoubleTensor`, arguments :attr:`beta` and
:attr:`alpha` must be real numbers, otherwise they should be integers.
This operation has support for arguments with :ref:`sparse layouts<sparse-docs>`. If
:attr:`input` is sparse the result will have the same layout and if :attr:`out`
is provided it must have the same layout as :attr:`input`.
.. warning::
Sparse support is a beta feature and some layout(s)/dtype/device combinations may not be supported,
or may not have autograd support. If you notice missing functionality please
open a feature request.
This operator supports :ref:`TensorFloat32<tf32_on_ampere>`.
On certain ROCm devices, when using float16 inputs this module will use :ref:`different precision<fp16_on_mi200>` for backward.
Args:
input (Tensor): matrix to be added
mat1 (Tensor): the first matrix to be matrix multiplied
mat2 (Tensor): the second matrix to be matrix multiplied
Keyword args:
beta (Number, optional): multiplier for :attr:`input` (:math:`\beta`)
alpha (Number, optional): multiplier for :math:`mat1 @ mat2` (:math:`\alpha`)
out (Tensor, optional): the output tensor.
Example::
>>> M = torch.randn(2, 3)
>>> mat1 = torch.randn(2, 3)
>>> mat2 = torch.randn(3, 3)
>>> torch.addmm(M, mat1, mat2)
tensor([[-4.8716, 1.4671, -1.3746],
[ 0.7573, -3.9555, -2.8681]])
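As a sketch of the formula above with the default scaling factors::
>>> torch.allclose(torch.addmm(M, mat1, mat2), M + mat1 @ mat2)
True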
"""
...
@overload
def addmm(beta: Union[Number, _complex], self: Tensor, alpha: Union[Number, _complex], mat1: Tensor, mat2: Tensor, *, out: Tensor) -> Tensor:
r"""
addmm(input, mat1, mat2, *, beta=1, alpha=1, out=None) -> Tensor
Performs a matrix multiplication of the matrices :attr:`mat1` and :attr:`mat2`.
The matrix :attr:`input` is added to the final result.
If :attr:`mat1` is a :math:`(n \times m)` tensor, :attr:`mat2` is a
:math:`(m \times p)` tensor, then :attr:`input` must be
:ref:`broadcastable <broadcasting-semantics>` with a :math:`(n \times p)` tensor
and :attr:`out` will be a :math:`(n \times p)` tensor.
:attr:`alpha` and :attr:`beta` are scaling factors on the matrix-matrix product between
:attr:`mat1` and :attr:`mat2` and the added matrix :attr:`input` respectively.
.. math::
\text{out} = \beta\ \text{input} + \alpha\ (\text{mat1} \mathbin{@} \text{mat2})
If :attr:`beta` is 0, then :attr:`input` will be ignored, and `nan` and `inf` in
it will not be propagated.
For inputs of type `FloatTensor` or `DoubleTensor`, arguments :attr:`beta` and
:attr:`alpha` must be real numbers, otherwise they should be integers.
This operation has support for arguments with :ref:`sparse layouts<sparse-docs>`. If
:attr:`input` is sparse the result will have the same layout and if :attr:`out`
is provided it must have the same layout as :attr:`input`.
.. warning::
Sparse support is a beta feature and some layout(s)/dtype/device combinations may not be supported,
or may not have autograd support. If you notice missing functionality please
open a feature request.
This operator supports :ref:`TensorFloat32<tf32_on_ampere>`.
On certain ROCm devices, when using float16 inputs this module will use :ref:`different precision<fp16_on_mi200>` for backward.
Args:
input (Tensor): matrix to be added
mat1 (Tensor): the first matrix to be matrix multiplied
mat2 (Tensor): the second matrix to be matrix multiplied
Keyword args:
beta (Number, optional): multiplier for :attr:`input` (:math:`\beta`)
alpha (Number, optional): multiplier for :math:`mat1 @ mat2` (:math:`\alpha`)
out (Tensor, optional): the output tensor.
Example::
>>> M = torch.randn(2, 3)
>>> mat1 = torch.randn(2, 3)
>>> mat2 = torch.randn(3, 3)
>>> torch.addmm(M, mat1, mat2)
tensor([[-4.8716, 1.4671, -1.3746],
[ 0.7573, -3.9555, -2.8681]])
"""
...
@overload
def addmm(input: Tensor, mat1: Tensor, mat2: Tensor, *, beta: Union[Number, _complex] = 1, alpha: Union[Number, _complex] = 1, out: Optional[Tensor] = None) -> Tensor:
r"""
addmm(input, mat1, mat2, *, beta=1, alpha=1, out=None) -> Tensor
Performs a matrix multiplication of the matrices :attr:`mat1` and :attr:`mat2`.
The matrix :attr:`input` is added to the final result.
If :attr:`mat1` is a :math:`(n \times m)` tensor, :attr:`mat2` is a
:math:`(m \times p)` tensor, then :attr:`input` must be
:ref:`broadcastable <broadcasting-semantics>` with a :math:`(n \times p)` tensor
and :attr:`out` will be a :math:`(n \times p)` tensor.
:attr:`alpha` and :attr:`beta` are scaling factors on the matrix-matrix product between
:attr:`mat1` and :attr:`mat2` and the added matrix :attr:`input` respectively.
.. math::
\text{out} = \beta\ \text{input} + \alpha\ (\text{mat1} \mathbin{@} \text{mat2})
If :attr:`beta` is 0, then :attr:`input` will be ignored, and `nan` and `inf` in
it will not be propagated.
For inputs of type `FloatTensor` or `DoubleTensor`, arguments :attr:`beta` and
:attr:`alpha` must be real numbers, otherwise they should be integers.
This operation has support for arguments with :ref:`sparse layouts<sparse-docs>`. If
:attr:`input` is sparse the result will have the same layout and if :attr:`out`
is provided it must have the same layout as :attr:`input`.
.. warning::
Sparse support is a beta feature and some layout(s)/dtype/device combinations may not be supported,
or may not have autograd support. If you notice missing functionality please
open a feature request.
This operator supports :ref:`TensorFloat32<tf32_on_ampere>`.
On certain ROCm devices, when using float16 inputs this module will use :ref:`different precision<fp16_on_mi200>` for backward.
Args:
input (Tensor): matrix to be added
mat1 (Tensor): the first matrix to be matrix multiplied
mat2 (Tensor): the second matrix to be matrix multiplied
Keyword args:
beta (Number, optional): multiplier for :attr:`input` (:math:`\beta`)
alpha (Number, optional): multiplier for :math:`mat1 @ mat2` (:math:`\alpha`)
out (Tensor, optional): the output tensor.
Example::
>>> M = torch.randn(2, 3)
>>> mat1 = torch.randn(2, 3)
>>> mat2 = torch.randn(3, 3)
>>> torch.addmm(M, mat1, mat2)
tensor([[-4.8716, 1.4671, -1.3746],
[ 0.7573, -3.9555, -2.8681]])
"""
...
@overload
def addmm(beta: Union[Number, _complex], self: Tensor, mat1: Tensor, mat2: Tensor) -> Tensor:
r"""
addmm(input, mat1, mat2, *, beta=1, alpha=1, out=None) -> Tensor
Performs a matrix multiplication of the matrices :attr:`mat1` and :attr:`mat2`.
The matrix :attr:`input` is added to the final result.
If :attr:`mat1` is a :math:`(n \times m)` tensor, :attr:`mat2` is a
:math:`(m \times p)` tensor, then :attr:`input` must be
:ref:`broadcastable <broadcasting-semantics>` with a :math:`(n \times p)` tensor
and :attr:`out` will be a :math:`(n \times p)` tensor.
:attr:`alpha` and :attr:`beta` are scaling factors on the matrix-matrix product between
:attr:`mat1` and :attr:`mat2` and the added matrix :attr:`input` respectively.
.. math::
\text{out} = \beta\ \text{input} + \alpha\ (\text{mat1} \mathbin{@} \text{mat2})
If :attr:`beta` is 0, then :attr:`input` will be ignored, and `nan` and `inf` in
it will not be propagated.
For inputs of type `FloatTensor` or `DoubleTensor`, arguments :attr:`beta` and
:attr:`alpha` must be real numbers, otherwise they should be integers.
This operation has support for arguments with :ref:`sparse layouts<sparse-docs>`. If
:attr:`input` is sparse the result will have the same layout and if :attr:`out`
is provided it must have the same layout as :attr:`input`.
.. warning::
Sparse support is a beta feature and some layout(s)/dtype/device combinations may not be supported,
or may not have autograd support. If you notice missing functionality please
open a feature request.
This operator supports :ref:`TensorFloat32<tf32_on_ampere>`.
On certain ROCm devices, when using float16 inputs this module will use :ref:`different precision<fp16_on_mi200>` for backward.
Args:
input (Tensor): matrix to be added
mat1 (Tensor): the first matrix to be matrix multiplied
mat2 (Tensor): the second matrix to be matrix multiplied
Keyword args:
beta (Number, optional): multiplier for :attr:`input` (:math:`\beta`)
alpha (Number, optional): multiplier for :math:`mat1 @ mat2` (:math:`\alpha`)
out (Tensor, optional): the output tensor.
Example::
>>> M = torch.randn(2, 3)
>>> mat1 = torch.randn(2, 3)
>>> mat2 = torch.randn(3, 3)
>>> torch.addmm(M, mat1, mat2)
tensor([[-4.8716, 1.4671, -1.3746],
[ 0.7573, -3.9555, -2.8681]])
"""
...
@overload
def addmm(beta: Union[Number, _complex], self: Tensor, mat1: Tensor, mat2: Tensor, *, out: Tensor) -> Tensor:
r"""
addmm(input, mat1, mat2, *, beta=1, alpha=1, out=None) -> Tensor
Performs a matrix multiplication of the matrices :attr:`mat1` and :attr:`mat2`.
The matrix :attr:`input` is added to the final result.
If :attr:`mat1` is a :math:`(n \times m)` tensor, :attr:`mat2` is a
:math:`(m \times p)` tensor, then :attr:`input` must be
:ref:`broadcastable <broadcasting-semantics>` with a :math:`(n \times p)` tensor
and :attr:`out` will be a :math:`(n \times p)` tensor.
:attr:`alpha` and :attr:`beta` are scaling factors on the matrix-matrix product between
:attr:`mat1` and :attr:`mat2` and the added matrix :attr:`input` respectively.
.. math::
\text{out} = \beta\ \text{input} + \alpha\ (\text{mat1} \mathbin{@} \text{mat2})
If :attr:`beta` is 0, then :attr:`input` will be ignored, and `nan` and `inf` in
it will not be propagated.
For inputs of type `FloatTensor` or `DoubleTensor`, arguments :attr:`beta` and
:attr:`alpha` must be real numbers, otherwise they should be integers.
This operation has support for arguments with :ref:`sparse layouts<sparse-docs>`. If
:attr:`input` is sparse the result will have the same layout and if :attr:`out`
is provided it must have the same layout as :attr:`input`.
.. warning::
Sparse support is a beta feature and some layout(s)/dtype/device combinations may not be supported,
or may not have autograd support. If you notice missing functionality please
open a feature request.
This operator supports :ref:`TensorFloat32<tf32_on_ampere>`.
On certain ROCm devices, when using float16 inputs this module will use :ref:`different precision<fp16_on_mi200>` for backward.
Args:
input (Tensor): matrix to be added
mat1 (Tensor): the first matrix to be matrix multiplied
mat2 (Tensor): the second matrix to be matrix multiplied
Keyword args:
beta (Number, optional): multiplier for :attr:`input` (:math:`\beta`)
alpha (Number, optional): multiplier for :math:`mat1 @ mat2` (:math:`\alpha`)
out (Tensor, optional): the output tensor.
Example::
>>> M = torch.randn(2, 3)
>>> mat1 = torch.randn(2, 3)
>>> mat2 = torch.randn(3, 3)
>>> torch.addmm(M, mat1, mat2)
tensor([[-4.8716, 1.4671, -1.3746],
[ 0.7573, -3.9555, -2.8681]])
"""
...
@overload
def addmv(beta: Union[Number, _complex], self: Tensor, alpha: Union[Number, _complex], mat: Tensor, vec: Tensor) -> Tensor:
r"""
addmv(input, mat, vec, *, beta=1, alpha=1, out=None) -> Tensor
Performs a matrix-vector product of the matrix :attr:`mat` and
the vector :attr:`vec`.
The vector :attr:`input` is added to the final result.
If :attr:`mat` is a :math:`(n \times m)` tensor, :attr:`vec` is a 1-D tensor of
size `m`, then :attr:`input` must be
:ref:`broadcastable <broadcasting-semantics>` with a 1-D tensor of size `n` and
:attr:`out` will be a 1-D tensor of size `n`.
:attr:`alpha` and :attr:`beta` are scaling factors on matrix-vector product between
:attr:`mat` and :attr:`vec` and the added tensor :attr:`input` respectively.
.. math::
\text{out} = \beta\ \text{input} + \alpha\ (\text{mat} \mathbin{@} \text{vec})
If :attr:`beta` is 0, then :attr:`input` will be ignored, and `nan` and `inf` in
it will not be propagated.
For inputs of type `FloatTensor` or `DoubleTensor`, arguments :attr:`beta` and
:attr:`alpha` must be real numbers, otherwise they should be integers.
Args:
input (Tensor): vector to be added
mat (Tensor): matrix to be matrix multiplied
vec (Tensor): vector to be matrix multiplied
Keyword args:
beta (Number, optional): multiplier for :attr:`input` (:math:`\beta`)
alpha (Number, optional): multiplier for :math:`mat @ vec` (:math:`\alpha`)
out (Tensor, optional): the output tensor.
Example::
>>> M = torch.randn(2)
>>> mat = torch.randn(2, 3)
>>> vec = torch.randn(3)
>>> torch.addmv(M, mat, vec)
tensor([-0.3768, -5.5565])
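As a sketch of the formula above with the default ``beta`` and ``alpha``::
>>> torch.allclose(torch.addmv(M, mat, vec), M + mat @ vec)
True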
"""
...
@overload
def addmv(beta: Union[Number, _complex], self: Tensor, alpha: Union[Number, _complex], mat: Tensor, vec: Tensor, *, out: Tensor) -> Tensor:
r"""
addmv(input, mat, vec, *, beta=1, alpha=1, out=None) -> Tensor
Performs a matrix-vector product of the matrix :attr:`mat` and
the vector :attr:`vec`.
The vector :attr:`input` is added to the final result.
If :attr:`mat` is a :math:`(n \times m)` tensor, :attr:`vec` is a 1-D tensor of
size `m`, then :attr:`input` must be
:ref:`broadcastable <broadcasting-semantics>` with a 1-D tensor of size `n` and
:attr:`out` will be a 1-D tensor of size `n`.
:attr:`alpha` and :attr:`beta` are scaling factors on matrix-vector product between
:attr:`mat` and :attr:`vec` and the added tensor :attr:`input` respectively.
.. math::
\text{out} = \beta\ \text{input} + \alpha\ (\text{mat} \mathbin{@} \text{vec})
If :attr:`beta` is 0, then :attr:`input` will be ignored, and `nan` and `inf` in
it will not be propagated.
For inputs of type `FloatTensor` or `DoubleTensor`, arguments :attr:`beta` and
:attr:`alpha` must be real numbers, otherwise they should be integers.
Args:
input (Tensor): vector to be added
mat (Tensor): matrix to be matrix multiplied
vec (Tensor): vector to be matrix multiplied
Keyword args:
beta (Number, optional): multiplier for :attr:`input` (:math:`\beta`)
alpha (Number, optional): multiplier for :math:`mat @ vec` (:math:`\alpha`)
out (Tensor, optional): the output tensor.
Example::
>>> M = torch.randn(2)
>>> mat = torch.randn(2, 3)
>>> vec = torch.randn(3)
>>> torch.addmv(M, mat, vec)
tensor([-0.3768, -5.5565])
"""
...
@overload
def addmv(input: Tensor, mat: Tensor, vec: Tensor, *, beta: Union[Number, _complex] = 1, alpha: Union[Number, _complex] = 1, out: Optional[Tensor] = None) -> Tensor:
r"""
addmv(input, mat, vec, *, beta=1, alpha=1, out=None) -> Tensor
Performs a matrix-vector product of the matrix :attr:`mat` and
the vector :attr:`vec`.
The vector :attr:`input` is added to the final result.
If :attr:`mat` is a :math:`(n \times m)` tensor, :attr:`vec` is a 1-D tensor of
size `m`, then :attr:`input` must be
:ref:`broadcastable <broadcasting-semantics>` with a 1-D tensor of size `n` and
:attr:`out` will be a 1-D tensor of size `n`.
:attr:`alpha` and :attr:`beta` are scaling factors on matrix-vector product between
:attr:`mat` and :attr:`vec` and the added tensor :attr:`input` respectively.
.. math::
\text{out} = \beta\ \text{input} + \alpha\ (\text{mat} \mathbin{@} \text{vec})
If :attr:`beta` is 0, then :attr:`input` will be ignored, and `nan` and `inf` in
it will not be propagated.
For inputs of type `FloatTensor` or `DoubleTensor`, arguments :attr:`beta` and
:attr:`alpha` must be real numbers, otherwise they should be integers.
Args:
input (Tensor): vector to be added
mat (Tensor): matrix to be matrix multiplied
vec (Tensor): vector to be matrix multiplied
Keyword args:
beta (Number, optional): multiplier for :attr:`input` (:math:`\beta`)
alpha (Number, optional): multiplier for :math:`mat @ vec` (:math:`\alpha`)
out (Tensor, optional): the output tensor.
Example::
>>> M = torch.randn(2)
>>> mat = torch.randn(2, 3)
>>> vec = torch.randn(3)
>>> torch.addmv(M, mat, vec)
tensor([-0.3768, -5.5565])
"""
...
@overload
def addmv(beta: Union[Number, _complex], self: Tensor, mat: Tensor, vec: Tensor) -> Tensor:
r"""
addmv(input, mat, vec, *, beta=1, alpha=1, out=None) -> Tensor
Performs a matrix-vector product of the matrix :attr:`mat` and
the vector :attr:`vec`.
The vector :attr:`input` is added to the final result.
If :attr:`mat` is a :math:`(n \times m)` tensor, :attr:`vec` is a 1-D tensor of
size `m`, then :attr:`input` must be
:ref:`broadcastable <broadcasting-semantics>` with a 1-D tensor of size `n` and
:attr:`out` will be a 1-D tensor of size `n`.
:attr:`alpha` and :attr:`beta` are scaling factors on matrix-vector product between
:attr:`mat` and :attr:`vec` and the added tensor :attr:`input` respectively.
.. math::
\text{out} = \beta\ \text{input} + \alpha\ (\text{mat} \mathbin{@} \text{vec})
If :attr:`beta` is 0, then :attr:`input` will be ignored, and `nan` and `inf` in
it will not be propagated.
For inputs of type `FloatTensor` or `DoubleTensor`, arguments :attr:`beta` and
:attr:`alpha` must be real numbers, otherwise they should be integers.
Args:
input (Tensor): vector to be added
mat (Tensor): matrix to be matrix multiplied
vec (Tensor): vector to be matrix multiplied
Keyword args:
beta (Number, optional): multiplier for :attr:`input` (:math:`\beta`)
alpha (Number, optional): multiplier for :math:`mat @ vec` (:math:`\alpha`)
out (Tensor, optional): the output tensor.
Example::
>>> M = torch.randn(2)
>>> mat = torch.randn(2, 3)
>>> vec = torch.randn(3)
>>> torch.addmv(M, mat, vec)
tensor([-0.3768, -5.5565])
"""
...
@overload
def addmv(beta: Union[Number, _complex], self: Tensor, mat: Tensor, vec: Tensor, *, out: Tensor) -> Tensor:
r"""
addmv(input, mat, vec, *, beta=1, alpha=1, out=None) -> Tensor
Performs a matrix-vector product of the matrix :attr:`mat` and
the vector :attr:`vec`.
The vector :attr:`input` is added to the final result.
If :attr:`mat` is a :math:`(n \times m)` tensor, :attr:`vec` is a 1-D tensor of
size `m`, then :attr:`input` must be
:ref:`broadcastable <broadcasting-semantics>` with a 1-D tensor of size `n` and
:attr:`out` will be a 1-D tensor of size `n`.
:attr:`alpha` and :attr:`beta` are scaling factors on matrix-vector product between
:attr:`mat` and :attr:`vec` and the added tensor :attr:`input` respectively.
.. math::
\text{out} = \beta\ \text{input} + \alpha\ (\text{mat} \mathbin{@} \text{vec})
If :attr:`beta` is 0, then :attr:`input` will be ignored, and `nan` and `inf` in
it will not be propagated.
For inputs of type `FloatTensor` or `DoubleTensor`, arguments :attr:`beta` and
:attr:`alpha` must be real numbers, otherwise they should be integers.
Args:
input (Tensor): vector to be added
mat (Tensor): matrix to be matrix multiplied
vec (Tensor): vector to be matrix multiplied
Keyword args:
beta (Number, optional): multiplier for :attr:`input` (:math:`\beta`)
alpha (Number, optional): multiplier for :math:`mat @ vec` (:math:`\alpha`)
out (Tensor, optional): the output tensor.
Example::
>>> M = torch.randn(2)
>>> mat = torch.randn(2, 3)
>>> vec = torch.randn(3)
>>> torch.addmv(M, mat, vec)
tensor([-0.3768, -5.5565])
"""
...
@overload
def addmv_(beta: Union[Number, _complex], self: Tensor, alpha: Union[Number, _complex], mat: Tensor, vec: Tensor) -> Tensor: ...
@overload
def addmv_(input: Tensor, mat: Tensor, vec: Tensor, *, beta: Union[Number, _complex] = 1, alpha: Union[Number, _complex] = 1) -> Tensor: ...
@overload
def addmv_(beta: Union[Number, _complex], self: Tensor, mat: Tensor, vec: Tensor) -> Tensor: ...
@overload
def addr(beta: Union[Number, _complex], self: Tensor, alpha: Union[Number, _complex], vec1: Tensor, vec2: Tensor) -> Tensor:
r"""
addr(input, vec1, vec2, *, beta=1, alpha=1, out=None) -> Tensor
Performs the outer product of vectors :attr:`vec1` and :attr:`vec2`
and adds it to the matrix :attr:`input`.
Optional values :attr:`beta` and :attr:`alpha` are scaling factors on the
outer product between :attr:`vec1` and :attr:`vec2` and the added matrix
:attr:`input` respectively.
.. math::
\text{out} = \beta\ \text{input} + \alpha\ (\text{vec1} \otimes \text{vec2})
If :attr:`beta` is 0, then :attr:`input` will be ignored, and `nan` and `inf` in
it will not be propagated.
If :attr:`vec1` is a vector of size `n` and :attr:`vec2` is a vector
of size `m`, then :attr:`input` must be
:ref:`broadcastable <broadcasting-semantics>` with a matrix of size
:math:`(n \times m)` and :attr:`out` will be a matrix of size
:math:`(n \times m)`.
Args:
input (Tensor): matrix to be added
vec1 (Tensor): the first vector of the outer product
vec2 (Tensor): the second vector of the outer product
Keyword args:
beta (Number, optional): multiplier for :attr:`input` (:math:`\beta`)
alpha (Number, optional): multiplier for :math:`\text{vec1} \otimes \text{vec2}` (:math:`\alpha`)
out (Tensor, optional): the output tensor.
Example::
>>> vec1 = torch.arange(1., 4.)
>>> vec2 = torch.arange(1., 3.)
>>> M = torch.zeros(3, 2)
>>> torch.addr(M, vec1, vec2)
tensor([[ 1., 2.],
[ 2., 4.],
[ 3., 6.]])
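The same result can be sketched with an explicit outer product::
>>> torch.allclose(torch.addr(M, vec1, vec2), M + torch.outer(vec1, vec2))
True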
"""
...
@overload
def addr(beta: Union[Number, _complex], self: Tensor, alpha: Union[Number, _complex], vec1: Tensor, vec2: Tensor, *, out: Tensor) -> Tensor:
r"""
addr(input, vec1, vec2, *, beta=1, alpha=1, out=None) -> Tensor
Performs the outer product of vectors :attr:`vec1` and :attr:`vec2`
and adds it to the matrix :attr:`input`.
Optional values :attr:`beta` and :attr:`alpha` are scaling factors on the
outer product between :attr:`vec1` and :attr:`vec2` and the added matrix
:attr:`input` respectively.
.. math::
\text{out} = \beta\ \text{input} + \alpha\ (\text{vec1} \otimes \text{vec2})
If :attr:`beta` is 0, then :attr:`input` will be ignored, and `nan` and `inf` in
it will not be propagated.
If :attr:`vec1` is a vector of size `n` and :attr:`vec2` is a vector
of size `m`, then :attr:`input` must be
:ref:`broadcastable <broadcasting-semantics>` with a matrix of size
:math:`(n \times m)` and :attr:`out` will be a matrix of size
:math:`(n \times m)`.
Args:
input (Tensor): matrix to be added
vec1 (Tensor): the first vector of the outer product
vec2 (Tensor): the second vector of the outer product
Keyword args:
beta (Number, optional): multiplier for :attr:`input` (:math:`\beta`)
alpha (Number, optional): multiplier for :math:`\text{vec1} \otimes \text{vec2}` (:math:`\alpha`)
out (Tensor, optional): the output tensor.
Example::
>>> vec1 = torch.arange(1., 4.)
>>> vec2 = torch.arange(1., 3.)
>>> M = torch.zeros(3, 2)
>>> torch.addr(M, vec1, vec2)
tensor([[ 1., 2.],
[ 2., 4.],
[ 3., 6.]])
"""
...
@overload
def addr(input: Tensor, vec1: Tensor, vec2: Tensor, *, beta: Union[Number, _complex] = 1, alpha: Union[Number, _complex] = 1, out: Optional[Tensor] = None) -> Tensor:
r"""
addr(input, vec1, vec2, *, beta=1, alpha=1, out=None) -> Tensor
Performs the outer product of vectors :attr:`vec1` and :attr:`vec2`
and adds it to the matrix :attr:`input`.
Optional values :attr:`beta` and :attr:`alpha` are scaling factors on the
outer product between :attr:`vec1` and :attr:`vec2` and the added matrix
:attr:`input` respectively.
.. math::
\text{out} = \beta\ \text{input} + \alpha\ (\text{vec1} \otimes \text{vec2})
If :attr:`beta` is 0, then :attr:`input` will be ignored, and `nan` and `inf` in
it will not be propagated.
If :attr:`vec1` is a vector of size `n` and :attr:`vec2` is a vector
of size `m`, then :attr:`input` must be
:ref:`broadcastable <broadcasting-semantics>` with a matrix of size
:math:`(n \times m)` and :attr:`out` will be a matrix of size
:math:`(n \times m)`.
Args:
input (Tensor): matrix to be added
vec1 (Tensor): the first vector of the outer product
vec2 (Tensor): the second vector of the outer product
Keyword args:
beta (Number, optional): multiplier for :attr:`input` (:math:`\beta`)
alpha (Number, optional): multiplier for :math:`\text{vec1} \otimes \text{vec2}` (:math:`\alpha`)
out (Tensor, optional): the output tensor.
Example::
>>> vec1 = torch.arange(1., 4.)
>>> vec2 = torch.arange(1., 3.)
>>> M = torch.zeros(3, 2)
>>> torch.addr(M, vec1, vec2)
tensor([[ 1., 2.],
[ 2., 4.],
[ 3., 6.]])
"""
...
@overload
def addr(beta: Union[Number, _complex], self: Tensor, vec1: Tensor, vec2: Tensor) -> Tensor:
r"""
addr(input, vec1, vec2, *, beta=1, alpha=1, out=None) -> Tensor
Performs the outer product of vectors :attr:`vec1` and :attr:`vec2`
and adds it to the matrix :attr:`input`.
Optional values :attr:`beta` and :attr:`alpha` are scaling factors on the
outer product between :attr:`vec1` and :attr:`vec2` and the added matrix
:attr:`input` respectively.
.. math::
\text{out} = \beta\ \text{input} + \alpha\ (\text{vec1} \otimes \text{vec2})
If :attr:`beta` is 0, then :attr:`input` will be ignored, and `nan` and `inf` in
it will not be propagated.
If :attr:`vec1` is a vector of size `n` and :attr:`vec2` is a vector
of size `m`, then :attr:`input` must be
:ref:`broadcastable <broadcasting-semantics>` with a matrix of size
:math:`(n \times m)` and :attr:`out` will be a matrix of size
:math:`(n \times m)`.
Args:
input (Tensor): matrix to be added
vec1 (Tensor): the first vector of the outer product
vec2 (Tensor): the second vector of the outer product
Keyword args:
beta (Number, optional): multiplier for :attr:`input` (:math:`\beta`)
alpha (Number, optional): multiplier for :math:`\text{vec1} \otimes \text{vec2}` (:math:`\alpha`)
out (Tensor, optional): the output tensor.
Example::
>>> vec1 = torch.arange(1., 4.)
>>> vec2 = torch.arange(1., 3.)
>>> M = torch.zeros(3, 2)
>>> torch.addr(M, vec1, vec2)
tensor([[ 1., 2.],
[ 2., 4.],
[ 3., 6.]])
"""
...
@overload
def addr(beta: Union[Number, _complex], self: Tensor, vec1: Tensor, vec2: Tensor, *, out: Tensor) -> Tensor:
r"""
addr(input, vec1, vec2, *, beta=1, alpha=1, out=None) -> Tensor
Performs the outer product of vectors :attr:`vec1` and :attr:`vec2`
and adds it to the matrix :attr:`input`.
Optional values :attr:`beta` and :attr:`alpha` are scaling factors on the
outer product between :attr:`vec1` and :attr:`vec2` and the added matrix
:attr:`input` respectively.
.. math::
\text{out} = \beta\ \text{input} + \alpha\ (\text{vec1} \otimes \text{vec2})
If :attr:`beta` is 0, then :attr:`input` will be ignored, and `nan` and `inf` in
it will not be propagated.
If :attr:`vec1` is a vector of size `n` and :attr:`vec2` is a vector
of size `m`, then :attr:`input` must be
:ref:`broadcastable <broadcasting-semantics>` with a matrix of size
:math:`(n \times m)` and :attr:`out` will be a matrix of size
:math:`(n \times m)`.
Args:
input (Tensor): matrix to be added
vec1 (Tensor): the first vector of the outer product
vec2 (Tensor): the second vector of the outer product
Keyword args:
beta (Number, optional): multiplier for :attr:`input` (:math:`\beta`)
alpha (Number, optional): multiplier for :math:`\text{vec1} \otimes \text{vec2}` (:math:`\alpha`)
out (Tensor, optional): the output tensor.
Example::
>>> vec1 = torch.arange(1., 4.)
>>> vec2 = torch.arange(1., 3.)
>>> M = torch.zeros(3, 2)
>>> torch.addr(M, vec1, vec2)
tensor([[ 1., 2.],
[ 2., 4.],
[ 3., 6.]])
"""
...
def adjoint(input: Tensor) -> Tensor:
r"""
adjoint(Tensor) -> Tensor
Returns a view of the tensor conjugated and with the last two dimensions transposed.
``x.adjoint()`` is equivalent to ``x.transpose(-2, -1).conj()`` for complex tensors and
to ``x.transpose(-2, -1)`` for real tensors.
Example::
>>> x = torch.arange(4, dtype=torch.float)
>>> A = torch.complex(x, x).reshape(2, 2)
>>> A
tensor([[0.+0.j, 1.+1.j],
[2.+2.j, 3.+3.j]])
>>> A.adjoint()
tensor([[0.-0.j, 2.-2.j],
[1.-1.j, 3.-3.j]])
>>> (A.adjoint() == A.mH).all()
tensor(True)
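The stated equivalence for complex tensors can be checked directly::
>>> (A.adjoint() == A.transpose(-2, -1).conj()).all()
tensor(True)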
"""
...
def affine_grid_generator(theta: Tensor, size: Sequence[Union[_int, SymInt]], align_corners: _bool) -> Tensor: ...
def alias_copy(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
r"""
Performs the same operation as :func:`torch.alias`, but all output tensors
are freshly created instead of aliasing the input.
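A minimal illustrative sketch (the result is a fresh copy, so it never
shares a data pointer with the input):
Example::
>>> x = torch.tensor([1., 2.])
>>> y = torch.alias_copy(x)
>>> y
tensor([1., 2.])
>>> x.data_ptr() == y.data_ptr()
False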
"""
...
@overload
def all(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
r"""
all(input) -> Tensor
Tests if all elements in :attr:`input` evaluate to `True`.
.. note:: This function matches the behaviour of NumPy in returning
output of dtype `bool` for all supported dtypes except `uint8`.
For `uint8` the dtype of output is `uint8` itself.
Example::
>>> a = torch.rand(1, 2).bool()
>>> a
tensor([[False, True]], dtype=torch.bool)
>>> torch.all(a)
tensor(False, dtype=torch.bool)
>>> a = torch.arange(0, 3)
>>> a
tensor([0, 1, 2])
>>> torch.all(a)
tensor(False)
.. function:: all(input, dim, keepdim=False, *, out=None) -> Tensor
:noindex:
For each row of :attr:`input` in the given dimension :attr:`dim`,
returns `True` if all elements in the row evaluate to `True` and `False` otherwise.
If :attr:`keepdim` is ``True``, the output tensor is of the same size
as :attr:`input` except in the dimension(s) :attr:`dim` where it is of size 1.
Otherwise, :attr:`dim` is squeezed (see :func:`torch.squeeze`), resulting in the
output tensor having 1 (or ``len(dim)``) fewer dimension(s).
Args:
input (Tensor): the input tensor.
dim (int or tuple of ints): the dimension or dimensions to reduce.
keepdim (bool): whether the output tensor has :attr:`dim` retained or not.
Keyword args:
out (Tensor, optional): the output tensor.
Example::
>>> a = torch.rand(4, 2).bool()
>>> a
tensor([[True, True],
[True, False],
[True, True],
[True, True]], dtype=torch.bool)
>>> torch.all(a, dim=1)
tensor([ True, False, True, True], dtype=torch.bool)
>>> torch.all(a, dim=0)
tensor([ True, False], dtype=torch.bool)
"""
...
@overload
def all(input: Tensor, dim: Optional[_size] = None, keepdim: _bool = False, *, out: Optional[Tensor] = None) -> Tensor:
r"""
all(input) -> Tensor
Tests if all elements in :attr:`input` evaluate to `True`.
.. note:: This function matches the behaviour of NumPy in returning
output of dtype `bool` for all supported dtypes except `uint8`.
For `uint8` the dtype of output is `uint8` itself.
Example::
>>> a = torch.rand(1, 2).bool()
>>> a
tensor([[False, True]], dtype=torch.bool)
>>> torch.all(a)
tensor(False, dtype=torch.bool)
>>> a = torch.arange(0, 3)
>>> a
tensor([0, 1, 2])
>>> torch.all(a)
tensor(False)
.. function:: all(input, dim, keepdim=False, *, out=None) -> Tensor
:noindex:
For each row of :attr:`input` in the given dimension :attr:`dim`,
returns `True` if all elements in the row evaluate to `True` and `False` otherwise.
If :attr:`keepdim` is ``True``, the output tensor is of the same size
as :attr:`input` except in the dimension(s) :attr:`dim` where it is of size 1.
Otherwise, :attr:`dim` is squeezed (see :func:`torch.squeeze`), resulting in the
output tensor having 1 (or ``len(dim)``) fewer dimension(s).
Args:
input (Tensor): the input tensor.
dim (int or tuple of ints): the dimension or dimensions to reduce.
keepdim (bool): whether the output tensor has :attr:`dim` retained or not.
Keyword args:
out (Tensor, optional): the output tensor.
Example::
>>> a = torch.rand(4, 2).bool()
>>> a
tensor([[True, True],
[True, False],
[True, True],
[True, True]], dtype=torch.bool)
>>> torch.all(a, dim=1)
tensor([ True, False, True, True], dtype=torch.bool)
>>> torch.all(a, dim=0)
tensor([ True, False], dtype=torch.bool)
"""
...
@overload
def all(input: Tensor, dim: _int, keepdim: _bool = False, *, out: Optional[Tensor] = None) -> Tensor:
r"""
all(input) -> Tensor
Tests if all elements in :attr:`input` evaluate to `True`.
.. note:: This function matches the behaviour of NumPy in returning
output of dtype `bool` for all supported dtypes except `uint8`.
For `uint8` the dtype of output is `uint8` itself.
Example::
>>> a = torch.rand(1, 2).bool()
>>> a
tensor([[False, True]], dtype=torch.bool)
>>> torch.all(a)
tensor(False, dtype=torch.bool)
>>> a = torch.arange(0, 3)
>>> a
tensor([0, 1, 2])
>>> torch.all(a)
tensor(False)
.. function:: all(input, dim, keepdim=False, *, out=None) -> Tensor
:noindex:
For each row of :attr:`input` in the given dimension :attr:`dim`,
returns `True` if all elements in the row evaluate to `True` and `False` otherwise.
If :attr:`keepdim` is ``True``, the output tensor is of the same size
as :attr:`input` except in the dimension(s) :attr:`dim` where it is of size 1.
Otherwise, :attr:`dim` is squeezed (see :func:`torch.squeeze`), resulting in the
output tensor having 1 (or ``len(dim)``) fewer dimension(s).
Args:
input (Tensor): the input tensor.
dim (int or tuple of ints): the dimension or dimensions to reduce.
keepdim (bool): whether the output tensor has :attr:`dim` retained or not.
Keyword args:
out (Tensor, optional): the output tensor.
Example::
>>> a = torch.rand(4, 2).bool()
>>> a
tensor([[True, True],
[True, False],
[True, True],
[True, True]], dtype=torch.bool)
>>> torch.all(a, dim=1)
tensor([ True, False, True, True], dtype=torch.bool)
>>> torch.all(a, dim=0)
tensor([ True, False], dtype=torch.bool)
"""
...
@overload
def all(input: Tensor, dim: Union[str, ellipsis, None], keepdim: _bool = False, *, out: Optional[Tensor] = None) -> Tensor:
r"""
all(input) -> Tensor
Tests if all elements in :attr:`input` evaluate to `True`.
.. note:: This function matches the behaviour of NumPy in returning
output of dtype `bool` for all supported dtypes except `uint8`.
For `uint8` the dtype of output is `uint8` itself.
Example::
>>> a = torch.rand(1, 2).bool()
>>> a
tensor([[False, True]], dtype=torch.bool)
>>> torch.all(a)
tensor(False, dtype=torch.bool)
>>> a = torch.arange(0, 3)
>>> a
tensor([0, 1, 2])
>>> torch.all(a)
tensor(False)
.. function:: all(input, dim, keepdim=False, *, out=None) -> Tensor
:noindex:
For each row of :attr:`input` in the given dimension :attr:`dim`,
returns `True` if all elements in the row evaluate to `True` and `False` otherwise.
If :attr:`keepdim` is ``True``, the output tensor is of the same size
as :attr:`input` except in the dimension(s) :attr:`dim` where it is of size 1.
Otherwise, :attr:`dim` is squeezed (see :func:`torch.squeeze`), resulting in the
output tensor having 1 (or ``len(dim)``) fewer dimension(s).
Args:
input (Tensor): the input tensor.
dim (int or tuple of ints): the dimension or dimensions to reduce.
keepdim (bool): whether the output tensor has :attr:`dim` retained or not.
Keyword args:
out (Tensor, optional): the output tensor.
Example::
>>> a = torch.rand(4, 2).bool()
>>> a
tensor([[True, True],
[True, False],
[True, True],
[True, True]], dtype=torch.bool)
>>> torch.all(a, dim=1)
tensor([ True, False, True, True], dtype=torch.bool)
>>> torch.all(a, dim=0)
tensor([ True, False], dtype=torch.bool)
"""
...
def allclose(input: Tensor, other: Tensor, rtol: _float = 1e-05, atol: _float = 1e-08, equal_nan: _bool = False) -> _bool:
r"""
allclose(input, other, rtol=1e-05, atol=1e-08, equal_nan=False) -> bool
This function checks if :attr:`input` and :attr:`other` satisfy the condition:
.. math::
\lvert \text{input} - \text{other} \rvert \leq \texttt{atol} + \texttt{rtol} \times \lvert \text{other} \rvert
elementwise, for all elements of :attr:`input` and :attr:`other`. The behaviour of this function is analogous to
`numpy.allclose <https://docs.scipy.org/doc/numpy/reference/generated/numpy.allclose.html>`_.
Args:
input (Tensor): first tensor to compare
other (Tensor): second tensor to compare
atol (float, optional): absolute tolerance. Default: 1e-08
rtol (float, optional): relative tolerance. Default: 1e-05
equal_nan (bool, optional): if ``True``, then two ``NaN`` values will be considered equal. Default: ``False``
Example::
>>> torch.allclose(torch.tensor([10000., 1e-07]), torch.tensor([10000.1, 1e-08]))
False
>>> torch.allclose(torch.tensor([10000., 1e-08]), torch.tensor([10000.1, 1e-09]))
True
>>> torch.allclose(torch.tensor([1.0, float('nan')]), torch.tensor([1.0, float('nan')]))
False
>>> torch.allclose(torch.tensor([1.0, float('nan')]), torch.tensor([1.0, float('nan')]), equal_nan=True)
True
"""
...
def alpha_dropout(input: Tensor, p: _float, train: _bool) -> Tensor: ...
def alpha_dropout_(input: Tensor, p: _float, train: _bool) -> Tensor: ...
def amax(input: Tensor, dim: Union[_int, _size] = (), keepdim: _bool = False, *, out: Optional[Tensor] = None) -> Tensor:
r"""
amax(input, dim, keepdim=False, *, out=None) -> Tensor
Returns the maximum value of each slice of the :attr:`input` tensor in the given
dimension(s) :attr:`dim`.
.. note::
The difference between ``max``/``min`` and ``amax``/``amin`` is:
- ``amax``/``amin`` supports reducing on multiple dimensions,
- ``amax``/``amin`` does not return indices,
- ``amax``/``amin`` evenly distributes gradient between equal values,
while ``max(dim)``/``min(dim)`` propagates gradient only to a single
index in the source tensor.
If :attr:`keepdim` is ``True``, the output tensor is of the same size
as :attr:`input` except in the dimension(s) :attr:`dim` where it is of size 1.
Otherwise, :attr:`dim` is squeezed (see :func:`torch.squeeze`), resulting in the
output tensor having 1 (or ``len(dim)``) fewer dimension(s).
Args:
input (Tensor): the input tensor.
dim (int or tuple of ints): the dimension or dimensions to reduce.
keepdim (bool): whether the output tensor has :attr:`dim` retained or not.
Keyword args:
out (Tensor, optional): the output tensor.
Example::
>>> a = torch.randn(4, 4)
>>> a
tensor([[ 0.8177, 1.4878, -0.2491, 0.9130],
[-0.7158, 1.1775, 2.0992, 0.4817],
[-0.0053, 0.0164, -1.3738, -0.0507],
[ 1.9700, 1.1106, -1.0318, -1.0816]])
>>> torch.amax(a, 1)
tensor([1.4878, 2.0992, 0.0164, 1.9700])
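>>> # Illustrative sketch: amax can reduce over multiple dimensions at once
>>> torch.amax(a, dim=(0, 1))
tensor(2.0992)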
"""
...
def amin(input: Tensor, dim: Union[_int, _size] = (), keepdim: _bool = False, *, out: Optional[Tensor] = None) -> Tensor:
r"""
amin(input, dim, keepdim=False, *, out=None) -> Tensor
Returns the minimum value of each slice of the :attr:`input` tensor in the given
dimension(s) :attr:`dim`.
.. note::
The difference between ``max``/``min`` and ``amax``/``amin`` is:
- ``amax``/``amin`` supports reducing on multiple dimensions,
- ``amax``/``amin`` does not return indices,
- ``amax``/``amin`` evenly distributes gradient between equal values,
while ``max(dim)``/``min(dim)`` propagates gradient only to a single
index in the source tensor.
If :attr:`keepdim` is ``True``, the output tensor is of the same size
as :attr:`input` except in the dimension(s) :attr:`dim` where it is of size 1.
Otherwise, :attr:`dim` is squeezed (see :func:`torch.squeeze`), resulting in the
output tensor having 1 (or ``len(dim)``) fewer dimension(s).
Args:
input (Tensor): the input tensor.
dim (int or tuple of ints): the dimension or dimensions to reduce.
keepdim (bool): whether the output tensor has :attr:`dim` retained or not.
Keyword args:
out (Tensor, optional): the output tensor.
Example::
>>> a = torch.randn(4, 4)
>>> a
tensor([[ 0.6451, -0.4866, 0.2987, -1.3312],
[-0.5744, 1.2980, 1.8397, -0.2713],
[ 0.9128, 0.9214, -1.7268, -0.2995],
[ 0.9023, 0.4853, 0.9075, -1.6165]])
>>> torch.amin(a, 1)
tensor([-1.3312, -0.5744, -1.7268, -1.6165])
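>>> # Illustrative sketch: amin can reduce over multiple dimensions at once
>>> torch.amin(a, dim=(0, 1))
tensor(-1.7268)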
"""
...
def aminmax(input: Tensor, *, dim: Optional[_int] = None, keepdim: _bool = False, out: Union[Tensor, Tuple[Tensor, ...], List[Tensor], None] = None) -> torch.return_types.aminmax:
r"""
aminmax(input, *, dim=None, keepdim=False, out=None) -> (Tensor min, Tensor max)
Computes the minimum and maximum values of the :attr:`input` tensor.
Args:
input (Tensor):
The input tensor
Keyword Args:
dim (Optional[int]):
The dimension along which to compute the values. If `None`,
computes the values over the entire :attr:`input` tensor.
Default is `None`.
keepdim (bool):
If `True`, the reduced dimensions will be kept in the output
tensor as dimensions with size 1 for broadcasting, otherwise
they will be removed, as if calling :func:`torch.squeeze`.
Default is `False`.
out (Optional[Tuple[Tensor, Tensor]]):
Optional tensors on which to write the result. Must have the same
shape and dtype as the expected output.
Default is `None`.
Returns:
A named tuple `(min, max)` containing the minimum and maximum values.
Raises:
RuntimeError
If any of the dimensions to compute the values over has size 0.
.. note::
NaN values are propagated to the output if at least one value is NaN.
.. seealso::
:func:`torch.amin` computes just the minimum value
:func:`torch.amax` computes just the maximum value
Example::
>>> torch.aminmax(torch.tensor([1, -3, 5]))
torch.return_types.aminmax(
min=tensor(-3),
max=tensor(5))
>>> # aminmax propagates NaNs
>>> torch.aminmax(torch.tensor([1, -3, 5, torch.nan]))
torch.return_types.aminmax(
min=tensor(nan),
max=tensor(nan))
>>> t = torch.arange(10).view(2, 5)
>>> t
tensor([[0, 1, 2, 3, 4],
[5, 6, 7, 8, 9]])
>>> t.aminmax(dim=0, keepdim=True)
torch.return_types.aminmax(
min=tensor([[0, 1, 2, 3, 4]]),
max=tensor([[5, 6, 7, 8, 9]]))
"""
...
def angle(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
r"""
angle(input, *, out=None) -> Tensor
Computes the element-wise angle (in radians) of the given :attr:`input` tensor.
.. math::
\text{out}_{i} = angle(\text{input}_{i})
Args:
input (Tensor): the input tensor.
Keyword args:
out (Tensor, optional): the output tensor.
.. note:: Starting in PyTorch 1.8, angle returns pi for negative real numbers,
zero for non-negative real numbers, and propagates NaNs. Previously
the function would return zero for all real numbers and not propagate
floating-point NaNs.
Example::
>>> torch.angle(torch.tensor([-1 + 1j, -2 + 2j, 3 - 3j]))*180/3.14159
tensor([ 135., 135., -45.])
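>>> # Illustrative sketch of the note above: pi for negative real inputs,
>>> # zero for non-negative real inputs
>>> torch.angle(torch.tensor([-1.0, 0.0, 1.0]))
tensor([3.1416, 0.0000, 0.0000])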
"""
...
@overload
def any(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
r"""
any(input) -> Tensor
Tests if any element in :attr:`input` evaluates to `True`.
.. note:: This function matches the behaviour of NumPy in returning
output of dtype `bool` for all supported dtypes except `uint8`.
For `uint8` the dtype of output is `uint8` itself.
Example::
>>> a = torch.rand(1, 2).bool()
>>> a
tensor([[False, True]], dtype=torch.bool)
>>> torch.any(a)
tensor(True, dtype=torch.bool)
>>> a = torch.arange(0, 3)
>>> a
tensor([0, 1, 2])
>>> torch.any(a)
tensor(True)
.. function:: any(input, dim, keepdim=False, *, out=None) -> Tensor
:noindex:
For each row of :attr:`input` in the given dimension :attr:`dim`,
returns `True` if any element in the row evaluates to `True` and `False` otherwise.
If :attr:`keepdim` is ``True``, the output tensor is of the same size
as :attr:`input` except in the dimension(s) :attr:`dim` where it is of size 1.
Otherwise, :attr:`dim` is squeezed (see :func:`torch.squeeze`), resulting in the
output tensor having 1 (or ``len(dim)``) fewer dimension(s).
Args:
input (Tensor): the input tensor.
dim (int or tuple of ints): the dimension or dimensions to reduce.
keepdim (bool): whether the output tensor has :attr:`dim` retained or not.
Keyword args:
out (Tensor, optional): the output tensor.
Example::
>>> a = torch.randn(4, 2) < 0
>>> a
tensor([[ True, True],
[False, True],
[ True, True],
[False, False]])
>>> torch.any(a, 1)
tensor([ True, True, True, False])
>>> torch.any(a, 0)
tensor([True, True])
"""
...
@overload
def any(input: Tensor, dim: Optional[_size] = None, keepdim: _bool = False, *, out: Optional[Tensor] = None) -> Tensor:
r"""
any(input) -> Tensor
Tests if any element in :attr:`input` evaluates to `True`.
.. note:: This function matches the behaviour of NumPy in returning
output of dtype `bool` for all supported dtypes except `uint8`.
For `uint8` the dtype of output is `uint8` itself.
Example::
>>> a = torch.rand(1, 2).bool()
>>> a
tensor([[False, True]], dtype=torch.bool)
>>> torch.any(a)
tensor(True, dtype=torch.bool)
>>> a = torch.arange(0, 3)
>>> a
tensor([0, 1, 2])
>>> torch.any(a)
tensor(True)
.. function:: any(input, dim, keepdim=False, *, out=None) -> Tensor
:noindex:
For each row of :attr:`input` in the given dimension :attr:`dim`,
returns `True` if any element in the row evaluates to `True` and `False` otherwise.
If :attr:`keepdim` is ``True``, the output tensor is of the same size
as :attr:`input` except in the dimension(s) :attr:`dim` where it is of size 1.
Otherwise, :attr:`dim` is squeezed (see :func:`torch.squeeze`), resulting in the
output tensor having 1 (or ``len(dim)``) fewer dimension(s).
Args:
input (Tensor): the input tensor.
dim (int or tuple of ints): the dimension or dimensions to reduce.
keepdim (bool): whether the output tensor has :attr:`dim` retained or not.
Keyword args:
out (Tensor, optional): the output tensor.
Example::
>>> a = torch.randn(4, 2) < 0
>>> a
tensor([[ True, True],
[False, True],
[ True, True],
[False, False]])
>>> torch.any(a, 1)
tensor([ True, True, True, False])
>>> torch.any(a, 0)
tensor([True, True])
"""
...
@overload
def any(input: Tensor, dim: _int, keepdim: _bool = False, *, out: Optional[Tensor] = None) -> Tensor:
r"""
any(input) -> Tensor
Tests if any element in :attr:`input` evaluates to `True`.
.. note:: This function matches the behaviour of NumPy in returning
output of dtype `bool` for all supported dtypes except `uint8`.
For `uint8` the dtype of output is `uint8` itself.
Example::
>>> a = torch.rand(1, 2).bool()
>>> a
tensor([[False, True]], dtype=torch.bool)
>>> torch.any(a)
tensor(True, dtype=torch.bool)
>>> a = torch.arange(0, 3)
>>> a
tensor([0, 1, 2])
>>> torch.any(a)
tensor(True)
.. function:: any(input, dim, keepdim=False, *, out=None) -> Tensor
:noindex:
For each row of :attr:`input` in the given dimension :attr:`dim`,
returns `True` if any element in the row evaluates to `True` and `False` otherwise.
If :attr:`keepdim` is ``True``, the output tensor is of the same size
as :attr:`input` except in the dimension(s) :attr:`dim` where it is of size 1.
Otherwise, :attr:`dim` is squeezed (see :func:`torch.squeeze`), resulting in the
output tensor having 1 (or ``len(dim)``) fewer dimension(s).
Args:
input (Tensor): the input tensor.
dim (int or tuple of ints): the dimension or dimensions to reduce.
keepdim (bool): whether the output tensor has :attr:`dim` retained or not.
Keyword args:
out (Tensor, optional): the output tensor.
Example::
>>> a = torch.randn(4, 2) < 0
>>> a
tensor([[ True, True],
[False, True],
[ True, True],
[False, False]])
>>> torch.any(a, 1)
tensor([ True, True, True, False])
>>> torch.any(a, 0)
tensor([True, True])
"""
...
@overload
def any(input: Tensor, dim: Union[str, ellipsis, None], keepdim: _bool = False, *, out: Optional[Tensor] = None) -> Tensor:
r"""
any(input) -> Tensor
Tests if any element in :attr:`input` evaluates to `True`.
.. note:: This function matches the behaviour of NumPy in returning
output of dtype `bool` for all supported dtypes except `uint8`.
For `uint8` the dtype of output is `uint8` itself.
Example::
>>> a = torch.rand(1, 2).bool()
>>> a
tensor([[False, True]], dtype=torch.bool)
>>> torch.any(a)
tensor(True, dtype=torch.bool)
>>> a = torch.arange(0, 3)
>>> a
tensor([0, 1, 2])
>>> torch.any(a)
tensor(True)
.. function:: any(input, dim, keepdim=False, *, out=None) -> Tensor
:noindex:
For each row of :attr:`input` in the given dimension :attr:`dim`,
returns `True` if any element in the row evaluates to `True` and `False` otherwise.
If :attr:`keepdim` is ``True``, the output tensor is of the same size
as :attr:`input` except in the dimension(s) :attr:`dim` where it is of size 1.
Otherwise, :attr:`dim` is squeezed (see :func:`torch.squeeze`), resulting in the
output tensor having 1 (or ``len(dim)``) fewer dimension(s).
Args:
input (Tensor): the input tensor.
dim (int or tuple of ints): the dimension or dimensions to reduce.
keepdim (bool): whether the output tensor has :attr:`dim` retained or not.
Keyword args:
out (Tensor, optional): the output tensor.
Example::
>>> a = torch.randn(4, 2) < 0
>>> a
tensor([[ True, True],
[False, True],
[ True, True],
[False, False]])
>>> torch.any(a, 1)
tensor([ True, True, True, False])
>>> torch.any(a, 0)
tensor([True, True])
"""
...
@overload
def arange(start: Number, end: Number, step: Number, *, out: Optional[Tensor] = None, dtype: Optional[_dtype] = None, device: Optional[DeviceLikeType] = None, requires_grad: _bool = False, pin_memory: _bool = False) -> Tensor:
r"""
arange(start=0, end, step=1, *, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False) -> Tensor
Returns a 1-D tensor of size :math:`\left\lceil \frac{\text{end} - \text{start}}{\text{step}} \right\rceil`
with values from the interval ``[start, end)`` taken with common difference
:attr:`step` beginning from `start`.
Note that non-integer :attr:`step` is subject to floating point rounding errors when
comparing against :attr:`end`; to avoid inconsistency, we advise subtracting a small epsilon from :attr:`end`
in such cases.
.. math::
\text{out}_{{i+1}} = \text{out}_{i} + \text{step}
Args:
start (Number): the starting value for the set of points. Default: ``0``.
end (Number): the ending value for the set of points
step (Number): the gap between each pair of adjacent points. Default: ``1``.
Keyword args:
out (Tensor, optional): the output tensor.
dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.
Default: if ``None``, the data type is inferred from the other input
arguments. If any of `start`, `end`, or `step` is floating-point, the
`dtype` is inferred to be the default dtype, see
:meth:`~torch.get_default_dtype`. Otherwise, the `dtype` is inferred to
be `torch.int64`.
layout (:class:`torch.layout`, optional): the desired layout of returned Tensor.
Default: ``torch.strided``.
device (:class:`torch.device`, optional): the desired device of returned tensor.
Default: if ``None``, uses the current device for the default tensor type
(see :func:`torch.set_default_device`). :attr:`device` will be the CPU
for CPU tensor types and the current CUDA device for CUDA tensor types.
requires_grad (bool, optional): If autograd should record operations on the
returned tensor. Default: ``False``.
Example::
>>> torch.arange(5)
tensor([ 0, 1, 2, 3, 4])
>>> torch.arange(1, 4)
tensor([ 1, 2, 3])
>>> torch.arange(1, 2.5, 0.5)
tensor([ 1.0000, 1.5000, 2.0000])
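>>> # Hedged sketch of the epsilon advice above: with a non-integer step,
>>> # subtracting a small epsilon from `end` guards against floating-point
>>> # rounding admitting an extra endpoint element
>>> eps = 1e-6
>>> torch.arange(1, 2.5 - eps, 0.5)
tensor([ 1.0000, 1.5000, 2.0000])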
"""
...
@overload
def arange(start: Number, end: Number, *, out: Optional[Tensor] = None, dtype: Optional[_dtype] = None, device: Optional[DeviceLikeType] = None, requires_grad: _bool = False, pin_memory: _bool = False) -> Tensor:
r"""
arange(start=0, end, step=1, *, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False) -> Tensor
Returns a 1-D tensor of size :math:`\left\lceil \frac{\text{end} - \text{start}}{\text{step}} \right\rceil`
with values from the interval ``[start, end)`` taken with common difference
:attr:`step` beginning from `start`.
Note that non-integer :attr:`step` is subject to floating point rounding errors when
comparing against :attr:`end`; to avoid inconsistency, we advise subtracting a small epsilon from :attr:`end`
in such cases.
.. math::
\text{out}_{{i+1}} = \text{out}_{i} + \text{step}
Args:
start (Number): the starting value for the set of points. Default: ``0``.
end (Number): the ending value for the set of points
step (Number): the gap between each pair of adjacent points. Default: ``1``.
Keyword args:
out (Tensor, optional): the output tensor.
dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.
Default: if ``None``, the data type is inferred from the other input
arguments. If any of `start`, `end`, or `step` is floating-point, the
`dtype` is inferred to be the default dtype, see
:meth:`~torch.get_default_dtype`. Otherwise, the `dtype` is inferred to
be `torch.int64`.
layout (:class:`torch.layout`, optional): the desired layout of returned Tensor.
Default: ``torch.strided``.
device (:class:`torch.device`, optional): the desired device of returned tensor.
Default: if ``None``, uses the current device for the default tensor type
(see :func:`torch.set_default_device`). :attr:`device` will be the CPU
for CPU tensor types and the current CUDA device for CUDA tensor types.
requires_grad (bool, optional): If autograd should record operations on the
returned tensor. Default: ``False``.
Example::
>>> torch.arange(5)
tensor([ 0, 1, 2, 3, 4])
>>> torch.arange(1, 4)
tensor([ 1, 2, 3])
>>> torch.arange(1, 2.5, 0.5)
tensor([ 1.0000, 1.5000, 2.0000])
"""
...
@overload
def arange(end: Number, *, out: Optional[Tensor] = None, dtype: Optional[_dtype] = None, device: Optional[DeviceLikeType] = None, requires_grad: _bool = False, pin_memory: _bool = False) -> Tensor:
r"""
arange(start=0, end, step=1, *, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False) -> Tensor
Returns a 1-D tensor of size :math:`\left\lceil \frac{\text{end} - \text{start}}{\text{step}} \right\rceil`
with values from the interval ``[start, end)`` taken with common difference
:attr:`step` beginning from `start`.
Note that non-integer :attr:`step` is subject to floating point rounding errors when
comparing against :attr:`end`; to avoid inconsistency, we advise subtracting a small epsilon from :attr:`end`
in such cases.
.. math::
\text{out}_{{i+1}} = \text{out}_{i} + \text{step}
Args:
start (Number): the starting value for the set of points. Default: ``0``.
end (Number): the ending value for the set of points
step (Number): the gap between each pair of adjacent points. Default: ``1``.
Keyword args:
out (Tensor, optional): the output tensor.
dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.
Default: if ``None``, the data type is inferred from the other input
arguments. If any of `start`, `end`, or `step` is floating-point, the
`dtype` is inferred to be the default dtype, see
:meth:`~torch.get_default_dtype`. Otherwise, the `dtype` is inferred to
be `torch.int64`.
layout (:class:`torch.layout`, optional): the desired layout of returned Tensor.
Default: ``torch.strided``.
device (:class:`torch.device`, optional): the desired device of returned tensor.
Default: if ``None``, uses the current device for the default tensor type
(see :func:`torch.set_default_device`). :attr:`device` will be the CPU
for CPU tensor types and the current CUDA device for CUDA tensor types.
requires_grad (bool, optional): If autograd should record operations on the
returned tensor. Default: ``False``.
Example::
>>> torch.arange(5)
tensor([ 0, 1, 2, 3, 4])
>>> torch.arange(1, 4)
tensor([ 1, 2, 3])
>>> torch.arange(1, 2.5, 0.5)
tensor([ 1.0000, 1.5000, 2.0000])
"""
...
@overload
def arange(end: Union[Number, _complex], *, out: Optional[Tensor] = None, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor:
r"""
arange(start=0, end, step=1, *, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False) -> Tensor
Returns a 1-D tensor of size :math:`\left\lceil \frac{\text{end} - \text{start}}{\text{step}} \right\rceil`
with values from the interval ``[start, end)`` taken with common difference
:attr:`step` beginning from `start`.
Note that non-integer :attr:`step` is subject to floating point rounding errors when
comparing against :attr:`end`; to avoid inconsistency, we advise subtracting a small epsilon from :attr:`end`
in such cases.
.. math::
\text{out}_{{i+1}} = \text{out}_{i} + \text{step}
Args:
start (Number): the starting value for the set of points. Default: ``0``.
end (Number): the ending value for the set of points
step (Number): the gap between each pair of adjacent points. Default: ``1``.
Keyword args:
out (Tensor, optional): the output tensor.
dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.
Default: if ``None``, the data type is inferred from the other input
arguments. If any of `start`, `end`, or `step` is floating-point, the
`dtype` is inferred to be the default dtype, see
:meth:`~torch.get_default_dtype`. Otherwise, the `dtype` is inferred to
be `torch.int64`.
layout (:class:`torch.layout`, optional): the desired layout of returned Tensor.
Default: ``torch.strided``.
device (:class:`torch.device`, optional): the desired device of returned tensor.
Default: if ``None``, uses the current device for the default tensor type
(see :func:`torch.set_default_device`). :attr:`device` will be the CPU
for CPU tensor types and the current CUDA device for CUDA tensor types.
requires_grad (bool, optional): If autograd should record operations on the
returned tensor. Default: ``False``.
Example::
>>> torch.arange(5)
tensor([ 0, 1, 2, 3, 4])
>>> torch.arange(1, 4)
tensor([ 1, 2, 3])
>>> torch.arange(1, 2.5, 0.5)
tensor([ 1.0000, 1.5000, 2.0000])
"""
...
@overload
def arange(start: Union[Number, _complex], end: Union[Number, _complex], *, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor:
r"""
arange(start=0, end, step=1, *, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False) -> Tensor
Returns a 1-D tensor of size :math:`\left\lceil \frac{\text{end} - \text{start}}{\text{step}} \right\rceil`
with values from the interval ``[start, end)`` taken with common difference
:attr:`step` beginning from `start`.
Note that non-integer :attr:`step` is subject to floating point rounding errors when
comparing against :attr:`end`; to avoid inconsistency, we advise subtracting a small epsilon from :attr:`end`
in such cases.
.. math::
\text{out}_{{i+1}} = \text{out}_{i} + \text{step}
Args:
start (Number): the starting value for the set of points. Default: ``0``.
end (Number): the ending value for the set of points
step (Number): the gap between each pair of adjacent points. Default: ``1``.
Keyword args:
out (Tensor, optional): the output tensor.
dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.
Default: if ``None``, the data type is inferred from the other input
arguments. If any of `start`, `end`, or `step` is floating-point, the
`dtype` is inferred to be the default dtype, see
:meth:`~torch.get_default_dtype`. Otherwise, the `dtype` is inferred to
be `torch.int64`.
layout (:class:`torch.layout`, optional): the desired layout of returned Tensor.
Default: ``torch.strided``.
device (:class:`torch.device`, optional): the desired device of returned tensor.
Default: if ``None``, uses the current device for the default tensor type
(see :func:`torch.set_default_device`). :attr:`device` will be the CPU
for CPU tensor types and the current CUDA device for CUDA tensor types.
requires_grad (bool, optional): If autograd should record operations on the
returned tensor. Default: ``False``.
Example::
>>> torch.arange(5)
tensor([ 0, 1, 2, 3, 4])
>>> torch.arange(1, 4)
tensor([ 1, 2, 3])
>>> torch.arange(1, 2.5, 0.5)
tensor([ 1.0000, 1.5000, 2.0000])
"""
...
@overload
def arange(start: Union[Number, _complex], end: Union[Number, _complex], step: Union[Number, _complex] = 1, *, out: Optional[Tensor] = None, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor:
r"""
arange(start=0, end, step=1, *, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False) -> Tensor
Returns a 1-D tensor of size :math:`\left\lceil \frac{\text{end} - \text{start}}{\text{step}} \right\rceil`
with values from the interval ``[start, end)`` taken with common difference
:attr:`step` beginning from `start`.
Note that non-integer :attr:`step` is subject to floating point rounding errors when
comparing against :attr:`end`; to avoid inconsistency, we advise subtracting a small epsilon from :attr:`end`
in such cases.
.. math::
\text{out}_{{i+1}} = \text{out}_{i} + \text{step}
Args:
start (Number): the starting value for the set of points. Default: ``0``.
end (Number): the ending value for the set of points
step (Number): the gap between each pair of adjacent points. Default: ``1``.
Keyword args:
out (Tensor, optional): the output tensor.
dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.
Default: if ``None``, the data type is inferred from the other input
arguments. If any of `start`, `end`, or `step` is floating-point, the
`dtype` is inferred to be the default dtype, see
:meth:`~torch.get_default_dtype`. Otherwise, the `dtype` is inferred to
be `torch.int64`.
layout (:class:`torch.layout`, optional): the desired layout of returned Tensor.
Default: ``torch.strided``.
device (:class:`torch.device`, optional): the desired device of returned tensor.
Default: if ``None``, uses the current device for the default tensor type
(see :func:`torch.set_default_device`). :attr:`device` will be the CPU
for CPU tensor types and the current CUDA device for CUDA tensor types.
requires_grad (bool, optional): If autograd should record operations on the
returned tensor. Default: ``False``.
Example::
>>> torch.arange(5)
tensor([ 0, 1, 2, 3, 4])
>>> torch.arange(1, 4)
tensor([ 1, 2, 3])
>>> torch.arange(1, 2.5, 0.5)
tensor([ 1.0000, 1.5000, 2.0000])
"""
...
def arccos(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
r"""
arccos(input, *, out=None) -> Tensor
Alias for :func:`torch.acos`.
"""
...
def arccos_(input: Tensor) -> Tensor: ...
def arccosh(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
r"""
arccosh(input, *, out=None) -> Tensor
Alias for :func:`torch.acosh`.
"""
...
def arccosh_(input: Tensor) -> Tensor: ...
def arcsin(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
r"""
arcsin(input, *, out=None) -> Tensor
Alias for :func:`torch.asin`.
"""
...
def arcsin_(input: Tensor) -> Tensor: ...
def arcsinh(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
r"""
arcsinh(input, *, out=None) -> Tensor
Alias for :func:`torch.asinh`.
"""
...
def arcsinh_(input: Tensor) -> Tensor: ...
def arctan(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
r"""
arctan(input, *, out=None) -> Tensor
Alias for :func:`torch.atan`.
"""
...
def arctan2(input: Tensor, other: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
r"""
arctan2(input, other, *, out=None) -> Tensor
Alias for :func:`torch.atan2`.
"""
...
def arctan_(input: Tensor) -> Tensor: ...
def arctanh(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
r"""
arctanh(input, *, out=None) -> Tensor
Alias for :func:`torch.atanh`.
"""
...
def arctanh_(input: Tensor) -> Tensor: ...
def argmax(input: Tensor, dim: Optional[_int] = None, keepdim: _bool = False, *, out: Optional[Tensor] = None) -> Tensor:
r"""
argmax(input) -> LongTensor
Returns the indices of the maximum value of all elements in the :attr:`input` tensor.
This is the second value returned by :meth:`torch.max`. See its
documentation for the exact semantics of this method.
.. note:: If there are multiple maximal values then the indices of the first maximal value are returned.
Args:
input (Tensor): the input tensor.
Example::
>>> a = torch.randn(4, 4)
>>> a
tensor([[ 1.3398, 0.2663, -0.2686, 0.2450],
[-0.7401, -0.8805, -0.3402, -1.1936],
[ 0.4907, -1.3948, -1.0691, -0.3132],
[-1.6092, 0.5419, -0.2993, 0.3195]])
>>> torch.argmax(a)
tensor(0)
.. function:: argmax(input, dim, keepdim=False) -> LongTensor
:noindex:
Returns the indices of the maximum values of a tensor across a dimension.
This is the second value returned by :meth:`torch.max`. See its
documentation for the exact semantics of this method.
Args:
input (Tensor): the input tensor.
dim (int): the dimension to reduce. If ``None``, the argmax of the flattened input is returned.
keepdim (bool): whether the output tensor has :attr:`dim` retained or not.
Example::
>>> a = torch.randn(4, 4)
>>> a
tensor([[ 1.3398, 0.2663, -0.2686, 0.2450],
[-0.7401, -0.8805, -0.3402, -1.1936],
[ 0.4907, -1.3948, -1.0691, -0.3132],
[-1.6092, 0.5419, -0.2993, 0.3195]])
>>> torch.argmax(a, dim=1)
tensor([ 0, 2, 0, 1])
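>>> # Illustrative sketch of the note above: with ties, the index of the
>>> # first maximal value is returned
>>> torch.argmax(torch.tensor([1, 3, 3]))
tensor(1)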
"""
...
def argmin(input: Tensor, dim: Optional[_int] = None, keepdim: _bool = False, *, out: Optional[Tensor] = None) -> Tensor:
r"""
argmin(input, dim=None, keepdim=False) -> LongTensor
Returns the indices of the minimum value(s) of the flattened tensor or along a dimension.
This is the second value returned by :meth:`torch.min`. See its
documentation for the exact semantics of this method.
.. note:: If there are multiple minimal values then the indices of the first minimal value are returned.
Args:
input (Tensor): the input tensor.
dim (int): the dimension to reduce. If ``None``, the argmin of the flattened input is returned.
keepdim (bool): whether the output tensor has :attr:`dim` retained or not.
Example::
>>> a = torch.randn(4, 4)
>>> a
tensor([[ 0.1139, 0.2254, -0.1381, 0.3687],
[ 1.0100, -1.1975, -0.0102, -0.4732],
[-0.9240, 0.1207, -0.7506, -1.0213],
[ 1.7809, -1.2960, 0.9384, 0.1438]])
>>> torch.argmin(a)
tensor(13)
>>> torch.argmin(a, dim=1)
tensor([ 2, 1, 3, 1])
>>> torch.argmin(a, dim=1, keepdim=True)
tensor([[2],
[1],
[3],
[1]])
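>>> # Illustrative sketch of the note above: with ties, the index of the
>>> # first minimal value is returned
>>> torch.argmin(torch.tensor([2, 0, 0]))
tensor(1)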
"""
...
@overload
def argsort(input: Tensor, *, stable: _bool, dim: _int = -1, descending: _bool = False) -> Tensor:
r"""
argsort(input, dim=-1, descending=False, stable=False) -> Tensor
Returns the indices that sort a tensor along a given dimension in ascending
order by value.
This is the second value returned by :meth:`torch.sort`. See its documentation
for the exact semantics of this method.
If :attr:`stable` is ``True`` then the sorting routine becomes stable, preserving
the order of equivalent elements. If ``False``, the relative order of values
which compare equal is not guaranteed. ``True`` is slower.
Args:
input (Tensor): the input tensor.
dim (int, optional): the dimension to sort along
descending (bool, optional): controls the sorting order (ascending or descending)
stable (bool, optional): controls the relative order of equivalent elements
Example::
>>> a = torch.randn(4, 4)
>>> a
tensor([[ 0.0785, 1.5267, -0.8521, 0.4065],
[ 0.1598, 0.0788, -0.0745, -1.2700],
[ 1.2208, 1.0722, -0.7064, 1.2564],
[ 0.0669, -0.2318, -0.8229, -0.9280]])
>>> torch.argsort(a, dim=1)
tensor([[2, 0, 3, 1],
[3, 2, 1, 0],
[2, 1, 0, 3],
[3, 2, 1, 0]])
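>>> # Illustrative sketch: stable=True preserves the order of equal values
>>> torch.argsort(torch.tensor([1, 1, 0]), stable=True)
tensor([2, 0, 1])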
"""
...
@overload
def argsort(input: Tensor, dim: _int = -1, descending: _bool = False) -> Tensor:
r"""
argsort(input, dim=-1, descending=False, stable=False) -> Tensor
Returns the indices that sort a tensor along a given dimension in ascending
order by value.
This is the second value returned by :meth:`torch.sort`. See its documentation
for the exact semantics of this method.
If :attr:`stable` is ``True`` then the sorting routine becomes stable, preserving
the order of equivalent elements. If ``False``, the relative order of values
which compare equal is not guaranteed. ``True`` is slower.
Args:
input (Tensor): the input tensor.
dim (int, optional): the dimension to sort along
descending (bool, optional): controls the sorting order (ascending or descending)
stable (bool, optional): controls the relative order of equivalent elements
Example::
>>> a = torch.randn(4, 4)
>>> a
tensor([[ 0.0785, 1.5267, -0.8521, 0.4065],
[ 0.1598, 0.0788, -0.0745, -1.2700],
[ 1.2208, 1.0722, -0.7064, 1.2564],
[ 0.0669, -0.2318, -0.8229, -0.9280]])
>>> torch.argsort(a, dim=1)
tensor([[2, 0, 3, 1],
[3, 2, 1, 0],
[2, 1, 0, 3],
[3, 2, 1, 0]])
"""
...
@overload
def argsort(input: Tensor, dim: Union[str, ellipsis, None], descending: _bool = False) -> Tensor:
r"""
argsort(input, dim=-1, descending=False, stable=False) -> Tensor
Returns the indices that sort a tensor along a given dimension in ascending
order by value.
This is the second value returned by :meth:`torch.sort`. See its documentation
for the exact semantics of this method.
If :attr:`stable` is ``True`` then the sorting routine becomes stable, preserving
the order of equivalent elements. If ``False``, the relative order of values
which compare equal is not guaranteed. ``True`` is slower.
Args:
input (Tensor): the input tensor.
dim (int, optional): the dimension to sort along
descending (bool, optional): controls the sorting order (ascending or descending)
stable (bool, optional): controls the relative order of equivalent elements
Example::
>>> a = torch.randn(4, 4)
>>> a
tensor([[ 0.0785, 1.5267, -0.8521, 0.4065],
[ 0.1598, 0.0788, -0.0745, -1.2700],
[ 1.2208, 1.0722, -0.7064, 1.2564],
[ 0.0669, -0.2318, -0.8229, -0.9280]])
>>> torch.argsort(a, dim=1)
tensor([[2, 0, 3, 1],
[3, 2, 1, 0],
[2, 1, 0, 3],
[3, 2, 1, 0]])
"""
...
def argwhere(input: Tensor) -> Tensor:
r"""
argwhere(input) -> Tensor
Returns a tensor containing the indices of all non-zero elements of
:attr:`input`. Each row in the result contains the indices of a non-zero
element in :attr:`input`. The result is sorted lexicographically, with
the last index changing the fastest (C-style).
If :attr:`input` has :math:`n` dimensions, then the resulting indices tensor
:attr:`out` is of size :math:`(z \times n)`, where :math:`z` is the total number of
non-zero elements in the :attr:`input` tensor.
.. note::
This function is similar to NumPy's `argwhere`.
When :attr:`input` is on CUDA, this function causes host-device synchronization.
Args:
input (Tensor): the input tensor.
Example::
>>> t = torch.tensor([1, 0, 1])
>>> torch.argwhere(t)
tensor([[0],
[2]])
>>> t = torch.tensor([[1, 0, 1], [0, 1, 1]])
>>> torch.argwhere(t)
tensor([[0, 0],
[0, 2],
[1, 1],
[1, 2]])
"""
...
def as_strided(input: Tensor, size: Sequence[Union[_int, SymInt]], stride: Sequence[Union[_int, SymInt]], storage_offset: Optional[Union[_int, SymInt]] = None) -> Tensor:
r"""
as_strided(input, size, stride, storage_offset=None) -> Tensor
Create a view of an existing `torch.Tensor` :attr:`input` with specified
:attr:`size`, :attr:`stride` and :attr:`storage_offset`.
.. warning::
Prefer using other view functions, like :meth:`torch.Tensor.expand`,
to setting a view's strides manually with `as_strided`, as this
function's behavior depends on the implementation of a tensor's storage.
The constructed view of the storage must only refer to elements within
the storage or a runtime error will be thrown, and if the view is
"overlapped" (with multiple indices referring to the same element in
memory) its behavior is undefined.
Args:
input (Tensor): the input tensor.
size (tuple of ints): the shape of the output tensor
stride (tuple of ints): the stride of the output tensor
storage_offset (int, optional): the offset in the underlying storage of the output tensor.
If ``None``, the storage_offset of the output tensor will match the input tensor.
Example::
>>> x = torch.randn(3, 3)
>>> x
tensor([[ 0.9039, 0.6291, 1.0795],
[ 0.1586, 2.1939, -0.4900],
[-0.1909, -0.7503, 1.9355]])
>>> t = torch.as_strided(x, (2, 2), (1, 2))
>>> t
tensor([[0.9039, 1.0795],
[0.6291, 0.1586]])
>>> t = torch.as_strided(x, (2, 2), (1, 2), 1)
>>> t
tensor([[0.6291, 0.1586],
[1.0795, 2.1939]])
"""
...
def as_strided_(input: Tensor, size: Sequence[Union[_int, SymInt]], stride: Sequence[Union[_int, SymInt]], storage_offset: Optional[Union[_int, SymInt]] = None) -> Tensor: ...
def as_strided_copy(input: Tensor, size: Sequence[Union[_int, SymInt]], stride: Sequence[Union[_int, SymInt]], storage_offset: Optional[Union[_int, SymInt]] = None, *, out: Optional[Tensor] = None) -> Tensor:
r"""
Performs the same operation as :func:`torch.as_strided`, but all output tensors
are freshly created instead of aliasing the input.
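A minimal illustrative sketch (the result is a fresh copy, so it never
shares a data pointer with the input):
Example::
>>> x = torch.zeros(3, 3)
>>> t = torch.as_strided_copy(x, (2, 2), (1, 2))
>>> t
tensor([[0., 0.],
[0., 0.]])
>>> t.data_ptr() == x.data_ptr()
False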
"""
...
def as_strided_scatter(input: Tensor, src: Tensor, size: Sequence[Union[_int, SymInt]], stride: Sequence[Union[_int, SymInt]], storage_offset: Optional[Union[_int, SymInt]] = None) -> Tensor:
r"""
as_strided_scatter(input, src, size, stride, storage_offset=None) -> Tensor
Embeds the values of the :attr:`src` tensor into :attr:`input` along
the elements corresponding to the result of calling
input.as_strided(size, stride, storage_offset).
This function returns a tensor with fresh storage; it does not
return a view.
Args:
input (Tensor): the input tensor.
size (tuple of ints): the shape of the output tensor
stride (tuple of ints): the stride of the output tensor
storage_offset (int, optional): the offset in the underlying storage of the output tensor
.. note::
:attr:`src` must be of the proper size in order to be embedded
into :attr:`input`. Specifically, it should have the same shape as
`torch.as_strided(input, size, stride, storage_offset)`
Example::
>>> a = torch.arange(4).reshape(2, 2) + 1
>>> a
tensor([[1, 2],
[3, 4]])
>>> b = torch.zeros(3, 3)
>>> b
tensor([[0., 0., 0.],
[0., 0., 0.],
[0., 0., 0.]])
>>> torch.as_strided_scatter(b, a, (2, 2), (1, 2))
tensor([[1., 3., 2.],
[4., 0., 0.],
[0., 0., 0.]])
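>>> # Illustrative sketch with a nonzero storage_offset (expected result shown)
>>> torch.as_strided_scatter(b, a, (2, 2), (1, 2), 1)
tensor([[0., 1., 3.],
[2., 4., 0.],
[0., 0., 0.]])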
"""
...
def as_tensor(data: Any, dtype: Optional[_dtype] = None, device: Optional[DeviceLikeType] = None) -> Tensor:
r"""
as_tensor(data, dtype=None, device=None) -> Tensor
Converts :attr:`data` into a tensor, sharing data and preserving autograd
history if possible.
If :attr:`data` is already a tensor with the requested dtype and device
then :attr:`data` itself is returned, but if :attr:`data` is a
tensor with a different dtype or device then it's copied as if using
`data.to(dtype=dtype, device=device)`.
If :attr:`data` is a NumPy array (an ndarray) with the same dtype and device then a
tensor is constructed using :func:`torch.from_numpy`.
.. seealso::
:func:`torch.tensor` never shares its data and creates a new "leaf tensor" (see :doc:`/notes/autograd`).
Args:
data (array_like): Initial data for the tensor. Can be a list, tuple,
NumPy ``ndarray``, scalar, and other types.
dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.
Default: if ``None``, infers data type from :attr:`data`.
device (:class:`torch.device`, optional): the device of the constructed tensor. If None and data is a tensor
then the device of data is used. If None and data is not a tensor then
the result tensor is constructed on the current device.
Example::
>>> a = numpy.array([1, 2, 3])
>>> t = torch.as_tensor(a)
>>> t
tensor([ 1, 2, 3])
>>> t[0] = -1
>>> a
array([-1, 2, 3])
>>> a = numpy.array([1, 2, 3])
>>> t = torch.as_tensor(a, device=torch.device('cuda'))
>>> t
tensor([ 1, 2, 3])
>>> t[0] = -1
>>> a
array([1, 2, 3])
"""
...
def asarray(obj: Any, *, dtype: Optional[_dtype] = None, device: Optional[DeviceLikeType] = None, copy: Optional[_bool] = None, requires_grad: _bool = False) -> Tensor:
r"""
asarray(obj, *, dtype=None, device=None, copy=None, requires_grad=False) -> Tensor
Converts :attr:`obj` to a tensor.
:attr:`obj` can be one of:
1. a tensor
2. a NumPy array or a NumPy scalar
3. a DLPack capsule
4. an object that implements Python's buffer protocol
5. a scalar
6. a sequence of scalars
When :attr:`obj` is a tensor, NumPy array, or DLPack capsule the returned tensor will,
by default, not require a gradient, have the same datatype as :attr:`obj`, be on the
same device, and share memory with it. These properties can be controlled with the
:attr:`dtype`, :attr:`device`, :attr:`copy`, and :attr:`requires_grad` keyword arguments.
If the returned tensor is of a different datatype, on a different device, or a copy is
requested then it will not share its memory with :attr:`obj`. If :attr:`requires_grad`
is ``True`` then the returned tensor will require a gradient, and if :attr:`obj` is
also a tensor with an autograd history then the returned tensor will have the same history.
When :attr:`obj` is not a tensor, NumPy array, or DLPack capsule but implements Python's
buffer protocol then the buffer is interpreted as an array of bytes grouped according to
the size of the datatype passed to the :attr:`dtype` keyword argument. (If no datatype is
passed then the default floating point datatype is used instead.) The returned tensor
will have the specified datatype (or default floating point datatype if none is specified)
and, by default, be on the CPU device and share memory with the buffer.
When :attr:`obj` is a NumPy scalar, the returned tensor will be a 0-dimensional tensor on
the CPU that does not share its memory (i.e. ``copy=True``). By default, the datatype will
be the PyTorch datatype corresponding to the NumPy scalar's datatype.
When :attr:`obj` is none of the above but a scalar or a sequence of scalars, then the
returned tensor will, by default, infer its datatype from the scalar values, be on the
current default device, and not share its memory.
.. seealso::
:func:`torch.tensor` creates a tensor that always copies the data from the input object.
:func:`torch.from_numpy` creates a tensor that always shares memory from NumPy arrays.
:func:`torch.frombuffer` creates a tensor that always shares memory from objects that
implement the buffer protocol.
:func:`torch.from_dlpack` creates a tensor that always shares memory from
DLPack capsules.
Args:
obj (object): a tensor, NumPy array, DLPack Capsule, object that implements Python's
buffer protocol, scalar, or sequence of scalars.
Keyword args:
dtype (:class:`torch.dtype`, optional): the datatype of the returned tensor.
Default: ``None``, which causes the datatype of the returned tensor to be
inferred from :attr:`obj`.
copy (bool, optional): controls whether the returned tensor shares memory with :attr:`obj`.
Default: ``None``, which causes the returned tensor to share memory with :attr:`obj`
whenever possible. If ``True`` then the returned tensor does not share its memory.
If ``False`` then the returned tensor shares its memory with :attr:`obj` and an
error is thrown if it cannot.
device (:class:`torch.device`, optional): the device of the returned tensor.
Default: ``None``, which causes the device of :attr:`obj` to be used. Or, if
:attr:`obj` is a Python sequence, the current default device will be used.
requires_grad (bool, optional): whether the returned tensor requires grad.
Default: ``False``, which causes the returned tensor not to require a gradient.
If ``True``, then the returned tensor will require a gradient, and if :attr:`obj`
is also a tensor with an autograd history then the returned tensor will have
the same history.
Example::
>>> a = torch.tensor([1, 2, 3])
>>> # Shares memory with tensor 'a'
>>> b = torch.asarray(a)
>>> a.data_ptr() == b.data_ptr()
True
>>> # Forces memory copy
>>> c = torch.asarray(a, copy=True)
>>> a.data_ptr() == c.data_ptr()
False
>>> a = torch.tensor([1., 2., 3.], requires_grad=True)
>>> b = a + 2
>>> b
tensor([3., 4., 5.], grad_fn=<AddBackward0>)
>>> # Shares memory with tensor 'b', with no grad
>>> c = torch.asarray(b)
>>> c
tensor([3., 4., 5.])
>>> # Shares memory with tensor 'b', retaining autograd history
>>> d = torch.asarray(b, requires_grad=True)
>>> d
tensor([3., 4., 5.], grad_fn=<AddBackward0>)
>>> array = numpy.array([1, 2, 3])
>>> # Shares memory with array 'array'
>>> t1 = torch.asarray(array)
>>> array.__array_interface__['data'][0] == t1.data_ptr()
True
>>> # Copies memory due to dtype mismatch
>>> t2 = torch.asarray(array, dtype=torch.float32)
>>> array.__array_interface__['data'][0] == t2.data_ptr()
False
>>> scalar = numpy.float64(0.5)
>>> torch.asarray(scalar)
tensor(0.5000, dtype=torch.float64)
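>>> # Hedged sketch of the buffer-protocol case above (a bytearray is assumed
>>> # here purely for illustration; expected output shown)
>>> torch.asarray(bytearray([1, 2, 3]), dtype=torch.uint8)
tensor([1, 2, 3], dtype=torch.uint8)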
"""
...
def asin(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
r"""
asin(input, *, out=None) -> Tensor
Returns a new tensor with the arcsine of the elements of :attr:`input`.
.. math::
\text{out}_{i} = \sin^{-1}(\text{input}_{i})
Args:
input (Tensor): the input tensor.
Keyword args:
out (Tensor, optional): the output tensor.
Example::
>>> a = torch.randn(4)
>>> a
tensor([-0.5962, 1.4985, -0.4396, 1.4525])
>>> torch.asin(a)
tensor([-0.6387, nan, -0.4552, nan])
"""
...
def asin_(input: Tensor) -> Tensor: ...
def asinh(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
r"""
asinh(input, *, out=None) -> Tensor
Returns a new tensor with the inverse hyperbolic sine of the elements of :attr:`input`.
.. math::
\text{out}_{i} = \sinh^{-1}(\text{input}_{i})
Args:
input (Tensor): the input tensor.
Keyword arguments:
out (Tensor, optional): the output tensor.
Example::
>>> a = torch.randn(4)
>>> a
tensor([ 0.1606, -1.4267, -1.0899, -1.0250 ])
>>> torch.asinh(a)
tensor([ 0.1599, -1.1534, -0.9435, -0.8990 ])
"""
...
def asinh_(input: Tensor) -> Tensor: ...
def atan(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
r"""
atan(input, *, out=None) -> Tensor
Returns a new tensor with the arctangent of the elements of :attr:`input`.
.. math::
\text{out}_{i} = \tan^{-1}(\text{input}_{i})
Args:
input (Tensor): the input tensor.
Keyword args:
out (Tensor, optional): the output tensor.
Example::
>>> a = torch.randn(4)
>>> a
tensor([ 0.2341, 0.2539, -0.6256, -0.6448])
>>> torch.atan(a)
tensor([ 0.2299, 0.2487, -0.5591, -0.5727])
"""
...
def atan2(input: Tensor, other: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
r"""
atan2(input, other, *, out=None) -> Tensor
Element-wise arctangent of :math:`\text{input}_{i} / \text{other}_{i}`
with consideration of the quadrant. Returns a new tensor with the signed angles
in radians between vector :math:`(\text{other}_{i}, \text{input}_{i})`
and vector :math:`(1, 0)`. (Note that :math:`\text{other}_{i}`, the second
parameter, is the x-coordinate, while :math:`\text{input}_{i}`, the first
parameter, is the y-coordinate.)
The shapes of ``input`` and ``other`` must be
:ref:`broadcastable <broadcasting-semantics>`.
Args:
input (Tensor): the first input tensor
other (Tensor): the second input tensor
Keyword args:
out (Tensor, optional): the output tensor.
Example::
>>> a = torch.randn(4)
>>> a
tensor([ 0.9041, 0.0196, -0.3108, -2.4423])
>>> torch.atan2(a, torch.randn(4))
tensor([ 0.9833, 0.0811, -1.9743, -1.4151])
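>>> # Illustrative sketch of the quadrant handling (expected values shown):
>>> # atan2(1, -1) lies in the second quadrant, atan2(-1, -1) in the third
>>> torch.atan2(torch.tensor([1., -1.]), torch.tensor([-1., -1.]))
tensor([ 2.3562, -2.3562])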
"""
...
def atan_(input: Tensor) -> Tensor: ...
def atanh(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
r"""
atanh(input, *, out=None) -> Tensor
Returns a new tensor with the inverse hyperbolic tangent of the elements of :attr:`input`.
Note:
The domain of the inverse hyperbolic tangent is `(-1, 1)` and values outside this range
will be mapped to ``NaN``, except for the values `1` and `-1` for which the output is
mapped to `+/-INF` respectively.
.. math::
\text{out}_{i} = \tanh^{-1}(\text{input}_{i})
Args:
input (Tensor): the input tensor.
Keyword arguments:
out (Tensor, optional): the output tensor.
Example::
>>> a = torch.randn(4).uniform_(-1, 1)
>>> a
tensor([ -0.9385, 0.2968, -0.8591, -0.1871 ])
>>> torch.atanh(a)
tensor([ -1.7253, 0.3060, -1.2899, -0.1893 ])
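>>> # Illustrative sketch of the domain note above (expected output shown)
>>> torch.atanh(torch.tensor([1.0, -1.0, 2.0]))
tensor([inf, -inf, nan])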
"""
...
def atanh_(input: Tensor) -> Tensor: ...
def avg_pool1d(input: Tensor, kernel_size: Union[_int, _size], stride: Union[_int, _size] = (), padding: Union[_int, _size] = 0, ceil_mode: _bool = False, count_include_pad: _bool = True) -> Tensor: ...
@overload
def baddbmm(beta: Union[Number, _complex], self: Tensor, alpha: Union[Number, _complex], batch1: Tensor, batch2: Tensor) -> Tensor:
r"""
baddbmm(input, batch1, batch2, *, beta=1, alpha=1, out=None) -> Tensor
Performs a batch matrix-matrix product of matrices in :attr:`batch1`
and :attr:`batch2`.
:attr:`input` is added to the final result.
:attr:`batch1` and :attr:`batch2` must be 3-D tensors each containing the same
number of matrices.
If :attr:`batch1` is a :math:`(b \times n \times m)` tensor, :attr:`batch2` is a
:math:`(b \times m \times p)` tensor, then :attr:`input` must be
:ref:`broadcastable <broadcasting-semantics>` with a
:math:`(b \times n \times p)` tensor and :attr:`out` will be a
:math:`(b \times n \times p)` tensor. Both :attr:`alpha` and :attr:`beta` mean the
same as the scaling factors used in :meth:`torch.addbmm`.
.. math::
\text{out}_i = \beta\ \text{input}_i + \alpha\ (\text{batch1}_i \mathbin{@} \text{batch2}_i)
If :attr:`beta` is 0, then :attr:`input` will be ignored, and `nan` and `inf` in
it will not be propagated.
For inputs of type `FloatTensor` or `DoubleTensor`, arguments :attr:`beta` and
:attr:`alpha` must be real numbers, otherwise they should be integers.
This operator supports :ref:`TensorFloat32<tf32_on_ampere>`.
On certain ROCm devices, when using float16 inputs this module will use :ref:`different precision<fp16_on_mi200>` for backward.
Args:
input (Tensor): the tensor to be added
batch1 (Tensor): the first batch of matrices to be multiplied
batch2 (Tensor): the second batch of matrices to be multiplied
Keyword args:
beta (Number, optional): multiplier for :attr:`input` (:math:`\beta`)
alpha (Number, optional): multiplier for :math:`\text{batch1} \mathbin{@} \text{batch2}` (:math:`\alpha`)
out (Tensor, optional): the output tensor.
Example::
>>> M = torch.randn(10, 3, 5)
>>> batch1 = torch.randn(10, 3, 4)
>>> batch2 = torch.randn(10, 4, 5)
>>> torch.baddbmm(M, batch1, batch2).size()
torch.Size([10, 3, 5])
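>>> # Deterministic sketch of the formula above, using all-ones batches so
>>> # each batch product is a matrix of 3s, scaled here by alpha=2
>>> torch.baddbmm(torch.zeros(1, 2, 2), torch.ones(1, 2, 3), torch.ones(1, 3, 2), beta=1, alpha=2)
tensor([[[6., 6.],
[6., 6.]]])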
"""
...
@overload
def baddbmm(beta: Union[Number, _complex], self: Tensor, alpha: Union[Number, _complex], batch1: Tensor, batch2: Tensor, *, out: Tensor) -> Tensor:
r"""
baddbmm(input, batch1, batch2, *, beta=1, alpha=1, out=None) -> Tensor
Performs a batch matrix-matrix product of matrices in :attr:`batch1`
and :attr:`batch2`.
:attr:`input` is added to the final result.
:attr:`batch1` and :attr:`batch2` must be 3-D tensors each containing the same
number of matrices.
If :attr:`batch1` is a :math:`(b \times n \times m)` tensor, :attr:`batch2` is a
:math:`(b \times m \times p)` tensor, then :attr:`input` must be
:ref:`broadcastable <broadcasting-semantics>` with a
:math:`(b \times n \times p)` tensor and :attr:`out` will be a
:math:`(b \times n \times p)` tensor. Both :attr:`alpha` and :attr:`beta` mean the
same as the scaling factors used in :meth:`torch.addbmm`.
.. math::
\text{out}_i = \beta\ \text{input}_i + \alpha\ (\text{batch1}_i \mathbin{@} \text{batch2}_i)
If :attr:`beta` is 0, then :attr:`input` will be ignored, and `nan` and `inf` in
it will not be propagated.
For inputs of type `FloatTensor` or `DoubleTensor`, arguments :attr:`beta` and
:attr:`alpha` must be real numbers, otherwise they should be integers.
This operator supports :ref:`TensorFloat32<tf32_on_ampere>`.
On certain ROCm devices, when using float16 inputs this module will use :ref:`different precision<fp16_on_mi200>` for backward.
Args:
input (Tensor): the tensor to be added
batch1 (Tensor): the first batch of matrices to be multiplied
batch2 (Tensor): the second batch of matrices to be multiplied
Keyword args:
beta (Number, optional): multiplier for :attr:`input` (:math:`\beta`)
alpha (Number, optional): multiplier for :math:`\text{batch1} \mathbin{@} \text{batch2}` (:math:`\alpha`)
out (Tensor, optional): the output tensor.
Example::
>>> M = torch.randn(10, 3, 5)
>>> batch1 = torch.randn(10, 3, 4)
>>> batch2 = torch.randn(10, 4, 5)
>>> torch.baddbmm(M, batch1, batch2).size()
torch.Size([10, 3, 5])
"""
...
@overload
def baddbmm(input: Tensor, batch1: Tensor, batch2: Tensor, *, beta: Union[Number, _complex] = 1, alpha: Union[Number, _complex] = 1, out: Optional[Tensor] = None) -> Tensor:
r"""
baddbmm(input, batch1, batch2, *, beta=1, alpha=1, out=None) -> Tensor
Performs a batch matrix-matrix product of matrices in :attr:`batch1`
and :attr:`batch2`.
:attr:`input` is added to the final result.
:attr:`batch1` and :attr:`batch2` must be 3-D tensors each containing the same
number of matrices.
If :attr:`batch1` is a :math:`(b \times n \times m)` tensor, :attr:`batch2` is a
:math:`(b \times m \times p)` tensor, then :attr:`input` must be
:ref:`broadcastable <broadcasting-semantics>` with a
:math:`(b \times n \times p)` tensor and :attr:`out` will be a
:math:`(b \times n \times p)` tensor. Both :attr:`alpha` and :attr:`beta` mean the
same as the scaling factors used in :meth:`torch.addbmm`.
.. math::
\text{out}_i = \beta\ \text{input}_i + \alpha\ (\text{batch1}_i \mathbin{@} \text{batch2}_i)
If :attr:`beta` is 0, then :attr:`input` will be ignored, and `nan` and `inf` in
it will not be propagated.
For inputs of type `FloatTensor` or `DoubleTensor`, arguments :attr:`beta` and
:attr:`alpha` must be real numbers, otherwise they should be integers.
This operator supports :ref:`TensorFloat32<tf32_on_ampere>`.
On certain ROCm devices, when using float16 inputs this module will use :ref:`different precision<fp16_on_mi200>` for backward.
Args:
input (Tensor): the tensor to be added
batch1 (Tensor): the first batch of matrices to be multiplied
batch2 (Tensor): the second batch of matrices to be multiplied
Keyword args:
beta (Number, optional): multiplier for :attr:`input` (:math:`\beta`)
alpha (Number, optional): multiplier for :math:`\text{batch1} \mathbin{@} \text{batch2}` (:math:`\alpha`)
out (Tensor, optional): the output tensor.
Example::
>>> M = torch.randn(10, 3, 5)
>>> batch1 = torch.randn(10, 3, 4)
>>> batch2 = torch.randn(10, 4, 5)
>>> torch.baddbmm(M, batch1, batch2).size()
torch.Size([10, 3, 5])
"""
...
@overload
def baddbmm(beta: Union[Number, _complex], self: Tensor, batch1: Tensor, batch2: Tensor) -> Tensor:
r"""
baddbmm(input, batch1, batch2, *, beta=1, alpha=1, out=None) -> Tensor
Performs a batch matrix-matrix product of matrices in :attr:`batch1`
and :attr:`batch2`.
:attr:`input` is added to the final result.
:attr:`batch1` and :attr:`batch2` must be 3-D tensors each containing the same
number of matrices.
If :attr:`batch1` is a :math:`(b \times n \times m)` tensor, :attr:`batch2` is a
:math:`(b \times m \times p)` tensor, then :attr:`input` must be
:ref:`broadcastable <broadcasting-semantics>` with a
:math:`(b \times n \times p)` tensor and :attr:`out` will be a
:math:`(b \times n \times p)` tensor. Both :attr:`alpha` and :attr:`beta` mean the
same as the scaling factors used in :meth:`torch.addbmm`.
.. math::
\text{out}_i = \beta\ \text{input}_i + \alpha\ (\text{batch1}_i \mathbin{@} \text{batch2}_i)
If :attr:`beta` is 0, then :attr:`input` will be ignored, and `nan` and `inf` in
it will not be propagated.
For inputs of type `FloatTensor` or `DoubleTensor`, arguments :attr:`beta` and
:attr:`alpha` must be real numbers, otherwise they should be integers.
This operator supports :ref:`TensorFloat32<tf32_on_ampere>`.
On certain ROCm devices, when using float16 inputs this module will use :ref:`different precision<fp16_on_mi200>` for backward.
Args:
input (Tensor): the tensor to be added
batch1 (Tensor): the first batch of matrices to be multiplied
batch2 (Tensor): the second batch of matrices to be multiplied
Keyword args:
beta (Number, optional): multiplier for :attr:`input` (:math:`\beta`)
alpha (Number, optional): multiplier for :math:`\text{batch1} \mathbin{@} \text{batch2}` (:math:`\alpha`)
out (Tensor, optional): the output tensor.
Example::
>>> M = torch.randn(10, 3, 5)
>>> batch1 = torch.randn(10, 3, 4)
>>> batch2 = torch.randn(10, 4, 5)
>>> torch.baddbmm(M, batch1, batch2).size()
torch.Size([10, 3, 5])
"""
...
@overload
def baddbmm(beta: Union[Number, _complex], self: Tensor, batch1: Tensor, batch2: Tensor, *, out: Tensor) -> Tensor:
r"""
baddbmm(input, batch1, batch2, *, beta=1, alpha=1, out=None) -> Tensor
Performs a batch matrix-matrix product of matrices in :attr:`batch1`
and :attr:`batch2`.
:attr:`input` is added to the final result.
:attr:`batch1` and :attr:`batch2` must be 3-D tensors each containing the same
number of matrices.
If :attr:`batch1` is a :math:`(b \times n \times m)` tensor, :attr:`batch2` is a
:math:`(b \times m \times p)` tensor, then :attr:`input` must be
:ref:`broadcastable <broadcasting-semantics>` with a
:math:`(b \times n \times p)` tensor and :attr:`out` will be a
:math:`(b \times n \times p)` tensor. Both :attr:`alpha` and :attr:`beta` mean the
same as the scaling factors used in :meth:`torch.addbmm`.
.. math::
\text{out}_i = \beta\ \text{input}_i + \alpha\ (\text{batch1}_i \mathbin{@} \text{batch2}_i)
If :attr:`beta` is 0, then :attr:`input` will be ignored, and `nan` and `inf` in
it will not be propagated.
For inputs of type `FloatTensor` or `DoubleTensor`, arguments :attr:`beta` and
:attr:`alpha` must be real numbers, otherwise they should be integers.
This operator supports :ref:`TensorFloat32<tf32_on_ampere>`.
On certain ROCm devices, when using float16 inputs this module will use :ref:`different precision<fp16_on_mi200>` for backward.
Args:
input (Tensor): the tensor to be added
batch1 (Tensor): the first batch of matrices to be multiplied
batch2 (Tensor): the second batch of matrices to be multiplied
Keyword args:
beta (Number, optional): multiplier for :attr:`input` (:math:`\beta`)
alpha (Number, optional): multiplier for :math:`\text{batch1} \mathbin{@} \text{batch2}` (:math:`\alpha`)
out (Tensor, optional): the output tensor.
Example::
>>> M = torch.randn(10, 3, 5)
>>> batch1 = torch.randn(10, 3, 4)
>>> batch2 = torch.randn(10, 4, 5)
>>> torch.baddbmm(M, batch1, batch2).size()
torch.Size([10, 3, 5])
"""
...
@overload
def bartlett_window(window_length: _int, *, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor:
r"""
bartlett_window(window_length, periodic=True, *, dtype=None, layout=torch.strided, device=None, requires_grad=False) -> Tensor
Bartlett window function.
.. math::
w[n] = 1 - \left| \frac{2n}{N-1} - 1 \right| = \begin{cases}
\frac{2n}{N - 1} & \text{if } 0 \leq n \leq \frac{N - 1}{2} \\
2 - \frac{2n}{N - 1} & \text{if } \frac{N - 1}{2} < n < N \\
\end{cases},
where :math:`N` is the full window size.
The input :attr:`window_length` is a positive integer controlling the
returned window size. The :attr:`periodic` flag determines whether the returned
window trims off the last duplicate value from the symmetric window and is
ready to be used as a periodic window with functions like
:meth:`torch.stft`. Therefore, if :attr:`periodic` is true, the :math:`N` in the
formula above is in fact :math:`\text{window\_length} + 1`. Also, we always have
``torch.bartlett_window(L, periodic=True)`` equal to
``torch.bartlett_window(L + 1, periodic=False)[:-1]``.
.. note::
If :attr:`window_length` :math:`=1`, the returned window contains a single value 1.
Arguments:
window_length (int): the size of returned window
periodic (bool, optional): If True, returns a window to be used as a periodic
    function. If False, returns a symmetric window.
Keyword args:
dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.
Default: if ``None``, uses a global default (see :func:`torch.set_default_dtype`). Only floating point types are supported.
layout (:class:`torch.layout`, optional): the desired layout of returned window tensor. Only
``torch.strided`` (dense layout) is supported.
device (:class:`torch.device`, optional): the desired device of returned tensor.
Default: if ``None``, uses the current device for the default tensor type
(see :func:`torch.set_default_device`). :attr:`device` will be the CPU
for CPU tensor types and the current CUDA device for CUDA tensor types.
requires_grad (bool, optional): If autograd should record operations on the
returned tensor. Default: ``False``.
Returns:
Tensor: A 1-D tensor of size :math:`(\text{window\_length},)` containing the window
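Example (a quick check of the periodic/symmetric identity stated above; ``L`` is an arbitrary length :math:`\geq 2`)::
    >>> L = 5
    >>> torch.equal(torch.bartlett_window(L, periodic=True),
    ...             torch.bartlett_window(L + 1, periodic=False)[:-1])
    True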
"""
...
@overload
def bartlett_window(window_length: _int, periodic: _bool, *, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor:
r"""
bartlett_window(window_length, periodic=True, *, dtype=None, layout=torch.strided, device=None, requires_grad=False) -> Tensor
Bartlett window function.
.. math::
w[n] = 1 - \left| \frac{2n}{N-1} - 1 \right| = \begin{cases}
\frac{2n}{N - 1} & \text{if } 0 \leq n \leq \frac{N - 1}{2} \\
2 - \frac{2n}{N - 1} & \text{if } \frac{N - 1}{2} < n < N \\
\end{cases},
where :math:`N` is the full window size.
The input :attr:`window_length` is a positive integer controlling the
returned window size. The :attr:`periodic` flag determines whether the returned
window trims off the last duplicate value from the symmetric window and is
ready to be used as a periodic window with functions like
:meth:`torch.stft`. Therefore, if :attr:`periodic` is true, the :math:`N` in the
formula above is in fact :math:`\text{window\_length} + 1`. Also, we always have
``torch.bartlett_window(L, periodic=True)`` equal to
``torch.bartlett_window(L + 1, periodic=False)[:-1]``.
.. note::
If :attr:`window_length` :math:`=1`, the returned window contains a single value 1.
Arguments:
window_length (int): the size of returned window
periodic (bool, optional): If True, returns a window to be used as a periodic
    function. If False, returns a symmetric window.
Keyword args:
dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.
Default: if ``None``, uses a global default (see :func:`torch.set_default_dtype`). Only floating point types are supported.
layout (:class:`torch.layout`, optional): the desired layout of returned window tensor. Only
``torch.strided`` (dense layout) is supported.
device (:class:`torch.device`, optional): the desired device of returned tensor.
Default: if ``None``, uses the current device for the default tensor type
(see :func:`torch.set_default_device`). :attr:`device` will be the CPU
for CPU tensor types and the current CUDA device for CUDA tensor types.
requires_grad (bool, optional): If autograd should record operations on the
returned tensor. Default: ``False``.
Returns:
Tensor: A 1-D tensor of size :math:`(\text{window\_length},)` containing the window
"""
...
def batch_norm(input: Tensor, weight: Optional[Tensor], bias: Optional[Tensor], running_mean: Optional[Tensor], running_var: Optional[Tensor], training: _bool, momentum: _float, eps: _float, cudnn_enabled: _bool) -> Tensor: ...
def batch_norm_backward_elemt(grad_out: Tensor, input: Tensor, mean: Tensor, invstd: Tensor, weight: Optional[Tensor], sum_dy: Tensor, sum_dy_xmu: Tensor, count: Tensor) -> Tensor: ...
def batch_norm_backward_reduce(grad_out: Tensor, input: Tensor, mean: Tensor, invstd: Tensor, weight: Optional[Tensor], input_g: _bool, weight_g: _bool, bias_g: _bool) -> Tuple[Tensor, Tensor, Tensor, Tensor]: ...
def batch_norm_elemt(input: Tensor, weight: Optional[Tensor], bias: Optional[Tensor], mean: Tensor, invstd: Tensor, eps: _float, *, out: Optional[Tensor] = None) -> Tensor: ...
def batch_norm_gather_stats(input: Tensor, mean: Tensor, invstd: Tensor, running_mean: Optional[Tensor], running_var: Optional[Tensor], momentum: _float, eps: _float, count: _int) -> Tuple[Tensor, Tensor]: ...
def batch_norm_gather_stats_with_counts(input: Tensor, mean: Tensor, invstd: Tensor, running_mean: Optional[Tensor], running_var: Optional[Tensor], momentum: _float, eps: _float, counts: Tensor) -> Tuple[Tensor, Tensor]: ...
def batch_norm_stats(input: Tensor, eps: _float) -> Tuple[Tensor, Tensor]: ...
def batch_norm_update_stats(input: Tensor, running_mean: Optional[Tensor], running_var: Optional[Tensor], momentum: _float) -> Tuple[Tensor, Tensor]: ...
@overload
def bernoulli(input: Tensor, *, generator: Optional[Generator] = None, out: Optional[Tensor] = None) -> Tensor:
r"""
bernoulli(input, *, generator=None, out=None) -> Tensor
Draws binary random numbers (0 or 1) from a Bernoulli distribution.
The :attr:`input` tensor should be a tensor containing probabilities
to be used for drawing the binary random number.
Hence, all values in :attr:`input` have to be in the range:
:math:`0 \leq \text{input}_i \leq 1`.
The :math:`\text{i}^{th}` element of the output tensor will draw a
value :math:`1` according to the :math:`\text{i}^{th}` probability value given
in :attr:`input`.
.. math::
\text{out}_{i} \sim \mathrm{Bernoulli}(p = \text{input}_{i})
The returned :attr:`out` tensor only has values 0 or 1 and is of the same
shape as :attr:`input`.
:attr:`out` can have integral ``dtype``, but :attr:`input` must have floating
point ``dtype``.
Args:
input (Tensor): the input tensor of probability values for the Bernoulli distribution
Keyword args:
generator (:class:`torch.Generator`, optional): a pseudorandom number generator for sampling
out (Tensor, optional): the output tensor.
Example::
>>> a = torch.empty(3, 3).uniform_(0, 1) # generate a uniform random matrix with range [0, 1]
>>> a
tensor([[ 0.1737, 0.0950, 0.3609],
[ 0.7148, 0.0289, 0.2676],
[ 0.9456, 0.8937, 0.7202]])
>>> torch.bernoulli(a)
tensor([[ 1., 0., 0.],
[ 0., 0., 0.],
[ 1., 1., 1.]])
>>> a = torch.ones(3, 3) # probability of drawing "1" is 1
>>> torch.bernoulli(a)
tensor([[ 1., 1., 1.],
[ 1., 1., 1.],
[ 1., 1., 1.]])
>>> a = torch.zeros(3, 3) # probability of drawing "1" is 0
>>> torch.bernoulli(a)
tensor([[ 0., 0., 0.],
[ 0., 0., 0.],
[ 0., 0., 0.]])
"""
...
@overload
def bernoulli(input: Tensor, p: _float, *, generator: Optional[Generator] = None) -> Tensor:
r"""
bernoulli(input, *, generator=None, out=None) -> Tensor
Draws binary random numbers (0 or 1) from a Bernoulli distribution.
The :attr:`input` tensor should be a tensor containing probabilities
to be used for drawing the binary random number.
Hence, all values in :attr:`input` have to be in the range:
:math:`0 \leq \text{input}_i \leq 1`.
The :math:`\text{i}^{th}` element of the output tensor will draw a
value :math:`1` according to the :math:`\text{i}^{th}` probability value given
in :attr:`input`.
.. math::
\text{out}_{i} \sim \mathrm{Bernoulli}(p = \text{input}_{i})
The returned :attr:`out` tensor only has values 0 or 1 and is of the same
shape as :attr:`input`.
:attr:`out` can have integral ``dtype``, but :attr:`input` must have floating
point ``dtype``.
Args:
input (Tensor): the input tensor of probability values for the Bernoulli distribution
Keyword args:
generator (:class:`torch.Generator`, optional): a pseudorandom number generator for sampling
out (Tensor, optional): the output tensor.
Example::
>>> a = torch.empty(3, 3).uniform_(0, 1) # generate a uniform random matrix with range [0, 1]
>>> a
tensor([[ 0.1737, 0.0950, 0.3609],
[ 0.7148, 0.0289, 0.2676],
[ 0.9456, 0.8937, 0.7202]])
>>> torch.bernoulli(a)
tensor([[ 1., 0., 0.],
[ 0., 0., 0.],
[ 1., 1., 1.]])
>>> a = torch.ones(3, 3) # probability of drawing "1" is 1
>>> torch.bernoulli(a)
tensor([[ 1., 1., 1.],
[ 1., 1., 1.],
[ 1., 1., 1.]])
>>> a = torch.zeros(3, 3) # probability of drawing "1" is 0
>>> torch.bernoulli(a)
tensor([[ 0., 0., 0.],
[ 0., 0., 0.],
[ 0., 0., 0.]])
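This overload draws every element with the fixed probability ``p``; the values of
:attr:`input` only supply the shape and dtype of the result. A minimal sketch::
    >>> torch.bernoulli(torch.empty(3, 3), 0.5).shape
    torch.Size([3, 3])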
"""
...
def bilinear(input1: Tensor, input2: Tensor, weight: Tensor, bias: Optional[Tensor] = None) -> Tensor: ...
def binary_cross_entropy_with_logits(input: Tensor, target: Tensor, weight: Optional[Tensor] = None, pos_weight: Optional[Tensor] = None, reduction: _int = 1) -> Tensor: ...
def bincount(input: Tensor, weights: Optional[Tensor] = None, minlength: _int = 0) -> Tensor:
r"""
bincount(input, weights=None, minlength=0) -> Tensor
Count the frequency of each value in an array of non-negative ints.
The number of bins (each of size 1) is one larger than the largest value in
:attr:`input` unless :attr:`input` is empty, in which case the result is a
tensor of size 0. If :attr:`minlength` is specified, the number of bins is at least
:attr:`minlength` and if :attr:`input` is empty, then the result is a tensor of size
:attr:`minlength` filled with zeros. If ``n`` is the value at position ``i``,
``out[n] += weights[i]`` if :attr:`weights` is specified else
``out[n] += 1``.
Note:
This operation may produce nondeterministic gradients when given tensors on a CUDA device. See :doc:`/notes/randomness` for more information.
Arguments:
input (Tensor): 1-d int tensor
weights (Tensor): optional, weight for each value in the input tensor.
Should be of same size as input tensor.
minlength (int): optional, minimum number of bins. Should be non-negative.
Returns:
output (Tensor): a tensor of shape ``Size([max(input) + 1])`` if
:attr:`input` is non-empty, else ``Size(0)``
Example::
>>> input = torch.randint(0, 8, (5,), dtype=torch.int64)
>>> weights = torch.linspace(0, 1, steps=5)
>>> input, weights
(tensor([4, 3, 6, 3, 4]),
tensor([ 0.0000, 0.2500, 0.5000, 0.7500, 1.0000]))
>>> torch.bincount(input)
tensor([0, 0, 0, 2, 2, 0, 1])
>>> input.bincount(weights)
tensor([0.0000, 0.0000, 0.0000, 1.0000, 1.0000, 0.0000, 0.5000])
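A minimal sketch of :attr:`minlength` padding the result with zeros (illustrative values)::
    >>> torch.bincount(torch.tensor([1, 1, 2]), minlength=5)
    tensor([0, 2, 1, 0, 0])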
"""
...
def binomial(count: Tensor, prob: Tensor, generator: Optional[Generator] = None) -> Tensor: ...
@overload
def bitwise_and(input: Tensor, other: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
r"""
bitwise_and(input, other, *, out=None) -> Tensor
Computes the bitwise AND of :attr:`input` and :attr:`other`. The input tensor must be of
integral or Boolean types. For bool tensors, it computes the logical AND.
Args:
input: the first input tensor
other: the second input tensor
Keyword args:
out (Tensor, optional): the output tensor.
Example::
>>> torch.bitwise_and(torch.tensor([-1, -2, 3], dtype=torch.int8), torch.tensor([1, 0, 3], dtype=torch.int8))
tensor([1, 0, 3], dtype=torch.int8)
>>> torch.bitwise_and(torch.tensor([True, True, False]), torch.tensor([False, True, False]))
tensor([ False, True, False])
"""
...
@overload
def bitwise_and(self: Union[Number, _complex], other: Tensor) -> Tensor:
r"""
bitwise_and(input, other, *, out=None) -> Tensor
Computes the bitwise AND of :attr:`input` and :attr:`other`. The input tensor must be of
integral or Boolean types. For bool tensors, it computes the logical AND.
Args:
input: the first input tensor
other: the second input tensor
Keyword args:
out (Tensor, optional): the output tensor.
Example::
>>> torch.bitwise_and(torch.tensor([-1, -2, 3], dtype=torch.int8), torch.tensor([1, 0, 3], dtype=torch.int8))
tensor([1, 0, 3], dtype=torch.int8)
>>> torch.bitwise_and(torch.tensor([True, True, False]), torch.tensor([False, True, False]))
tensor([ False, True, False])
"""
...
@overload
def bitwise_and(input: Tensor, other: Union[Number, _complex], *, out: Optional[Tensor] = None) -> Tensor:
r"""
bitwise_and(input, other, *, out=None) -> Tensor
Computes the bitwise AND of :attr:`input` and :attr:`other`. The input tensor must be of
integral or Boolean types. For bool tensors, it computes the logical AND.
Args:
input: the first input tensor
other: the second input tensor
Keyword args:
out (Tensor, optional): the output tensor.
Example::
>>> torch.bitwise_and(torch.tensor([-1, -2, 3], dtype=torch.int8), torch.tensor([1, 0, 3], dtype=torch.int8))
tensor([1, 0, 3], dtype=torch.int8)
>>> torch.bitwise_and(torch.tensor([True, True, False]), torch.tensor([False, True, False]))
tensor([ False, True, False])
"""
...
@overload
def bitwise_left_shift(input: Tensor, other: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
r"""
bitwise_left_shift(input, other, *, out=None) -> Tensor
Computes the left arithmetic shift of :attr:`input` by :attr:`other` bits.
The input tensor must be of integral type. This operator supports
:ref:`broadcasting to a common shape <broadcasting-semantics>` and
:ref:`type promotion <type-promotion-doc>`.
The operation applied is:
.. math::
\text{out}_i = \text{input}_i << \text{other}_i
Args:
input (Tensor or Scalar): the first input tensor
other (Tensor or Scalar): the second input tensor
Keyword args:
out (Tensor, optional): the output tensor.
Example::
>>> torch.bitwise_left_shift(torch.tensor([-1, -2, 3], dtype=torch.int8), torch.tensor([1, 0, 3], dtype=torch.int8))
tensor([-2, -2, 24], dtype=torch.int8)
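A minimal sketch of the scalar form with broadcasting, as noted above (illustrative values)::
    >>> torch.bitwise_left_shift(torch.tensor([1, 2, 3]), 2)
    tensor([ 4,  8, 12])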
"""
...
@overload
def bitwise_left_shift(self: Union[Number, _complex], other: Tensor) -> Tensor:
r"""
bitwise_left_shift(input, other, *, out=None) -> Tensor
Computes the left arithmetic shift of :attr:`input` by :attr:`other` bits.
The input tensor must be of integral type. This operator supports
:ref:`broadcasting to a common shape <broadcasting-semantics>` and
:ref:`type promotion <type-promotion-doc>`.
The operation applied is:
.. math::
\text{out}_i = \text{input}_i << \text{other}_i
Args:
input (Tensor or Scalar): the first input tensor
other (Tensor or Scalar): the second input tensor
Keyword args:
out (Tensor, optional): the output tensor.
Example::
>>> torch.bitwise_left_shift(torch.tensor([-1, -2, 3], dtype=torch.int8), torch.tensor([1, 0, 3], dtype=torch.int8))
tensor([-2, -2, 24], dtype=torch.int8)
"""
...
@overload
def bitwise_left_shift(input: Tensor, other: Union[Number, _complex], *, out: Optional[Tensor] = None) -> Tensor:
r"""
bitwise_left_shift(input, other, *, out=None) -> Tensor
Computes the left arithmetic shift of :attr:`input` by :attr:`other` bits.
The input tensor must be of integral type. This operator supports
:ref:`broadcasting to a common shape <broadcasting-semantics>` and
:ref:`type promotion <type-promotion-doc>`.
The operation applied is:
.. math::
\text{out}_i = \text{input}_i << \text{other}_i
Args:
input (Tensor or Scalar): the first input tensor
other (Tensor or Scalar): the second input tensor
Keyword args:
out (Tensor, optional): the output tensor.
Example::
>>> torch.bitwise_left_shift(torch.tensor([-1, -2, 3], dtype=torch.int8), torch.tensor([1, 0, 3], dtype=torch.int8))
tensor([-2, -2, 24], dtype=torch.int8)
"""
...
def bitwise_not(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
r"""
bitwise_not(input, *, out=None) -> Tensor
Computes the bitwise NOT of the given input tensor. The input tensor must be of
integral or Boolean types. For bool tensors, it computes the logical NOT.
Args:
input (Tensor): the input tensor.
Keyword args:
out (Tensor, optional): the output tensor.
Example::
>>> torch.bitwise_not(torch.tensor([-1, -2, 3], dtype=torch.int8))
tensor([ 0, 1, -4], dtype=torch.int8)
"""
...
@overload
def bitwise_or(input: Tensor, other: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
r"""
bitwise_or(input, other, *, out=None) -> Tensor
Computes the bitwise OR of :attr:`input` and :attr:`other`. The input tensor must be of
integral or Boolean types. For bool tensors, it computes the logical OR.
Args:
input: the first input tensor
other: the second input tensor
Keyword args:
out (Tensor, optional): the output tensor.
Example::
>>> torch.bitwise_or(torch.tensor([-1, -2, 3], dtype=torch.int8), torch.tensor([1, 0, 3], dtype=torch.int8))
tensor([-1, -2, 3], dtype=torch.int8)
>>> torch.bitwise_or(torch.tensor([True, True, False]), torch.tensor([False, True, False]))
tensor([ True, True, False])
"""
...
@overload
def bitwise_or(self: Union[Number, _complex], other: Tensor) -> Tensor:
r"""
bitwise_or(input, other, *, out=None) -> Tensor
Computes the bitwise OR of :attr:`input` and :attr:`other`. The input tensor must be of
integral or Boolean types. For bool tensors, it computes the logical OR.
Args:
input: the first input tensor
other: the second input tensor
Keyword args:
out (Tensor, optional): the output tensor.
Example::
>>> torch.bitwise_or(torch.tensor([-1, -2, 3], dtype=torch.int8), torch.tensor([1, 0, 3], dtype=torch.int8))
tensor([-1, -2, 3], dtype=torch.int8)
>>> torch.bitwise_or(torch.tensor([True, True, False]), torch.tensor([False, True, False]))
tensor([ True, True, False])
"""
...
@overload
def bitwise_or(input: Tensor, other: Union[Number, _complex], *, out: Optional[Tensor] = None) -> Tensor:
r"""
bitwise_or(input, other, *, out=None) -> Tensor
Computes the bitwise OR of :attr:`input` and :attr:`other`. The input tensor must be of
integral or Boolean types. For bool tensors, it computes the logical OR.
Args:
input: the first input tensor
other: the second input tensor
Keyword args:
out (Tensor, optional): the output tensor.
Example::
>>> torch.bitwise_or(torch.tensor([-1, -2, 3], dtype=torch.int8), torch.tensor([1, 0, 3], dtype=torch.int8))
tensor([-1, -2, 3], dtype=torch.int8)
>>> torch.bitwise_or(torch.tensor([True, True, False]), torch.tensor([False, True, False]))
tensor([ True, True, False])
"""
...
@overload
def bitwise_right_shift(input: Tensor, other: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
r"""
bitwise_right_shift(input, other, *, out=None) -> Tensor
Computes the right arithmetic shift of :attr:`input` by :attr:`other` bits.
The input tensor must be of integral type. This operator supports
:ref:`broadcasting to a common shape <broadcasting-semantics>` and
:ref:`type promotion <type-promotion-doc>`.
In any case, if the value of the right operand is negative or is greater than
or equal to the number of bits in the promoted left operand, the behavior is undefined.
The operation applied is:
.. math::
\text{out}_i = \text{input}_i >> \text{other}_i
Args:
input (Tensor or Scalar): the first input tensor
other (Tensor or Scalar): the second input tensor
Keyword args:
out (Tensor, optional): the output tensor.
Example::
>>> torch.bitwise_right_shift(torch.tensor([-2, -7, 31], dtype=torch.int8), torch.tensor([1, 0, 3], dtype=torch.int8))
tensor([-1, -7, 3], dtype=torch.int8)
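A minimal sketch with a scalar shift amount; the shifts stay below the 8 bits of
``int8``, so the result is well defined (illustrative values)::
    >>> torch.bitwise_right_shift(torch.tensor([16, 4, 1], dtype=torch.int8), 2)
    tensor([4, 1, 0], dtype=torch.int8)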
"""
...
@overload
def bitwise_right_shift(self: Union[Number, _complex], other: Tensor) -> Tensor:
r"""
bitwise_right_shift(input, other, *, out=None) -> Tensor
Computes the right arithmetic shift of :attr:`input` by :attr:`other` bits.
The input tensor must be of integral type. This operator supports
:ref:`broadcasting to a common shape <broadcasting-semantics>` and
:ref:`type promotion <type-promotion-doc>`.
In any case, if the value of the right operand is negative or is greater than
or equal to the number of bits in the promoted left operand, the behavior is undefined.
The operation applied is:
.. math::
\text{out}_i = \text{input}_i >> \text{other}_i
Args:
input (Tensor or Scalar): the first input tensor
other (Tensor or Scalar): the second input tensor
Keyword args:
out (Tensor, optional): the output tensor.
Example::
>>> torch.bitwise_right_shift(torch.tensor([-2, -7, 31], dtype=torch.int8), torch.tensor([1, 0, 3], dtype=torch.int8))
tensor([-1, -7, 3], dtype=torch.int8)
"""
...
@overload
def bitwise_right_shift(input: Tensor, other: Union[Number, _complex], *, out: Optional[Tensor] = None) -> Tensor:
r"""
bitwise_right_shift(input, other, *, out=None) -> Tensor
Computes the right arithmetic shift of :attr:`input` by :attr:`other` bits.
The input tensor must be of integral type. This operator supports
:ref:`broadcasting to a common shape <broadcasting-semantics>` and
:ref:`type promotion <type-promotion-doc>`.
In any case, if the value of the right operand is negative or is greater than
or equal to the number of bits in the promoted left operand, the behavior is undefined.
The operation applied is:
.. math::
\text{out}_i = \text{input}_i >> \text{other}_i
Args:
input (Tensor or Scalar): the first input tensor
other (Tensor or Scalar): the second input tensor
Keyword args:
out (Tensor, optional): the output tensor.
Example::
>>> torch.bitwise_right_shift(torch.tensor([-2, -7, 31], dtype=torch.int8), torch.tensor([1, 0, 3], dtype=torch.int8))
tensor([-1, -7, 3], dtype=torch.int8)
"""
...
@overload
def bitwise_xor(input: Tensor, other: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
r"""
bitwise_xor(input, other, *, out=None) -> Tensor
Computes the bitwise XOR of :attr:`input` and :attr:`other`. The input tensor must be of
integral or Boolean types. For bool tensors, it computes the logical XOR.
Args:
input: the first input tensor
other: the second input tensor
Keyword args:
out (Tensor, optional): the output tensor.
Example::
>>> torch.bitwise_xor(torch.tensor([-1, -2, 3], dtype=torch.int8), torch.tensor([1, 0, 3], dtype=torch.int8))
tensor([-2, -2, 0], dtype=torch.int8)
>>> torch.bitwise_xor(torch.tensor([True, True, False]), torch.tensor([False, True, False]))
tensor([ True, False, False])
"""
...
@overload
def bitwise_xor(self: Union[Number, _complex], other: Tensor) -> Tensor:
r"""
bitwise_xor(input, other, *, out=None) -> Tensor
Computes the bitwise XOR of :attr:`input` and :attr:`other`. The input tensor must be of
integral or Boolean types. For bool tensors, it computes the logical XOR.
Args:
input: the first input tensor
other: the second input tensor
Keyword args:
out (Tensor, optional): the output tensor.
Example::
>>> torch.bitwise_xor(torch.tensor([-1, -2, 3], dtype=torch.int8), torch.tensor([1, 0, 3], dtype=torch.int8))
tensor([-2, -2, 0], dtype=torch.int8)
>>> torch.bitwise_xor(torch.tensor([True, True, False]), torch.tensor([False, True, False]))
tensor([ True, False, False])
"""
...
@overload
def bitwise_xor(input: Tensor, other: Union[Number, _complex], *, out: Optional[Tensor] = None) -> Tensor:
r"""
bitwise_xor(input, other, *, out=None) -> Tensor
Computes the bitwise XOR of :attr:`input` and :attr:`other`. The input tensor must be of
integral or Boolean types. For bool tensors, it computes the logical XOR.
Args:
input: the first input tensor
other: the second input tensor
Keyword args:
out (Tensor, optional): the output tensor.
Example::
>>> torch.bitwise_xor(torch.tensor([-1, -2, 3], dtype=torch.int8), torch.tensor([1, 0, 3], dtype=torch.int8))
tensor([-2, -2, 0], dtype=torch.int8)
>>> torch.bitwise_xor(torch.tensor([True, True, False]), torch.tensor([False, True, False]))
tensor([ True, False, False])
"""
...
@overload
def blackman_window(window_length: _int, *, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor:
r"""
blackman_window(window_length, periodic=True, *, dtype=None, layout=torch.strided, device=None, requires_grad=False) -> Tensor
Blackman window function.
.. math::
w[n] = 0.42 - 0.5 \cos \left( \frac{2 \pi n}{N - 1} \right) + 0.08 \cos \left( \frac{4 \pi n}{N - 1} \right)
where :math:`N` is the full window size.
The input :attr:`window_length` is a positive integer controlling the
returned window size. The :attr:`periodic` flag determines whether the returned
window trims off the last duplicate value from the symmetric window and is
ready to be used as a periodic window with functions like
:meth:`torch.stft`. Therefore, if :attr:`periodic` is true, the :math:`N` in the
formula above is in fact :math:`\text{window\_length} + 1`. Also, we always have
``torch.blackman_window(L, periodic=True)`` equal to
``torch.blackman_window(L + 1, periodic=False)[:-1]``.
.. note::
If :attr:`window_length` :math:`=1`, the returned window contains a single value 1.
Arguments:
window_length (int): the size of returned window
periodic (bool, optional): If True, returns a window to be used as a periodic
    function. If False, returns a symmetric window.
Keyword args:
dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.
Default: if ``None``, uses a global default (see :func:`torch.set_default_dtype`). Only floating point types are supported.
layout (:class:`torch.layout`, optional): the desired layout of returned window tensor. Only
``torch.strided`` (dense layout) is supported.
device (:class:`torch.device`, optional): the desired device of returned tensor.
Default: if ``None``, uses the current device for the default tensor type
(see :func:`torch.set_default_device`). :attr:`device` will be the CPU
for CPU tensor types and the current CUDA device for CUDA tensor types.
requires_grad (bool, optional): If autograd should record operations on the
returned tensor. Default: ``False``.
Returns:
Tensor: A 1-D tensor of size :math:`(\text{window\_length},)` containing the window
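Example (a quick check of the periodic/symmetric identity stated above; ``L`` is an arbitrary length :math:`\geq 2`)::
    >>> L = 5
    >>> torch.equal(torch.blackman_window(L, periodic=True),
    ...             torch.blackman_window(L + 1, periodic=False)[:-1])
    True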
"""
...
@overload
def blackman_window(window_length: _int, periodic: _bool, *, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor:
r"""
blackman_window(window_length, periodic=True, *, dtype=None, layout=torch.strided, device=None, requires_grad=False) -> Tensor
Blackman window function.
.. math::
w[n] = 0.42 - 0.5 \cos \left( \frac{2 \pi n}{N - 1} \right) + 0.08 \cos \left( \frac{4 \pi n}{N - 1} \right)
where :math:`N` is the full window size.
The input :attr:`window_length` is a positive integer controlling the
returned window size. The :attr:`periodic` flag determines whether the returned
window trims off the last duplicate value from the symmetric window and is
ready to be used as a periodic window with functions like
:meth:`torch.stft`. Therefore, if :attr:`periodic` is true, the :math:`N` in the
formula above is in fact :math:`\text{window\_length} + 1`. Also, we always have
``torch.blackman_window(L, periodic=True)`` equal to
``torch.blackman_window(L + 1, periodic=False)[:-1]``.
.. note::
If :attr:`window_length` :math:`=1`, the returned window contains a single value 1.
Arguments:
window_length (int): the size of returned window
periodic (bool, optional): If True, returns a window to be used as a periodic
    function. If False, returns a symmetric window.
Keyword args:
dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.
Default: if ``None``, uses a global default (see :func:`torch.set_default_dtype`). Only floating point types are supported.
layout (:class:`torch.layout`, optional): the desired layout of returned window tensor. Only
``torch.strided`` (dense layout) is supported.
device (:class:`torch.device`, optional): the desired device of returned tensor.
Default: if ``None``, uses the current device for the default tensor type
(see :func:`torch.set_default_device`). :attr:`device` will be the CPU
for CPU tensor types and the current CUDA device for CUDA tensor types.
requires_grad (bool, optional): If autograd should record operations on the
returned tensor. Default: ``False``.
Returns:
Tensor: A 1-D tensor of size :math:`(\text{window\_length},)` containing the window
"""
...
def bmm(input: Tensor, mat2: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
r"""
bmm(input, mat2, *, out=None) -> Tensor
Performs a batch matrix-matrix product of matrices stored in :attr:`input`
and :attr:`mat2`.
:attr:`input` and :attr:`mat2` must be 3-D tensors each containing
the same number of matrices.
If :attr:`input` is a :math:`(b \times n \times m)` tensor, :attr:`mat2` is a
:math:`(b \times m \times p)` tensor, :attr:`out` will be a
:math:`(b \times n \times p)` tensor.
.. math::
\text{out}_i = \text{input}_i \mathbin{@} \text{mat2}_i
This operator supports :ref:`TensorFloat32<tf32_on_ampere>`.
On certain ROCm devices, when using float16 inputs this module will use :ref:`different precision<fp16_on_mi200>` for backward.
.. note:: This function does not :ref:`broadcast <broadcasting-semantics>`.
For broadcasting matrix products, see :func:`torch.matmul`.
Args:
input (Tensor): the first batch of matrices to be multiplied
mat2 (Tensor): the second batch of matrices to be multiplied
Keyword Args:
out (Tensor, optional): the output tensor.
Example::
>>> input = torch.randn(10, 3, 4)
>>> mat2 = torch.randn(10, 4, 5)
>>> res = torch.bmm(input, mat2)
>>> res.size()
torch.Size([10, 3, 5])
"""
...
def broadcast_to(input: Tensor, size: Sequence[Union[_int, SymInt]]) -> Tensor:
r"""
broadcast_to(input, shape) -> Tensor
Broadcasts :attr:`input` to the shape :attr:`shape`.
Equivalent to calling ``input.expand(shape)``. See :meth:`~Tensor.expand` for details.
Args:
input (Tensor): the input tensor.
shape (list, tuple, or :class:`torch.Size`): the new shape.
Example::
>>> x = torch.tensor([1, 2, 3])
>>> torch.broadcast_to(x, (3, 3))
tensor([[1, 2, 3],
[1, 2, 3],
[1, 2, 3]])
"""
...
@overload
def bucketize(input: Tensor, boundaries: Tensor, *, out_int32: _bool = False, right: _bool = False, out: Optional[Tensor] = None) -> Tensor:
r"""
bucketize(input, boundaries, *, out_int32=False, right=False, out=None) -> Tensor
Returns the indices of the buckets to which each value in the :attr:`input` belongs, where the
boundaries of the buckets are set by :attr:`boundaries`. Returns a new tensor with the same size
as :attr:`input`. If :attr:`right` is False (default), then the left boundary is open. Note that
this behavior is opposite the behavior of
`numpy.digitize <https://docs.scipy.org/doc/numpy/reference/generated/numpy.digitize.html>`_.
More formally, the returned index satisfies the following rules:
.. list-table::
:widths: 15 85
:header-rows: 1
* - :attr:`right`
- *returned index satisfies*
* - False
- ``boundaries[i-1] < input[m][n]...[l][x] <= boundaries[i]``
* - True
- ``boundaries[i-1] <= input[m][n]...[l][x] < boundaries[i]``
Args:
input (Tensor or Scalar): N-D tensor or a Scalar containing the search value(s).
boundaries (Tensor): 1-D tensor, must contain a strictly increasing sequence, or the return value is undefined.
Keyword args:
out_int32 (bool, optional): indicates the output data type. torch.int32 if True, torch.int64 otherwise.
    Default value is False, i.e. the default output data type is torch.int64.
right (bool, optional): if False, return the first suitable location that is found. If True, return the
    last such index. If no suitable index is found, return 0 for non-numerical values
    (e.g. ``nan``, ``inf``) or the size of :attr:`boundaries` (one past the last index).
    In other words, if False, gets the lower bound index for each value in :attr:`input`
    from :attr:`boundaries`. If True, gets the upper bound index instead.
    Default value is False.
out (Tensor, optional): the output tensor, must be the same size as :attr:`input` if provided.
Example::
>>> boundaries = torch.tensor([1, 3, 5, 7, 9])
>>> boundaries
tensor([1, 3, 5, 7, 9])
>>> v = torch.tensor([[3, 6, 9], [3, 6, 9]])
>>> v
tensor([[3, 6, 9],
[3, 6, 9]])
>>> torch.bucketize(v, boundaries)
tensor([[1, 3, 4],
[1, 3, 4]])
>>> torch.bucketize(v, boundaries, right=True)
tensor([[2, 3, 5],
[2, 3, 5]])
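A minimal sketch of the scalar form mentioned in the arguments above; with
``right=False`` the returned index ``i`` satisfies ``boundaries[i-1] < 4 <= boundaries[i]``::
    >>> torch.bucketize(4, boundaries)
    tensor(2)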
"""
...
@overload
def bucketize(self: Union[Number, _complex], boundaries: Tensor, *, out_int32: _bool = False, right: _bool = False) -> Tensor:
r"""
bucketize(input, boundaries, *, out_int32=False, right=False, out=None) -> Tensor
Returns the indices of the buckets to which each value in the :attr:`input` belongs, where the
boundaries of the buckets are set by :attr:`boundaries`. Returns a new tensor with the same size
as :attr:`input`. If :attr:`right` is False (default), then the left boundary is open. Note that
this behavior is opposite the behavior of
`numpy.digitize <https://docs.scipy.org/doc/numpy/reference/generated/numpy.digitize.html>`_.
More formally, the returned index satisfies the following rules:
.. list-table::
:widths: 15 85
:header-rows: 1
* - :attr:`right`
- *returned index satisfies*
* - False
- ``boundaries[i-1] < input[m][n]...[l][x] <= boundaries[i]``
* - True
- ``boundaries[i-1] <= input[m][n]...[l][x] < boundaries[i]``
Args:
input (Tensor or Scalar): N-D tensor or a Scalar containing the search value(s).
boundaries (Tensor): 1-D tensor, must contain a strictly increasing sequence, or the return value is undefined.
Keyword args:
out_int32 (bool, optional): indicates the output data type. torch.int32 if True, torch.int64 otherwise.
    Default value is False, i.e. the default output data type is torch.int64.
right (bool, optional): if False, return the first suitable location that is found. If True, return the
    last such index. If no suitable index is found, return 0 for non-numerical values
    (e.g. ``nan``, ``inf``) or the size of :attr:`boundaries` (one past the last index).
    In other words, if False, gets the lower bound index for each value in :attr:`input`
    from :attr:`boundaries`. If True, gets the upper bound index instead.
    Default value is False.
out (Tensor, optional): the output tensor, must be the same size as :attr:`input` if provided.
Example::
>>> boundaries = torch.tensor([1, 3, 5, 7, 9])
>>> boundaries
tensor([1, 3, 5, 7, 9])
>>> v = torch.tensor([[3, 6, 9], [3, 6, 9]])
>>> v
tensor([[3, 6, 9],
[3, 6, 9]])
>>> torch.bucketize(v, boundaries)
tensor([[1, 3, 4],
[1, 3, 4]])
>>> torch.bucketize(v, boundaries, right=True)
tensor([[2, 3, 5],
[2, 3, 5]])
"""
...
def can_cast(from_: _dtype, to: _dtype) -> _bool:
r"""
can_cast(from, to) -> bool
Determines if a type conversion is allowed under PyTorch casting rules
described in the type promotion :ref:`documentation <type-promotion-doc>`.
Args:
from (dtype): The original :class:`torch.dtype`.
to (dtype): The target :class:`torch.dtype`.
Example::
>>> torch.can_cast(torch.double, torch.float)
True
>>> torch.can_cast(torch.float, torch.int)
False
"""
...
@overload
def cat(tensors: Union[Tuple[Tensor, ...], List[Tensor]], dim: _int = 0, *, out: Optional[Tensor] = None) -> Tensor:
r"""
cat(tensors, dim=0, *, out=None) -> Tensor
Concatenates the given sequence of :attr:`tensors` in the given dimension.
All tensors must either have the same shape (except in the concatenating
dimension) or be a 1-D empty tensor with size ``(0,)``.
:func:`torch.cat` can be seen as an inverse operation for :func:`torch.split`
and :func:`torch.chunk`.
:func:`torch.cat` can be best understood via examples.
.. seealso::
:func:`torch.stack` concatenates the given sequence along a new dimension.
Args:
tensors (sequence of Tensors): any python sequence of tensors of the same type.
Non-empty tensors provided must have the same shape, except in the
cat dimension.
dim (int, optional): the dimension over which the tensors are concatenated
Keyword args:
out (Tensor, optional): the output tensor.
Example::
>>> x = torch.randn(2, 3)
>>> x
tensor([[ 0.6580, -1.0969, -0.4614],
[-0.1034, -0.5790, 0.1497]])
>>> torch.cat((x, x, x), 0)
tensor([[ 0.6580, -1.0969, -0.4614],
[-0.1034, -0.5790, 0.1497],
[ 0.6580, -1.0969, -0.4614],
[-0.1034, -0.5790, 0.1497],
[ 0.6580, -1.0969, -0.4614],
[-0.1034, -0.5790, 0.1497]])
>>> torch.cat((x, x, x), 1)
tensor([[ 0.6580, -1.0969, -0.4614, 0.6580, -1.0969, -0.4614, 0.6580,
-1.0969, -0.4614],
[-0.1034, -0.5790, 0.1497, -0.1034, -0.5790, 0.1497, -0.1034,
-0.5790, 0.1497]])
"""
...
@overload
def cat(tensors: Union[Tuple[Tensor, ...], List[Tensor]], dim: Union[str, ellipsis, None], *, out: Optional[Tensor] = None) -> Tensor:
r"""
cat(tensors, dim=0, *, out=None) -> Tensor
Concatenates the given sequence of :attr:`tensors` in the given dimension.
All tensors must either have the same shape (except in the concatenating
dimension) or be a 1-D empty tensor with size ``(0,)``.
:func:`torch.cat` can be seen as an inverse operation for :func:`torch.split`
and :func:`torch.chunk`.
:func:`torch.cat` can be best understood via examples.
.. seealso::
:func:`torch.stack` concatenates the given sequence along a new dimension.
Args:
tensors (sequence of Tensors): any python sequence of tensors of the same type.
Non-empty tensors provided must have the same shape, except in the
cat dimension.
dim (int, optional): the dimension over which the tensors are concatenated
Keyword args:
out (Tensor, optional): the output tensor.
Example::
>>> x = torch.randn(2, 3)
>>> x
tensor([[ 0.6580, -1.0969, -0.4614],
[-0.1034, -0.5790, 0.1497]])
>>> torch.cat((x, x, x), 0)
tensor([[ 0.6580, -1.0969, -0.4614],
[-0.1034, -0.5790, 0.1497],
[ 0.6580, -1.0969, -0.4614],
[-0.1034, -0.5790, 0.1497],
[ 0.6580, -1.0969, -0.4614],
[-0.1034, -0.5790, 0.1497]])
>>> torch.cat((x, x, x), 1)
tensor([[ 0.6580, -1.0969, -0.4614, 0.6580, -1.0969, -0.4614, 0.6580,
-1.0969, -0.4614],
[-0.1034, -0.5790, 0.1497, -0.1034, -0.5790, 0.1497, -0.1034,
-0.5790, 0.1497]])
"""
...
def ccol_indices_copy(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ...
def ceil(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
r"""
ceil(input, *, out=None) -> Tensor
Returns a new tensor with the ceil of the elements of :attr:`input`,
the smallest integer greater than or equal to each element.
For integer inputs, follows the array-api convention of returning a
copy of the input tensor.
.. math::
\text{out}_{i} = \left\lceil \text{input}_{i} \right\rceil
Args:
input (Tensor): the input tensor.
Keyword args:
out (Tensor, optional): the output tensor.
Example::
>>> a = torch.randn(4)
>>> a
tensor([-0.6341, -1.4208, -1.0900, 0.5826])
>>> torch.ceil(a)
tensor([-0., -1., -1., 1.])
"""
...
def ceil_(input: Tensor) -> Tensor: ...
def celu(input: Tensor, alpha: Union[Number, _complex] = 1.0) -> Tensor: ...
def celu_(input: Tensor, alpha: Union[Number, _complex] = 1.0) -> Tensor: ...
def channel_shuffle(input: Tensor, groups: Union[_int, SymInt]) -> Tensor: ...
def cholesky(input: Tensor, upper: _bool = False, *, out: Optional[Tensor] = None) -> Tensor:
r"""
cholesky(input, upper=False, *, out=None) -> Tensor
Computes the Cholesky decomposition of a symmetric positive-definite
matrix :math:`A` or for batches of symmetric positive-definite matrices.
If :attr:`upper` is ``True``, the returned matrix ``U`` is upper-triangular, and
the decomposition has the form:
.. math::
A = U^TU
If :attr:`upper` is ``False``, the returned matrix ``L`` is lower-triangular, and
the decomposition has the form:
.. math::
A = LL^T
If :attr:`upper` is ``True``, and :math:`A` is a batch of symmetric positive-definite
matrices, then the returned tensor will be composed of upper-triangular Cholesky factors
of each of the individual matrices. Similarly, when :attr:`upper` is ``False``, the returned
tensor will be composed of lower-triangular Cholesky factors of each of the individual
matrices.
.. warning::
:func:`torch.cholesky` is deprecated in favor of :func:`torch.linalg.cholesky`
and will be removed in a future PyTorch release.
``L = torch.cholesky(A)`` should be replaced with
.. code:: python
L = torch.linalg.cholesky(A)
``U = torch.cholesky(A, upper=True)`` should be replaced with
.. code:: python
U = torch.linalg.cholesky(A).mH
This transform will produce equivalent results for all valid (symmetric positive definite) inputs.
Args:
input (Tensor): the input tensor :math:`A` of size :math:`(*, n, n)` where `*` is zero or more
batch dimensions consisting of symmetric positive-definite matrices.
upper (bool, optional): flag that indicates whether to return a
upper or lower triangular matrix. Default: ``False``
Keyword args:
out (Tensor, optional): the output matrix
Example::
>>> a = torch.randn(3, 3)
>>> a = a @ a.mT + 1e-3 # make symmetric positive-definite
>>> l = torch.cholesky(a)
>>> a
tensor([[ 2.4112, -0.7486, 1.4551],
[-0.7486, 1.3544, 0.1294],
[ 1.4551, 0.1294, 1.6724]])
>>> l
tensor([[ 1.5528, 0.0000, 0.0000],
[-0.4821, 1.0592, 0.0000],
[ 0.9371, 0.5487, 0.7023]])
>>> l @ l.mT
tensor([[ 2.4112, -0.7486, 1.4551],
[-0.7486, 1.3544, 0.1294],
[ 1.4551, 0.1294, 1.6724]])
>>> a = torch.randn(3, 2, 2) # Example for batched input
>>> a = a @ a.mT + 1e-03 # make symmetric positive-definite
>>> l = torch.cholesky(a)
>>> z = l @ l.mT
>>> torch.dist(z, a)
tensor(2.3842e-07)
"""
...
def cholesky_inverse(input: Tensor, upper: _bool = False, *, out: Optional[Tensor] = None) -> Tensor:
r"""
cholesky_inverse(L, upper=False, *, out=None) -> Tensor
Computes the inverse of a complex Hermitian or real symmetric
positive-definite matrix given its Cholesky decomposition.
Let :math:`A` be a complex Hermitian or real symmetric positive-definite matrix,
and :math:`L` its Cholesky decomposition such that:
.. math::
A = LL^{\text{H}}
where :math:`L^{\text{H}}` is the conjugate transpose when :math:`L` is complex,
and the transpose when :math:`L` is real-valued.
Computes the inverse matrix :math:`A^{-1}`.
Supports input of float, double, cfloat and cdouble dtypes.
Also supports batches of matrices, and if :math:`A` is a batch of matrices
then the output has the same batch dimensions.
Args:
L (Tensor): tensor of shape `(*, n, n)` where `*` is zero or more batch dimensions
consisting of lower or upper triangular Cholesky decompositions of
symmetric or Hermitian positive-definite matrices.
upper (bool, optional): flag that indicates whether :math:`L` is lower triangular
or upper triangular. Default: ``False``
Keyword args:
out (Tensor, optional): output tensor. Ignored if `None`. Default: `None`.
Example::
>>> A = torch.randn(3, 3)
>>> A = A @ A.T + torch.eye(3) * 1e-3 # Creates a symmetric positive-definite matrix
>>> L = torch.linalg.cholesky(A) # Extract Cholesky decomposition
>>> torch.cholesky_inverse(L)
tensor([[ 1.9314, 1.2251, -0.0889],
[ 1.2251, 2.4439, 0.2122],
[-0.0889, 0.2122, 0.1412]])
>>> A.inverse()
tensor([[ 1.9314, 1.2251, -0.0889],
[ 1.2251, 2.4439, 0.2122],
[-0.0889, 0.2122, 0.1412]])
>>> A = torch.randn(3, 2, 2, dtype=torch.complex64)
>>> A = A @ A.mH + torch.eye(2) * 1e-3 # Batch of Hermitian positive-definite matrices
>>> L = torch.linalg.cholesky(A)
>>> torch.dist(torch.inverse(A), torch.cholesky_inverse(L))
tensor(5.6358e-7)
"""
...
def cholesky_solve(input: Tensor, input2: Tensor, upper: _bool = False, *, out: Optional[Tensor] = None) -> Tensor:
r"""
cholesky_solve(B, L, upper=False, *, out=None) -> Tensor
Computes the solution of a system of linear equations with complex Hermitian
or real symmetric positive-definite lhs given its Cholesky decomposition.
Let :math:`A` be a complex Hermitian or real symmetric positive-definite matrix,
and :math:`L` its Cholesky decomposition such that:
.. math::
A = LL^{\text{H}}
where :math:`L^{\text{H}}` is the conjugate transpose when :math:`L` is complex,
and the transpose when :math:`L` is real-valued.
Returns the solution :math:`X` of the following linear system:
.. math::
AX = B
Supports inputs of float, double, cfloat and cdouble dtypes.
Also supports batches of matrices, and if :math:`A` or :math:`B` is a batch of matrices
then the output has the same batch dimensions.
Args:
B (Tensor): right-hand side tensor of shape `(*, n, k)`
where :math:`*` is zero or more batch dimensions
L (Tensor): tensor of shape `(*, n, n)` where `*` is zero or more batch dimensions
consisting of lower or upper triangular Cholesky decompositions of
symmetric or Hermitian positive-definite matrices.
upper (bool, optional): flag that indicates whether :math:`L` is lower triangular
or upper triangular. Default: ``False``.
Keyword args:
out (Tensor, optional): output tensor. Ignored if `None`. Default: `None`.
Example::
>>> A = torch.randn(3, 3)
>>> A = A @ A.T + torch.eye(3) * 1e-3 # Creates a symmetric positive-definite matrix
>>> L = torch.linalg.cholesky(A) # Extract Cholesky decomposition
>>> B = torch.randn(3, 2)
>>> torch.cholesky_solve(B, L)
tensor([[ -8.1625, 19.6097],
[ -5.8398, 14.2387],
[ -4.3771, 10.4173]])
>>> A.inverse() @ B
tensor([[ -8.1626, 19.6097],
[ -5.8398, 14.2387],
[ -4.3771, 10.4173]])
>>> A = torch.randn(3, 2, 2, dtype=torch.complex64)
>>> A = A @ A.mH + torch.eye(2) * 1e-3 # Batch of Hermitian positive-definite matrices
>>> L = torch.linalg.cholesky(A)
>>> B = torch.randn(2, 1, dtype=torch.complex64)
>>> X = torch.cholesky_solve(B, L)
>>> torch.dist(X, A.inverse() @ B)
tensor(1.6881e-5)
"""
...
def choose_qparams_optimized(input: Tensor, numel: _int, n_bins: _int, ratio: _float, bit_width: _int) -> Tuple[Tensor, Tensor]: ...
def chunk(input: Tensor, chunks: _int, dim: _int = 0) -> Tuple[Tensor, ...]:
r"""
chunk(input, chunks, dim=0) -> List of Tensors
Attempts to split a tensor into the specified number of chunks. Each chunk is a view of
the input tensor.
.. note::
This function may return fewer than the specified number of chunks!
.. seealso::
:func:`torch.tensor_split`, a function that always returns exactly the specified number of chunks
If the tensor size along the given dimension :attr:`dim` is divisible by :attr:`chunks`,
all returned chunks will be the same size.
If the tensor size along the given dimension :attr:`dim` is not divisible by :attr:`chunks`,
all returned chunks will be the same size, except the last one.
If such division is not possible, this function may return fewer
than the specified number of chunks.
Arguments:
input (Tensor): the tensor to split
chunks (int): number of chunks to return
dim (int): dimension along which to split the tensor
Example::
>>> torch.arange(11).chunk(6)
(tensor([0, 1]),
tensor([2, 3]),
tensor([4, 5]),
tensor([6, 7]),
tensor([8, 9]),
tensor([10]))
>>> torch.arange(12).chunk(6)
(tensor([0, 1]),
tensor([2, 3]),
tensor([4, 5]),
tensor([6, 7]),
tensor([8, 9]),
tensor([10, 11]))
>>> torch.arange(13).chunk(6)
(tensor([0, 1, 2]),
tensor([3, 4, 5]),
tensor([6, 7, 8]),
tensor([ 9, 10, 11]),
tensor([12]))
"""
...
@overload
def clamp(input: Tensor, min: Optional[Tensor] = None, max: Optional[Tensor] = None, *, out: Optional[Tensor] = None) -> Tensor:
r"""
clamp(input, min=None, max=None, *, out=None) -> Tensor
Clamps all elements in :attr:`input` into the range `[` :attr:`min`, :attr:`max` `]`.
Letting min_value and max_value be :attr:`min` and :attr:`max`, respectively, this returns:
.. math::
y_i = \min(\max(x_i, \text{min\_value}_i), \text{max\_value}_i)
If :attr:`min` is ``None``, there is no lower bound.
Or, if :attr:`max` is ``None``, there is no upper bound.
.. note::
If :attr:`min` is greater than :attr:`max`, :func:`torch.clamp(..., min, max) <torch.clamp>`
sets all elements in :attr:`input` to the value of :attr:`max`.
Args:
input (Tensor): the input tensor.
min (Number or Tensor, optional): lower-bound of the range to be clamped to
max (Number or Tensor, optional): upper-bound of the range to be clamped to
Keyword args:
out (Tensor, optional): the output tensor.
Example::
>>> a = torch.randn(4)
>>> a
tensor([-1.7120, 0.1734, -0.0478, -0.0922])
>>> torch.clamp(a, min=-0.5, max=0.5)
tensor([-0.5000, 0.1734, -0.0478, -0.0922])
>>> min = torch.linspace(-1, 1, steps=4)
>>> torch.clamp(a, min=min)
tensor([-1.0000, 0.1734, 0.3333, 1.0000])
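A minimal sketch of the ``min > max`` note above, where every element is set to :attr:`max` (illustrative values)::
    >>> torch.clamp(torch.tensor([0., 5.]), min=2.0, max=1.0)
    tensor([1., 1.])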
"""
...
@overload
def clamp(input: Tensor, min: Optional[Union[Number, _complex]] = None, max: Optional[Union[Number, _complex]] = None, *, out: Optional[Tensor] = None) -> Tensor:
r"""
clamp(input, min=None, max=None, *, out=None) -> Tensor
Clamps all elements in :attr:`input` into the range `[` :attr:`min`, :attr:`max` `]`.
Letting min_value and max_value be :attr:`min` and :attr:`max`, respectively, this returns:
.. math::
y_i = \min(\max(x_i, \text{min\_value}_i), \text{max\_value}_i)
If :attr:`min` is ``None``, there is no lower bound.
Or, if :attr:`max` is ``None``, there is no upper bound.
.. note::
If :attr:`min` is greater than :attr:`max`, :func:`torch.clamp(..., min, max) <torch.clamp>`
sets all elements in :attr:`input` to the value of :attr:`max`.
Args:
input (Tensor): the input tensor.
min (Number or Tensor, optional): lower-bound of the range to be clamped to
max (Number or Tensor, optional): upper-bound of the range to be clamped to
Keyword args:
out (Tensor, optional): the output tensor.
Example::
>>> a = torch.randn(4)
>>> a
tensor([-1.7120, 0.1734, -0.0478, -0.0922])
>>> torch.clamp(a, min=-0.5, max=0.5)
tensor([-0.5000, 0.1734, -0.0478, -0.0922])
>>> min = torch.linspace(-1, 1, steps=4)
>>> torch.clamp(a, min=min)
tensor([-1.0000, 0.1734, 0.3333, 1.0000])
"""
...
@overload
def clamp_(input: Tensor, min: Optional[Tensor] = None, max: Optional[Tensor] = None) -> Tensor: ...
@overload
def clamp_(input: Tensor, min: Optional[Union[Number, _complex]] = None, max: Optional[Union[Number, _complex]] = None) -> Tensor: ...
@overload
def clamp_max(input: Tensor, max: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ...
@overload
def clamp_max(input: Tensor, max: Union[Number, _complex], *, out: Optional[Tensor] = None) -> Tensor: ...
@overload
def clamp_max_(input: Tensor, max: Tensor) -> Tensor: ...
@overload
def clamp_max_(input: Tensor, max: Union[Number, _complex]) -> Tensor: ...
@overload
def clamp_min(input: Tensor, min: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ...
@overload
def clamp_min(input: Tensor, min: Union[Number, _complex], *, out: Optional[Tensor] = None) -> Tensor: ...
@overload
def clamp_min_(input: Tensor, min: Tensor) -> Tensor: ...
@overload
def clamp_min_(input: Tensor, min: Union[Number, _complex]) -> Tensor: ...
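# Illustrative note (comments only, not part of the generated stub): the
# trailing underscore marks the in-place variant, and clamp_max / clamp_min
# clamp from one side only. A minimal sketch:
#   x = torch.tensor([-2.0, 0.5, 3.0])
#   torch.clamp_max(x, 1.0)   # tensor([-2.0000,  0.5000,  1.0000])
#   torch.clamp_min(x, 0.0)   # tensor([0.0000, 0.5000, 3.0000])
#   x.clamp_(0.0, 1.0)        # in place -> tensor([0.0000, 0.5000, 1.0000])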
@overload
def clip(input: Tensor, min: Optional[Tensor] = None, max: Optional[Tensor] = None, *, out: Optional[Tensor] = None) -> Tensor:
r"""
clip(input, min=None, max=None, *, out=None) -> Tensor
Alias for :func:`torch.clamp`.
"""
...
@overload
def clip(input: Tensor, min: Optional[Union[Number, _complex]] = None, max: Optional[Union[Number, _complex]] = None, *, out: Optional[Tensor] = None) -> Tensor:
r"""
clip(input, min=None, max=None, *, out=None) -> Tensor
Alias for :func:`torch.clamp`.
"""
...
@overload
def clip_(input: Tensor, min: Optional[Tensor] = None, max: Optional[Tensor] = None) -> Tensor: ...
@overload
def clip_(input: Tensor, min: Optional[Union[Number, _complex]] = None, max: Optional[Union[Number, _complex]] = None) -> Tensor: ...
def clone(input: Tensor, *, memory_format: Optional[memory_format] = None) -> Tensor:
r"""
clone(input, *, memory_format=torch.preserve_format) -> Tensor
Returns a copy of :attr:`input`.
.. note::
This function is differentiable, so gradients will flow back from the
result of this operation to :attr:`input`. To create a tensor without an
autograd relationship to :attr:`input` see :meth:`~Tensor.detach`.
Args:
input (Tensor): the input tensor.
Keyword args:
memory_format (:class:`torch.memory_format`, optional): the desired memory format of
returned tensor. Default: ``torch.preserve_format``.
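Example (an illustrative sketch)::
>>> x = torch.randn(2, 2, requires_grad=True)
>>> y = x.clone()
>>> y.sum().backward()
>>> x.grad
tensor([[1., 1.],
        [1., 1.]])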
"""
...
def col_indices_copy(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
r"""
Performs the same operation as :func:`torch.col_indices`, but all output tensors
are freshly created instead of aliasing the input.
"""
...
def column_stack(tensors: Union[Tuple[Tensor, ...], List[Tensor]], *, out: Optional[Tensor] = None) -> Tensor:
r"""
column_stack(tensors, *, out=None) -> Tensor
Creates a new tensor by horizontally stacking the tensors in :attr:`tensors`.
Equivalent to ``torch.hstack(tensors)``, except each zero or one dimensional tensor ``t``
in :attr:`tensors` is first reshaped into a ``(t.numel(), 1)`` column before being stacked horizontally.
Args:
tensors (sequence of Tensors): sequence of tensors to concatenate
Keyword args:
out (Tensor, optional): the output tensor.
Example::
>>> a = torch.tensor([1, 2, 3])
>>> b = torch.tensor([4, 5, 6])
>>> torch.column_stack((a, b))
tensor([[1, 4],
[2, 5],
[3, 6]])
>>> a = torch.arange(5)
>>> b = torch.arange(10).reshape(5, 2)
>>> torch.column_stack((a, b, b))
tensor([[0, 0, 1, 0, 1],
[1, 2, 3, 2, 3],
[2, 4, 5, 4, 5],
[3, 6, 7, 6, 7],
[4, 8, 9, 8, 9]])
"""
...
def combinations(input: Tensor, r: _int = 2, with_replacement: _bool = False) -> Tensor:
r"""
combinations(input, r=2, with_replacement=False) -> seq
Compute combinations of length :math:`r` of the given tensor. The behavior is similar to
Python's `itertools.combinations` when `with_replacement` is set to `False`, and
`itertools.combinations_with_replacement` when `with_replacement` is set to `True`.
Arguments:
input (Tensor): 1D vector.
r (int, optional): number of elements to combine
with_replacement (bool, optional): whether to allow duplication in combination
Returns:
Tensor: A tensor equivalent to converting the input tensor into a list, applying
`itertools.combinations` or `itertools.combinations_with_replacement` to that
list, and converting the resulting list of tuples back into a tensor.
Example::
>>> a = [1, 2, 3]
>>> list(itertools.combinations(a, r=2))
[(1, 2), (1, 3), (2, 3)]
>>> list(itertools.combinations(a, r=3))
[(1, 2, 3)]
>>> list(itertools.combinations_with_replacement(a, r=2))
[(1, 1), (1, 2), (1, 3), (2, 2), (2, 3), (3, 3)]
>>> tensor_a = torch.tensor(a)
>>> torch.combinations(tensor_a)
tensor([[1, 2],
[1, 3],
[2, 3]])
>>> torch.combinations(tensor_a, r=3)
tensor([[1, 2, 3]])
>>> torch.combinations(tensor_a, with_replacement=True)
tensor([[1, 1],
[1, 2],
[1, 3],
[2, 2],
[2, 3],
[3, 3]])
"""
...
def complex(real: Tensor, imag: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
r"""
complex(real, imag, *, out=None) -> Tensor
Constructs a complex tensor with its real part equal to :attr:`real` and its
imaginary part equal to :attr:`imag`.
Args:
real (Tensor): The real part of the complex tensor. Must be half, float or double.
imag (Tensor): The imaginary part of the complex tensor. Must be same dtype
as :attr:`real`.
Keyword args:
out (Tensor): If the inputs are ``torch.float32``, must be
``torch.complex64``. If the inputs are ``torch.float64``, must be
``torch.complex128``.
Example::
>>> real = torch.tensor([1, 2], dtype=torch.float32)
>>> imag = torch.tensor([3, 4], dtype=torch.float32)
>>> z = torch.complex(real, imag)
>>> z
tensor([(1.+3.j), (2.+4.j)])
>>> z.dtype
torch.complex64
"""
...
@overload
def concat(tensors: Union[Tuple[Tensor, ...], List[Tensor]], dim: _int = 0, *, out: Optional[Tensor] = None) -> Tensor:
r"""
concat(tensors, dim=0, *, out=None) -> Tensor
Alias of :func:`torch.cat`.
"""
...
@overload
def concat(tensors: Union[Tuple[Tensor, ...], List[Tensor]], dim: Union[str, ellipsis, None], *, out: Optional[Tensor] = None) -> Tensor:
r"""
concat(tensors, dim=0, *, out=None) -> Tensor
Alias of :func:`torch.cat`.
"""
...
@overload
def concatenate(tensors: Union[Tuple[Tensor, ...], List[Tensor]], dim: _int = 0, *, out: Optional[Tensor] = None) -> Tensor:
r"""
concatenate(tensors, axis=0, out=None) -> Tensor
Alias of :func:`torch.cat`.
"""
...
@overload
def concatenate(tensors: Union[Tuple[Tensor, ...], List[Tensor]], dim: Union[str, ellipsis, None], *, out: Optional[Tensor] = None) -> Tensor:
r"""
concatenate(tensors, axis=0, out=None) -> Tensor
Alias of :func:`torch.cat`.
"""
...
def conj(input: Tensor) -> Tensor:
r"""
conj(input) -> Tensor
Returns a view of :attr:`input` with a flipped conjugate bit. If :attr:`input` has a non-complex dtype,
this function just returns :attr:`input`.
.. note::
:func:`torch.conj` performs a lazy conjugation, but the actual conjugated tensor can be materialized
at any time using :func:`torch.resolve_conj`.
.. warning:: In the future, :func:`torch.conj` may return a non-writeable view for an :attr:`input` of
non-complex dtype. It's recommended that programs not modify the tensor returned by :func:`torch.conj`
when :attr:`input` is of non-complex dtype to be compatible with this change.
Args:
input (Tensor): the input tensor.
Example::
>>> x = torch.tensor([-1 + 1j, -2 + 2j, 3 - 3j])
>>> x.is_conj()
False
>>> y = torch.conj(x)
>>> y.is_conj()
True
"""
...
def conj_physical(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
r"""
conj_physical(input, *, out=None) -> Tensor
Computes the element-wise conjugate of the given :attr:`input` tensor.
If :attr:`input` has a non-complex dtype, this function just returns :attr:`input`.
.. note::
This performs the conjugate operation regardless of whether the conjugate bit is set.
.. warning:: In the future, :func:`torch.conj_physical` may return a non-writeable view for an :attr:`input` of
non-complex dtype. It's recommended that programs not modify the tensor returned by :func:`torch.conj_physical`
when :attr:`input` is of non-complex dtype to be compatible with this change.
.. math::
\text{out}_{i} = conj(\text{input}_{i})
Args:
input (Tensor): the input tensor.
Keyword args:
out (Tensor, optional): the output tensor.
Example::
>>> torch.conj_physical(torch.tensor([-1 + 1j, -2 + 2j, 3 - 3j]))
tensor([-1 - 1j, -2 - 2j, 3 + 3j])
"""
...
def conj_physical_(input: Tensor) -> Tensor: ...
def constant_pad_nd(input: Tensor, pad: Sequence[Union[_int, SymInt]], value: Union[Number, _complex] = 0) -> Tensor: ...
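# Illustrative sketch: `pad` lists (left, right) amounts per dimension, starting
# from the last dimension, following the same convention as torch.nn.functional.pad:
#   x = torch.ones(2, 3)
#   torch.constant_pad_nd(x, (1, 2), 0.0).shape   # torch.Size([2, 6])
#   torch.constant_pad_nd(x, (1, 2, 0, 1)).shape  # torch.Size([3, 6])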
@overload
def conv1d(input: Tensor, weight: Tensor, bias: Optional[Tensor] = None, stride: Union[Union[_int, SymInt], Sequence[Union[_int, SymInt]]] = 1, padding: Union[Union[_int, SymInt], Sequence[Union[_int, SymInt]]] = 0, dilation: Union[Union[_int, SymInt], Sequence[Union[_int, SymInt]]] = 1, groups: Union[_int, SymInt] = 1) -> Tensor: ...
@overload
def conv1d(input: Tensor, weight: Tensor, bias: Optional[Tensor] = None, stride: Union[Union[_int, SymInt], Sequence[Union[_int, SymInt]]] = 1, padding: str = "valid", dilation: Union[Union[_int, SymInt], Sequence[Union[_int, SymInt]]] = 1, groups: Union[_int, SymInt] = 1) -> Tensor: ...
@overload
def conv2d(input: Tensor, weight: Tensor, bias: Optional[Tensor] = None, stride: Union[Union[_int, SymInt], Sequence[Union[_int, SymInt]]] = 1, padding: Union[Union[_int, SymInt], Sequence[Union[_int, SymInt]]] = 0, dilation: Union[Union[_int, SymInt], Sequence[Union[_int, SymInt]]] = 1, groups: Union[_int, SymInt] = 1) -> Tensor: ...
@overload
def conv2d(input: Tensor, weight: Tensor, bias: Optional[Tensor] = None, stride: Union[Union[_int, SymInt], Sequence[Union[_int, SymInt]]] = 1, padding: str = "valid", dilation: Union[Union[_int, SymInt], Sequence[Union[_int, SymInt]]] = 1, groups: Union[_int, SymInt] = 1) -> Tensor: ...
@overload
def conv3d(input: Tensor, weight: Tensor, bias: Optional[Tensor] = None, stride: Union[Union[_int, SymInt], Sequence[Union[_int, SymInt]]] = 1, padding: Union[Union[_int, SymInt], Sequence[Union[_int, SymInt]]] = 0, dilation: Union[Union[_int, SymInt], Sequence[Union[_int, SymInt]]] = 1, groups: Union[_int, SymInt] = 1) -> Tensor: ...
@overload
def conv3d(input: Tensor, weight: Tensor, bias: Optional[Tensor] = None, stride: Union[Union[_int, SymInt], Sequence[Union[_int, SymInt]]] = 1, padding: str = "valid", dilation: Union[Union[_int, SymInt], Sequence[Union[_int, SymInt]]] = 1, groups: Union[_int, SymInt] = 1) -> Tensor: ...
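# Illustrative sketch of the layouts the conv stubs above expect: input is
# (N, C_in, *spatial) and weight is (C_out, C_in // groups, *kernel), e.g.:
#   x = torch.randn(1, 3, 32, 32)       # (N, C_in, H, W)
#   w = torch.randn(8, 3, 3, 3)         # (C_out, C_in, kH, kW)
#   y = torch.conv2d(x, w, padding=1)   # y.shape == (1, 8, 32, 32)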
def conv_tbc(input: Tensor, weight: Tensor, bias: Tensor, pad: _int = 0) -> Tensor: ...
def conv_transpose1d(input: Tensor, weight: Tensor, bias: Optional[Tensor] = None, stride: Union[Union[_int, SymInt], Sequence[Union[_int, SymInt]]] = 1, padding: Union[Union[_int, SymInt], Sequence[Union[_int, SymInt]]] = 0, output_padding: Union[Union[_int, SymInt], Sequence[Union[_int, SymInt]]] = 0, groups: Union[_int, SymInt] = 1, dilation: Union[Union[_int, SymInt], Sequence[Union[_int, SymInt]]] = 1) -> Tensor: ...
def conv_transpose2d(input: Tensor, weight: Tensor, bias: Optional[Tensor] = None, stride: Union[Union[_int, SymInt], Sequence[Union[_int, SymInt]]] = 1, padding: Union[Union[_int, SymInt], Sequence[Union[_int, SymInt]]] = 0, output_padding: Union[Union[_int, SymInt], Sequence[Union[_int, SymInt]]] = 0, groups: Union[_int, SymInt] = 1, dilation: Union[Union[_int, SymInt], Sequence[Union[_int, SymInt]]] = 1) -> Tensor: ...
def conv_transpose3d(input: Tensor, weight: Tensor, bias: Optional[Tensor] = None, stride: Union[Union[_int, SymInt], Sequence[Union[_int, SymInt]]] = 1, padding: Union[Union[_int, SymInt], Sequence[Union[_int, SymInt]]] = 0, output_padding: Union[Union[_int, SymInt], Sequence[Union[_int, SymInt]]] = 0, groups: Union[_int, SymInt] = 1, dilation: Union[Union[_int, SymInt], Sequence[Union[_int, SymInt]]] = 1) -> Tensor: ...
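# Illustrative sketch for the transposed convs above: each spatial output size is
# (in - 1) * stride - 2 * padding + dilation * (kernel - 1) + output_padding + 1,
# and the weight layout is (C_in, C_out // groups, *kernel), e.g.:
#   x = torch.randn(1, 8, 32, 32)
#   w = torch.randn(8, 3, 3, 3)         # (C_in, C_out, kH, kW)
#   y = torch.conv_transpose2d(x, w, stride=2, padding=1, output_padding=1)
#   # y.shape == (1, 3, 64, 64)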
def convolution(input: Tensor, weight: Tensor, bias: Optional[Tensor], stride: Sequence[Union[_int, SymInt]], padding: Sequence[Union[_int, SymInt]], dilation: Sequence[Union[_int, SymInt]], transposed: _bool, output_padding: Sequence[Union[_int, SymInt]], groups: Union[_int, SymInt]) -> Tensor: ...
@overload
def copysign(input: Tensor, other: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
r"""
copysign(input, other, *, out=None) -> Tensor
Create a new floating-point tensor with the magnitude of :attr:`input` and the sign of :attr:`other`, elementwise.
.. math::
\text{out}_{i} = \begin{cases}
-|\text{input}_{i}| & \text{if } \text{other}_{i} \leq -0.0 \\
|\text{input}_{i}| & \text{if } \text{other}_{i} \geq 0.0 \\
\end{cases}
Supports :ref:`broadcasting to a common shape <broadcasting-semantics>`,
and integer and float inputs.
Args:
input (Tensor): magnitudes.
other (Tensor or Number): contains value(s) whose signbit(s) are
applied to the magnitudes in :attr:`input`.
Keyword args:
out (Tensor, optional): the output tensor.
Example::
>>> a = torch.randn(5)
>>> a
tensor([-1.2557, -0.0026, -0.5387, 0.4740, -0.9244])
>>> torch.copysign(a, 1)
tensor([1.2557, 0.0026, 0.5387, 0.4740, 0.9244])
>>> a = torch.randn(4, 4)
>>> a
tensor([[ 0.7079, 0.2778, -1.0249, 0.5719],
[-0.0059, -0.2600, -0.4475, -1.3948],
[ 0.3667, -0.9567, -2.5757, -0.1751],
[ 0.2046, -0.0742, 0.2998, -0.1054]])
>>> b = torch.randn(4)
>>> b
tensor([ 0.2373, 0.3120, 0.3190, -1.1128])
>>> torch.copysign(a, b)
tensor([[ 0.7079, 0.2778, 1.0249, -0.5719],
[ 0.0059, 0.2600, 0.4475, -1.3948],
[ 0.3667, 0.9567, 2.5757, -0.1751],
[ 0.2046, 0.0742, 0.2998, -0.1054]])
>>> a = torch.tensor([1.])
>>> b = torch.tensor([-0.])
>>> torch.copysign(a, b)
tensor([-1.])
.. note::
copysign handles signed zeros. If the other argument has a negative zero (-0),
the corresponding output value will be negative.
"""
...
@overload
def copysign(input: Tensor, other: Union[Number, _complex], *, out: Optional[Tensor] = None) -> Tensor:
r"""
copysign(input, other, *, out=None) -> Tensor
Create a new floating-point tensor with the magnitude of :attr:`input` and the sign of :attr:`other`, elementwise.
.. math::
\text{out}_{i} = \begin{cases}
-|\text{input}_{i}| & \text{if } \text{other}_{i} \leq -0.0 \\
|\text{input}_{i}| & \text{if } \text{other}_{i} \geq 0.0 \\
\end{cases}
Supports :ref:`broadcasting to a common shape <broadcasting-semantics>`,
and integer and float inputs.
Args:
input (Tensor): magnitudes.
other (Tensor or Number): contains value(s) whose signbit(s) are
applied to the magnitudes in :attr:`input`.
Keyword args:
out (Tensor, optional): the output tensor.
Example::
>>> a = torch.randn(5)
>>> a
tensor([-1.2557, -0.0026, -0.5387, 0.4740, -0.9244])
>>> torch.copysign(a, 1)
tensor([1.2557, 0.0026, 0.5387, 0.4740, 0.9244])
>>> a = torch.randn(4, 4)
>>> a
tensor([[ 0.7079, 0.2778, -1.0249, 0.5719],
[-0.0059, -0.2600, -0.4475, -1.3948],
[ 0.3667, -0.9567, -2.5757, -0.1751],
[ 0.2046, -0.0742, 0.2998, -0.1054]])
>>> b = torch.randn(4)
>>> b
tensor([ 0.2373, 0.3120, 0.3190, -1.1128])
>>> torch.copysign(a, b)
tensor([[ 0.7079, 0.2778, 1.0249, -0.5719],
[ 0.0059, 0.2600, 0.4475, -1.3948],
[ 0.3667, 0.9567, 2.5757, -0.1751],
[ 0.2046, 0.0742, 0.2998, -0.1054]])
>>> a = torch.tensor([1.])
>>> b = torch.tensor([-0.])
>>> torch.copysign(a, b)
tensor([-1.])
.. note::
copysign handles signed zeros. If the other argument has a negative zero (-0),
the corresponding output value will be negative.
"""
...
def corrcoef(input: Tensor) -> Tensor:
r"""
corrcoef(input) -> Tensor
Estimates the Pearson product-moment correlation coefficient matrix of the variables given by the :attr:`input` matrix,
where rows are the variables and columns are the observations.
.. note::
The correlation coefficient matrix R is computed using the covariance matrix C as given by
:math:`R_{ij} = \frac{ C_{ij} } { \sqrt{ C_{ii} * C_{jj} } }`
.. note::
Due to floating point rounding, the resulting array may not be Hermitian and its diagonal elements may not be 1.
The real and imaginary values are clipped to the interval [-1, 1] in an attempt to improve this situation.
Args:
input (Tensor): A 2D matrix containing multiple variables and observations, or a
Scalar or 1D vector representing a single variable.
Returns:
(Tensor) The correlation coefficient matrix of the variables.
.. seealso::
:func:`torch.cov` covariance matrix.
Example::
>>> x = torch.tensor([[0, 1, 2], [2, 1, 0]])
>>> torch.corrcoef(x)
tensor([[ 1., -1.],
[-1., 1.]])
>>> x = torch.randn(2, 4)
>>> x
tensor([[-0.2678, -0.0908, -0.3766, 0.2780],
[-0.5812, 0.1535, 0.2387, 0.2350]])
>>> torch.corrcoef(x)
tensor([[1.0000, 0.3582],
[0.3582, 1.0000]])
>>> torch.corrcoef(x[0])
tensor(1.)
"""
...
def cos(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
r"""
cos(input, *, out=None) -> Tensor
Returns a new tensor with the cosine of the elements of :attr:`input`.
.. math::
\text{out}_{i} = \cos(\text{input}_{i})
Args:
input (Tensor): the input tensor.
Keyword args:
out (Tensor, optional): the output tensor.
Example::
>>> a = torch.randn(4)
>>> a
tensor([ 1.4309, 1.2706, -0.8562, 0.9796])
>>> torch.cos(a)
tensor([ 0.1395, 0.2957, 0.6553, 0.5574])
"""
...
def cos_(input: Tensor) -> Tensor: ...
def cosh(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
r"""
cosh(input, *, out=None) -> Tensor
Returns a new tensor with the hyperbolic cosine of the elements of
:attr:`input`.
.. math::
\text{out}_{i} = \cosh(\text{input}_{i})
Args:
input (Tensor): the input tensor.
Keyword args:
out (Tensor, optional): the output tensor.
Example::
>>> a = torch.randn(4)
>>> a
tensor([ 0.1632, 1.1835, -0.6979, -0.7325])
>>> torch.cosh(a)
tensor([ 1.0133, 1.7860, 1.2536, 1.2805])
.. note::
When :attr:`input` is on the CPU, the implementation of torch.cosh may use
the Sleef library, which rounds very large results to infinity or negative
infinity. See `here <https://sleef.org/purec.xhtml>`_ for details.
"""
...
def cosh_(input: Tensor) -> Tensor: ...
def cosine_embedding_loss(input1: Tensor, input2: Tensor, target: Tensor, margin: _float = 0.0, reduction: _int = 1) -> Tensor: ...
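# Illustrative note (the integer encoding is an assumption based on the internal
# at::Reduction enum: 0 = none, 1 = mean, 2 = sum); the public entry point is
# torch.nn.functional.cosine_embedding_loss, e.g.:
#   x1, x2 = torch.randn(4, 8), torch.randn(4, 8)
#   target = torch.tensor([1, -1, 1, -1])   # +1: pull together, -1: push apart
#   loss = torch.nn.functional.cosine_embedding_loss(x1, x2, target)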
def cosine_similarity(x1: Tensor, x2: Tensor, dim: _int = 1, eps: _float = 1e-08) -> Tensor: ...
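# Illustrative sketch: cosine_similarity is the dot product of x1 and x2 along
# `dim`, divided by the product of their 2-norms (guarded below by `eps`):
#   x1, x2 = torch.randn(4, 8), torch.randn(4, 8)
#   sim = torch.cosine_similarity(x1, x2, dim=1)   # shape (4,), values in [-1, 1]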
@overload
def count_nonzero(input: Tensor, dim: Optional[_int] = None) -> Tensor:
r"""
count_nonzero(input, dim=None) -> Tensor
Counts the number of non-zero values in the tensor :attr:`input` along the given :attr:`dim`.
If no dim is specified then all non-zeros in the tensor are counted.
Args:
input (Tensor): the input tensor.
dim (int or tuple of ints, optional): Dim or tuple of dims along which to count non-zeros.
Example::
>>> x = torch.zeros(3,3)
>>> x[torch.randn(3,3) > 0.5] = 1
>>> x
tensor([[0., 1., 1.],
[0., 0., 0.],
[0., 0., 1.]])
>>> torch.count_nonzero(x)
tensor(3)
>>> torch.count_nonzero(x, dim=0)
tensor([0, 1, 2])
"""
...
@overload
def count_nonzero(input: Tensor, dim: _size) -> Tensor:
r"""
count_nonzero(input, dim=None) -> Tensor
Counts the number of non-zero values in the tensor :attr:`input` along the given :attr:`dim`.
If no dim is specified then all non-zeros in the tensor are counted.
Args:
input (Tensor): the input tensor.
dim (int or tuple of ints, optional): Dim or tuple of dims along which to count non-zeros.
Example::
>>> x = torch.zeros(3,3)
>>> x[torch.randn(3,3) > 0.5] = 1
>>> x
tensor([[0., 1., 1.],
[0., 0., 0.],
[0., 0., 1.]])
>>> torch.count_nonzero(x)
tensor(3)
>>> torch.count_nonzero(x, dim=0)
tensor([0, 1, 2])
"""
...
def cov(input: Tensor, *, correction: _int = 1, fweights: Optional[Tensor] = None, aweights: Optional[Tensor] = None) -> Tensor:
r"""
cov(input, *, correction=1, fweights=None, aweights=None) -> Tensor
Estimates the covariance matrix of the variables given by the :attr:`input` matrix, where rows are
the variables and columns are the observations.
A covariance matrix is a square matrix giving the covariance of each pair of variables. The diagonal contains
the variance of each variable (covariance of a variable with itself). By definition, if :attr:`input` represents
a single variable (Scalar or 1D) then its variance is returned.
The sample covariance of the variables :math:`x` and :math:`y` is given by:
.. math::
\text{cov}(x,y) = \frac{\sum^{N}_{i = 1}(x_{i} - \bar{x})(y_{i} - \bar{y})}{\max(0,~N~-~\delta N)}
where :math:`\bar{x}` and :math:`\bar{y}` are the simple means of the :math:`x` and :math:`y` respectively, and
:math:`\delta N` is the :attr:`correction`.
If :attr:`fweights` and/or :attr:`aweights` are provided, the weighted covariance
is calculated, which is given by:
.. math::
\text{cov}_w(x,y) = \frac{\sum^{N}_{i = 1}w_i(x_{i} - \mu_x^*)(y_{i} - \mu_y^*)}
{\max(0,~\sum^{N}_{i = 1}w_i~-~\frac{\sum^{N}_{i = 1}w_ia_i}{\sum^{N}_{i = 1}w_i}~\delta N)}
where :math:`w` denotes :attr:`fweights` or :attr:`aweights` (``f`` and ``a`` for brevity) based on whichever is
provided, or :math:`w = f \times a` if both are provided, and
:math:`\mu_x^* = \frac{\sum^{N}_{i = 1}w_ix_{i} }{\sum^{N}_{i = 1}w_i}` is the weighted mean of the variable. If not
provided, ``f`` and/or ``a`` can be seen as a :math:`\mathbb{1}` vector of appropriate size.
Args:
input (Tensor): A 2D matrix containing multiple variables and observations, or a
Scalar or 1D vector representing a single variable.
Keyword Args:
correction (int, optional): difference between the sample size and sample degrees of freedom.
Defaults to Bessel's correction, ``correction = 1``, which returns the unbiased estimate,
even if both :attr:`fweights` and :attr:`aweights` are specified. ``correction = 0``
returns the simple average.
fweights (tensor, optional): A Scalar or 1D tensor of observation vector frequencies representing the number of
times each observation should be repeated. Its numel must equal the number of columns of :attr:`input`.
Must have integral dtype. Ignored if ``None``. Defaults to ``None``.
aweights (tensor, optional): A Scalar or 1D array of observation vector weights.
These relative weights are typically large for observations considered "important" and smaller for
observations considered less "important". Its numel must equal the number of columns of :attr:`input`.
Must have floating point dtype. Ignored if ``None``. Defaults to ``None``.
Returns:
(Tensor) The covariance matrix of the variables.
.. seealso::
:func:`torch.corrcoef` normalized covariance matrix.
Example::
>>> x = torch.tensor([[0, 2], [1, 1], [2, 0]]).T
>>> x
tensor([[0, 1, 2],
[2, 1, 0]])
>>> torch.cov(x)
tensor([[ 1., -1.],
[-1., 1.]])
>>> torch.cov(x, correction=0)
tensor([[ 0.6667, -0.6667],
[-0.6667, 0.6667]])
>>> fw = torch.randint(1, 10, (3,))
>>> fw
tensor([1, 6, 9])
>>> aw = torch.rand(3)
>>> aw
tensor([0.4282, 0.0255, 0.4144])
>>> torch.cov(x, fweights=fw, aweights=aw)
tensor([[ 0.4169, -0.4169],
[-0.4169, 0.4169]])
"""
...
def cross(input: Tensor, other: Tensor, dim: Optional[_int] = None, *, out: Optional[Tensor] = None) -> Tensor:
r"""
cross(input, other, dim=None, *, out=None) -> Tensor
Returns the cross product of vectors in dimension :attr:`dim` of :attr:`input`
and :attr:`other`.
Supports input of float, double, cfloat and cdouble dtypes. Also supports batches
of vectors, for which it computes the product along the dimension :attr:`dim`.
In this case, the output has the same batch dimensions as the inputs.
.. warning::
If :attr:`dim` is not given, it defaults to the first dimension found
with the size 3. Note that this might be unexpected.
This behavior is deprecated and will be changed to match that of :func:`torch.linalg.cross`
in a future release.
.. seealso::
:func:`torch.linalg.cross`, which has dim=-1 as default.
Args:
input (Tensor): the input tensor.
other (Tensor): the second input tensor
dim (int, optional): the dimension to take the cross-product in.
Keyword args:
out (Tensor, optional): the output tensor.
Example::
>>> a = torch.randn(4, 3)
>>> a
tensor([[-0.3956, 1.1455, 1.6895],
[-0.5849, 1.3672, 0.3599],
[-1.1626, 0.7180, -0.0521],
[-0.1339, 0.9902, -2.0225]])
>>> b = torch.randn(4, 3)
>>> b
tensor([[-0.0257, -1.4725, -1.2251],
[-1.1479, -0.7005, -1.9757],
[-1.3904, 0.3726, -1.1836],
[-0.9688, -0.7153, 0.2159]])
>>> torch.cross(a, b, dim=1)
tensor([[ 1.0844, -0.5281, 0.6120],
[-2.4490, -1.5687, 1.9792],
[-0.8304, -1.3037, 0.5650],
[-1.2329, 1.9883, 1.0551]])
>>> torch.cross(a, b)
tensor([[ 1.0844, -0.5281, 0.6120],
[-2.4490, -1.5687, 1.9792],
[-0.8304, -1.3037, 0.5650],
[-1.2329, 1.9883, 1.0551]])
"""
...
def crow_indices_copy(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
r"""
Performs the same operation as :func:`torch.crow_indices`, but all output tensors
are freshly created instead of aliasing the input.
"""
...
@overload
def ctc_loss(log_probs: Tensor, targets: Tensor, input_lengths: _size, target_lengths: _size, blank: _int = 0, reduction: _int = 1, zero_infinity: _bool = False) -> Tensor: ...
@overload
def ctc_loss(log_probs: Tensor, targets: Tensor, input_lengths: Tensor, target_lengths: Tensor, blank: _int = 0, reduction: _int = 1, zero_infinity: _bool = False) -> Tensor: ...
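# Illustrative sketch of the expected shapes (the public wrapper is
# torch.nn.functional.ctc_loss, which takes a string `reduction`):
#   log_probs = torch.randn(50, 16, 20).log_softmax(2)        # (T, N, C)
#   targets = torch.randint(1, 20, (16, 30))                  # (N, S); 0 is the blank
#   input_lengths = torch.full((16,), 50, dtype=torch.long)
#   target_lengths = torch.randint(10, 30, (16,), dtype=torch.long)
#   loss = torch.nn.functional.ctc_loss(log_probs, targets, input_lengths, target_lengths)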
def cudnn_affine_grid_generator(theta: Tensor, N: _int, C: _int, H: _int, W: _int) -> Tensor: ...
def cudnn_batch_norm(input: Tensor, weight: Tensor, bias: Optional[Tensor], running_mean: Optional[Tensor], running_var: Optional[Tensor], training: _bool, exponential_average_factor: _float, epsilon: _float) -> Tuple[Tensor, Tensor, Tensor, Tensor]: ...
def cudnn_convolution(input: Tensor, weight: Tensor, padding: Sequence[Union[_int, SymInt]], stride: Sequence[Union[_int, SymInt]], dilation: Sequence[Union[_int, SymInt]], groups: Union[_int, SymInt], benchmark: _bool, deterministic: _bool, allow_tf32: _bool, *, out: Optional[Tensor] = None) -> Tensor: ...
def cudnn_convolution_add_relu(input: Tensor, weight: Tensor, z: Tensor, alpha: Optional[Union[Number, _complex]], bias: Optional[Tensor], stride: Sequence[Union[_int, SymInt]], padding: Sequence[Union[_int, SymInt]], dilation: Sequence[Union[_int, SymInt]], groups: Union[_int, SymInt]) -> Tensor: ...
def cudnn_convolution_relu(input: Tensor, weight: Tensor, bias: Optional[Tensor], stride: Sequence[Union[_int, SymInt]], padding: Sequence[Union[_int, SymInt]], dilation: Sequence[Union[_int, SymInt]], groups: Union[_int, SymInt]) -> Tensor: ...
def cudnn_convolution_transpose(input: Tensor, weight: Tensor, padding: Sequence[Union[_int, SymInt]], output_padding: Sequence[Union[_int, SymInt]], stride: Sequence[Union[_int, SymInt]], dilation: Sequence[Union[_int, SymInt]], groups: Union[_int, SymInt], benchmark: _bool, deterministic: _bool, allow_tf32: _bool) -> Tensor: ...
def cudnn_grid_sampler(input: Tensor, grid: Tensor) -> Tensor: ...
def cudnn_is_acceptable(input: Tensor) -> _bool: ...
@overload
def cummax(input: Tensor, dim: _int, *, out: Union[Tensor, Tuple[Tensor, ...], List[Tensor], None] = None) -> torch.return_types.cummax:
r"""
cummax(input, dim, *, out=None) -> (Tensor, LongTensor)
Returns a namedtuple ``(values, indices)`` where ``values`` is the cumulative maximum of
elements of :attr:`input` in the dimension :attr:`dim`, and ``indices`` is the index
location of each maximum value found in the dimension :attr:`dim`.
.. math::
y_i = max(x_1, x_2, x_3, \dots, x_i)
Args:
input (Tensor): the input tensor.
dim (int): the dimension to do the operation over
Keyword args:
out (tuple, optional): the result tuple of two output tensors (values, indices)
Example::
>>> a = torch.randn(10)
>>> a
tensor([-0.3449, -1.5447, 0.0685, -1.5104, -1.1706, 0.2259, 1.4696, -1.3284,
1.9946, -0.8209])
>>> torch.cummax(a, dim=0)
torch.return_types.cummax(
values=tensor([-0.3449, -0.3449, 0.0685, 0.0685, 0.0685, 0.2259, 1.4696, 1.4696,
1.9946, 1.9946]),
indices=tensor([0, 0, 2, 2, 2, 5, 6, 6, 8, 8]))
"""
...
@overload
def cummax(input: Tensor, dim: Union[str, ellipsis, None], *, out: Union[Tensor, Tuple[Tensor, ...], List[Tensor], None] = None) -> torch.return_types.cummax:
r"""
cummax(input, dim, *, out=None) -> (Tensor, LongTensor)
Returns a namedtuple ``(values, indices)`` where ``values`` is the cumulative maximum of
elements of :attr:`input` in the dimension :attr:`dim`, and ``indices`` is the index
location of each maximum value found in the dimension :attr:`dim`.
.. math::
y_i = max(x_1, x_2, x_3, \dots, x_i)
Args:
input (Tensor): the input tensor.
dim (int): the dimension to do the operation over
Keyword args:
out (tuple, optional): the result tuple of two output tensors (values, indices)
Example::
>>> a = torch.randn(10)
>>> a
tensor([-0.3449, -1.5447, 0.0685, -1.5104, -1.1706, 0.2259, 1.4696, -1.3284,
1.9946, -0.8209])
>>> torch.cummax(a, dim=0)
torch.return_types.cummax(
values=tensor([-0.3449, -0.3449, 0.0685, 0.0685, 0.0685, 0.2259, 1.4696, 1.4696,
1.9946, 1.9946]),
indices=tensor([0, 0, 2, 2, 2, 5, 6, 6, 8, 8]))
"""
...
@overload
def cummin(input: Tensor, dim: _int, *, out: Union[Tensor, Tuple[Tensor, ...], List[Tensor], None] = None) -> torch.return_types.cummin:
r"""
cummin(input, dim, *, out=None) -> (Tensor, LongTensor)
Returns a namedtuple ``(values, indices)`` where ``values`` is the cumulative minimum of
elements of :attr:`input` in the dimension :attr:`dim`, and ``indices`` is the index
location of each minimum value found in the dimension :attr:`dim`.
.. math::
y_i = min(x_1, x_2, x_3, \dots, x_i)
Args:
input (Tensor): the input tensor.
dim (int): the dimension to do the operation over
Keyword args:
out (tuple, optional): the result tuple of two output tensors (values, indices)
Example::
>>> a = torch.randn(10)
>>> a
tensor([-0.2284, -0.6628, 0.0975, 0.2680, -1.3298, -0.4220, -0.3885, 1.1762,
0.9165, 1.6684])
>>> torch.cummin(a, dim=0)
torch.return_types.cummin(
values=tensor([-0.2284, -0.6628, -0.6628, -0.6628, -1.3298, -1.3298, -1.3298, -1.3298,
-1.3298, -1.3298]),
indices=tensor([0, 1, 1, 1, 4, 4, 4, 4, 4, 4]))
"""
...
@overload
def cummin(input: Tensor, dim: Union[str, ellipsis, None], *, out: Union[Tensor, Tuple[Tensor, ...], List[Tensor], None] = None) -> torch.return_types.cummin:
r"""
cummin(input, dim, *, out=None) -> (Tensor, LongTensor)
Returns a namedtuple ``(values, indices)`` where ``values`` is the cumulative minimum of
elements of :attr:`input` in the dimension :attr:`dim`, and ``indices`` is the index
location of each minimum value found in the dimension :attr:`dim`.
.. math::
y_i = min(x_1, x_2, x_3, \dots, x_i)
Args:
input (Tensor): the input tensor.
dim (int): the dimension to do the operation over
Keyword args:
out (tuple, optional): the result tuple of two output tensors (values, indices)
Example::
>>> a = torch.randn(10)
>>> a
tensor([-0.2284, -0.6628, 0.0975, 0.2680, -1.3298, -0.4220, -0.3885, 1.1762,
0.9165, 1.6684])
>>> torch.cummin(a, dim=0)
torch.return_types.cummin(
values=tensor([-0.2284, -0.6628, -0.6628, -0.6628, -1.3298, -1.3298, -1.3298, -1.3298,
-1.3298, -1.3298]),
indices=tensor([0, 1, 1, 1, 4, 4, 4, 4, 4, 4]))
"""
...
@overload
def cumprod(input: Tensor, dim: _int, *, dtype: Optional[_dtype] = None, out: Optional[Tensor] = None) -> Tensor:
r"""
cumprod(input, dim, *, dtype=None, out=None) -> Tensor
Returns the cumulative product of elements of :attr:`input` in the dimension
:attr:`dim`.
For example, if :attr:`input` is a vector of size N, the result will also be
a vector of size N, with elements.
.. math::
y_i = x_1 \times x_2\times x_3\times \dots \times x_i
Args:
input (Tensor): the input tensor.
dim (int): the dimension to do the operation over
Keyword args:
dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.
If specified, the input tensor is casted to :attr:`dtype` before the operation
is performed. This is useful for preventing data type overflows. Default: None.
out (Tensor, optional): the output tensor.
Example::
>>> a = torch.randn(10)
>>> a
tensor([ 0.6001, 0.2069, -0.1919, 0.9792, 0.6727, 1.0062, 0.4126,
-0.2129, -0.4206, 0.1968])
>>> torch.cumprod(a, dim=0)
tensor([ 0.6001, 0.1241, -0.0238, -0.0233, -0.0157, -0.0158, -0.0065,
0.0014, -0.0006, -0.0001])
>>> a[5] = 0.0
>>> torch.cumprod(a, dim=0)
tensor([ 0.6001, 0.1241, -0.0238, -0.0233, -0.0157, -0.0000, -0.0000,
0.0000, -0.0000, -0.0000])
"""
...
@overload
def cumprod(input: Tensor, dim: Union[str, ellipsis, None], *, dtype: Optional[_dtype] = None, out: Optional[Tensor] = None) -> Tensor:
r"""
cumprod(input, dim, *, dtype=None, out=None) -> Tensor
Returns the cumulative product of elements of :attr:`input` in the dimension
:attr:`dim`.
For example, if :attr:`input` is a vector of size N, the result will also be
a vector of size N, with elements.
.. math::
y_i = x_1 \times x_2\times x_3\times \dots \times x_i
Args:
input (Tensor): the input tensor.
dim (int): the dimension to do the operation over
Keyword args:
dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.
If specified, the input tensor is casted to :attr:`dtype` before the operation
is performed. This is useful for preventing data type overflows. Default: None.
out (Tensor, optional): the output tensor.
Example::
>>> a = torch.randn(10)
>>> a
tensor([ 0.6001, 0.2069, -0.1919, 0.9792, 0.6727, 1.0062, 0.4126,
-0.2129, -0.4206, 0.1968])
>>> torch.cumprod(a, dim=0)
tensor([ 0.6001, 0.1241, -0.0238, -0.0233, -0.0157, -0.0158, -0.0065,
0.0014, -0.0006, -0.0001])
>>> a[5] = 0.0
>>> torch.cumprod(a, dim=0)
tensor([ 0.6001, 0.1241, -0.0238, -0.0233, -0.0157, -0.0000, -0.0000,
0.0000, -0.0000, -0.0000])
"""
...
@overload
def cumsum(input: Tensor, dim: _int, *, dtype: Optional[_dtype] = None, out: Optional[Tensor] = None) -> Tensor:
r"""
cumsum(input, dim, *, dtype=None, out=None) -> Tensor
Returns the cumulative sum of elements of :attr:`input` in the dimension
:attr:`dim`.
For example, if :attr:`input` is a vector of size N, the result will also be
a vector of size N, with elements.
.. math::
y_i = x_1 + x_2 + x_3 + \dots + x_i
Args:
input (Tensor): the input tensor.
dim (int): the dimension to do the operation over
Keyword args:
dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.
If specified, the input tensor is casted to :attr:`dtype` before the operation
is performed. This is useful for preventing data type overflows. Default: None.
out (Tensor, optional): the output tensor.
Example::
>>> a = torch.randint(1, 20, (10,))
>>> a
tensor([13, 7, 3, 10, 13, 3, 15, 10, 9, 10])
>>> torch.cumsum(a, dim=0)
tensor([13, 20, 23, 33, 46, 49, 64, 74, 83, 93])
"""
...
@overload
def cumsum(input: Tensor, dim: Union[str, ellipsis, None], *, dtype: Optional[_dtype] = None, out: Optional[Tensor] = None) -> Tensor:
r"""
cumsum(input, dim, *, dtype=None, out=None) -> Tensor
Returns the cumulative sum of elements of :attr:`input` in the dimension
:attr:`dim`.
For example, if :attr:`input` is a vector of size N, the result will also be
a vector of size N, with elements.
.. math::
y_i = x_1 + x_2 + x_3 + \dots + x_i
Args:
input (Tensor): the input tensor.
dim (int): the dimension to do the operation over
Keyword args:
dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.
If specified, the input tensor is casted to :attr:`dtype` before the operation
is performed. This is useful for preventing data type overflows. Default: None.
out (Tensor, optional): the output tensor.
Example::
>>> a = torch.randint(1, 20, (10,))
>>> a
tensor([13, 7, 3, 10, 13, 3, 15, 10, 9, 10])
>>> torch.cumsum(a, dim=0)
tensor([13, 20, 23, 33, 46, 49, 64, 74, 83, 93])
"""
...
@overload
def cumulative_trapezoid(y: Tensor, x: Tensor, *, dim: _int = -1) -> Tensor:
r"""
cumulative_trapezoid(y, x=None, *, dx=None, dim=-1) -> Tensor
Cumulatively computes the `trapezoidal rule <https://en.wikipedia.org/wiki/Trapezoidal_rule>`_
along :attr:`dim`. By default the spacing between elements is assumed to be 1, but
:attr:`dx` can be used to specify a different constant spacing, and :attr:`x` can be
used to specify arbitrary spacing along :attr:`dim`.
For more details, please read :func:`torch.trapezoid`. The difference between :func:`torch.trapezoid`
and this function is that :func:`torch.trapezoid` returns a value for each integration,
whereas this function returns a cumulative value for every spacing within the integration. This
is analogous to how `.sum` returns a value and `.cumsum` returns a cumulative sum.
Arguments:
y (Tensor): Values to use when computing the trapezoidal rule.
x (Tensor): If specified, defines spacing between values as specified above.
Keyword arguments:
dx (float): constant spacing between values. If neither :attr:`x` or :attr:`dx`
are specified then this defaults to 1. Effectively multiplies the result by its value.
dim (int): The dimension along which to compute the trapezoidal rule.
The last (inner-most) dimension by default.
Examples::
>>> # Cumulatively computes the trapezoidal rule in 1D, spacing is implicitly 1.
>>> y = torch.tensor([1, 5, 10])
>>> torch.cumulative_trapezoid(y)
tensor([3., 10.5])
>>> # Computes the same trapezoidal rule directly up to each element to verify
>>> (1 + 5) / 2
3.0
>>> (1 + 10 + 10) / 2
10.5
>>> # Cumulatively computes the trapezoidal rule in 1D with constant spacing of 2
>>> # NOTE: the result is the same as before, but multiplied by 2
>>> torch.cumulative_trapezoid(y, dx=2)
tensor([6., 21.])
>>> # Cumulatively computes the trapezoidal rule in 1D with arbitrary spacing
>>> x = torch.tensor([1, 3, 6])
>>> torch.cumulative_trapezoid(y, x)
tensor([6., 28.5])
>>> # Computes the same trapezoidal rule directly up to each element to verify
>>> ((3 - 1) * (1 + 5)) / 2
6.0
>>> ((3 - 1) * (1 + 5) + (6 - 3) * (5 + 10)) / 2
28.5
>>> # Cumulatively computes the trapezoidal rule for each row of a 3x3 matrix
>>> y = torch.arange(9).reshape(3, 3)
>>> y
tensor([[0, 1, 2],
[3, 4, 5],
[6, 7, 8]])
>>> torch.cumulative_trapezoid(y)
tensor([[ 0.5, 2.],
[ 3.5, 8.],
[ 6.5, 14.]])
>>> # Cumulatively computes the trapezoidal rule for each column of the matrix
>>> torch.cumulative_trapezoid(y, dim=0)
tensor([[ 1.5, 2.5, 3.5],
[ 6.0, 8.0, 10.0]])
>>> # Cumulatively computes the trapezoidal rule for each row of a 3x3 ones matrix
>>> # with the same arbitrary spacing
>>> y = torch.ones(3, 3)
>>> x = torch.tensor([1, 3, 6])
>>> torch.cumulative_trapezoid(y, x)
tensor([[2., 5.],
[2., 5.],
[2., 5.]])
>>> # Cumulatively computes the trapezoidal rule for each row of a 3x3 ones matrix
>>> # with different arbitrary spacing per row
>>> y = torch.ones(3, 3)
>>> x = torch.tensor([[1, 2, 3], [1, 3, 5], [1, 4, 7]])
>>> torch.cumulative_trapezoid(y, x)
tensor([[1., 2.],
[2., 4.],
[3., 6.]])
"""
...
@overload
def cumulative_trapezoid(y: Tensor, *, dx: Union[Number, _complex] = 1, dim: _int = -1) -> Tensor:
r"""
cumulative_trapezoid(y, x=None, *, dx=None, dim=-1) -> Tensor
Cumulatively computes the `trapezoidal rule <https://en.wikipedia.org/wiki/Trapezoidal_rule>`_
along :attr:`dim`. By default the spacing between elements is assumed to be 1, but
:attr:`dx` can be used to specify a different constant spacing, and :attr:`x` can be
used to specify arbitrary spacing along :attr:`dim`.
For more details, please read :func:`torch.trapezoid`. The difference between :func:`torch.trapezoid`
and this function is that :func:`torch.trapezoid` returns a value for each integration,
whereas this function returns a cumulative value for every spacing within the integration. This
is analogous to how `.sum` returns a value and `.cumsum` returns a cumulative sum.
Arguments:
y (Tensor): Values to use when computing the trapezoidal rule.
x (Tensor): If specified, defines spacing between values as specified above.
Keyword arguments:
dx (float): constant spacing between values. If neither :attr:`x` or :attr:`dx`
are specified then this defaults to 1. Effectively multiplies the result by its value.
dim (int): The dimension along which to compute the trapezoidal rule.
The last (inner-most) dimension by default.
Examples::
>>> # Cumulatively computes the trapezoidal rule in 1D, spacing is implicitly 1.
>>> y = torch.tensor([1, 5, 10])
>>> torch.cumulative_trapezoid(y)
tensor([3., 10.5])
>>> # Computes the same trapezoidal rule directly up to each element to verify
>>> (1 + 5) / 2
3.0
>>> (1 + 10 + 10) / 2
10.5
>>> # Cumulatively computes the trapezoidal rule in 1D with constant spacing of 2
>>> # NOTE: the result is the same as before, but multiplied by 2
>>> torch.cumulative_trapezoid(y, dx=2)
tensor([6., 21.])
>>> # Cumulatively computes the trapezoidal rule in 1D with arbitrary spacing
>>> x = torch.tensor([1, 3, 6])
>>> torch.cumulative_trapezoid(y, x)
tensor([6., 28.5])
>>> # Computes the same trapezoidal rule directly up to each element to verify
>>> ((3 - 1) * (1 + 5)) / 2
6.0
>>> ((3 - 1) * (1 + 5) + (6 - 3) * (5 + 10)) / 2
28.5
>>> # Cumulatively computes the trapezoidal rule for each row of a 3x3 matrix
>>> y = torch.arange(9).reshape(3, 3)
>>> y
tensor([[0, 1, 2],
[3, 4, 5],
[6, 7, 8]])
>>> torch.cumulative_trapezoid(y)
tensor([[ 0.5, 2.],
[ 3.5, 8.],
[ 6.5, 14.]])
>>> # Cumulatively computes the trapezoidal rule for each column of the matrix
>>> torch.cumulative_trapezoid(y, dim=0)
tensor([[ 1.5, 2.5, 3.5],
[ 6.0, 8.0, 10.0]])
>>> # Cumulatively computes the trapezoidal rule for each row of a 3x3 ones matrix
>>> # with the same arbitrary spacing
>>> y = torch.ones(3, 3)
>>> x = torch.tensor([1, 3, 6])
>>> torch.cumulative_trapezoid(y, x)
tensor([[2., 5.],
[2., 5.],
[2., 5.]])
>>> # Cumulatively computes the trapezoidal rule for each row of a 3x3 ones matrix
>>> # with different arbitrary spacing per row
>>> y = torch.ones(3, 3)
>>> x = torch.tensor([[1, 2, 3], [1, 3, 5], [1, 4, 7]])
>>> torch.cumulative_trapezoid(y, x)
tensor([[1., 2.],
[2., 4.],
[3., 6.]])
"""
...
def deg2rad(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
r"""
deg2rad(input, *, out=None) -> Tensor
Returns a new tensor with each of the elements of :attr:`input`
converted from angles in degrees to radians.
Args:
input (Tensor): the input tensor.
Keyword arguments:
out (Tensor, optional): the output tensor.
Example::
>>> a = torch.tensor([[180.0, -180.0], [360.0, -360.0], [90.0, -90.0]])
>>> torch.deg2rad(a)
tensor([[ 3.1416, -3.1416],
[ 6.2832, -6.2832],
[ 1.5708, -1.5708]])
"""
...
def deg2rad_(input: Tensor) -> Tensor: ...
@overload
def dequantize(input: Tensor) -> Tensor:
r"""
dequantize(tensor) -> Tensor
Returns an fp32 Tensor by dequantizing a quantized Tensor
Args:
tensor (Tensor): A quantized Tensor
.. function:: dequantize(tensors) -> sequence of Tensors
:noindex:
Given a list of quantized Tensors, dequantize them and return a list of fp32 Tensors
Args:
tensors (sequence of Tensors): A list of quantized Tensors
"""
...
@overload
def dequantize(tensors: Union[Tuple[Tensor, ...], List[Tensor]]) -> Tuple[Tensor, ...]:
r"""
dequantize(tensor) -> Tensor
Returns an fp32 Tensor by dequantizing a quantized Tensor
Args:
tensor (Tensor): A quantized Tensor
.. function:: dequantize(tensors) -> sequence of Tensors
:noindex:
Given a list of quantized Tensors, dequantize them and return a list of fp32 Tensors
Args:
tensors (sequence of Tensors): A list of quantized Tensors
"""
...
def det(input: Tensor) -> Tensor:
r"""
det(input) -> Tensor
Alias for :func:`torch.linalg.det`
"""
...
def detach(input: Tensor) -> Tensor: ...
def detach_(input: Tensor) -> Tensor: ...
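# Illustrative sketch: detach returns a tensor that shares storage with `input`
# but is cut off from the autograd graph; detach_ does the same in place:
#   x = torch.randn(3, requires_grad=True)
#   y = torch.detach(x)
#   y.requires_grad   # False, while y still shares memory with x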
def detach_copy(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
r"""
Performs the same operation as :func:`torch.detach`, but all output tensors
are freshly created instead of aliasing the input.
"""
...
def diag(input: Tensor, diagonal: _int = 0, *, out: Optional[Tensor] = None) -> Tensor:
r"""
diag(input, diagonal=0, *, out=None) -> Tensor
- If :attr:`input` is a vector (1-D tensor), then returns a 2-D square tensor
with the elements of :attr:`input` as the diagonal.
- If :attr:`input` is a matrix (2-D tensor), then returns a 1-D tensor with
the diagonal elements of :attr:`input`.
The argument :attr:`diagonal` controls which diagonal to consider:
- If :attr:`diagonal` = 0, it is the main diagonal.
- If :attr:`diagonal` > 0, it is above the main diagonal.
- If :attr:`diagonal` < 0, it is below the main diagonal.
Args:
input (Tensor): the input tensor.
diagonal (int, optional): the diagonal to consider
Keyword args:
out (Tensor, optional): the output tensor.
.. seealso::
:func:`torch.diagonal` always returns the diagonal of its input.
:func:`torch.diagflat` always constructs a tensor with diagonal elements
specified by the input.
Examples:
Get the square matrix where the input vector is the diagonal::
>>> a = torch.randn(3)
>>> a
tensor([ 0.5950,-0.0872, 2.3298])
>>> torch.diag(a)
tensor([[ 0.5950, 0.0000, 0.0000],
[ 0.0000,-0.0872, 0.0000],
[ 0.0000, 0.0000, 2.3298]])
>>> torch.diag(a, 1)
tensor([[ 0.0000, 0.5950, 0.0000, 0.0000],
[ 0.0000, 0.0000,-0.0872, 0.0000],
[ 0.0000, 0.0000, 0.0000, 2.3298],
[ 0.0000, 0.0000, 0.0000, 0.0000]])
Get the k-th diagonal of a given matrix::
>>> a = torch.randn(3, 3)
>>> a
tensor([[-0.4264, 0.0255,-0.1064],
[ 0.8795,-0.2429, 0.1374],
[ 0.1029,-0.6482,-1.6300]])
>>> torch.diag(a, 0)
tensor([-0.4264,-0.2429,-1.6300])
>>> torch.diag(a, 1)
tensor([ 0.0255, 0.1374])
"""
...
def diag_embed(input: Tensor, offset: _int = 0, dim1: _int = -2, dim2: _int = -1) -> Tensor:
r"""
diag_embed(input, offset=0, dim1=-2, dim2=-1) -> Tensor
Creates a tensor whose diagonals of certain 2D planes (specified by
:attr:`dim1` and :attr:`dim2`) are filled by :attr:`input`.
To facilitate creating batched diagonal matrices, the 2D planes formed by
the last two dimensions of the returned tensor are chosen by default.
The argument :attr:`offset` controls which diagonal to consider:
- If :attr:`offset` = 0, it is the main diagonal.
- If :attr:`offset` > 0, it is above the main diagonal.
- If :attr:`offset` < 0, it is below the main diagonal.
The size of the new matrix is calculated so that the specified diagonal
has the size of the last dimension of :attr:`input`.
Note that for :attr:`offset` other than :math:`0`, the order of :attr:`dim1`
and :attr:`dim2` matters. Exchanging them is equivalent to changing the
sign of :attr:`offset`.
Applying :meth:`torch.diagonal` to the output of this function with
the same arguments yields a matrix identical to input. However,
:meth:`torch.diagonal` has different default dimensions, so those
need to be explicitly specified.
Args:
input (Tensor): the input tensor. Must be at least 1-dimensional.
offset (int, optional): which diagonal to consider. Default: 0
(main diagonal).
dim1 (int, optional): first dimension with respect to which to
take diagonal. Default: -2.
dim2 (int, optional): second dimension with respect to which to
take diagonal. Default: -1.
Example::
>>> a = torch.randn(2, 3)
>>> torch.diag_embed(a)
tensor([[[ 1.5410, 0.0000, 0.0000],
[ 0.0000, -0.2934, 0.0000],
[ 0.0000, 0.0000, -2.1788]],
[[ 0.5684, 0.0000, 0.0000],
[ 0.0000, -1.0845, 0.0000],
[ 0.0000, 0.0000, -1.3986]]])
>>> torch.diag_embed(a, offset=1, dim1=0, dim2=2)
tensor([[[ 0.0000, 1.5410, 0.0000, 0.0000],
[ 0.0000, 0.5684, 0.0000, 0.0000]],
[[ 0.0000, 0.0000, -0.2934, 0.0000],
[ 0.0000, 0.0000, -1.0845, 0.0000]],
[[ 0.0000, 0.0000, 0.0000, -2.1788],
[ 0.0000, 0.0000, 0.0000, -1.3986]],
[[ 0.0000, 0.0000, 0.0000, 0.0000],
[ 0.0000, 0.0000, 0.0000, 0.0000]]])
"""
...
def diagflat(input: Tensor, offset: _int = 0) -> Tensor:
r"""
diagflat(input, offset=0) -> Tensor
- If :attr:`input` is a vector (1-D tensor), then returns a 2-D square tensor
with the elements of :attr:`input` as the diagonal.
- If :attr:`input` is a tensor with more than one dimension, then returns a
2-D tensor with diagonal elements equal to a flattened :attr:`input`.
The argument :attr:`offset` controls which diagonal to consider:
- If :attr:`offset` = 0, it is the main diagonal.
- If :attr:`offset` > 0, it is above the main diagonal.
- If :attr:`offset` < 0, it is below the main diagonal.
Args:
input (Tensor): the input tensor.
offset (int, optional): the diagonal to consider. Default: 0 (main
diagonal).
Examples::
>>> a = torch.randn(3)
>>> a
tensor([-0.2956, -0.9068, 0.1695])
>>> torch.diagflat(a)
tensor([[-0.2956, 0.0000, 0.0000],
[ 0.0000, -0.9068, 0.0000],
[ 0.0000, 0.0000, 0.1695]])
>>> torch.diagflat(a, 1)
tensor([[ 0.0000, -0.2956, 0.0000, 0.0000],
[ 0.0000, 0.0000, -0.9068, 0.0000],
[ 0.0000, 0.0000, 0.0000, 0.1695],
[ 0.0000, 0.0000, 0.0000, 0.0000]])
>>> a = torch.randn(2, 2)
>>> a
tensor([[ 0.2094, -0.3018],
[-0.1516, 1.9342]])
>>> torch.diagflat(a)
tensor([[ 0.2094, 0.0000, 0.0000, 0.0000],
[ 0.0000, -0.3018, 0.0000, 0.0000],
[ 0.0000, 0.0000, -0.1516, 0.0000],
[ 0.0000, 0.0000, 0.0000, 1.9342]])
"""
...
@overload
def diagonal(input: Tensor, offset: _int = 0, dim1: _int = 0, dim2: _int = 1) -> Tensor:
r"""
diagonal(input, offset=0, dim1=0, dim2=1) -> Tensor
Returns a partial view of :attr:`input` with its diagonal elements
with respect to :attr:`dim1` and :attr:`dim2` appended as a dimension
at the end of the shape.
The argument :attr:`offset` controls which diagonal to consider:
- If :attr:`offset` = 0, it is the main diagonal.
- If :attr:`offset` > 0, it is above the main diagonal.
- If :attr:`offset` < 0, it is below the main diagonal.
Applying :meth:`torch.diag_embed` to the output of this function with
the same arguments yields a diagonal matrix with the diagonal entries
of the input. However, :meth:`torch.diag_embed` has different default
dimensions, so those need to be explicitly specified.
Args:
input (Tensor): the input tensor. Must be at least 2-dimensional.
offset (int, optional): which diagonal to consider. Default: 0
(main diagonal).
dim1 (int, optional): first dimension with respect to which to
take diagonal. Default: 0.
dim2 (int, optional): second dimension with respect to which to
take diagonal. Default: 1.
.. note:: To take a batch diagonal, pass in dim1=-2, dim2=-1.
Examples::
>>> a = torch.randn(3, 3)
>>> a
tensor([[-1.0854, 1.1431, -0.1752],
[ 0.8536, -0.0905, 0.0360],
[ 0.6927, -0.3735, -0.4945]])
>>> torch.diagonal(a, 0)
tensor([-1.0854, -0.0905, -0.4945])
>>> torch.diagonal(a, 1)
tensor([ 1.1431, 0.0360])
>>> x = torch.randn(2, 5, 4, 2)
>>> torch.diagonal(x, offset=-1, dim1=1, dim2=2)
tensor([[[-1.2631, 0.3755, -1.5977, -1.8172],
[-1.1065, 1.0401, -0.2235, -0.7938]],
[[-1.7325, -0.3081, 0.6166, 0.2335],
[ 1.0500, 0.7336, -0.3836, -1.1015]]])
"""
...
@overload
def diagonal(input: Tensor, *, outdim: Union[str, ellipsis, None], dim1: Union[str, ellipsis, None], dim2: Union[str, ellipsis, None], offset: _int = 0) -> Tensor:
r"""
diagonal(input, offset=0, dim1=0, dim2=1) -> Tensor
Returns a partial view of :attr:`input` with its diagonal elements
with respect to :attr:`dim1` and :attr:`dim2` appended as a dimension
at the end of the shape.
The argument :attr:`offset` controls which diagonal to consider:
- If :attr:`offset` = 0, it is the main diagonal.
- If :attr:`offset` > 0, it is above the main diagonal.
- If :attr:`offset` < 0, it is below the main diagonal.
Applying :meth:`torch.diag_embed` to the output of this function with
the same arguments yields a diagonal matrix with the diagonal entries
of the input. However, :meth:`torch.diag_embed` has different default
dimensions, so those need to be explicitly specified.
Args:
input (Tensor): the input tensor. Must be at least 2-dimensional.
offset (int, optional): which diagonal to consider. Default: 0
(main diagonal).
dim1 (int, optional): first dimension with respect to which to
take diagonal. Default: 0.
dim2 (int, optional): second dimension with respect to which to
take diagonal. Default: 1.
.. note:: To take a batch diagonal, pass in dim1=-2, dim2=-1.
Examples::
>>> a = torch.randn(3, 3)
>>> a
tensor([[-1.0854, 1.1431, -0.1752],
[ 0.8536, -0.0905, 0.0360],
[ 0.6927, -0.3735, -0.4945]])
>>> torch.diagonal(a, 0)
tensor([-1.0854, -0.0905, -0.4945])
>>> torch.diagonal(a, 1)
tensor([ 1.1431, 0.0360])
>>> x = torch.randn(2, 5, 4, 2)
>>> torch.diagonal(x, offset=-1, dim1=1, dim2=2)
tensor([[[-1.2631, 0.3755, -1.5977, -1.8172],
[-1.1065, 1.0401, -0.2235, -0.7938]],
[[-1.7325, -0.3081, 0.6166, 0.2335],
[ 1.0500, 0.7336, -0.3836, -1.1015]]])
"""
...
def diagonal_copy(input: Tensor, offset: _int = 0, dim1: _int = 0, dim2: _int = 1, *, out: Optional[Tensor] = None) -> Tensor:
r"""
Performs the same operation as :func:`torch.diagonal`, but all output tensors
are freshly created instead of aliasing the input.
"""
...
def diagonal_scatter(input: Tensor, src: Tensor, offset: _int = 0, dim1: _int = 0, dim2: _int = 1) -> Tensor:
r"""
diagonal_scatter(input, src, offset=0, dim1=0, dim2=1) -> Tensor
Embeds the values of the :attr:`src` tensor into :attr:`input` along
the diagonal elements of :attr:`input`, with respect to :attr:`dim1`
and :attr:`dim2`.
This function returns a tensor with fresh storage; it does not
return a view.
The argument :attr:`offset` controls which diagonal to consider:
- If :attr:`offset` = 0, it is the main diagonal.
- If :attr:`offset` > 0, it is above the main diagonal.
- If :attr:`offset` < 0, it is below the main diagonal.
Args:
input (Tensor): the input tensor. Must be at least 2-dimensional.
src (Tensor): the tensor to embed into :attr:`input`.
offset (int, optional): which diagonal to consider. Default: 0
(main diagonal).
dim1 (int, optional): first dimension with respect to which to
take diagonal. Default: 0.
dim2 (int, optional): second dimension with respect to which to
take diagonal. Default: 1.
.. note::
:attr:`src` must be of the proper size in order to be embedded
into :attr:`input`. Specifically, it should have the same shape as
``torch.diagonal(input, offset, dim1, dim2)``
Examples::
>>> a = torch.zeros(3, 3)
>>> a
tensor([[0., 0., 0.],
[0., 0., 0.],
[0., 0., 0.]])
>>> torch.diagonal_scatter(a, torch.ones(3), 0)
tensor([[1., 0., 0.],
[0., 1., 0.],
[0., 0., 1.]])
>>> torch.diagonal_scatter(a, torch.ones(2), 1)
tensor([[0., 1., 0.],
[0., 0., 1.],
[0., 0., 0.]])
"""
...
def diff(input: Tensor, n: _int = 1, dim: _int = -1, prepend: Optional[Tensor] = None, append: Optional[Tensor] = None, *, out: Optional[Tensor] = None) -> Tensor:
r"""
diff(input, n=1, dim=-1, prepend=None, append=None) -> Tensor
Computes the n-th forward difference along the given dimension.
The first-order differences are given by `out[i] = input[i + 1] - input[i]`. Higher-order
differences are calculated by using :func:`torch.diff` recursively.
Args:
input (Tensor): the tensor to compute the differences on
n (int, optional): the number of times to recursively compute the difference
dim (int, optional): the dimension to compute the difference along.
Default is the last dimension.
prepend, append (Tensor, optional): values to prepend or append to
:attr:`input` along :attr:`dim` before computing the difference.
Their dimensions must be equivalent to those of input, and their shapes
must match input's shape except on :attr:`dim`.
Keyword args:
out (Tensor, optional): the output tensor.
Example::
>>> a = torch.tensor([1, 3, 2])
>>> torch.diff(a)
tensor([ 2, -1])
>>> b = torch.tensor([4, 5])
>>> torch.diff(a, append=b)
tensor([ 2, -1, 2, 1])
>>> c = torch.tensor([[1, 2, 3], [3, 4, 5]])
>>> torch.diff(c, dim=0)
tensor([[2, 2, 2]])
>>> torch.diff(c, dim=1)
tensor([[1, 1],
[1, 1]])
"""
...
def digamma(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
r"""
digamma(input, *, out=None) -> Tensor
Alias for :func:`torch.special.digamma`.
"""
...
def dist(input: Tensor, other: Tensor, p: Union[Number, _complex] = 2) -> Tensor:
r"""
dist(input, other, p=2) -> Tensor
Returns the p-norm of (:attr:`input` - :attr:`other`)
The shapes of :attr:`input` and :attr:`other` must be
:ref:`broadcastable <broadcasting-semantics>`.
Args:
input (Tensor): the input tensor.
other (Tensor): the Right-hand-side input tensor
p (float, optional): the norm to be computed
Example::
>>> x = torch.randn(4)
>>> x
tensor([-1.5393, -0.8675, 0.5916, 1.6321])
>>> y = torch.randn(4)
>>> y
tensor([ 0.0967, -1.0511, 0.6295, 0.8360])
>>> torch.dist(x, y, 3.5)
tensor(1.6727)
>>> torch.dist(x, y, 3)
tensor(1.6973)
>>> torch.dist(x, y, 0)
tensor(4.)
>>> torch.dist(x, y, 1)
tensor(2.6537)
"""
...
def div(input: Union[Tensor, Number], other: Union[Tensor, Number], *, rounding_mode: Optional[str] = None, out: Optional[Tensor] = None) -> Tensor:
r"""
div(input, other, *, rounding_mode=None, out=None) -> Tensor
Divides each element of :attr:`input` by the corresponding element of
:attr:`other`.
.. math::
\text{out}_i = \frac{\text{input}_i}{\text{other}_i}
.. note::
By default, this performs a "true" division like Python 3.
See the :attr:`rounding_mode` argument for floor division.
Supports :ref:`broadcasting to a common shape <broadcasting-semantics>`,
:ref:`type promotion <type-promotion-doc>`, and integer, float, and complex inputs.
Always promotes integer types to the default scalar type.
Args:
input (Tensor): the dividend
other (Tensor or Number): the divisor
Keyword args:
rounding_mode (str, optional): Type of rounding applied to the result:
* None - default behavior. Performs no rounding and, if both :attr:`input` and
:attr:`other` are integer types, promotes the inputs to the default scalar type.
Equivalent to true division in Python (the ``/`` operator) and NumPy's ``np.true_divide``.
* ``"trunc"`` - rounds the results of the division towards zero.
Equivalent to C-style integer division.
* ``"floor"`` - rounds the results of the division down.
Equivalent to floor division in Python (the ``//`` operator) and NumPy's ``np.floor_divide``.
out (Tensor, optional): the output tensor.
Examples::
>>> x = torch.tensor([ 0.3810, 1.2774, -0.2972, -0.3719, 0.4637])
>>> torch.div(x, 0.5)
tensor([ 0.7620, 2.5548, -0.5944, -0.7438, 0.9274])
>>> a = torch.tensor([[-0.3711, -1.9353, -0.4605, -0.2917],
... [ 0.1815, -1.0111, 0.9805, -1.5923],
... [ 0.1062, 1.4581, 0.7759, -1.2344],
... [-0.1830, -0.0313, 1.1908, -1.4757]])
>>> b = torch.tensor([ 0.8032, 0.2930, -0.8113, -0.2308])
>>> torch.div(a, b)
tensor([[-0.4620, -6.6051, 0.5676, 1.2639],
[ 0.2260, -3.4509, -1.2086, 6.8990],
[ 0.1322, 4.9764, -0.9564, 5.3484],
[-0.2278, -0.1068, -1.4678, 6.3938]])
>>> torch.div(a, b, rounding_mode='trunc')
tensor([[-0., -6., 0., 1.],
[ 0., -3., -1., 6.],
[ 0., 4., -0., 5.],
[-0., -0., -1., 6.]])
>>> torch.div(a, b, rounding_mode='floor')
tensor([[-1., -7., 0., 1.],
[ 0., -4., -2., 6.],
[ 0., 4., -1., 5.],
[-1., -1., -2., 6.]])
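As noted above, true division of integer inputs promotes to the default
scalar type, so the result is floating point::
>>> torch.div(torch.tensor([4, 3]), torch.tensor([2, 2]))
tensor([2.0000, 1.5000])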
"""
...
@overload
def divide(input: Tensor, other: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
r"""
divide(input, other, *, rounding_mode=None, out=None) -> Tensor
Alias for :func:`torch.div`.
"""
...
@overload
def divide(input: Tensor, other: Tensor, *, rounding_mode: Optional[str], out: Optional[Tensor] = None) -> Tensor:
r"""
divide(input, other, *, rounding_mode=None, out=None) -> Tensor
Alias for :func:`torch.div`.
"""
...
@overload
def divide(input: Tensor, other: Union[Number, _complex], *, rounding_mode: Optional[str]) -> Tensor:
r"""
divide(input, other, *, rounding_mode=None, out=None) -> Tensor
Alias for :func:`torch.div`.
"""
...
@overload
def divide(input: Tensor, other: Union[Number, _complex]) -> Tensor:
r"""
divide(input, other, *, rounding_mode=None, out=None) -> Tensor
Alias for :func:`torch.div`.
"""
...
def dot(input: Tensor, tensor: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
r"""
dot(input, other, *, out=None) -> Tensor
Computes the dot product of two 1D tensors.
.. note::
Unlike NumPy's dot, torch.dot intentionally only supports computing the dot product
of two 1D tensors with the same number of elements.
Args:
input (Tensor): first tensor in the dot product, must be 1D.
other (Tensor): second tensor in the dot product, must be 1D.
Keyword args:
out (Tensor, optional): the output tensor.
Example::
>>> torch.dot(torch.tensor([2, 3]), torch.tensor([2, 1]))
tensor(7)
"""
...
def dropout(input: Tensor, p: _float, train: _bool) -> Tensor: ...
def dropout_(input: Tensor, p: _float, train: _bool) -> Tensor: ...
def dsmm(input: Tensor, mat2: Tensor) -> Tensor: ...
@overload
def dsplit(input: Tensor, sections: _int) -> Tuple[Tensor, ...]:
r"""
dsplit(input, indices_or_sections) -> List of Tensors
Splits :attr:`input`, a tensor with three or more dimensions, into multiple tensors
depthwise according to :attr:`indices_or_sections`. Each split is a view of
:attr:`input`.
This is equivalent to calling torch.tensor_split(input, indices_or_sections, dim=2)
(the split dimension is 2), except that if :attr:`indices_or_sections` is an integer
it must evenly divide the split dimension or a runtime error will be thrown.
This function is based on NumPy's :func:`numpy.dsplit`.
Args:
input (Tensor): tensor to split.
indices_or_sections (int or list or tuple of ints): See argument in :func:`torch.tensor_split`.
Example::
>>> t = torch.arange(16.0).reshape(2, 2, 4)
>>> t
tensor([[[ 0., 1., 2., 3.],
[ 4., 5., 6., 7.]],
[[ 8., 9., 10., 11.],
[12., 13., 14., 15.]]])
>>> torch.dsplit(t, 2)
(tensor([[[ 0., 1.],
[ 4., 5.]],
[[ 8., 9.],
[12., 13.]]]),
tensor([[[ 2., 3.],
[ 6., 7.]],
[[10., 11.],
[14., 15.]]]))
>>> torch.dsplit(t, [3, 6])
(tensor([[[ 0., 1., 2.],
[ 4., 5., 6.]],
[[ 8., 9., 10.],
[12., 13., 14.]]]),
tensor([[[ 3.],
[ 7.]],
[[11.],
[15.]]]),
tensor([], size=(2, 2, 0)))
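Because each split is a view, writes through a returned tensor are visible in
:attr:`input`; with ``t`` from above::
>>> parts = torch.dsplit(t, 2)
>>> parts[0][0, 0, 0] = 100.
>>> t[0, 0, 0]
tensor(100.)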
"""
...
@overload
def dsplit(input: Tensor, indices: _size) -> Tuple[Tensor, ...]:
r"""
dsplit(input, indices_or_sections) -> List of Tensors
Splits :attr:`input`, a tensor with three or more dimensions, into multiple tensors
depthwise according to :attr:`indices_or_sections`. Each split is a view of
:attr:`input`.
This is equivalent to calling torch.tensor_split(input, indices_or_sections, dim=2)
(the split dimension is 2), except that if :attr:`indices_or_sections` is an integer
it must evenly divide the split dimension or a runtime error will be thrown.
This function is based on NumPy's :func:`numpy.dsplit`.
Args:
input (Tensor): tensor to split.
indices_or_sections (int or list or tuple of ints): See argument in :func:`torch.tensor_split`.
Example::
>>> t = torch.arange(16.0).reshape(2, 2, 4)
>>> t
tensor([[[ 0., 1., 2., 3.],
[ 4., 5., 6., 7.]],
[[ 8., 9., 10., 11.],
[12., 13., 14., 15.]]])
>>> torch.dsplit(t, 2)
(tensor([[[ 0., 1.],
[ 4., 5.]],
[[ 8., 9.],
[12., 13.]]]),
tensor([[[ 2., 3.],
[ 6., 7.]],
[[10., 11.],
[14., 15.]]]))
>>> torch.dsplit(t, [3, 6])
(tensor([[[ 0., 1., 2.],
[ 4., 5., 6.]],
[[ 8., 9., 10.],
[12., 13., 14.]]]),
tensor([[[ 3.],
[ 7.]],
[[11.],
[15.]]]),
tensor([], size=(2, 2, 0)))
"""
...
def dstack(tensors: Union[Tuple[Tensor, ...], List[Tensor]], *, out: Optional[Tensor] = None) -> Tensor:
r"""
dstack(tensors, *, out=None) -> Tensor
Stack tensors in sequence depthwise (along third axis).
This is equivalent to concatenation along the third axis after 1-D and 2-D tensors have been reshaped by :func:`torch.atleast_3d`.
Args:
tensors (sequence of Tensors): sequence of tensors to concatenate
Keyword args:
out (Tensor, optional): the output tensor.
Example::
>>> a = torch.tensor([1, 2, 3])
>>> b = torch.tensor([4, 5, 6])
>>> torch.dstack((a,b))
tensor([[[1, 4],
[2, 5],
[3, 6]]])
>>> a = torch.tensor([[1],[2],[3]])
>>> b = torch.tensor([[4],[5],[6]])
>>> torch.dstack((a,b))
tensor([[[1, 4]],
[[2, 5]],
[[3, 6]]])
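The equivalence with :func:`torch.atleast_3d` stated above can be checked
directly; with ``a`` and ``b`` from the second example::
>>> torch.equal(torch.dstack((a, b)),
...             torch.cat([torch.atleast_3d(a), torch.atleast_3d(b)], dim=2))
True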
"""
...
def embedding(weight: Tensor, indices: Tensor, padding_idx: Union[_int, SymInt] = -1, scale_grad_by_freq: _bool = False, sparse: _bool = False) -> Tensor: ...
@overload
def embedding_bag(weight: Tensor, indices: Tensor, offsets: Tensor, scale_grad_by_freq: _bool, mode: _int, sparse: _bool, per_sample_weights: Optional[Tensor], include_last_offset: _bool, padding_idx: Optional[_int]) -> Tuple[Tensor, Tensor, Tensor, Tensor]: ...
@overload
def embedding_bag(weight: Tensor, indices: Tensor, offsets: Tensor, scale_grad_by_freq: _bool = False, mode: _int = 0, sparse: _bool = False, per_sample_weights: Optional[Tensor] = None, include_last_offset: _bool = False) -> Tuple[Tensor, Tensor, Tensor, Tensor]: ...
def embedding_renorm_(input: Tensor, indices: Tensor, max_norm: _float, norm_type: _float) -> Tensor: ...
@overload
def empty(size: Sequence[Union[_int, SymInt]], *, memory_format: Optional[memory_format] = None, out: Optional[Tensor] = None, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor:
r"""
empty(*size, *, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False, pin_memory=False, memory_format=torch.contiguous_format) -> Tensor
Returns a tensor filled with uninitialized data. The shape of the tensor is
defined by the variable argument :attr:`size`.
.. note::
If :func:`torch.use_deterministic_algorithms()` and
:attr:`torch.utils.deterministic.fill_uninitialized_memory` are both set to
``True``, the output tensor is initialized to prevent any possible
nondeterministic behavior from using the data as an input to an operation.
Floating point and complex tensors are filled with NaN, and integer tensors
are filled with the maximum value.
Args:
size (int...): a sequence of integers defining the shape of the output tensor.
Can be a variable number of arguments or a collection like a list or tuple.
Keyword args:
out (Tensor, optional): the output tensor.
dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.
Default: if ``None``, uses a global default (see :func:`torch.set_default_dtype`).
layout (:class:`torch.layout`, optional): the desired layout of returned Tensor.
Default: ``torch.strided``.
device (:class:`torch.device`, optional): the desired device of returned tensor.
Default: if ``None``, uses the current device for the default tensor type
(see :func:`torch.set_default_device`). :attr:`device` will be the CPU
for CPU tensor types and the current CUDA device for CUDA tensor types.
requires_grad (bool, optional): If autograd should record operations on the
returned tensor. Default: ``False``.
pin_memory (bool, optional): If set, the returned tensor will be allocated in
pinned memory. Works only for CPU tensors. Default: ``False``.
memory_format (:class:`torch.memory_format`, optional): the desired memory format of
returned Tensor. Default: ``torch.contiguous_format``.
Example::
>>> torch.empty((2,3))
tensor([[ 9.4064e+13, 2.8000e+01, 9.3493e+13],
[ 7.5751e+18, 7.1428e+18, 7.5955e+18]])
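A sketch of the deterministic-fill behavior described in the note above (this
toggles global state; without these settings the values are arbitrary)::
>>> torch.use_deterministic_algorithms(True)
>>> torch.utils.deterministic.fill_uninitialized_memory = True
>>> torch.empty(3)
tensor([nan, nan, nan])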
"""
...
@overload
def empty(*size: _int, memory_format: Optional[memory_format] = None, out: Optional[Tensor] = None, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor:
r"""
empty(*size, *, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False, pin_memory=False, memory_format=torch.contiguous_format) -> Tensor
Returns a tensor filled with uninitialized data. The shape of the tensor is
defined by the variable argument :attr:`size`.
.. note::
If :func:`torch.use_deterministic_algorithms()` and
:attr:`torch.utils.deterministic.fill_uninitialized_memory` are both set to
``True``, the output tensor is initialized to prevent any possible
nondeterministic behavior from using the data as an input to an operation.
Floating point and complex tensors are filled with NaN, and integer tensors
are filled with the maximum value.
Args:
size (int...): a sequence of integers defining the shape of the output tensor.
Can be a variable number of arguments or a collection like a list or tuple.
Keyword args:
out (Tensor, optional): the output tensor.
dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.
Default: if ``None``, uses a global default (see :func:`torch.set_default_dtype`).
layout (:class:`torch.layout`, optional): the desired layout of returned Tensor.
Default: ``torch.strided``.
device (:class:`torch.device`, optional): the desired device of returned tensor.
Default: if ``None``, uses the current device for the default tensor type
(see :func:`torch.set_default_device`). :attr:`device` will be the CPU
for CPU tensor types and the current CUDA device for CUDA tensor types.
requires_grad (bool, optional): If autograd should record operations on the
returned tensor. Default: ``False``.
pin_memory (bool, optional): If set, the returned tensor will be allocated in
pinned memory. Works only for CPU tensors. Default: ``False``.
memory_format (:class:`torch.memory_format`, optional): the desired memory format of
returned Tensor. Default: ``torch.contiguous_format``.
Example::
>>> torch.empty((2,3))
tensor([[ 9.4064e+13, 2.8000e+01, 9.3493e+13],
[ 7.5751e+18, 7.1428e+18, 7.5955e+18]])
"""
...
@overload
def empty(size: _size, *, names: Optional[Sequence[Union[str, ellipsis, None]]], memory_format: Optional[memory_format] = None, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor:
r"""
empty(*size, *, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False, pin_memory=False, memory_format=torch.contiguous_format) -> Tensor
Returns a tensor filled with uninitialized data. The shape of the tensor is
defined by the variable argument :attr:`size`.
.. note::
If :func:`torch.use_deterministic_algorithms()` and
:attr:`torch.utils.deterministic.fill_uninitialized_memory` are both set to
``True``, the output tensor is initialized to prevent any possible
nondeterministic behavior from using the data as an input to an operation.
Floating point and complex tensors are filled with NaN, and integer tensors
are filled with the maximum value.
Args:
size (int...): a sequence of integers defining the shape of the output tensor.
Can be a variable number of arguments or a collection like a list or tuple.
Keyword args:
out (Tensor, optional): the output tensor.
dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.
Default: if ``None``, uses a global default (see :func:`torch.set_default_dtype`).
layout (:class:`torch.layout`, optional): the desired layout of returned Tensor.
Default: ``torch.strided``.
device (:class:`torch.device`, optional): the desired device of returned tensor.
Default: if ``None``, uses the current device for the default tensor type
(see :func:`torch.set_default_device`). :attr:`device` will be the CPU
for CPU tensor types and the current CUDA device for CUDA tensor types.
requires_grad (bool, optional): If autograd should record operations on the
returned tensor. Default: ``False``.
pin_memory (bool, optional): If set, the returned tensor will be allocated in
pinned memory. Works only for CPU tensors. Default: ``False``.
memory_format (:class:`torch.memory_format`, optional): the desired memory format of
returned Tensor. Default: ``torch.contiguous_format``.
Example::
>>> torch.empty((2,3))
tensor([[ 9.4064e+13, 2.8000e+01, 9.3493e+13],
[ 7.5751e+18, 7.1428e+18, 7.5955e+18]])
"""
...
@overload
def empty(*size: _int, names: Optional[Sequence[Union[str, ellipsis, None]]], memory_format: Optional[memory_format] = None, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor:
r"""
empty(*size, *, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False, pin_memory=False, memory_format=torch.contiguous_format) -> Tensor
Returns a tensor filled with uninitialized data. The shape of the tensor is
defined by the variable argument :attr:`size`.
.. note::
If :func:`torch.use_deterministic_algorithms()` and
:attr:`torch.utils.deterministic.fill_uninitialized_memory` are both set to
``True``, the output tensor is initialized to prevent any possible
nondeterministic behavior from using the data as an input to an operation.
Floating point and complex tensors are filled with NaN, and integer tensors
are filled with the maximum value.
Args:
size (int...): a sequence of integers defining the shape of the output tensor.
Can be a variable number of arguments or a collection like a list or tuple.
Keyword args:
out (Tensor, optional): the output tensor.
dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.
Default: if ``None``, uses a global default (see :func:`torch.set_default_dtype`).
layout (:class:`torch.layout`, optional): the desired layout of returned Tensor.
Default: ``torch.strided``.
device (:class:`torch.device`, optional): the desired device of returned tensor.
Default: if ``None``, uses the current device for the default tensor type
(see :func:`torch.set_default_device`). :attr:`device` will be the CPU
for CPU tensor types and the current CUDA device for CUDA tensor types.
requires_grad (bool, optional): If autograd should record operations on the
returned tensor. Default: ``False``.
pin_memory (bool, optional): If set, the returned tensor will be allocated in
pinned memory. Works only for CPU tensors. Default: ``False``.
memory_format (:class:`torch.memory_format`, optional): the desired memory format of
returned Tensor. Default: ``torch.contiguous_format``.
Example::
>>> torch.empty((2,3))
tensor([[ 9.4064e+13, 2.8000e+01, 9.3493e+13],
[ 7.5751e+18, 7.1428e+18, 7.5955e+18]])
"""
...
def empty_like(input: Tensor, *, memory_format: Optional[memory_format] = None, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor:
r"""
empty_like(input, *, dtype=None, layout=None, device=None, requires_grad=False, memory_format=torch.preserve_format) -> Tensor
Returns an uninitialized tensor with the same size as :attr:`input`.
``torch.empty_like(input)`` is equivalent to
``torch.empty(input.size(), dtype=input.dtype, layout=input.layout, device=input.device)``.
.. note::
If :func:`torch.use_deterministic_algorithms()` and
:attr:`torch.utils.deterministic.fill_uninitialized_memory` are both set to
``True``, the output tensor is initialized to prevent any possible
nondeterministic behavior from using the data as an input to an operation.
Floating point and complex tensors are filled with NaN, and integer tensors
are filled with the maximum value.
Args:
input (Tensor): the size of :attr:`input` will determine size of the output tensor.
Keyword args:
dtype (:class:`torch.dtype`, optional): the desired data type of returned Tensor.
Default: if ``None``, defaults to the dtype of :attr:`input`.
layout (:class:`torch.layout`, optional): the desired layout of returned tensor.
Default: if ``None``, defaults to the layout of :attr:`input`.
device (:class:`torch.device`, optional): the desired device of returned tensor.
Default: if ``None``, defaults to the device of :attr:`input`.
requires_grad (bool, optional): If autograd should record operations on the
returned tensor. Default: ``False``.
memory_format (:class:`torch.memory_format`, optional): the desired memory format of
returned Tensor. Default: ``torch.preserve_format``.
Example::
>>> a = torch.empty((2,3), dtype=torch.int32, device='cuda')
>>> torch.empty_like(a)
tensor([[0, 0, 0],
[0, 0, 0]], device='cuda:0', dtype=torch.int32)
"""
...
def empty_permuted(size: Sequence[Union[_int, SymInt]], physical_layout: _size, *, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor:
r"""
empty_permuted(size, physical_layout, *, dtype=None, layout=None, device=None, requires_grad=False, pin_memory=False) -> Tensor
Creates an uninitialized, non-overlapping and dense tensor with the
specified :attr:`size`, with :attr:`physical_layout` specifying how the
dimensions are physically laid out in memory (each logical dimension is listed
from outermost to innermost). :attr:`physical_layout` is a generalization
of NCHW/NHWC notation: if each dimension is assigned a number according to
what order they occur in size (N=0, C=1, H=2, W=3), then NCHW is ``(0, 1, 2, 3)``
while NHWC is ``(0, 2, 3, 1)``. Equivalently, the strides of the output
tensor ``t`` are such that ``t.stride(physical_layout[i]) == contiguous_strides[i]``
(notably, this function is *not* equivalent to ``torch.empty(size).permute(physical_layout)``).
Unlike :func:`torch.empty_strided`, this is guaranteed to produce a dense
tensor with no overlaps. If possible, prefer using this function over
:func:`torch.empty_strided` or manual use of :func:`torch.as_strided`.
.. note::
If :func:`torch.use_deterministic_algorithms()` and
:attr:`torch.utils.deterministic.fill_uninitialized_memory` are both set to
``True``, the output tensor is initialized to prevent any possible
nondeterministic behavior from using the data as an input to an operation.
Floating point and complex tensors are filled with NaN, and integer tensors
are filled with the maximum value.
Args:
size (tuple of int): the shape of the output tensor
physical_layout (tuple of int): the ordering of dimensions physically in memory
Keyword args:
dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.
Default: if ``None``, uses a global default (see :func:`torch.set_default_dtype`).
layout (:class:`torch.layout`, optional): the desired layout of returned Tensor.
Default: ``torch.strided``.
device (:class:`torch.device`, optional): the desired device of returned tensor.
Default: if ``None``, uses the current device for the default tensor type
(see :func:`torch.set_default_device`). :attr:`device` will be the CPU
for CPU tensor types and the current CUDA device for CUDA tensor types.
requires_grad (bool, optional): If autograd should record operations on the
returned tensor. Default: ``False``.
pin_memory (bool, optional): If set, the returned tensor will be allocated in
pinned memory. Works only for CPU tensors. Default: ``False``.
Examples::
>>> torch.empty((2, 3, 5, 7)).stride()
(105, 35, 7, 1)
>>> torch.empty_permuted((2, 3, 5, 7), (0, 1, 2, 3)).stride()
(105, 35, 7, 1)
>>> torch.empty((2, 3, 5, 7), memory_format=torch.channels_last).stride()
(105, 1, 21, 3)
>>> torch.empty_permuted((2, 3, 5, 7), (0, 2, 3, 1)).stride()
(105, 1, 21, 3)
>>> torch.empty_permuted((2, 3, 5, 7), (0, 2, 3, 1)).dim_order()
(0, 2, 3, 1)
"""
...
def empty_quantized(size: _size, qtensor: Tensor, *, memory_format: Optional[memory_format] = None, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: ...
def empty_strided(size: Sequence[Union[_int, SymInt]], stride: Sequence[Union[_int, SymInt]], *, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor:
r"""
empty_strided(size, stride, *, dtype=None, layout=None, device=None, requires_grad=False, pin_memory=False) -> Tensor
Creates a tensor with the specified :attr:`size` and :attr:`stride`, filled with undefined data.
.. warning::
If the constructed tensor is "overlapped" (with multiple indices referring to the same element
in memory) its behavior is undefined.
.. note::
If :func:`torch.use_deterministic_algorithms()` and
:attr:`torch.utils.deterministic.fill_uninitialized_memory` are both set to
``True``, the output tensor is initialized to prevent any possible
nondeterministic behavior from using the data as an input to an operation.
Floating point and complex tensors are filled with NaN, and integer tensors
are filled with the maximum value.
Args:
size (tuple of int): the shape of the output tensor
stride (tuple of int): the strides of the output tensor
Keyword args:
dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.
Default: if ``None``, uses a global default (see :func:`torch.set_default_dtype`).
layout (:class:`torch.layout`, optional): the desired layout of returned Tensor.
Default: ``torch.strided``.
device (:class:`torch.device`, optional): the desired device of returned tensor.
Default: if ``None``, uses the current device for the default tensor type
(see :func:`torch.set_default_device`). :attr:`device` will be the CPU
for CPU tensor types and the current CUDA device for CUDA tensor types.
requires_grad (bool, optional): If autograd should record operations on the
returned tensor. Default: ``False``.
pin_memory (bool, optional): If set, the returned tensor will be allocated in
pinned memory. Works only for CPU tensors. Default: ``False``.
Example::
>>> a = torch.empty_strided((2, 3), (1, 2))
>>> a
tensor([[8.9683e-44, 4.4842e-44, 5.1239e+07],
[0.0000e+00, 0.0000e+00, 3.0705e-41]])
>>> a.stride()
(1, 2)
>>> a.size()
torch.Size([2, 3])
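The strides ``(1, 2)`` above describe the transpose of a contiguous layout,
which can be verified directly::
>>> a.t().is_contiguous()
True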
"""
...
@overload
def eq(input: Tensor, other: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
r"""
eq(input, other, *, out=None) -> Tensor
Computes element-wise equality.
The second argument can be a number or a tensor whose shape is
:ref:`broadcastable <broadcasting-semantics>` with the first argument.
Args:
input (Tensor): the tensor to compare
other (Tensor or float): the tensor or value to compare
Keyword args:
out (Tensor, optional): the output tensor.
Returns:
A boolean tensor that is True where :attr:`input` is equal to :attr:`other` and False elsewhere
Example::
>>> torch.eq(torch.tensor([[1, 2], [3, 4]]), torch.tensor([[1, 1], [4, 4]]))
tensor([[ True, False],
[False, True]])
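As noted above, :attr:`other` may also be a number::
>>> torch.eq(torch.tensor([1, 2]), 1)
tensor([ True, False])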
"""
...
@overload
def eq(input: Tensor, other: Union[Number, _complex], *, out: Optional[Tensor] = None) -> Tensor:
r"""
eq(input, other, *, out=None) -> Tensor
Computes element-wise equality.
The second argument can be a number or a tensor whose shape is
:ref:`broadcastable <broadcasting-semantics>` with the first argument.
Args:
input (Tensor): the tensor to compare
other (Tensor or float): the tensor or value to compare
Keyword args:
out (Tensor, optional): the output tensor.
Returns:
A boolean tensor that is True where :attr:`input` is equal to :attr:`other` and False elsewhere
Example::
>>> torch.eq(torch.tensor([[1, 2], [3, 4]]), torch.tensor([[1, 1], [4, 4]]))
tensor([[ True, False],
[False, True]])
"""
...
def equal(input: Tensor, other: Tensor) -> _bool:
r"""
equal(input, other) -> bool
``True`` if two tensors have the same size and elements, ``False`` otherwise.
Example::
>>> torch.equal(torch.tensor([1, 2]), torch.tensor([1, 2]))
True
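Tensors of different sizes compare unequal::
>>> torch.equal(torch.tensor([1, 2]), torch.tensor([1, 2, 3]))
False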
"""
...
def erf(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
r"""
erf(input, *, out=None) -> Tensor
Alias for :func:`torch.special.erf`.
"""
...
def erf_(input: Tensor) -> Tensor: ...
def erfc(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
r"""
erfc(input, *, out=None) -> Tensor
Alias for :func:`torch.special.erfc`.
"""
...
def erfc_(input: Tensor) -> Tensor: ...
def erfinv(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
r"""
erfinv(input, *, out=None) -> Tensor
Alias for :func:`torch.special.erfinv`.
"""
...
def exp(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
r"""
exp(input, *, out=None) -> Tensor
Returns a new tensor with the exponential of the elements
of the input tensor :attr:`input`.
.. math::
y_{i} = e^{x_{i}}
Args:
input (Tensor): the input tensor.
Keyword args:
out (Tensor, optional): the output tensor.
Example::
>>> import math
>>> torch.exp(torch.tensor([0, math.log(2.)]))
tensor([ 1., 2.])
"""
...
def exp2(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
r"""
exp2(input, *, out=None) -> Tensor
Alias for :func:`torch.special.exp2`.
"""
...
def exp2_(input: Tensor) -> Tensor: ...
def exp_(input: Tensor) -> Tensor: ...
def expand_copy(input: Tensor, size: Sequence[Union[_int, SymInt]], *, implicit: _bool = False, out: Optional[Tensor] = None) -> Tensor:
r"""
Performs the same operation as :meth:`torch.Tensor.expand`, but all output tensors
are freshly created instead of aliasing the input.
"""
...
def expm1(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
r"""
expm1(input, *, out=None) -> Tensor
Alias for :func:`torch.special.expm1`.
"""
...
def expm1_(input: Tensor) -> Tensor: ...
@overload
def eye(n: Union[_int, SymInt], *, out: Optional[Tensor] = None, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor:
r"""
eye(n, m=None, *, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False) -> Tensor
Returns a 2-D tensor with ones on the diagonal and zeros elsewhere.
Args:
n (int): the number of rows
m (int, optional): the number of columns with default being :attr:`n`
Keyword arguments:
out (Tensor, optional): the output tensor.
dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.
Default: if ``None``, uses a global default (see :func:`torch.set_default_dtype`).
layout (:class:`torch.layout`, optional): the desired layout of returned Tensor.
Default: ``torch.strided``.
device (:class:`torch.device`, optional): the desired device of returned tensor.
Default: if ``None``, uses the current device for the default tensor type
(see :func:`torch.set_default_device`). :attr:`device` will be the CPU
for CPU tensor types and the current CUDA device for CUDA tensor types.
requires_grad (bool, optional): If autograd should record operations on the
returned tensor. Default: ``False``.
Returns:
Tensor: A 2-D tensor with ones on the diagonal and zeros elsewhere
Example::
>>> torch.eye(3)
tensor([[ 1., 0., 0.],
[ 0., 1., 0.],
[ 0., 0., 1.]])
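A rectangular matrix, using the :attr:`m` argument::
>>> torch.eye(2, 4)
tensor([[1., 0., 0., 0.],
[0., 1., 0., 0.]])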
"""
...
@overload
def eye(n: Union[_int, SymInt], m: Union[_int, SymInt], *, out: Optional[Tensor] = None, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor:
r"""
eye(n, m=None, *, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False) -> Tensor
Returns a 2-D tensor with ones on the diagonal and zeros elsewhere.
Args:
n (int): the number of rows
m (int, optional): the number of columns with default being :attr:`n`
Keyword arguments:
out (Tensor, optional): the output tensor.
dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.
Default: if ``None``, uses a global default (see :func:`torch.set_default_dtype`).
layout (:class:`torch.layout`, optional): the desired layout of returned Tensor.
Default: ``torch.strided``.
device (:class:`torch.device`, optional): the desired device of returned tensor.
Default: if ``None``, uses the current device for the default tensor type
(see :func:`torch.set_default_device`). :attr:`device` will be the CPU
for CPU tensor types and the current CUDA device for CUDA tensor types.
requires_grad (bool, optional): If autograd should record operations on the
returned tensor. Default: ``False``.
Returns:
Tensor: A 2-D tensor with ones on the diagonal and zeros elsewhere
Example::
>>> torch.eye(3)
tensor([[ 1., 0., 0.],
[ 0., 1., 0.],
[ 0., 0., 1.]])
"""
...
def fake_quantize_per_channel_affine(input: Tensor, scale: Tensor, zero_point: Tensor, axis: _int, quant_min: _int, quant_max: _int) -> Tensor:
r"""
fake_quantize_per_channel_affine(input, scale, zero_point, axis, quant_min, quant_max) -> Tensor
Returns a new tensor with the data in :attr:`input` fake quantized per channel using :attr:`scale`,
:attr:`zero_point`, :attr:`quant_min` and :attr:`quant_max`, across the channel specified by :attr:`axis`.
.. math::
\text{output} = (
min(
\text{quant\_max},
max(
\text{quant\_min},
\text{std::nearby\_int}(\text{input} / \text{scale}) + \text{zero\_point}
)
) - \text{zero\_point}
) \times \text{scale}
Args:
input (Tensor): the input value(s), in ``torch.float32``
scale (Tensor): quantization scale, per channel in ``torch.float32``
zero_point (Tensor): quantization zero_point, per channel in ``torch.int32`` or ``torch.half`` or ``torch.float32``
axis (int32): channel axis
quant_min (int64): lower bound of the quantized domain
quant_max (int64): upper bound of the quantized domain
Returns:
Tensor: A newly fake_quantized per channel ``torch.float32`` tensor
Example::
>>> x = torch.randn(2, 2, 2)
>>> x
tensor([[[-0.2525, -0.0466],
[ 0.3491, -0.2168]],
[[-0.5906, 1.6258],
[ 0.6444, -0.0542]]])
>>> scales = (torch.randn(2) + 1) * 0.05
>>> scales
tensor([0.0475, 0.0486])
>>> zero_points = torch.zeros(2).to(torch.int32)
>>> zero_points
tensor([0, 0])
>>> torch.fake_quantize_per_channel_affine(x, scales, zero_points, 1, 0, 255)
tensor([[[0.0000, 0.0000],
[0.3405, 0.0000]],
[[0.0000, 1.6134],
[0.6323, 0.0000]]])
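Tracing one element through the formula above: ``x[0, 1, 0] = 0.3491`` lies in
channel 1 (``axis=1``), so :math:`\text{round}(0.3491 / 0.0486) = 7`, which is
within ``[0, 255]``, and :math:`7 \times 0.0486 \approx 0.3405` (the printed
scales are rounded, so this matches the output only to print precision).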
"""
...
@overload
def fake_quantize_per_tensor_affine(input: Tensor, scale: _float, zero_point: _int, quant_min: _int, quant_max: _int) -> Tensor:
r"""
fake_quantize_per_tensor_affine(input, scale, zero_point, quant_min, quant_max) -> Tensor
Returns a new tensor with the data in :attr:`input` fake quantized using :attr:`scale`,
:attr:`zero_point`, :attr:`quant_min` and :attr:`quant_max`.
.. math::
\text{output} = (
min(
\text{quant\_max},
max(
\text{quant\_min},
\text{std::nearby\_int}(\text{input} / \text{scale}) + \text{zero\_point}
)
) - \text{zero\_point}
) \times \text{scale}
Args:
input (Tensor): the input value(s), ``torch.float32`` tensor
scale (double scalar or ``float32`` Tensor): quantization scale
zero_point (int64 scalar or ``int32`` Tensor): quantization zero_point
quant_min (int64): lower bound of the quantized domain
quant_max (int64): upper bound of the quantized domain
Returns:
Tensor: A newly fake_quantized ``torch.float32`` tensor
Example::
>>> x = torch.randn(4)
>>> x
tensor([ 0.0552, 0.9730, 0.3973, -1.0780])
>>> torch.fake_quantize_per_tensor_affine(x, 0.1, 0, 0, 255)
tensor([0.1000, 1.0000, 0.4000, 0.0000])
>>> torch.fake_quantize_per_tensor_affine(x, torch.tensor(0.1), torch.tensor(0), 0, 255)
tensor([0.1000, 1.0000, 0.4000, 0.0000])
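Tracing the first element through the formula above:
:math:`\text{round}(0.0552 / 0.1) = 1`, which is within ``[0, 255]``, so the
output is :math:`(1 - 0) \times 0.1 = 0.1`.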
"""
...
@overload
def fake_quantize_per_tensor_affine(input: Tensor, scale: Tensor, zero_point: Tensor, quant_min: _int, quant_max: _int) -> Tensor:
r"""
fake_quantize_per_tensor_affine(input, scale, zero_point, quant_min, quant_max) -> Tensor
Returns a new tensor with the data in :attr:`input` fake quantized using :attr:`scale`,
:attr:`zero_point`, :attr:`quant_min` and :attr:`quant_max`.
.. math::
\text{output} = (
min(
\text{quant\_max},
max(
\text{quant\_min},
\text{std::nearby\_int}(\text{input} / \text{scale}) + \text{zero\_point}
)
) - \text{zero\_point}
) \times \text{scale}
Args:
input (Tensor): the input value(s), ``torch.float32`` tensor
scale (double scalar or ``float32`` Tensor): quantization scale
zero_point (int64 scalar or ``int32`` Tensor): quantization zero_point
quant_min (int64): lower bound of the quantized domain
quant_max (int64): upper bound of the quantized domain
Returns:
Tensor: A newly fake_quantized ``torch.float32`` tensor
Example::
>>> x = torch.randn(4)
>>> x
tensor([ 0.0552, 0.9730, 0.3973, -1.0780])
>>> torch.fake_quantize_per_tensor_affine(x, 0.1, 0, 0, 255)
tensor([0.1000, 1.0000, 0.4000, 0.0000])
>>> torch.fake_quantize_per_tensor_affine(x, torch.tensor(0.1), torch.tensor(0), 0, 255)
tensor([0.1000, 1.0000, 0.4000, 0.0000])
"""
...
def fbgemm_linear_fp16_weight(input: Tensor, packed_weight: Tensor, bias: Tensor) -> Tensor: ...
def fbgemm_linear_fp16_weight_fp32_activation(input: Tensor, packed_weight: Tensor, bias: Tensor) -> Tensor: ...
def fbgemm_linear_int8_weight(input: Tensor, weight: Tensor, packed: Tensor, col_offsets: Tensor, weight_scale: Union[Number, _complex], weight_zero_point: Union[Number, _complex], bias: Tensor) -> Tensor: ...
def fbgemm_linear_int8_weight_fp32_activation(input: Tensor, weight: Tensor, packed: Tensor, col_offsets: Tensor, weight_scale: Union[Number, _complex], weight_zero_point: Union[Number, _complex], bias: Tensor) -> Tensor: ...
def fbgemm_linear_quantize_weight(input: Tensor) -> Tuple[Tensor, Tensor, _float, _int]: ...
def fbgemm_pack_gemm_matrix_fp16(input: Tensor) -> Tensor: ...
@overload
def fbgemm_pack_quantized_matrix(input: Tensor) -> Tensor: ...
@overload
def fbgemm_pack_quantized_matrix(input: Tensor, K: _int, N: _int) -> Tensor: ...
def feature_alpha_dropout(input: Tensor, p: _float, train: _bool) -> Tensor: ...
def feature_alpha_dropout_(input: Tensor, p: _float, train: _bool) -> Tensor: ...
def feature_dropout(input: Tensor, p: _float, train: _bool) -> Tensor: ...
def feature_dropout_(input: Tensor, p: _float, train: _bool) -> Tensor: ...
@overload
def fill(input: Tensor, value: Tensor) -> Tensor: ...
@overload
def fill(input: Tensor, value: Union[Number, _complex]) -> Tensor: ...
@overload
def fill_(input: Tensor, value: Tensor) -> Tensor: ...
@overload
def fill_(input: Tensor, value: Union[Number, _complex]) -> Tensor: ...
def fix(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
r"""
fix(input, *, out=None) -> Tensor
Alias for :func:`torch.trunc`
"""
...
def fix_(input: Tensor) -> Tensor: ...
@overload
def flatten(input: Tensor, start_dim: _int = 0, end_dim: _int = -1) -> Tensor:
r"""
flatten(input, start_dim=0, end_dim=-1) -> Tensor
Flattens :attr:`input` by reshaping it into a one-dimensional tensor. If :attr:`start_dim` or :attr:`end_dim`
are passed, only dimensions starting with :attr:`start_dim` and ending with :attr:`end_dim` are flattened.
The order of elements in :attr:`input` is unchanged.
Unlike NumPy's flatten, which always copies input's data, this function may return the original object, a view,
or a copy. If no dimensions are flattened, then the original object :attr:`input` is returned. Otherwise, if input can
be viewed as the flattened shape, then that view is returned. Finally, only if the input cannot be viewed as the
flattened shape is input's data copied. See :meth:`torch.Tensor.view` for details on when a view will be returned.
.. note::
Flattening a zero-dimensional tensor will return a one-dimensional view.
Args:
input (Tensor): the input tensor.
start_dim (int): the first dim to flatten
end_dim (int): the last dim to flatten
Example::
>>> t = torch.tensor([[[1, 2],
... [3, 4]],
... [[5, 6],
... [7, 8]]])
>>> torch.flatten(t)
tensor([1, 2, 3, 4, 5, 6, 7, 8])
>>> torch.flatten(t, start_dim=1)
tensor([[1, 2, 3, 4],
[5, 6, 7, 8]])
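:attr:`end_dim` bounds the flattened range from the right; with ``t`` from above::
>>> torch.flatten(t, start_dim=0, end_dim=1)
tensor([[1, 2],
[3, 4],
[5, 6],
[7, 8]])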
"""
...
@overload
def flatten(input: Tensor, start_dim: _int, end_dim: _int, out_dim: Union[str, ellipsis, None]) -> Tensor:
r"""
flatten(input, start_dim=0, end_dim=-1) -> Tensor
Flattens :attr:`input` by reshaping it into a one-dimensional tensor. If :attr:`start_dim` or :attr:`end_dim`
are passed, only dimensions starting with :attr:`start_dim` and ending with :attr:`end_dim` are flattened.
The order of elements in :attr:`input` is unchanged.
Unlike NumPy's flatten, which always copies input's data, this function may return the original object, a view,
or a copy. If no dimensions are flattened, then the original object :attr:`input` is returned. Otherwise, if input can
be viewed as the flattened shape, then that view is returned. Finally, only if the input cannot be viewed as the
flattened shape is input's data copied. See :meth:`torch.Tensor.view` for details on when a view will be returned.
.. note::
Flattening a zero-dimensional tensor will return a one-dimensional view.
Args:
input (Tensor): the input tensor.
start_dim (int): the first dim to flatten
end_dim (int): the last dim to flatten
Example::
>>> t = torch.tensor([[[1, 2],
... [3, 4]],
... [[5, 6],
... [7, 8]]])
>>> torch.flatten(t)
tensor([1, 2, 3, 4, 5, 6, 7, 8])
>>> torch.flatten(t, start_dim=1)
tensor([[1, 2, 3, 4],
[5, 6, 7, 8]])
"""
...
@overload
def flatten(input: Tensor, start_dim: Union[str, ellipsis, None], end_dim: Union[str, ellipsis, None], out_dim: Union[str, ellipsis, None]) -> Tensor:
r"""
flatten(input, start_dim=0, end_dim=-1) -> Tensor
Flattens :attr:`input` by reshaping it into a one-dimensional tensor. If :attr:`start_dim` or :attr:`end_dim`
are passed, only dimensions starting with :attr:`start_dim` and ending with :attr:`end_dim` are flattened.
The order of elements in :attr:`input` is unchanged.
Unlike NumPy's flatten, which always copies input's data, this function may return the original object, a view,
or a copy. If no dimensions are flattened, then the original object :attr:`input` is returned. Otherwise, if input can
be viewed as the flattened shape, then that view is returned. Finally, only if the input cannot be viewed as the
flattened shape is input's data copied. See :meth:`torch.Tensor.view` for details on when a view will be returned.
.. note::
Flattening a zero-dimensional tensor will return a one-dimensional view.
Args:
input (Tensor): the input tensor.
start_dim (int): the first dim to flatten
end_dim (int): the last dim to flatten
Example::
>>> t = torch.tensor([[[1, 2],
... [3, 4]],
... [[5, 6],
... [7, 8]]])
>>> torch.flatten(t)
tensor([1, 2, 3, 4, 5, 6, 7, 8])
>>> torch.flatten(t, start_dim=1)
tensor([[1, 2, 3, 4],
[5, 6, 7, 8]])
"""
...
@overload
def flatten(input: Tensor, dims: Sequence[Union[str, ellipsis, None]], out_dim: Union[str, ellipsis, None]) -> Tensor:
r"""
flatten(input, start_dim=0, end_dim=-1) -> Tensor
Flattens :attr:`input` by reshaping it into a one-dimensional tensor. If :attr:`start_dim` or :attr:`end_dim`
are passed, only dimensions starting with :attr:`start_dim` and ending with :attr:`end_dim` are flattened.
The order of elements in :attr:`input` is unchanged.
Unlike NumPy's flatten, which always copies input's data, this function may return the original object, a view,
or a copy. If no dimensions are flattened, then the original object :attr:`input` is returned. Otherwise, if input can
be viewed as the flattened shape, then that view is returned. Finally, only if the input cannot be viewed as the
flattened shape is input's data copied. See :meth:`torch.Tensor.view` for details on when a view will be returned.
.. note::
Flattening a zero-dimensional tensor will return a one-dimensional view.
Args:
input (Tensor): the input tensor.
start_dim (int): the first dim to flatten
end_dim (int): the last dim to flatten
Example::
>>> t = torch.tensor([[[1, 2],
... [3, 4]],
... [[5, 6],
... [7, 8]]])
>>> torch.flatten(t)
tensor([1, 2, 3, 4, 5, 6, 7, 8])
>>> torch.flatten(t, start_dim=1)
tensor([[1, 2, 3, 4],
[5, 6, 7, 8]])
"""
...
def flip(input: Tensor, dims: _size) -> Tensor:
r"""
flip(input, dims) -> Tensor
Reverses the order of an n-D tensor along the given axes in :attr:`dims`.
.. note::
`torch.flip` makes a copy of :attr:`input`'s data. This is different from NumPy's `np.flip`,
which returns a view in constant time. Since copying a tensor's data is more work than viewing that data,
`torch.flip` is expected to be slower than `np.flip`.
Args:
input (Tensor): the input tensor.
dims (a list or tuple): axes to flip on
Example::
>>> x = torch.arange(8).view(2, 2, 2)
>>> x
tensor([[[ 0, 1],
[ 2, 3]],
[[ 4, 5],
[ 6, 7]]])
>>> torch.flip(x, [0, 1])
tensor([[[ 6, 7],
[ 4, 5]],
[[ 2, 3],
[ 0, 1]]])
"""
...
def fliplr(input: Tensor) -> Tensor:
r"""
fliplr(input) -> Tensor
Flips the entries in each row in the left/right direction, returning a new
tensor. Columns are preserved, but appear in a different order than before.
Note:
Requires the tensor to be at least 2-D.
.. note::
`torch.fliplr` makes a copy of :attr:`input`'s data. This is different from NumPy's `np.fliplr`,
which returns a view in constant time. Since copying a tensor's data is more work than viewing that data,
`torch.fliplr` is expected to be slower than `np.fliplr`.
Args:
input (Tensor): Must be at least 2-dimensional.
Example::
>>> x = torch.arange(4).view(2, 2)
>>> x
tensor([[0, 1],
[2, 3]])
>>> torch.fliplr(x)
tensor([[1, 0],
[3, 2]])
"""
...
def flipud(input: Tensor) -> Tensor:
r"""
flipud(input) -> Tensor
Flips the entries in each column in the up/down direction, returning a new
tensor. Rows are preserved, but appear in a different order than before.
Note:
Requires the tensor to be at least 1-D.
.. note::
`torch.flipud` makes a copy of :attr:`input`'s data. This is different from NumPy's `np.flipud`,
which returns a view in constant time. Since copying a tensor's data is more work than viewing that data,
`torch.flipud` is expected to be slower than `np.flipud`.
Args:
input (Tensor): Must be at least 1-dimensional.
Example::
>>> x = torch.arange(4).view(2, 2)
>>> x
tensor([[0, 1],
[2, 3]])
>>> torch.flipud(x)
tensor([[2, 3],
[0, 1]])
"""
...
@overload
def float_power(input: Tensor, exponent: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
r"""
float_power(input, exponent, *, out=None) -> Tensor
Raises :attr:`input` to the power of :attr:`exponent`, elementwise, in double precision.
If neither input is complex, this returns a ``torch.float64`` tensor,
and if one or more inputs is complex, it returns a ``torch.complex128`` tensor.
.. note::
This function always computes in double precision, unlike :func:`torch.pow`,
which implements more typical :ref:`type promotion <type-promotion-doc>`.
This is useful when the computation needs to be performed in a wider or more precise dtype,
or the results of the computation may contain fractional values not representable in the input dtypes,
like when an integer base is raised to a negative integer exponent.
Args:
input (Tensor or Number): the base value(s)
exponent (Tensor or Number): the exponent value(s)
Keyword args:
out (Tensor, optional): the output tensor.
Example::
>>> a = torch.randint(10, (4,))
>>> a
tensor([6, 4, 7, 1])
>>> torch.float_power(a, 2)
tensor([36., 16., 49., 1.], dtype=torch.float64)
>>> a = torch.arange(1, 5)
>>> a
tensor([ 1, 2, 3, 4])
>>> exp = torch.tensor([2, -3, 4, -5])
>>> exp
tensor([ 2, -3, 4, -5])
>>> torch.float_power(a, exp)
tensor([1.0000e+00, 1.2500e-01, 8.1000e+01, 9.7656e-04], dtype=torch.float64)
"""
...
@overload
def float_power(self: Union[Number, _complex], exponent: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
r"""
float_power(input, exponent, *, out=None) -> Tensor
Raises :attr:`input` to the power of :attr:`exponent`, elementwise, in double precision.
If neither input is complex, this returns a ``torch.float64`` tensor,
and if one or more inputs is complex, it returns a ``torch.complex128`` tensor.
.. note::
This function always computes in double precision, unlike :func:`torch.pow`,
which implements more typical :ref:`type promotion <type-promotion-doc>`.
This is useful when the computation needs to be performed in a wider or more precise dtype,
or the results of the computation may contain fractional values not representable in the input dtypes,
like when an integer base is raised to a negative integer exponent.
Args:
input (Tensor or Number): the base value(s)
exponent (Tensor or Number): the exponent value(s)
Keyword args:
out (Tensor, optional): the output tensor.
Example::
>>> a = torch.randint(10, (4,))
>>> a
tensor([6, 4, 7, 1])
>>> torch.float_power(a, 2)
tensor([36., 16., 49., 1.], dtype=torch.float64)
>>> a = torch.arange(1, 5)
>>> a
tensor([ 1, 2, 3, 4])
>>> exp = torch.tensor([2, -3, 4, -5])
>>> exp
tensor([ 2, -3, 4, -5])
>>> torch.float_power(a, exp)
tensor([1.0000e+00, 1.2500e-01, 8.1000e+01, 9.7656e-04], dtype=torch.float64)
"""
...
@overload
def float_power(input: Tensor, exponent: Union[Number, _complex], *, out: Optional[Tensor] = None) -> Tensor:
r"""
float_power(input, exponent, *, out=None) -> Tensor
Raises :attr:`input` to the power of :attr:`exponent`, elementwise, in double precision.
If neither input is complex, this returns a ``torch.float64`` tensor,
and if one or more inputs is complex, it returns a ``torch.complex128`` tensor.
.. note::
This function always computes in double precision, unlike :func:`torch.pow`,
which implements more typical :ref:`type promotion <type-promotion-doc>`.
This is useful when the computation needs to be performed in a wider or more precise dtype,
or the results of the computation may contain fractional values not representable in the input dtypes,
like when an integer base is raised to a negative integer exponent.
Args:
input (Tensor or Number): the base value(s)
exponent (Tensor or Number): the exponent value(s)
Keyword args:
out (Tensor, optional): the output tensor.
Example::
>>> a = torch.randint(10, (4,))
>>> a
tensor([6, 4, 7, 1])
>>> torch.float_power(a, 2)
tensor([36., 16., 49., 1.], dtype=torch.float64)
>>> a = torch.arange(1, 5)
>>> a
tensor([ 1, 2, 3, 4])
>>> exp = torch.tensor([2, -3, 4, -5])
>>> exp
tensor([ 2, -3, 4, -5])
>>> torch.float_power(a, exp)
tensor([1.0000e+00, 1.2500e-01, 8.1000e+01, 9.7656e-04], dtype=torch.float64)
"""
...
def floor(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
r"""
floor(input, *, out=None) -> Tensor
Returns a new tensor with the floor of the elements of :attr:`input`,
the largest integer less than or equal to each element.
For integer inputs, follows the Array API convention of returning a
copy of the input tensor.
.. math::
\text{out}_{i} = \left\lfloor \text{input}_{i} \right\rfloor
Args:
input (Tensor): the input tensor.
Keyword args:
out (Tensor, optional): the output tensor.
Example::
>>> a = torch.randn(4)
>>> a
tensor([-0.8166, 1.5308, -0.2530, -0.2091])
>>> torch.floor(a)
tensor([-1., 1., -1., -1.])
"""
...
def floor_(input: Tensor) -> Tensor: ...
def floor_divide(input: Union[Tensor, Number], other: Union[Tensor, Number], *, out: Optional[Tensor] = None) -> Tensor:
r"""
floor_divide(input, other, *, out=None) -> Tensor
.. note::
Before PyTorch 1.13 :func:`torch.floor_divide` incorrectly performed
truncation division. To restore the previous behavior use
:func:`torch.div` with ``rounding_mode='trunc'``.
Computes :attr:`input` divided by :attr:`other`, elementwise, and floors
the result.
.. math::
\text{{out}}_i = \text{floor} \left( \frac{{\text{{input}}_i}}{{\text{{other}}_i}} \right)
Supports broadcasting to a common shape, type promotion, and integer and float inputs.
Args:
input (Tensor or Number): the dividend
other (Tensor or Number): the divisor
Keyword args:
out (Tensor, optional): the output tensor.
Example::
>>> a = torch.tensor([4.0, 3.0])
>>> b = torch.tensor([2.0, 2.0])
>>> torch.floor_divide(a, b)
tensor([2.0, 1.0])
>>> torch.floor_divide(a, 1.4)
tensor([2.0, 2.0])
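For negative quotients, flooring differs from the truncating (pre-1.13)
behavior mentioned in the note above::
>>> torch.floor_divide(torch.tensor([-4.]), torch.tensor([3.]))
tensor([-2.])
>>> torch.div(torch.tensor([-4.]), torch.tensor([3.]), rounding_mode='trunc')
tensor([-1.])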
"""
...
def fmax(input: Tensor, other: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
r"""
fmax(input, other, *, out=None) -> Tensor
Computes the element-wise maximum of :attr:`input` and :attr:`other`.
This is like :func:`torch.maximum` except it handles NaNs differently:
if exactly one of the two elements being compared is a NaN then the non-NaN element is taken as the maximum.
Only if both elements are NaN is NaN propagated.
This function is a wrapper around C++'s ``std::fmax`` and is similar to NumPy's ``fmax`` function.
Supports :ref:`broadcasting to a common shape <broadcasting-semantics>`,
:ref:`type promotion <type-promotion-doc>`, and integer and floating-point inputs.
Args:
input (Tensor): the input tensor.
other (Tensor): the second input tensor
Keyword args:
out (Tensor, optional): the output tensor.
Example::
>>> a = torch.tensor([9.7, float('nan'), 3.1, float('nan')])
>>> b = torch.tensor([-2.2, 0.5, float('nan'), float('nan')])
>>> torch.fmax(a, b)
tensor([9.7000, 0.5000, 3.1000, nan])
"""
...
def fmin(input: Tensor, other: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
r"""
fmin(input, other, *, out=None) -> Tensor
Computes the element-wise minimum of :attr:`input` and :attr:`other`.
This is like :func:`torch.minimum` except it handles NaNs differently:
if exactly one of the two elements being compared is a NaN then the non-NaN element is taken as the minimum.
Only if both elements are NaN is NaN propagated.
This function is a wrapper around C++'s ``std::fmin`` and is similar to NumPy's ``fmin`` function.
Supports :ref:`broadcasting to a common shape <broadcasting-semantics>`,
:ref:`type promotion <type-promotion-doc>`, and integer and floating-point inputs.
Args:
input (Tensor): the input tensor.
other (Tensor): the second input tensor
Keyword args:
out (Tensor, optional): the output tensor.
Example::
>>> a = torch.tensor([2.2, float('nan'), 2.1, float('nan')])
>>> b = torch.tensor([-9.3, 0.1, float('nan'), float('nan')])
>>> torch.fmin(a, b)
tensor([-9.3000, 0.1000, 2.1000, nan])
"""
...
@overload
def fmod(input: Tensor, other: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
r"""
fmod(input, other, *, out=None) -> Tensor
Applies C++'s `std::fmod <https://en.cppreference.com/w/cpp/numeric/math/fmod>`_ entrywise.
The result has the same sign as the dividend :attr:`input` and its absolute value
is less than that of :attr:`other`.
This function may be defined in terms of :func:`torch.div` as
.. code:: python
torch.fmod(a, b) == a - a.div(b, rounding_mode="trunc") * b
Supports :ref:`broadcasting to a common shape <broadcasting-semantics>`,
:ref:`type promotion <type-promotion-doc>`, and integer and float inputs.
.. note::
When the divisor is zero, returns ``NaN`` for floating point dtypes
on both CPU and GPU; raises ``RuntimeError`` for integer division by
zero on CPU; integer division by zero on GPU may return any value.
.. note::
Complex inputs are not supported. In some cases, it is not mathematically
possible to satisfy the definition of a modulo operation with complex numbers.
.. seealso::
:func:`torch.remainder`, which implements Python's modulus operator;
it is defined using division that rounds the result down (floor division).
Args:
input (Tensor): the dividend
other (Tensor or Scalar): the divisor
Keyword args:
out (Tensor, optional): the output tensor.
Example::
>>> torch.fmod(torch.tensor([-3., -2, -1, 1, 2, 3]), 2)
tensor([-1., -0., -1., 1., 0., 1.])
>>> torch.fmod(torch.tensor([1, 2, 3, 4, 5]), -1.5)
tensor([1.0000, 0.5000, 0.0000, 1.0000, 0.5000])
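Contrast with :func:`torch.remainder`, whose result follows the sign of the
divisor::
>>> torch.remainder(torch.tensor([-3., -2, -1, 1, 2, 3]), 2)
tensor([1., 0., 1., 1., 0., 1.])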
"""
...
@overload
def fmod(input: Tensor, other: Union[Number, _complex], *, out: Optional[Tensor] = None) -> Tensor:
r"""
fmod(input, other, *, out=None) -> Tensor
Applies C++'s `std::fmod <https://en.cppreference.com/w/cpp/numeric/math/fmod>`_ entrywise.
The result has the same sign as the dividend :attr:`input` and its absolute value
is less than that of :attr:`other`.
This function may be defined in terms of :func:`torch.div` as
.. code:: python
torch.fmod(a, b) == a - a.div(b, rounding_mode="trunc") * b
Supports :ref:`broadcasting to a common shape <broadcasting-semantics>`,
:ref:`type promotion <type-promotion-doc>`, and integer and float inputs.
.. note::
When the divisor is zero, returns ``NaN`` for floating point dtypes
on both CPU and GPU; raises ``RuntimeError`` for integer division by
zero on CPU; integer division by zero on GPU may return any value.
.. note::
Complex inputs are not supported. In some cases, it is not mathematically
possible to satisfy the definition of a modulo operation with complex numbers.
.. seealso::
:func:`torch.remainder`, which implements Python's modulus operator;
it is defined using division that rounds the result down (floor division).
Args:
input (Tensor): the dividend
other (Tensor or Scalar): the divisor
Keyword args:
out (Tensor, optional): the output tensor.
Example::
>>> torch.fmod(torch.tensor([-3., -2, -1, 1, 2, 3]), 2)
tensor([-1., -0., -1., 1., 0., 1.])
>>> torch.fmod(torch.tensor([1, 2, 3, 4, 5]), -1.5)
tensor([1.0000, 0.5000, 0.0000, 1.0000, 0.5000])
"""
...
def frac(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
r"""
frac(input, *, out=None) -> Tensor
Computes the fractional portion of each element in :attr:`input`.
.. math::
\text{out}_{i} = \text{input}_{i} - \left\lfloor |\text{input}_{i}| \right\rfloor * \operatorname{sgn}(\text{input}_{i})
Example::
>>> torch.frac(torch.tensor([1, 2.5, -3.2]))
tensor([ 0.0000, 0.5000, -0.2000])
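For the last element above, the formula gives
:math:`-3.2 - \lfloor 3.2 \rfloor \times \operatorname{sgn}(-3.2) = -3.2 + 3 = -0.2`.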
"""
...
def frac_(input: Tensor) -> Tensor: ...
def frexp(input: Tensor, *, out: Union[Tensor, Tuple[Tensor, ...], List[Tensor], None] = None) -> torch.return_types.frexp:
r"""
frexp(input, *, out=None) -> (Tensor mantissa, Tensor exponent)
Decomposes :attr:`input` into mantissa and exponent tensors
such that :math:`\text{input} = \text{mantissa} \times 2^{\text{exponent}}`.
The range of mantissa is the open interval (-1, 1).
Supports float inputs.
Args:
input (Tensor): the input tensor
Keyword args:
out (tuple, optional): the output tensors
Example::
>>> x = torch.arange(9.)
>>> mantissa, exponent = torch.frexp(x)
>>> mantissa
tensor([0.0000, 0.5000, 0.5000, 0.7500, 0.5000, 0.6250, 0.7500, 0.8750, 0.5000])
>>> exponent
tensor([0, 1, 2, 2, 3, 3, 3, 3, 4], dtype=torch.int32)
>>> torch.ldexp(mantissa, exponent)
tensor([0., 1., 2., 3., 4., 5., 6., 7., 8.])
"""
...
def frobenius_norm(input: Tensor, dim: Union[_int, _size], keepdim: _bool = False, *, out: Optional[Tensor] = None) -> Tensor: ...
def from_file(filename: str, shared: Optional[_bool] = None, size: Optional[_int] = 0, *, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor:
r"""
from_file(filename, shared=None, size=0, *, dtype=None, layout=None, device=None, pin_memory=False)
Creates a CPU tensor with a storage backed by a memory-mapped file.
If ``shared`` is True, then memory is shared between processes. All changes are written to the file.
If ``shared`` is False, then changes to the tensor do not affect the file.
``size`` is the number of elements in the Tensor. If ``shared`` is ``False``, then the file must contain
at least ``size * sizeof(dtype)`` bytes. If ``shared`` is ``True``, the file will be created if needed.
.. note::
Only CPU tensors can be mapped to files.
.. note::
For now, tensors with storages backed by a memory-mapped file cannot be created in pinned memory.
Args:
filename (str): file name to map
shared (bool): whether to share memory (whether ``MAP_SHARED`` or ``MAP_PRIVATE`` is passed to the
underlying `mmap(2) call <https://man7.org/linux/man-pages/man2/mmap.2.html>`_)
size (int): number of elements in the tensor
Keyword args:
dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.
Default: if ``None``, uses a global default (see :func:`torch.set_default_dtype`).
layout (:class:`torch.layout`, optional): the desired layout of returned Tensor.
Default: ``torch.strided``.
device (:class:`torch.device`, optional): the desired device of returned tensor.
Default: if ``None``, uses the current device for the default tensor type
(see :func:`torch.set_default_device`). :attr:`device` will be the CPU
for CPU tensor types and the current CUDA device for CUDA tensor types.
pin_memory (bool, optional): If set, the returned tensor is allocated in
pinned memory. Works only for CPU tensors. Default: ``False``.
Example::
>>> t = torch.randn(2, 5, dtype=torch.float64)
>>> t.numpy().tofile('storage.pt')
>>> t_mapped = torch.from_file('storage.pt', shared=False, size=10, dtype=torch.float64)
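>>> # Illustrative check (not from the upstream docs): the mapped tensor is
>>> # one-dimensional and holds the same values as the flattened original.
>>> torch.equal(t_mapped, t.flatten())
True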
"""
...
def from_numpy(ndarray) -> Tensor:
r"""
from_numpy(ndarray) -> Tensor
Creates a :class:`Tensor` from a :class:`numpy.ndarray`.
The returned tensor and :attr:`ndarray` share the same memory. Modifications to
the tensor will be reflected in the :attr:`ndarray` and vice versa. The returned
tensor is not resizable.
It currently accepts :attr:`ndarray` with dtypes of ``numpy.float64``,
``numpy.float32``, ``numpy.float16``, ``numpy.complex64``, ``numpy.complex128``,
``numpy.int64``, ``numpy.int32``, ``numpy.int16``, ``numpy.int8``, ``numpy.uint8``,
and ``bool``.
.. warning::
Writing to a tensor created from a read-only NumPy array is not supported and will result in undefined behavior.
Example::
>>> a = numpy.array([1, 2, 3])
>>> t = torch.from_numpy(a)
>>> t
tensor([ 1, 2, 3])
>>> t[0] = -1
>>> a
array([-1, 2, 3])
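>>> # Illustrative sketch (not from the upstream docs, assumes NumPy is
>>> # imported as ``numpy``): the NumPy dtype carries over to the tensor.
>>> b = numpy.ones(3, dtype=numpy.float32)
>>> torch.from_numpy(b).dtype
torch.float32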
"""
...
def frombuffer(buffer: Any, *, dtype: _dtype, count: int = -1, offset: int = 0, requires_grad: _bool = False) -> Tensor:
r"""
frombuffer(buffer, *, dtype, count=-1, offset=0, requires_grad=False) -> Tensor
Creates a 1-dimensional :class:`Tensor` from an object that implements
the Python buffer protocol.
Skips the first :attr:`offset` bytes in the buffer, and interprets the rest of
the raw bytes as a 1-dimensional tensor of type :attr:`dtype` with :attr:`count`
elements.
Note that one of the following must be true:
1. :attr:`count` is positive, and the total number of bytes in the buffer
is at least :attr:`offset` plus :attr:`count` times the size (in bytes)
of :attr:`dtype`.
2. :attr:`count` is negative, and the length (number of bytes) of the buffer
minus :attr:`offset` is a multiple of the size (in bytes) of
:attr:`dtype`.
The returned tensor and buffer share the same memory. Modifications to
the tensor will be reflected in the buffer and vice versa. The returned
tensor is not resizable.
.. note::
This function increments the reference count for the object that
owns the shared memory. Therefore, such memory will not be deallocated
before the returned tensor goes out of scope.
.. warning::
This function's behavior is undefined when passed an object implementing
the buffer protocol whose data is not on the CPU. Doing so is likely to
cause a segmentation fault.
.. warning::
This function does not try to infer the :attr:`dtype` (hence, it is not
optional). Passing a different :attr:`dtype` than its source may result
in unexpected behavior.
Args:
buffer (object): a Python object that exposes the buffer interface.
Keyword args:
dtype (:class:`torch.dtype`): the desired data type of returned tensor.
count (int, optional): the number of desired elements to be read.
If negative, all the elements (until the end of the buffer) will be
read. Default: -1.
offset (int, optional): the number of bytes to skip at the start of
the buffer. Default: 0.
requires_grad (bool, optional): If autograd should record operations on the
returned tensor. Default: ``False``.
Example::
>>> import array
>>> a = array.array('i', [1, 2, 3])
>>> t = torch.frombuffer(a, dtype=torch.int32)
>>> t
tensor([ 1, 2, 3])
>>> t[0] = -1
>>> a
array([-1, 2, 3])
>>> # Interprets the signed char bytes as 32-bit integers:
>>> # every 4 signed char elements are interpreted as
>>> # one signed 32-bit integer.
>>> import array
>>> a = array.array('b', [-1, 0, 0, 0])
>>> torch.frombuffer(a, dtype=torch.int32)
tensor([255], dtype=torch.int32)
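>>> # Sketch (illustrative, not from the upstream docs): offset skips the
>>> # first two bytes (one int16 element) and count stops after two elements.
>>> a = array.array('h', [1, 2, 3, 4])
>>> torch.frombuffer(a, dtype=torch.int16, offset=2, count=2)
tensor([2, 3], dtype=torch.int16)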
"""
...
@overload
def full(size: _size, fill_value: Union[Number, _complex], *, out: Optional[Tensor] = None, layout: _layout = strided, dtype: Optional[_dtype] = None, device: Optional[DeviceLikeType] = None, requires_grad: _bool = False, pin_memory: _bool = False) -> Tensor:
r"""
full(size, fill_value, *, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False) -> Tensor
Creates a tensor of size :attr:`size` filled with :attr:`fill_value`. The
tensor's dtype is inferred from :attr:`fill_value`.
Args:
size (int...): a list, tuple, or :class:`torch.Size` of integers defining the
shape of the output tensor.
fill_value (Scalar): the value to fill the output tensor with.
Keyword args:
out (Tensor, optional): the output tensor.
dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.
Default: if ``None``, uses a global default (see :func:`torch.set_default_dtype`).
layout (:class:`torch.layout`, optional): the desired layout of returned Tensor.
Default: ``torch.strided``.
device (:class:`torch.device`, optional): the desired device of returned tensor.
Default: if ``None``, uses the current device for the default tensor type
(see :func:`torch.set_default_device`). :attr:`device` will be the CPU
for CPU tensor types and the current CUDA device for CUDA tensor types.
requires_grad (bool, optional): If autograd should record operations on the
returned tensor. Default: ``False``.
Example::
>>> torch.full((2, 3), 3.141592)
tensor([[ 3.1416, 3.1416, 3.1416],
[ 3.1416, 3.1416, 3.1416]])
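>>> # Illustrative sketch (not from the upstream docs): the dtype is
>>> # inferred from fill_value, so an integer fill yields an int64 tensor.
>>> torch.full((2,), 7).dtype
torch.int64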
"""
...
@overload
def full(size: _size, fill_value: Union[Number, _complex], *, names: List[Union[str, None]], layout: _layout = strided, dtype: Optional[_dtype] = None, device: Optional[DeviceLikeType] = None, requires_grad: _bool = False, pin_memory: _bool = False) -> Tensor:
r"""
full(size, fill_value, *, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False) -> Tensor
Creates a tensor of size :attr:`size` filled with :attr:`fill_value`. The
tensor's dtype is inferred from :attr:`fill_value`.
Args:
size (int...): a list, tuple, or :class:`torch.Size` of integers defining the
shape of the output tensor.
fill_value (Scalar): the value to fill the output tensor with.
Keyword args:
out (Tensor, optional): the output tensor.
dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.
Default: if ``None``, uses a global default (see :func:`torch.set_default_dtype`).
layout (:class:`torch.layout`, optional): the desired layout of returned Tensor.
Default: ``torch.strided``.
device (:class:`torch.device`, optional): the desired device of returned tensor.
Default: if ``None``, uses the current device for the default tensor type
(see :func:`torch.set_default_device`). :attr:`device` will be the CPU
for CPU tensor types and the current CUDA device for CUDA tensor types.
requires_grad (bool, optional): If autograd should record operations on the
returned tensor. Default: ``False``.
Example::
>>> torch.full((2, 3), 3.141592)
tensor([[ 3.1416, 3.1416, 3.1416],
[ 3.1416, 3.1416, 3.1416]])
"""
...
@overload
def full(size: Sequence[Union[_int, SymInt]], fill_value: Union[Number, _complex], *, out: Optional[Tensor] = None, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor:
r"""
full(size, fill_value, *, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False) -> Tensor
Creates a tensor of size :attr:`size` filled with :attr:`fill_value`. The
tensor's dtype is inferred from :attr:`fill_value`.
Args:
size (int...): a list, tuple, or :class:`torch.Size` of integers defining the
shape of the output tensor.
fill_value (Scalar): the value to fill the output tensor with.
Keyword args:
out (Tensor, optional): the output tensor.
dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.
Default: if ``None``, uses a global default (see :func:`torch.set_default_dtype`).
layout (:class:`torch.layout`, optional): the desired layout of returned Tensor.
Default: ``torch.strided``.
device (:class:`torch.device`, optional): the desired device of returned tensor.
Default: if ``None``, uses the current device for the default tensor type
(see :func:`torch.set_default_device`). :attr:`device` will be the CPU
for CPU tensor types and the current CUDA device for CUDA tensor types.
requires_grad (bool, optional): If autograd should record operations on the
returned tensor. Default: ``False``.
Example::
>>> torch.full((2, 3), 3.141592)
tensor([[ 3.1416, 3.1416, 3.1416],
[ 3.1416, 3.1416, 3.1416]])
"""
...
@overload
def full(size: _size, fill_value: Union[Number, _complex], *, names: Optional[Sequence[Union[str, ellipsis, None]]], dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor:
r"""
full(size, fill_value, *, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False) -> Tensor
Creates a tensor of size :attr:`size` filled with :attr:`fill_value`. The
tensor's dtype is inferred from :attr:`fill_value`.
Args:
size (int...): a list, tuple, or :class:`torch.Size` of integers defining the
shape of the output tensor.
fill_value (Scalar): the value to fill the output tensor with.
Keyword args:
out (Tensor, optional): the output tensor.
dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.
Default: if ``None``, uses a global default (see :func:`torch.set_default_dtype`).
layout (:class:`torch.layout`, optional): the desired layout of returned Tensor.
Default: ``torch.strided``.
device (:class:`torch.device`, optional): the desired device of returned tensor.
Default: if ``None``, uses the current device for the default tensor type
(see :func:`torch.set_default_device`). :attr:`device` will be the CPU
for CPU tensor types and the current CUDA device for CUDA tensor types.
requires_grad (bool, optional): If autograd should record operations on the
returned tensor. Default: ``False``.
Example::
>>> torch.full((2, 3), 3.141592)
tensor([[ 3.1416, 3.1416, 3.1416],
[ 3.1416, 3.1416, 3.1416]])
"""
...
def full_like(input: Tensor, fill_value: Union[Number, _complex], *, memory_format: Optional[memory_format] = None, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor:
r"""
full_like(input, fill_value, \*, dtype=None, layout=torch.strided, device=None, requires_grad=False, memory_format=torch.preserve_format) -> Tensor
Returns a tensor with the same size as :attr:`input` filled with :attr:`fill_value`.
``torch.full_like(input, fill_value)`` is equivalent to
``torch.full(input.size(), fill_value, dtype=input.dtype, layout=input.layout, device=input.device)``.
Args:
input (Tensor): the size of :attr:`input` will determine size of the output tensor.
fill_value: the number to fill the output tensor with.
Keyword args:
dtype (:class:`torch.dtype`, optional): the desired data type of returned Tensor.
Default: if ``None``, defaults to the dtype of :attr:`input`.
layout (:class:`torch.layout`, optional): the desired layout of returned tensor.
Default: if ``None``, defaults to the layout of :attr:`input`.
device (:class:`torch.device`, optional): the desired device of returned tensor.
Default: if ``None``, defaults to the device of :attr:`input`.
requires_grad (bool, optional): If autograd should record operations on the
returned tensor. Default: ``False``.
memory_format (:class:`torch.memory_format`, optional): the desired memory format of
returned Tensor. Default: ``torch.preserve_format``.
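Example (an illustrative sketch, not part of the upstream docs)::
>>> x = torch.arange(6, dtype=torch.float64).reshape(2, 3)
>>> torch.full_like(x, 7)
tensor([[7., 7., 7.],
[7., 7., 7.]], dtype=torch.float64)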
"""
...
def fused_moving_avg_obs_fake_quant(input: Tensor, observer_on: Tensor, fake_quant_on: Tensor, running_min: Tensor, running_max: Tensor, scale: Tensor, zero_point: Tensor, averaging_const: _float, quant_min: _int, quant_max: _int, ch_axis: _int, per_row_fake_quant: _bool = False, symmetric_quant: _bool = False) -> Tensor: ...
@overload
def gather(input: Tensor, dim: _int, index: Tensor, *, sparse_grad: _bool = False, out: Optional[Tensor] = None) -> Tensor:
r"""
gather(input, dim, index, *, sparse_grad=False, out=None) -> Tensor
Gathers values along an axis specified by `dim`.
For a 3-D tensor the output is specified by::
out[i][j][k] = input[index[i][j][k]][j][k] # if dim == 0
out[i][j][k] = input[i][index[i][j][k]][k] # if dim == 1
out[i][j][k] = input[i][j][index[i][j][k]] # if dim == 2
:attr:`input` and :attr:`index` must have the same number of dimensions.
It is also required that ``index.size(d) <= input.size(d)`` for all
dimensions ``d != dim``. :attr:`out` will have the same shape as :attr:`index`.
Note that ``input`` and ``index`` do not broadcast against each other.
Args:
input (Tensor): the source tensor
dim (int): the axis along which to index
index (LongTensor): the indices of elements to gather
Keyword arguments:
sparse_grad (bool, optional): If ``True``, gradient w.r.t. :attr:`input` will be a sparse tensor.
out (Tensor, optional): the destination tensor
Example::
>>> t = torch.tensor([[1, 2], [3, 4]])
>>> torch.gather(t, 1, torch.tensor([[0, 0], [1, 0]]))
tensor([[ 1, 1],
[ 4, 3]])
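>>> # Sketch (illustrative, not from the upstream docs): the same gather
>>> # along dim 0, i.e. out[i][j] = t[index[i][j]][j].
>>> torch.gather(t, 0, torch.tensor([[0, 1], [1, 0]]))
tensor([[1, 4],
[3, 2]])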
"""
...
@overload
def gather(input: Tensor, dim: Union[str, ellipsis, None], index: Tensor, *, sparse_grad: _bool = False, out: Optional[Tensor] = None) -> Tensor:
r"""
gather(input, dim, index, *, sparse_grad=False, out=None) -> Tensor
Gathers values along an axis specified by `dim`.
For a 3-D tensor the output is specified by::
out[i][j][k] = input[index[i][j][k]][j][k] # if dim == 0
out[i][j][k] = input[i][index[i][j][k]][k] # if dim == 1
out[i][j][k] = input[i][j][index[i][j][k]] # if dim == 2
:attr:`input` and :attr:`index` must have the same number of dimensions.
It is also required that ``index.size(d) <= input.size(d)`` for all
dimensions ``d != dim``. :attr:`out` will have the same shape as :attr:`index`.
Note that ``input`` and ``index`` do not broadcast against each other.
Args:
input (Tensor): the source tensor
dim (int): the axis along which to index
index (LongTensor): the indices of elements to gather
Keyword arguments:
sparse_grad (bool, optional): If ``True``, gradient w.r.t. :attr:`input` will be a sparse tensor.
out (Tensor, optional): the destination tensor
Example::
>>> t = torch.tensor([[1, 2], [3, 4]])
>>> torch.gather(t, 1, torch.tensor([[0, 0], [1, 0]]))
tensor([[ 1, 1],
[ 4, 3]])
"""
...
def gcd(input: Tensor, other: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
r"""
gcd(input, other, *, out=None) -> Tensor
Computes the element-wise greatest common divisor (GCD) of :attr:`input` and :attr:`other`.
Both :attr:`input` and :attr:`other` must have integer types.
.. note::
This defines :math:`gcd(0, 0) = 0`.
Args:
input (Tensor): the input tensor.
other (Tensor): the second input tensor
Keyword arguments:
out (Tensor, optional): the output tensor.
Example::
>>> a = torch.tensor([5, 10, 15])
>>> b = torch.tensor([3, 4, 5])
>>> torch.gcd(a, b)
tensor([1, 2, 5])
>>> c = torch.tensor([3])
>>> torch.gcd(a, c)
tensor([1, 1, 3])
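>>> # Illustrative identity check (not from the upstream docs):
>>> # gcd(a, b) * lcm(a, b) equals |a * b| element-wise.
>>> torch.equal(torch.gcd(a, b) * torch.lcm(a, b), (a * b).abs())
True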
"""
...
def gcd_(input: Tensor, other: Tensor) -> Tensor: ...
@overload
def ge(input: Tensor, other: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
r"""
ge(input, other, *, out=None) -> Tensor
Computes :math:`\text{input} \geq \text{other}` element-wise.
The second argument can be a number or a tensor whose shape is
:ref:`broadcastable <broadcasting-semantics>` with the first argument.
Args:
input (Tensor): the tensor to compare
other (Tensor or float): the tensor or value to compare
Keyword args:
out (Tensor, optional): the output tensor.
Returns:
A boolean tensor that is True where :attr:`input` is greater than or equal to :attr:`other` and False elsewhere
Example::
>>> torch.ge(torch.tensor([[1, 2], [3, 4]]), torch.tensor([[1, 1], [4, 4]]))
tensor([[True, True], [False, True]])
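>>> # Sketch (illustrative, not from the upstream docs): the second argument
>>> # may also be a scalar, broadcast against the input.
>>> torch.ge(torch.tensor([1, 2, 3]), 2)
tensor([False,  True,  True])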
"""
...
@overload
def ge(input: Tensor, other: Union[Number, _complex], *, out: Optional[Tensor] = None) -> Tensor:
r"""
ge(input, other, *, out=None) -> Tensor
Computes :math:`\text{input} \geq \text{other}` element-wise.
The second argument can be a number or a tensor whose shape is
:ref:`broadcastable <broadcasting-semantics>` with the first argument.
Args:
input (Tensor): the tensor to compare
other (Tensor or float): the tensor or value to compare
Keyword args:
out (Tensor, optional): the output tensor.
Returns:
A boolean tensor that is True where :attr:`input` is greater than or equal to :attr:`other` and False elsewhere
Example::
>>> torch.ge(torch.tensor([[1, 2], [3, 4]]), torch.tensor([[1, 1], [4, 4]]))
tensor([[True, True], [False, True]])
"""
...
def geqrf(input: Tensor, *, out: Union[Tensor, Tuple[Tensor, ...], List[Tensor], None] = None) -> torch.return_types.geqrf:
r"""
geqrf(input, *, out=None) -> (Tensor, Tensor)
This is a low-level function for calling LAPACK's geqrf directly. It
returns a namedtuple (a, tau) as defined in `LAPACK documentation for geqrf`_.
Computes a QR decomposition of :attr:`input`.
Both `Q` and `R` matrices are stored in the same output tensor `a`.
The elements of `R` are stored on and above the diagonal.
Elementary reflectors (or Householder vectors) implicitly defining matrix `Q`
are stored below the diagonal.
The results of this function can be used together with :func:`torch.linalg.householder_product`
to obtain the `Q` matrix or
with :func:`torch.ormqr`, which uses an implicit representation of the `Q` matrix,
for an efficient matrix-matrix multiplication.
See `LAPACK documentation for geqrf`_ for further details.
.. note::
See also :func:`torch.linalg.qr`, which computes Q and R matrices, and :func:`torch.linalg.lstsq`
with the ``driver="gels"`` option for a function that can solve matrix equations using a QR decomposition.
Args:
input (Tensor): the input matrix
Keyword args:
out (tuple, optional): the output tuple of (Tensor, Tensor). Ignored if `None`. Default: `None`.
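Example (an illustrative sketch, not part of the upstream docs; it assumes a
square input and reconstructs `Q` and `R` from the packed result)::
>>> A = torch.randn(3, 3, dtype=torch.float64)
>>> a, tau = torch.geqrf(A)
>>> Q = torch.linalg.householder_product(a, tau)
>>> R = a.triu()
>>> torch.allclose(Q @ R, A)
True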
.. _LAPACK documentation for geqrf:
http://www.netlib.org/lapack/explore-html/df/dc5/group__variants_g_ecomputational_ga3766ea903391b5cf9008132f7440ec7b.html
"""
...
def ger(input: Tensor, vec2: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
r"""
ger(input, vec2, *, out=None) -> Tensor
Alias of :func:`torch.outer`.
.. warning::
This function is deprecated and will be removed in a future PyTorch release.
Use :func:`torch.outer` instead.
"""
...
def get_default_dtype() -> _dtype:
r"""
get_default_dtype() -> torch.dtype
Get the current default floating point :class:`torch.dtype`.
Example::
>>> torch.get_default_dtype() # initial default for floating point is torch.float32
torch.float32
>>> torch.set_default_dtype(torch.float64)
>>> torch.get_default_dtype() # default is now changed to torch.float64
torch.float64
"""
...
def get_num_interop_threads() -> _int:
r"""
get_num_interop_threads() -> int
Returns the number of threads used for inter-op parallelism on CPU
(e.g. in the JIT interpreter).
"""
...
def get_num_threads() -> _int:
r"""
get_num_threads() -> int
Returns the number of threads used for parallelizing CPU operations
"""
...
@overload
def gradient(input: Tensor, *, spacing: Optional[Union[Number, _complex]] = None, dim: Optional[_int] = None, edge_order: _int = 1) -> Tuple[Tensor, ...]:
r"""
gradient(input, *, spacing=1, dim=None, edge_order=1) -> List of Tensors
Estimates the gradient of a function :math:`g : \mathbb{R}^n \rightarrow \mathbb{R}` in
one or more dimensions using the `second-order accurate central differences method
<https://www.ams.org/journals/mcom/1988-51-184/S0025-5718-1988-0935077-0/S0025-5718-1988-0935077-0.pdf>`_ and
either first or second order estimates at the boundaries.
The gradient of :math:`g` is estimated using samples. By default, when :attr:`spacing` is not
specified, the samples are entirely described by :attr:`input`, and the mapping of input coordinates
to an output is the same as the tensor's mapping of indices to values. For example, for a three-dimensional
:attr:`input` the function described is :math:`g : \mathbb{R}^3 \rightarrow \mathbb{R}`, and
:math:`g(1, 2, 3)\ == input[1, 2, 3]`.
When :attr:`spacing` is specified, it modifies the relationship between :attr:`input` and input coordinates.
This is detailed in the "Keyword Arguments" section below.
The gradient is estimated by estimating each partial derivative of :math:`g` independently. This estimation is
accurate if :math:`g` is in :math:`C^3` (it has at least 3 continuous derivatives), and the estimation can be
improved by providing closer samples. Mathematically, the value at each interior point of a partial derivative
is estimated using `Taylor's theorem with remainder <https://en.wikipedia.org/wiki/Taylor%27s_theorem>`_.
Letting :math:`x` be an interior point with :math:`x-h_l` and :math:`x+h_r` be points neighboring
it to the left and right respectively, :math:`f(x+h_r)` and :math:`f(x-h_l)` can be estimated using:
.. math::
\begin{aligned}
f(x+h_r) = f(x) + h_r f'(x) + {h_r}^2 \frac{f''(x)}{2} + {h_r}^3 \frac{f'''(\xi_1)}{6}, \xi_1 \in (x, x+h_r) \\
f(x-h_l) = f(x) - h_l f'(x) + {h_l}^2 \frac{f''(x)}{2} - {h_l}^3 \frac{f'''(\xi_2)}{6}, \xi_2 \in (x, x-h_l) \\
\end{aligned}
Using the fact that :math:`f \in C^3` and solving the linear system, we derive:
.. math::
f'(x) \approx \frac{ {h_l}^2 f(x+h_r) - {h_r}^2 f(x-h_l)
+ ({h_r}^2-{h_l}^2 ) f(x) }{ {h_r} {h_l}^2 + {h_r}^2 {h_l} }
.. note::
We estimate the gradient of functions in the complex domain
:math:`g : \mathbb{C}^n \rightarrow \mathbb{C}` in the same way.
The value of each partial derivative at the boundary points is computed differently. See edge_order below.
Args:
input (``Tensor``): the tensor that represents the values of the function
Keyword args:
spacing (``scalar``, ``list of scalar``, ``list of Tensor``, optional): :attr:`spacing` can be used to modify
how the :attr:`input` tensor's indices relate to sample coordinates. If :attr:`spacing` is a scalar then
the indices are multiplied by the scalar to produce the coordinates. For example, if :attr:`spacing=2` the
indices (1, 2, 3) become coordinates (2, 4, 6). If :attr:`spacing` is a list of scalars then the corresponding
indices are multiplied. For example, if :attr:`spacing=(2, -1, 3)` the indices (1, 2, 3) become coordinates (2, -2, 9).
Finally, if :attr:`spacing` is a list of one-dimensional tensors then each tensor specifies the coordinates for
the corresponding dimension. For example, if the indices are (1, 2, 3) and the tensors are (t0, t1, t2), then
the coordinates are (t0[1], t1[2], t2[3])
dim (``int``, ``list of int``, optional): the dimension or dimensions to approximate the gradient over. By default
the partial gradient in every dimension is computed. Note that when :attr:`dim` is specified the elements of
the :attr:`spacing` argument must correspond with the specified dims.
edge_order (``int``, optional): 1 or 2, for `first-order
<https://www.ams.org/journals/mcom/1988-51-184/S0025-5718-1988-0935077-0/S0025-5718-1988-0935077-0.pdf>`_ or
`second-order <https://www.ams.org/journals/mcom/1988-51-184/S0025-5718-1988-0935077-0/S0025-5718-1988-0935077-0.pdf>`_
estimation of the boundary ("edge") values, respectively.
Examples::
>>> # Estimates the gradient of f(x)=x^2 at points [-2, -1, 1, 4]
>>> coordinates = (torch.tensor([-2., -1., 1., 4.]),)
>>> values = torch.tensor([4., 1., 1., 16.])
>>> torch.gradient(values, spacing = coordinates)
(tensor([-3., -2., 2., 5.]),)
>>> # Estimates the gradient of the R^2 -> R function whose samples are
>>> # described by the tensor t. Implicit coordinates are [0, 1] for the outermost
>>> # dimension and [0, 1, 2, 3] for the innermost dimension, and the function
>>> # estimates the partial derivative for both dimensions.
>>> t = torch.tensor([[1, 2, 4, 8], [10, 20, 40, 80]])
>>> torch.gradient(t)
(tensor([[ 9., 18., 36., 72.],
[ 9., 18., 36., 72.]]),
tensor([[ 1.0000, 1.5000, 3.0000, 4.0000],
[10.0000, 15.0000, 30.0000, 40.0000]]))
>>> # A scalar value for spacing modifies the relationship between tensor indices
>>> # and input coordinates by multiplying the indices to find the
>>> # coordinates. For example, below the indices of the innermost
>>> # 0, 1, 2, 3 translate to coordinates of [0, 2, 4, 6], and the indices of
>>> # the outermost dimension 0, 1 translate to coordinates of [0, 2].
>>> torch.gradient(t, spacing = 2.0) # dim = None (implicitly [0, 1])
(tensor([[ 4.5000, 9.0000, 18.0000, 36.0000],
[ 4.5000, 9.0000, 18.0000, 36.0000]]),
tensor([[ 0.5000, 0.7500, 1.5000, 2.0000],
[ 5.0000, 7.5000, 15.0000, 20.0000]]))
>>> # doubling the spacing between samples halves the estimated partial gradients.
>>>
>>> # Estimates only the partial derivative for dimension 1
>>> torch.gradient(t, dim = 1) # spacing = None (implicitly 1.)
(tensor([[ 1.0000, 1.5000, 3.0000, 4.0000],
[10.0000, 15.0000, 30.0000, 40.0000]]),)
>>> # When spacing is a list of scalars, the relationship between the tensor
>>> # indices and input coordinates changes based on dimension.
>>> # For example, below, the indices of the innermost dimension 0, 1, 2, 3 translate
>>> # to coordinates of [0, 3, 6, 9], and the indices of the outermost dimension
>>> # 0, 1 translate to coordinates of [0, 2].
>>> torch.gradient(t, spacing = [3., 2.])
(tensor([[ 4.5000, 9.0000, 18.0000, 36.0000],
[ 4.5000, 9.0000, 18.0000, 36.0000]]),
tensor([[ 0.3333, 0.5000, 1.0000, 1.3333],
[ 3.3333, 5.0000, 10.0000, 13.3333]]))
>>> # The following example is a replication of the previous one with explicit
>>> # coordinates.
>>> coords = (torch.tensor([0, 2]), torch.tensor([0, 3, 6, 9]))
>>> torch.gradient(t, spacing = coords)
(tensor([[ 4.5000, 9.0000, 18.0000, 36.0000],
[ 4.5000, 9.0000, 18.0000, 36.0000]]),
tensor([[ 0.3333, 0.5000, 1.0000, 1.3333],
[ 3.3333, 5.0000, 10.0000, 13.3333]]))
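>>> # Sketch (illustrative, not from the upstream docs): with edge_order=2
>>> # the boundary estimates are also second-order, so for this quadratic the
>>> # result should match the analytic derivative 2*x at every sample point.
>>> (g,) = torch.gradient(values, spacing = coordinates, edge_order=2)
>>> torch.allclose(g, 2 * coordinates[0])
True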
"""
...
@overload
def gradient(input: Tensor, *, spacing: Sequence[Union[Number, _complex]], dim: Optional[_int] = None, edge_order: _int = 1) -> Tuple[Tensor, ...]:
r"""
gradient(input, *, spacing=1, dim=None, edge_order=1) -> List of Tensors
Estimates the gradient of a function :math:`g : \mathbb{R}^n \rightarrow \mathbb{R}` in
one or more dimensions using the `second-order accurate central differences method
<https://www.ams.org/journals/mcom/1988-51-184/S0025-5718-1988-0935077-0/S0025-5718-1988-0935077-0.pdf>`_ and
either first or second order estimates at the boundaries.
The gradient of :math:`g` is estimated using samples. By default, when :attr:`spacing` is not
specified, the samples are entirely described by :attr:`input`, and the mapping of input coordinates
to an output is the same as the tensor's mapping of indices to values. For example, for a three-dimensional
:attr:`input` the function described is :math:`g : \mathbb{R}^3 \rightarrow \mathbb{R}`, and
:math:`g(1, 2, 3)\ == input[1, 2, 3]`.
When :attr:`spacing` is specified, it modifies the relationship between :attr:`input` and input coordinates.
This is detailed in the "Keyword Arguments" section below.
The gradient is estimated by estimating each partial derivative of :math:`g` independently. This estimation is
accurate if :math:`g` is in :math:`C^3` (it has at least 3 continuous derivatives), and the estimation can be
improved by providing closer samples. Mathematically, the value at each interior point of a partial derivative
is estimated using `Taylor's theorem with remainder <https://en.wikipedia.org/wiki/Taylor%27s_theorem>`_.
Letting :math:`x` be an interior point with :math:`x-h_l` and :math:`x+h_r` be points neighboring
it to the left and right respectively, :math:`f(x+h_r)` and :math:`f(x-h_l)` can be estimated using:
.. math::
\begin{aligned}
f(x+h_r) = f(x) + h_r f'(x) + {h_r}^2 \frac{f''(x)}{2} + {h_r}^3 \frac{f'''(\xi_1)}{6}, \xi_1 \in (x, x+h_r) \\
f(x-h_l) = f(x) - h_l f'(x) + {h_l}^2 \frac{f''(x)}{2} - {h_l}^3 \frac{f'''(\xi_2)}{6}, \xi_2 \in (x, x-h_l) \\
\end{aligned}
Using the fact that :math:`f \in C^3` and solving the linear system, we derive:
.. math::
f'(x) \approx \frac{ {h_l}^2 f(x+h_r) - {h_r}^2 f(x-h_l)
+ ({h_r}^2-{h_l}^2 ) f(x) }{ {h_r} {h_l}^2 + {h_r}^2 {h_l} }
.. note::
We estimate the gradient of functions in the complex domain
:math:`g : \mathbb{C}^n \rightarrow \mathbb{C}` in the same way.
The value of each partial derivative at the boundary points is computed differently. See edge_order below.
Args:
input (``Tensor``): the tensor that represents the values of the function
Keyword args:
spacing (``scalar``, ``list of scalar``, ``list of Tensor``, optional): :attr:`spacing` can be used to modify
how the :attr:`input` tensor's indices relate to sample coordinates. If :attr:`spacing` is a scalar then
the indices are multiplied by the scalar to produce the coordinates. For example, if :attr:`spacing=2` the
indices (1, 2, 3) become coordinates (2, 4, 6). If :attr:`spacing` is a list of scalars then the corresponding
indices are multiplied. For example, if :attr:`spacing=(2, -1, 3)` the indices (1, 2, 3) become coordinates (2, -2, 9).
Finally, if :attr:`spacing` is a list of one-dimensional tensors then each tensor specifies the coordinates for
the corresponding dimension. For example, if the indices are (1, 2, 3) and the tensors are (t0, t1, t2), then
the coordinates are (t0[1], t1[2], t2[3])
dim (``int``, ``list of int``, optional): the dimension or dimensions to approximate the gradient over. By default
the partial gradient in every dimension is computed. Note that when :attr:`dim` is specified the elements of
the :attr:`spacing` argument must correspond with the specified dims.
edge_order (``int``, optional): 1 or 2, for `first-order
<https://www.ams.org/journals/mcom/1988-51-184/S0025-5718-1988-0935077-0/S0025-5718-1988-0935077-0.pdf>`_ or
`second-order <https://www.ams.org/journals/mcom/1988-51-184/S0025-5718-1988-0935077-0/S0025-5718-1988-0935077-0.pdf>`_
estimation of the boundary ("edge") values, respectively.
Examples::
>>> # Estimates the gradient of f(x)=x^2 at points [-2, -1, 1, 4]
>>> coordinates = (torch.tensor([-2., -1., 1., 4.]),)
>>> values = torch.tensor([4., 1., 1., 16.])
>>> torch.gradient(values, spacing = coordinates)
(tensor([-3., -2., 2., 5.]),)
>>> # Estimates the gradient of the R^2 -> R function whose samples are
>>> # described by the tensor t. Implicit coordinates are [0, 1] for the outermost
>>> # dimension and [0, 1, 2, 3] for the innermost dimension, and the function
>>> # estimates the partial derivative for both dimensions.
>>> t = torch.tensor([[1, 2, 4, 8], [10, 20, 40, 80]])
>>> torch.gradient(t)
(tensor([[ 9., 18., 36., 72.],
[ 9., 18., 36., 72.]]),
tensor([[ 1.0000, 1.5000, 3.0000, 4.0000],
[10.0000, 15.0000, 30.0000, 40.0000]]))
>>> # A scalar value for spacing modifies the relationship between tensor indices
>>> # and input coordinates by multiplying the indices to find the
>>> # coordinates. For example, below the indices of the innermost
>>> # 0, 1, 2, 3 translate to coordinates of [0, 2, 4, 6], and the indices of
>>> # the outermost dimension 0, 1 translate to coordinates of [0, 2].
>>> torch.gradient(t, spacing = 2.0) # dim = None (implicitly [0, 1])
(tensor([[ 4.5000, 9.0000, 18.0000, 36.0000],
[ 4.5000, 9.0000, 18.0000, 36.0000]]),
tensor([[ 0.5000, 0.7500, 1.5000, 2.0000],
[ 5.0000, 7.5000, 15.0000, 20.0000]]))
>>> # doubling the spacing between samples halves the estimated partial gradients.
>>>
>>> # Estimates only the partial derivative for dimension 1
>>> torch.gradient(t, dim = 1) # spacing = None (implicitly 1.)
(tensor([[ 1.0000, 1.5000, 3.0000, 4.0000],
[10.0000, 15.0000, 30.0000, 40.0000]]),)
>>> # When spacing is a list of scalars, the relationship between the tensor
>>> # indices and input coordinates changes based on dimension.
>>> # For example, below, the indices of the innermost dimension 0, 1, 2, 3 translate
>>> # to coordinates of [0, 3, 6, 9], and the indices of the outermost dimension
>>> # 0, 1 translate to coordinates of [0, 2].
>>> torch.gradient(t, spacing = [3., 2.])
(tensor([[ 4.5000, 9.0000, 18.0000, 36.0000],
[ 4.5000, 9.0000, 18.0000, 36.0000]]),
tensor([[ 0.3333, 0.5000, 1.0000, 1.3333],
[ 3.3333, 5.0000, 10.0000, 13.3333]]))
>>> # The following example is a replication of the previous one with explicit
>>> # coordinates.
>>> coords = (torch.tensor([0, 2]), torch.tensor([0, 3, 6, 9]))
>>> torch.gradient(t, spacing = coords)
(tensor([[ 4.5000, 9.0000, 18.0000, 36.0000],
[ 4.5000, 9.0000, 18.0000, 36.0000]]),
tensor([[ 0.3333, 0.5000, 1.0000, 1.3333],
[ 3.3333, 5.0000, 10.0000, 13.3333]]))
"""
...
@overload
def gradient(input: Tensor, *, spacing: Sequence[Union[Number, _complex]], dim: _size, edge_order: _int = 1) -> Tuple[Tensor, ...]:
r"""
gradient(input, *, spacing=1, dim=None, edge_order=1) -> List of Tensors
Estimates the gradient of a function :math:`g : \mathbb{R}^n \rightarrow \mathbb{R}` in
one or more dimensions using the `second-order accurate central differences method
<https://www.ams.org/journals/mcom/1988-51-184/S0025-5718-1988-0935077-0/S0025-5718-1988-0935077-0.pdf>`_ and
either first or second order estimates at the boundaries.
The gradient of :math:`g` is estimated using samples. By default, when :attr:`spacing` is not
specified, the samples are entirely described by :attr:`input`, and the mapping of input coordinates
to an output is the same as the tensor's mapping of indices to values. For example, for a three-dimensional
:attr:`input` the function described is :math:`g : \mathbb{R}^3 \rightarrow \mathbb{R}`, and
:math:`g(1, 2, 3)\ == input[1, 2, 3]`.
When :attr:`spacing` is specified, it modifies the relationship between :attr:`input` and input coordinates.
This is detailed in the "Keyword Arguments" section below.
The gradient is estimated by estimating each partial derivative of :math:`g` independently. This estimation is
accurate if :math:`g` is in :math:`C^3` (it has at least 3 continuous derivatives), and the estimation can be
improved by providing closer samples. Mathematically, the value at each interior point of a partial derivative
is estimated using `Taylor's theorem with remainder <https://en.wikipedia.org/wiki/Taylor%27s_theorem>`_.
Letting :math:`x` be an interior point with :math:`x-h_l` and :math:`x+h_r` be points neighboring
it to the left and right respectively, :math:`f(x+h_r)` and :math:`f(x-h_l)` can be estimated using:
.. math::
\begin{aligned}
f(x+h_r) = f(x) + h_r f'(x) + {h_r}^2 \frac{f''(x)}{2} + {h_r}^3 \frac{f'''(\xi_1)}{6}, \xi_1 \in (x, x+h_r) \\
f(x-h_l) = f(x) - h_l f'(x) + {h_l}^2 \frac{f''(x)}{2} - {h_l}^3 \frac{f'''(\xi_2)}{6}, \xi_2 \in (x, x-h_l) \\
\end{aligned}
Using the fact that :math:`f \in C^3` and solving the linear system, we derive:
.. math::
f'(x) \approx \frac{ {h_l}^2 f(x+h_r) - {h_r}^2 f(x-h_l)
+ ({h_r}^2-{h_l}^2 ) f(x) }{ {h_r} {h_l}^2 + {h_r}^2 {h_l} }
.. note::
We estimate the gradient of functions in the complex domain
:math:`g : \mathbb{C}^n \rightarrow \mathbb{C}` in the same way.
The value of each partial derivative at the boundary points is computed differently. See edge_order below.
Args:
input (``Tensor``): the tensor that represents the values of the function
Keyword args:
spacing (``scalar``, ``list of scalar``, ``list of Tensor``, optional): :attr:`spacing` can be used to modify
how the :attr:`input` tensor's indices relate to sample coordinates. If :attr:`spacing` is a scalar then
the indices are multiplied by the scalar to produce the coordinates. For example, if :attr:`spacing=2` the
indices (1, 2, 3) become coordinates (2, 4, 6). If :attr:`spacing` is a list of scalars then the corresponding
indices are multiplied. For example, if :attr:`spacing=(2, -1, 3)` the indices (1, 2, 3) become coordinates (2, -2, 9).
Finally, if :attr:`spacing` is a list of one-dimensional tensors then each tensor specifies the coordinates for
the corresponding dimension. For example, if the indices are (1, 2, 3) and the tensors are (t0, t1, t2), then
the coordinates are (t0[1], t1[2], t2[3])
dim (``int``, ``list of int``, optional): the dimension or dimensions to approximate the gradient over. By default
the partial gradient in every dimension is computed. Note that when :attr:`dim` is specified the elements of
the :attr:`spacing` argument must correspond with the specified dims.
edge_order (``int``, optional): 1 or 2, for `first-order
<https://www.ams.org/journals/mcom/1988-51-184/S0025-5718-1988-0935077-0/S0025-5718-1988-0935077-0.pdf>`_ or
`second-order <https://www.ams.org/journals/mcom/1988-51-184/S0025-5718-1988-0935077-0/S0025-5718-1988-0935077-0.pdf>`_
estimation of the boundary ("edge") values, respectively.
Examples::
>>> # Estimates the gradient of f(x)=x^2 at points [-2, -1, 1, 4]
>>> coordinates = (torch.tensor([-2., -1., 1., 4.]),)
>>> values = torch.tensor([4., 1., 1., 16.])
>>> torch.gradient(values, spacing = coordinates)
(tensor([-3., -2., 2., 5.]),)
>>> # Estimates the gradient of the R^2 -> R function whose samples are
>>> # described by the tensor t. Implicit coordinates are [0, 1] for the outermost
>>> # dimension and [0, 1, 2, 3] for the innermost dimension, and the function
>>> # estimates the partial derivative for both dimensions.
>>> t = torch.tensor([[1, 2, 4, 8], [10, 20, 40, 80]])
>>> torch.gradient(t)
(tensor([[ 9., 18., 36., 72.],
[ 9., 18., 36., 72.]]),
tensor([[ 1.0000, 1.5000, 3.0000, 4.0000],
[10.0000, 15.0000, 30.0000, 40.0000]]))
>>> # A scalar value for spacing modifies the relationship between tensor indices
>>> # and input coordinates by multiplying the indices to find the
>>> # coordinates. For example, below the indices of the innermost
>>> # 0, 1, 2, 3 translate to coordinates of [0, 2, 4, 6], and the indices of
>>> # the outermost dimension 0, 1 translate to coordinates of [0, 2].
>>> torch.gradient(t, spacing = 2.0) # dim = None (implicitly [0, 1])
(tensor([[ 4.5000, 9.0000, 18.0000, 36.0000],
[ 4.5000, 9.0000, 18.0000, 36.0000]]),
tensor([[ 0.5000, 0.7500, 1.5000, 2.0000],
[ 5.0000, 7.5000, 15.0000, 20.0000]]))
>>> # doubling the spacing between samples halves the estimated partial gradients.
>>>
>>> # Estimates only the partial derivative for dimension 1
>>> torch.gradient(t, dim = 1) # spacing = None (implicitly 1.)
(tensor([[ 1.0000, 1.5000, 3.0000, 4.0000],
[10.0000, 15.0000, 30.0000, 40.0000]]),)
>>> # When spacing is a list of scalars, the relationship between the tensor
>>> # indices and input coordinates changes based on dimension.
>>> # For example, below, the indices of the innermost dimension 0, 1, 2, 3 translate
>>> # to coordinates of [0, 3, 6, 9], and the indices of the outermost dimension
>>> # 0, 1 translate to coordinates of [0, 2].
>>> torch.gradient(t, spacing = [3., 2.])
(tensor([[ 4.5000, 9.0000, 18.0000, 36.0000],
[ 4.5000, 9.0000, 18.0000, 36.0000]]),
tensor([[ 0.3333, 0.5000, 1.0000, 1.3333],
[ 3.3333, 5.0000, 10.0000, 13.3333]]))
>>> # The following example is a replication of the previous one with explicit
>>> # coordinates.
>>> coords = (torch.tensor([0, 2]), torch.tensor([0, 3, 6, 9]))
>>> torch.gradient(t, spacing = coords)
(tensor([[ 4.5000, 9.0000, 18.0000, 36.0000],
[ 4.5000, 9.0000, 18.0000, 36.0000]]),
tensor([[ 0.3333, 0.5000, 1.0000, 1.3333],
[ 3.3333, 5.0000, 10.0000, 13.3333]]))
"""
...
@overload
def gradient(input: Tensor, *, spacing: Union[Tuple[Tensor, ...], List[Tensor]], dim: Optional[_int] = None, edge_order: _int = 1) -> Tuple[Tensor, ...]:
r"""
gradient(input, *, spacing=1, dim=None, edge_order=1) -> List of Tensors
Estimates the gradient of a function :math:`g : \mathbb{R}^n \rightarrow \mathbb{R}` in
one or more dimensions using the `second-order accurate central differences method
<https://www.ams.org/journals/mcom/1988-51-184/S0025-5718-1988-0935077-0/S0025-5718-1988-0935077-0.pdf>`_ and
either first or second order estimates at the boundaries.
The gradient of :math:`g` is estimated using samples. By default, when :attr:`spacing` is not
specified, the samples are entirely described by :attr:`input`, and the mapping of input coordinates
to an output is the same as the tensor's mapping of indices to values. For example, for a three-dimensional
:attr:`input` the function described is :math:`g : \mathbb{R}^3 \rightarrow \mathbb{R}`, and
:math:`g(1, 2, 3)\ == input[1, 2, 3]`.
When :attr:`spacing` is specified, it modifies the relationship between :attr:`input` and input coordinates.
This is detailed in the "Keyword Arguments" section below.
The gradient is estimated by estimating each partial derivative of :math:`g` independently. This estimation is
accurate if :math:`g` is in :math:`C^3` (it has at least 3 continuous derivatives), and the estimation can be
improved by providing closer samples. Mathematically, the value at each interior point of a partial derivative
is estimated using `Taylor's theorem with remainder <https://en.wikipedia.org/wiki/Taylor%27s_theorem>`_.
Letting :math:`x` be an interior point with :math:`x-h_l` and :math:`x+h_r` be points neighboring
it to the left and right respectively, :math:`f(x+h_r)` and :math:`f(x-h_l)` can be estimated using:
.. math::
\begin{aligned}
f(x+h_r) = f(x) + h_r f'(x) + {h_r}^2 \frac{f''(x)}{2} + {h_r}^3 \frac{f'''(\xi_1)}{6}, \xi_1 \in (x, x+h_r) \\
f(x-h_l) = f(x) - h_l f'(x) + {h_l}^2 \frac{f''(x)}{2} - {h_l}^3 \frac{f'''(\xi_2)}{6}, \xi_2 \in (x, x-h_l) \\
\end{aligned}
Using the fact that :math:`f \in C^3` and solving the linear system, we derive:
.. math::
f'(x) \approx \frac{ {h_l}^2 f(x+h_r) - {h_r}^2 f(x-h_l)
+ ({h_r}^2-{h_l}^2 ) f(x) }{ {h_r} {h_l}^2 + {h_r}^2 {h_l} }
.. note::
We estimate the gradient of functions in the complex domain
:math:`g : \mathbb{C}^n \rightarrow \mathbb{C}` in the same way.
The value of each partial derivative at the boundary points is computed differently. See edge_order below.
Args:
input (``Tensor``): the tensor that represents the values of the function
Keyword args:
spacing (``scalar``, ``list of scalar``, ``list of Tensor``, optional): :attr:`spacing` can be used to modify
how the :attr:`input` tensor's indices relate to sample coordinates. If :attr:`spacing` is a scalar then
the indices are multiplied by the scalar to produce the coordinates. For example, if :attr:`spacing=2` the
indices (1, 2, 3) become coordinates (2, 4, 6). If :attr:`spacing` is a list of scalars then the corresponding
indices are multiplied. For example, if :attr:`spacing=(2, -1, 3)` the indices (1, 2, 3) become coordinates (2, -2, 9).
Finally, if :attr:`spacing` is a list of one-dimensional tensors then each tensor specifies the coordinates for
the corresponding dimension. For example, if the indices are (1, 2, 3) and the tensors are (t0, t1, t2), then
the coordinates are (t0[1], t1[2], t2[3])
dim (``int``, ``list of int``, optional): the dimension or dimensions to approximate the gradient over. By default
the partial gradient in every dimension is computed. Note that when :attr:`dim` is specified the elements of
the :attr:`spacing` argument must correspond with the specified dims.
edge_order (``int``, optional): 1 or 2, for `first-order
<https://www.ams.org/journals/mcom/1988-51-184/S0025-5718-1988-0935077-0/S0025-5718-1988-0935077-0.pdf>`_ or
`second-order <https://www.ams.org/journals/mcom/1988-51-184/S0025-5718-1988-0935077-0/S0025-5718-1988-0935077-0.pdf>`_
estimation of the boundary ("edge") values, respectively.
Examples::
>>> # Estimates the gradient of f(x)=x^2 at points [-2, -1, 1, 4]
>>> coordinates = (torch.tensor([-2., -1., 1., 4.]),)
>>> values = torch.tensor([4., 1., 1., 16.])
>>> torch.gradient(values, spacing = coordinates)
(tensor([-3., -2., 2., 5.]),)
>>> # Estimates the gradient of the R^2 -> R function whose samples are
>>> # described by the tensor t. Implicit coordinates are [0, 1] for the outermost
>>> # dimension and [0, 1, 2, 3] for the innermost dimension, and the function
>>> # estimates the partial derivative for both dimensions.
>>> t = torch.tensor([[1, 2, 4, 8], [10, 20, 40, 80]])
>>> torch.gradient(t)
(tensor([[ 9., 18., 36., 72.],
[ 9., 18., 36., 72.]]),
tensor([[ 1.0000, 1.5000, 3.0000, 4.0000],
[10.0000, 15.0000, 30.0000, 40.0000]]))
>>> # A scalar value for spacing modifies the relationship between tensor indices
>>> # and input coordinates by multiplying the indices to find the
>>> # coordinates. For example, below the indices of the innermost
>>> # 0, 1, 2, 3 translate to coordinates of [0, 2, 4, 6], and the indices of
>>> # the outermost dimension 0, 1 translate to coordinates of [0, 2].
>>> torch.gradient(t, spacing = 2.0) # dim = None (implicitly [0, 1])
(tensor([[ 4.5000, 9.0000, 18.0000, 36.0000],
[ 4.5000, 9.0000, 18.0000, 36.0000]]),
tensor([[ 0.5000, 0.7500, 1.5000, 2.0000],
[ 5.0000, 7.5000, 15.0000, 20.0000]]))
>>> # doubling the spacing between samples halves the estimated partial gradients.
>>>
>>> # Estimates only the partial derivative for dimension 1
>>> torch.gradient(t, dim = 1) # spacing = None (implicitly 1.)
(tensor([[ 1.0000, 1.5000, 3.0000, 4.0000],
[10.0000, 15.0000, 30.0000, 40.0000]]),)
>>> # When spacing is a list of scalars, the relationship between the tensor
>>> # indices and input coordinates changes based on dimension.
>>> # For example, below, the indices of the innermost dimension 0, 1, 2, 3 translate
>>> # to coordinates of [0, 3, 6, 9], and the indices of the outermost dimension
>>> # 0, 1 translate to coordinates of [0, 2].
>>> torch.gradient(t, spacing = [3., 2.])
(tensor([[ 4.5000, 9.0000, 18.0000, 36.0000],
[ 4.5000, 9.0000, 18.0000, 36.0000]]),
tensor([[ 0.3333, 0.5000, 1.0000, 1.3333],
[ 3.3333, 5.0000, 10.0000, 13.3333]]))
>>> # The following example is a replication of the previous one with explicit
>>> # coordinates.
>>> coords = (torch.tensor([0, 2]), torch.tensor([0, 3, 6, 9]))
>>> torch.gradient(t, spacing = coords)
(tensor([[ 4.5000, 9.0000, 18.0000, 36.0000],
[ 4.5000, 9.0000, 18.0000, 36.0000]]),
tensor([[ 0.3333, 0.5000, 1.0000, 1.3333],
[ 3.3333, 5.0000, 10.0000, 13.3333]]))
"""
...
@overload
def gradient(input: Tensor, *, spacing: Union[Number, _complex], dim: _size, edge_order: _int = 1) -> Tuple[Tensor, ...]:
r"""
gradient(input, *, spacing=1, dim=None, edge_order=1) -> List of Tensors
Estimates the gradient of a function :math:`g : \mathbb{R}^n \rightarrow \mathbb{R}` in
one or more dimensions using the `second-order accurate central differences method
<https://www.ams.org/journals/mcom/1988-51-184/S0025-5718-1988-0935077-0/S0025-5718-1988-0935077-0.pdf>`_ and
either first or second order estimates at the boundaries.
The gradient of :math:`g` is estimated using samples. By default, when :attr:`spacing` is not
specified, the samples are entirely described by :attr:`input`, and the mapping of input coordinates
to an output is the same as the tensor's mapping of indices to values. For example, for a three-dimensional
:attr:`input` the function described is :math:`g : \mathbb{R}^3 \rightarrow \mathbb{R}`, and
:math:`g(1, 2, 3)\ == input[1, 2, 3]`.
When :attr:`spacing` is specified, it modifies the relationship between :attr:`input` and input coordinates.
This is detailed in the "Keyword Arguments" section below.
The gradient is estimated by estimating each partial derivative of :math:`g` independently. This estimation is
accurate if :math:`g` is in :math:`C^3` (it has at least 3 continuous derivatives), and the estimation can be
improved by providing closer samples. Mathematically, the value at each interior point of a partial derivative
is estimated using `Taylor's theorem with remainder <https://en.wikipedia.org/wiki/Taylor%27s_theorem>`_.
Letting :math:`x` be an interior point with :math:`x-h_l` and :math:`x+h_r` be points neighboring
it to the left and right respectively, :math:`f(x+h_r)` and :math:`f(x-h_l)` can be estimated using:
.. math::
\begin{aligned}
f(x+h_r) = f(x) + h_r f'(x) + {h_r}^2 \frac{f''(x)}{2} + {h_r}^3 \frac{f'''(\xi_1)}{6}, \xi_1 \in (x, x+h_r) \\
f(x-h_l) = f(x) - h_l f'(x) + {h_l}^2 \frac{f''(x)}{2} - {h_l}^3 \frac{f'''(\xi_2)}{6}, \xi_2 \in (x, x-h_l) \\
\end{aligned}
Using the fact that :math:`f \in C^3` and solving the linear system, we derive:
.. math::
f'(x) \approx \frac{ {h_l}^2 f(x+h_r) - {h_r}^2 f(x-h_l)
+ ({h_r}^2-{h_l}^2 ) f(x) }{ {h_r} {h_l}^2 + {h_r}^2 {h_l} }
.. note::
We estimate the gradient of functions in the complex domain
:math:`g : \mathbb{C}^n \rightarrow \mathbb{C}` in the same way.
The value of each partial derivative at the boundary points is computed differently. See edge_order below.
Args:
input (``Tensor``): the tensor that represents the values of the function
Keyword args:
spacing (``scalar``, ``list of scalar``, ``list of Tensor``, optional): :attr:`spacing` can be used to modify
how the :attr:`input` tensor's indices relate to sample coordinates. If :attr:`spacing` is a scalar then
the indices are multiplied by the scalar to produce the coordinates. For example, if :attr:`spacing=2` the
indices (1, 2, 3) become coordinates (2, 4, 6). If :attr:`spacing` is a list of scalars then the corresponding
indices are multiplied. For example, if :attr:`spacing=(2, -1, 3)` the indices (1, 2, 3) become coordinates (2, -2, 9).
Finally, if :attr:`spacing` is a list of one-dimensional tensors then each tensor specifies the coordinates for
the corresponding dimension. For example, if the indices are (1, 2, 3) and the tensors are (t0, t1, t2), then
the coordinates are (t0[1], t1[2], t2[3])
dim (``int``, ``list of int``, optional): the dimension or dimensions to approximate the gradient over. By default
the partial gradient in every dimension is computed. Note that when :attr:`dim` is specified the elements of
the :attr:`spacing` argument must correspond with the specified dims.
edge_order (``int``, optional): 1 or 2, for `first-order
<https://www.ams.org/journals/mcom/1988-51-184/S0025-5718-1988-0935077-0/S0025-5718-1988-0935077-0.pdf>`_ or
`second-order <https://www.ams.org/journals/mcom/1988-51-184/S0025-5718-1988-0935077-0/S0025-5718-1988-0935077-0.pdf>`_
estimation of the boundary ("edge") values, respectively.
Examples::
>>> # Estimates the gradient of f(x)=x^2 at points [-2, -1, 1, 4]
>>> coordinates = (torch.tensor([-2., -1., 1., 4.]),)
>>> values = torch.tensor([4., 1., 1., 16.])
>>> torch.gradient(values, spacing = coordinates)
(tensor([-3., -2., 2., 5.]),)
>>> # Estimates the gradient of the R^2 -> R function whose samples are
>>> # described by the tensor t. Implicit coordinates are [0, 1] for the outermost
>>> # dimension and [0, 1, 2, 3] for the innermost dimension, and the function
>>> # estimates the partial derivative for both dimensions.
>>> t = torch.tensor([[1, 2, 4, 8], [10, 20, 40, 80]])
>>> torch.gradient(t)
(tensor([[ 9., 18., 36., 72.],
[ 9., 18., 36., 72.]]),
tensor([[ 1.0000, 1.5000, 3.0000, 4.0000],
[10.0000, 15.0000, 30.0000, 40.0000]]))
>>> # A scalar value for spacing modifies the relationship between tensor indices
>>> # and input coordinates by multiplying the indices to find the
>>> # coordinates. For example, below the indices of the innermost
>>> # 0, 1, 2, 3 translate to coordinates of [0, 2, 4, 6], and the indices of
>>> # the outermost dimension 0, 1 translate to coordinates of [0, 2].
>>> torch.gradient(t, spacing = 2.0) # dim = None (implicitly [0, 1])
(tensor([[ 4.5000, 9.0000, 18.0000, 36.0000],
[ 4.5000, 9.0000, 18.0000, 36.0000]]),
tensor([[ 0.5000, 0.7500, 1.5000, 2.0000],
[ 5.0000, 7.5000, 15.0000, 20.0000]]))
>>> # doubling the spacing between samples halves the estimated partial gradients.
>>>
>>> # Estimates only the partial derivative for dimension 1
>>> torch.gradient(t, dim = 1) # spacing = None (implicitly 1.)
(tensor([[ 1.0000, 1.5000, 3.0000, 4.0000],
[10.0000, 15.0000, 30.0000, 40.0000]]),)
>>> # When spacing is a list of scalars, the relationship between the tensor
>>> # indices and input coordinates changes based on dimension.
>>> # For example, below, the indices of the innermost dimension 0, 1, 2, 3 translate
>>> # to coordinates of [0, 3, 6, 9], and the indices of the outermost dimension
>>> # 0, 1 translate to coordinates of [0, 2].
>>> torch.gradient(t, spacing = [3., 2.])
(tensor([[ 4.5000, 9.0000, 18.0000, 36.0000],
[ 4.5000, 9.0000, 18.0000, 36.0000]]),
tensor([[ 0.3333, 0.5000, 1.0000, 1.3333],
[ 3.3333, 5.0000, 10.0000, 13.3333]]))
>>> # The following example is a replication of the previous one with explicit
>>> # coordinates.
>>> coords = (torch.tensor([0, 2]), torch.tensor([0, 3, 6, 9]))
>>> torch.gradient(t, spacing = coords)
(tensor([[ 4.5000, 9.0000, 18.0000, 36.0000],
[ 4.5000, 9.0000, 18.0000, 36.0000]]),
tensor([[ 0.3333, 0.5000, 1.0000, 1.3333],
[ 3.3333, 5.0000, 10.0000, 13.3333]]))
"""
...
@overload
def gradient(input: Tensor, *, spacing: Union[Tuple[Tensor, ...], List[Tensor]], dim: _size, edge_order: _int = 1) -> Tuple[Tensor, ...]:
r"""
gradient(input, *, spacing=1, dim=None, edge_order=1) -> List of Tensors
Estimates the gradient of a function :math:`g : \mathbb{R}^n \rightarrow \mathbb{R}` in
one or more dimensions using the `second-order accurate central differences method
<https://www.ams.org/journals/mcom/1988-51-184/S0025-5718-1988-0935077-0/S0025-5718-1988-0935077-0.pdf>`_ and
either first or second order estimates at the boundaries.
The gradient of :math:`g` is estimated using samples. By default, when :attr:`spacing` is not
specified, the samples are entirely described by :attr:`input`, and the mapping of input coordinates
to an output is the same as the tensor's mapping of indices to values. For example, for a three-dimensional
:attr:`input` the function described is :math:`g : \mathbb{R}^3 \rightarrow \mathbb{R}`, and
:math:`g(1, 2, 3)\ == input[1, 2, 3]`.
When :attr:`spacing` is specified, it modifies the relationship between :attr:`input` and input coordinates.
This is detailed in the "Keyword Arguments" section below.
The gradient is estimated by estimating each partial derivative of :math:`g` independently. This estimation is
accurate if :math:`g` is in :math:`C^3` (it has at least 3 continuous derivatives), and the estimation can be
improved by providing closer samples. Mathematically, the value at each interior point of a partial derivative
is estimated using `Taylor's theorem with remainder <https://en.wikipedia.org/wiki/Taylor%27s_theorem>`_.
Letting :math:`x` be an interior point with :math:`x-h_l` and :math:`x+h_r` be points neighboring
it to the left and right respectively, :math:`f(x+h_r)` and :math:`f(x-h_l)` can be estimated using:
.. math::
\begin{aligned}
f(x+h_r) = f(x) + h_r f'(x) + {h_r}^2 \frac{f''(x)}{2} + {h_r}^3 \frac{f'''(\xi_1)}{6}, \xi_1 \in (x, x+h_r) \\
f(x-h_l) = f(x) - h_l f'(x) + {h_l}^2 \frac{f''(x)}{2} - {h_l}^3 \frac{f'''(\xi_2)}{6}, \xi_2 \in (x, x-h_l) \\
\end{aligned}
Using the fact that :math:`f \in C^3` and solving the linear system, we derive:
.. math::
f'(x) \approx \frac{ {h_l}^2 f(x+h_r) - {h_r}^2 f(x-h_l)
+ ({h_r}^2-{h_l}^2 ) f(x) }{ {h_r} {h_l}^2 + {h_r}^2 {h_l} }
.. note::
We estimate the gradient of functions in the complex domain
:math:`g : \mathbb{C}^n \rightarrow \mathbb{C}` in the same way.
The value of each partial derivative at the boundary points is computed differently. See edge_order below.
Args:
input (``Tensor``): the tensor that represents the values of the function
Keyword args:
spacing (``scalar``, ``list of scalar``, ``list of Tensor``, optional): :attr:`spacing` can be used to modify
how the :attr:`input` tensor's indices relate to sample coordinates. If :attr:`spacing` is a scalar then
the indices are multiplied by the scalar to produce the coordinates. For example, if :attr:`spacing=2` the
indices (1, 2, 3) become coordinates (2, 4, 6). If :attr:`spacing` is a list of scalars then the corresponding
indices are multiplied. For example, if :attr:`spacing=(2, -1, 3)` the indices (1, 2, 3) become coordinates (2, -2, 9).
Finally, if :attr:`spacing` is a list of one-dimensional tensors then each tensor specifies the coordinates for
the corresponding dimension. For example, if the indices are (1, 2, 3) and the tensors are (t0, t1, t2), then
the coordinates are (t0[1], t1[2], t2[3])
dim (``int``, ``list of int``, optional): the dimension or dimensions to approximate the gradient over. By default
the partial gradient in every dimension is computed. Note that when :attr:`dim` is specified the elements of
the :attr:`spacing` argument must correspond with the specified dims.
edge_order (``int``, optional): 1 or 2, for `first-order
<https://www.ams.org/journals/mcom/1988-51-184/S0025-5718-1988-0935077-0/S0025-5718-1988-0935077-0.pdf>`_ or
`second-order <https://www.ams.org/journals/mcom/1988-51-184/S0025-5718-1988-0935077-0/S0025-5718-1988-0935077-0.pdf>`_
estimation of the boundary ("edge") values, respectively.
Examples::
>>> # Estimates the gradient of f(x)=x^2 at points [-2, -1, 1, 4]
>>> coordinates = (torch.tensor([-2., -1., 1., 4.]),)
>>> values = torch.tensor([4., 1., 1., 16.])
>>> torch.gradient(values, spacing = coordinates)
(tensor([-3., -2., 2., 5.]),)
>>> # Estimates the gradient of the R^2 -> R function whose samples are
>>> # described by the tensor t. Implicit coordinates are [0, 1] for the outermost
>>> # dimension and [0, 1, 2, 3] for the innermost dimension, and function estimates
>>> # partial derivative for both dimensions.
>>> t = torch.tensor([[1, 2, 4, 8], [10, 20, 40, 80]])
>>> torch.gradient(t)
(tensor([[ 9., 18., 36., 72.],
[ 9., 18., 36., 72.]]),
tensor([[ 1.0000, 1.5000, 3.0000, 4.0000],
[10.0000, 15.0000, 30.0000, 40.0000]]))
>>> # A scalar value for spacing modifies the relationship between tensor indices
>>> # and input coordinates by multiplying the indices to find the
>>> # coordinates. For example, below the indices of the innermost dimension
>>> # 0, 1, 2, 3 translate to coordinates of [0, 2, 4, 6], and the indices of
>>> # the outermost dimension 0, 1 translate to coordinates of [0, 2].
>>> torch.gradient(t, spacing = 2.0) # dim = None (implicitly [0, 1])
(tensor([[ 4.5000, 9.0000, 18.0000, 36.0000],
[ 4.5000, 9.0000, 18.0000, 36.0000]]),
tensor([[ 0.5000, 0.7500, 1.5000, 2.0000],
[ 5.0000, 7.5000, 15.0000, 20.0000]]))
>>> # doubling the spacing between samples halves the estimated partial gradients.
>>>
>>> # Estimates only the partial derivative for dimension 1
>>> torch.gradient(t, dim = 1) # spacing = None (implicitly 1.)
(tensor([[ 1.0000, 1.5000, 3.0000, 4.0000],
[10.0000, 15.0000, 30.0000, 40.0000]]),)
>>> # When spacing is a list of scalars, the relationship between the tensor
>>> # indices and input coordinates changes based on dimension.
>>> # For example, below, the indices of the innermost dimension 0, 1, 2, 3 translate
>>> # to coordinates of [0, 3, 6, 9], and the indices of the outermost dimension
>>> # 0, 1 translate to coordinates of [0, 2].
>>> torch.gradient(t, spacing = [3., 2.])
(tensor([[ 4.5000, 9.0000, 18.0000, 36.0000],
[ 4.5000, 9.0000, 18.0000, 36.0000]]),
tensor([[ 0.3333, 0.5000, 1.0000, 1.3333],
[ 3.3333, 5.0000, 10.0000, 13.3333]]))
>>> # The following example is a replication of the previous one with explicit
>>> # coordinates.
>>> coords = (torch.tensor([0, 2]), torch.tensor([0, 3, 6, 9]))
>>> torch.gradient(t, spacing = coords)
(tensor([[ 4.5000, 9.0000, 18.0000, 36.0000],
[ 4.5000, 9.0000, 18.0000, 36.0000]]),
tensor([[ 0.3333, 0.5000, 1.0000, 1.3333],
[ 3.3333, 5.0000, 10.0000, 13.3333]]))
"""
...
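# A short sketch of the dim/spacing correspondence noted in the docstring
# above: when `dim` is restricted, `spacing` supplies one entry per listed
# dim only (expected values follow from the spacing=[3., 2.] example).
# >>> t = torch.tensor([[1, 2, 4, 8], [10, 20, 40, 80]])
# >>> torch.gradient(t, spacing=[3.], dim=[1])
# (tensor([[ 0.3333,  0.5000,  1.0000,  1.3333],
#          [ 3.3333,  5.0000, 10.0000, 13.3333]]),)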
@overload
def gradient(input: Tensor, *, dim: _size, edge_order: _int = 1) -> Tuple[Tensor, ...]:
r"""
gradient(input, *, spacing=1, dim=None, edge_order=1) -> List of Tensors
Estimates the gradient of a function :math:`g : \mathbb{R}^n \rightarrow \mathbb{R}` in
one or more dimensions using the `second-order accurate central differences method
<https://www.ams.org/journals/mcom/1988-51-184/S0025-5718-1988-0935077-0/S0025-5718-1988-0935077-0.pdf>`_ and
either first or second order estimates at the boundaries.
The gradient of :math:`g` is estimated using samples. By default, when :attr:`spacing` is not
specified, the samples are entirely described by :attr:`input`, and the mapping of input coordinates
to an output is the same as the tensor's mapping of indices to values. For example, for a three-dimensional
:attr:`input` the function described is :math:`g : \mathbb{R}^3 \rightarrow \mathbb{R}`, and
:math:`g(1, 2, 3)\ == input[1, 2, 3]`.
When :attr:`spacing` is specified, it modifies the relationship between :attr:`input` and input coordinates.
This is detailed in the "Keyword Arguments" section below.
The gradient is estimated by estimating each partial derivative of :math:`g` independently. This estimation is
accurate if :math:`g` is in :math:`C^3` (it has at least 3 continuous derivatives), and the estimation can be
improved by providing closer samples. Mathematically, the value at each interior point of a partial derivative
is estimated using `Taylor's theorem with remainder <https://en.wikipedia.org/wiki/Taylor%27s_theorem>`_.
Letting :math:`x` be an interior point with :math:`x-h_l` and :math:`x+h_r` be points neighboring
it to the left and right respectively, :math:`f(x+h_r)` and :math:`f(x-h_l)` can be estimated using:
.. math::
\begin{aligned}
f(x+h_r) = f(x) + h_r f'(x) + {h_r}^2 \frac{f''(x)}{2} + {h_r}^3 \frac{f'''(\xi_1)}{6}, \xi_1 \in (x, x+h_r) \\
f(x-h_l) = f(x) - h_l f'(x) + {h_l}^2 \frac{f''(x)}{2} - {h_l}^3 \frac{f'''(\xi_2)}{6}, \xi_2 \in (x, x-h_l) \\
\end{aligned}
Using the fact that :math:`f \in C^3` and solving the linear system, we derive:
.. math::
f'(x) \approx \frac{ {h_l}^2 f(x+h_r) - {h_r}^2 f(x-h_l)
+ ({h_r}^2-{h_l}^2 ) f(x) }{ {h_r} {h_l}^2 + {h_r}^2 {h_l} }
.. note::
We estimate the gradient of functions in complex domain
:math:`g : \mathbb{C}^n \rightarrow \mathbb{C}` in the same way.
The value of each partial derivative at the boundary points is computed differently. See edge_order below.
Args:
input (``Tensor``): the tensor that represents the values of the function
Keyword args:
spacing (``scalar``, ``list of scalar``, ``list of Tensor``, optional): :attr:`spacing` can be used to modify
how the :attr:`input` tensor's indices relate to sample coordinates. If :attr:`spacing` is a scalar then
the indices are multiplied by the scalar to produce the coordinates. For example, if :attr:`spacing=2` the
indices (1, 2, 3) become coordinates (2, 4, 6). If :attr:`spacing` is a list of scalars then the corresponding
indices are multiplied. For example, if :attr:`spacing=(2, -1, 3)` the indices (1, 2, 3) become coordinates (2, -2, 9).
Finally, if :attr:`spacing` is a list of one-dimensional tensors then each tensor specifies the coordinates for
the corresponding dimension. For example, if the indices are (1, 2, 3) and the tensors are (t0, t1, t2), then
the coordinates are (t0[1], t1[2], t2[3])
dim (``int``, ``list of int``, optional): the dimension or dimensions to approximate the gradient over. By default
the partial gradient in every dimension is computed. Note that when :attr:`dim` is specified the elements of
the :attr:`spacing` argument must correspond with the specified dims.
edge_order (``int``, optional): 1 or 2, for `first-order
<https://www.ams.org/journals/mcom/1988-51-184/S0025-5718-1988-0935077-0/S0025-5718-1988-0935077-0.pdf>`_ or
`second-order <https://www.ams.org/journals/mcom/1988-51-184/S0025-5718-1988-0935077-0/S0025-5718-1988-0935077-0.pdf>`_
estimation of the boundary ("edge") values, respectively.
Examples::
>>> # Estimates the gradient of f(x)=x^2 at points [-2, -1, 1, 4]
>>> coordinates = (torch.tensor([-2., -1., 1., 4.]),)
>>> values = torch.tensor([4., 1., 1., 16.])
>>> torch.gradient(values, spacing = coordinates)
(tensor([-3., -2., 2., 5.]),)
>>> # Estimates the gradient of the R^2 -> R function whose samples are
>>> # described by the tensor t. Implicit coordinates are [0, 1] for the outermost
>>> # dimension and [0, 1, 2, 3] for the innermost dimension, and function estimates
>>> # partial derivative for both dimensions.
>>> t = torch.tensor([[1, 2, 4, 8], [10, 20, 40, 80]])
>>> torch.gradient(t)
(tensor([[ 9., 18., 36., 72.],
[ 9., 18., 36., 72.]]),
tensor([[ 1.0000, 1.5000, 3.0000, 4.0000],
[10.0000, 15.0000, 30.0000, 40.0000]]))
>>> # A scalar value for spacing modifies the relationship between tensor indices
>>> # and input coordinates by multiplying the indices to find the
>>> # coordinates. For example, below the indices of the innermost dimension
>>> # 0, 1, 2, 3 translate to coordinates of [0, 2, 4, 6], and the indices of
>>> # the outermost dimension 0, 1 translate to coordinates of [0, 2].
>>> torch.gradient(t, spacing = 2.0) # dim = None (implicitly [0, 1])
(tensor([[ 4.5000, 9.0000, 18.0000, 36.0000],
[ 4.5000, 9.0000, 18.0000, 36.0000]]),
tensor([[ 0.5000, 0.7500, 1.5000, 2.0000],
[ 5.0000, 7.5000, 15.0000, 20.0000]]))
>>> # doubling the spacing between samples halves the estimated partial gradients.
>>>
>>> # Estimates only the partial derivative for dimension 1
>>> torch.gradient(t, dim = 1) # spacing = None (implicitly 1.)
(tensor([[ 1.0000, 1.5000, 3.0000, 4.0000],
[10.0000, 15.0000, 30.0000, 40.0000]]),)
>>> # When spacing is a list of scalars, the relationship between the tensor
>>> # indices and input coordinates changes based on dimension.
>>> # For example, below, the indices of the innermost dimension 0, 1, 2, 3 translate
>>> # to coordinates of [0, 3, 6, 9], and the indices of the outermost dimension
>>> # 0, 1 translate to coordinates of [0, 2].
>>> torch.gradient(t, spacing = [3., 2.])
(tensor([[ 4.5000, 9.0000, 18.0000, 36.0000],
[ 4.5000, 9.0000, 18.0000, 36.0000]]),
tensor([[ 0.3333, 0.5000, 1.0000, 1.3333],
[ 3.3333, 5.0000, 10.0000, 13.3333]]))
>>> # The following example is a replication of the previous one with explicit
>>> # coordinates.
>>> coords = (torch.tensor([0, 2]), torch.tensor([0, 3, 6, 9]))
>>> torch.gradient(t, spacing = coords)
(tensor([[ 4.5000, 9.0000, 18.0000, 36.0000],
[ 4.5000, 9.0000, 18.0000, 36.0000]]),
tensor([[ 0.3333, 0.5000, 1.0000, 1.3333],
[ 3.3333, 5.0000, 10.0000, 13.3333]]))
"""
...
@overload
def greater(input: Tensor, other: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
r"""
greater(input, other, *, out=None) -> Tensor
Alias for :func:`torch.gt`.
"""
...
@overload
def greater(input: Tensor, other: Union[Number, _complex], *, out: Optional[Tensor] = None) -> Tensor:
r"""
greater(input, other, *, out=None) -> Tensor
Alias for :func:`torch.gt`.
"""
...
@overload
def greater_equal(input: Tensor, other: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
r"""
greater_equal(input, other, *, out=None) -> Tensor
Alias for :func:`torch.ge`.
"""
...
@overload
def greater_equal(input: Tensor, other: Union[Number, _complex], *, out: Optional[Tensor] = None) -> Tensor:
r"""
greater_equal(input, other, *, out=None) -> Tensor
Alias for :func:`torch.ge`.
"""
...
def grid_sampler(input: Tensor, grid: Tensor, interpolation_mode: _int, padding_mode: _int, align_corners: _bool) -> Tensor: ...
def grid_sampler_2d(input: Tensor, grid: Tensor, interpolation_mode: _int, padding_mode: _int, align_corners: _bool) -> Tensor: ...
def grid_sampler_3d(input: Tensor, grid: Tensor, interpolation_mode: _int, padding_mode: _int, align_corners: _bool) -> Tensor: ...
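# A minimal identity-warp sketch for the grid_sampler family. These are the
# low-level ops behind torch.nn.functional.grid_sample; the integer enums used
# below (interpolation_mode: 0=bilinear, 1=nearest, 2=bicubic; padding_mode:
# 0=zeros, 1=border, 2=reflection) are an assumption inferred from that wrapper.
# >>> inp = torch.arange(16.0).reshape(1, 1, 4, 4)
# >>> theta = torch.tensor([[[1.0, 0.0, 0.0], [0.0, 1.0, 0.0]]])  # identity affine
# >>> grid = torch.nn.functional.affine_grid(theta, [1, 1, 4, 4], align_corners=True)
# >>> torch.allclose(torch.grid_sampler(inp, grid, 0, 0, True), inp)
# True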
def group_norm(input: Tensor, num_groups: _int, weight: Optional[Tensor] = None, bias: Optional[Tensor] = None, eps: _float = 1e-05, cudnn_enabled: _bool = True) -> Tensor: ...
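# A hedged sketch of group_norm semantics: the channel dimension is split into
# num_groups groups, and each group is normalized to (approximately) zero mean
# and unit variance before the optional affine weight/bias are applied.
# >>> x = torch.randn(2, 6, 4)
# >>> y = torch.group_norm(x, 3)  # 3 groups of 2 channels each
# >>> y.reshape(2, 3, -1).mean(dim=-1).abs().max() < 1e-5
# tensor(True)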
@overload
def gru(data: Tensor, batch_sizes: Tensor, hx: Tensor, params: Union[Tuple[Tensor, ...], List[Tensor]], has_biases: _bool, num_layers: _int, dropout: _float, train: _bool, bidirectional: _bool) -> Tuple[Tensor, Tensor]: ...
@overload
def gru(input: Tensor, hx: Tensor, params: Union[Tuple[Tensor, ...], List[Tensor]], has_biases: _bool, num_layers: _int, dropout: _float, train: _bool, bidirectional: _bool, batch_first: _bool) -> Tuple[Tensor, Tensor]: ...
def gru_cell(input: Tensor, hx: Tensor, w_ih: Tensor, w_hh: Tensor, b_ih: Optional[Tensor] = None, b_hh: Optional[Tensor] = None) -> Tensor: ...
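# Shape sketch for gru_cell, assuming the usual stacked-gate layout (the three
# GRU gates concatenated along dim 0 of the weight matrices): input is
# (batch, input_size), hx is (batch, hidden_size), w_ih is
# (3*hidden_size, input_size), w_hh is (3*hidden_size, hidden_size), and the
# result is the next hidden state.
# >>> x, hx = torch.randn(5, 10), torch.zeros(5, 20)
# >>> w_ih, w_hh = torch.randn(60, 10), torch.randn(60, 20)
# >>> torch.gru_cell(x, hx, w_ih, w_hh).shape
# torch.Size([5, 20])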
@overload
def gt(input: Tensor, other: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
r"""
gt(input, other, *, out=None) -> Tensor
Computes :math:`\text{input} > \text{other}` element-wise.
The second argument can be a number or a tensor whose shape is
:ref:`broadcastable <broadcasting-semantics>` with the first argument.
Args:
input (Tensor): the tensor to compare
other (Tensor or float): the tensor or value to compare
Keyword args:
out (Tensor, optional): the output tensor.
Returns:
A boolean tensor that is True where :attr:`input` is greater than :attr:`other` and False elsewhere
Example::
>>> torch.gt(torch.tensor([[1, 2], [3, 4]]), torch.tensor([[1, 1], [4, 4]]))
tensor([[False, True], [False, False]])
"""
...
@overload
def gt(input: Tensor, other: Union[Number, _complex], *, out: Optional[Tensor] = None) -> Tensor:
r"""
gt(input, other, *, out=None) -> Tensor
Computes :math:`\text{input} > \text{other}` element-wise.
The second argument can be a number or a tensor whose shape is
:ref:`broadcastable <broadcasting-semantics>` with the first argument.
Args:
input (Tensor): the tensor to compare
other (Tensor or float): the tensor or value to compare
Keyword args:
out (Tensor, optional): the output tensor.
Returns:
A boolean tensor that is True where :attr:`input` is greater than :attr:`other` and False elsewhere
Example::
>>> torch.gt(torch.tensor([[1, 2], [3, 4]]), torch.tensor([[1, 1], [4, 4]]))
tensor([[False, True], [False, False]])
"""
...
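# The scalar overload broadcasts `other` against every element of `input`:
# >>> torch.gt(torch.tensor([1, 2, 3]), 2)
# tensor([False, False,  True])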
@overload
def hamming_window(window_length: _int, *, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor:
r"""
hamming_window(window_length, periodic=True, alpha=0.54, beta=0.46, *, dtype=None, layout=torch.strided, device=None, requires_grad=False) -> Tensor
Hamming window function.
.. math::
w[n] = \alpha - \beta\ \cos \left( \frac{2 \pi n}{N - 1} \right),
where :math:`N` is the full window size.
The input :attr:`window_length` is a positive integer controlling the
returned window size. :attr:`periodic` flag determines whether the returned
window trims off the last duplicate value from the symmetric window and is
ready to be used as a periodic window with functions like
:meth:`torch.stft`. Therefore, if :attr:`periodic` is true, the :math:`N` in
above formula is in fact :math:`\text{window\_length} + 1`. Also, we always have
``torch.hamming_window(L, periodic=True)`` equal to
``torch.hamming_window(L + 1, periodic=False)[:-1]``.
.. note::
If :attr:`window_length` :math:`=1`, the returned window contains a single value 1.
.. note::
This is a generalized version of :meth:`torch.hann_window`.
Arguments:
window_length (int): the size of returned window
periodic (bool, optional): If True, returns a window to be used as periodic
function. If False, return a symmetric window.
alpha (float, optional): The coefficient :math:`\alpha` in the equation above
beta (float, optional): The coefficient :math:`\beta` in the equation above
Keyword args:
dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.
Default: if ``None``, uses a global default (see :func:`torch.set_default_dtype`). Only floating point types are supported.
layout (:class:`torch.layout`, optional): the desired layout of returned window tensor. Only
``torch.strided`` (dense layout) is supported.
device (:class:`torch.device`, optional): the desired device of returned tensor.
Default: if ``None``, uses the current device for the default tensor type
(see :func:`torch.set_default_device`). :attr:`device` will be the CPU
for CPU tensor types and the current CUDA device for CUDA tensor types.
requires_grad (bool, optional): If autograd should record operations on the
returned tensor. Default: ``False``.
Returns:
Tensor: A 1-D tensor of size :math:`(\text{window\_length},)` containing the window.
"""
...
@overload
def hamming_window(window_length: _int, periodic: _bool, *, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor:
r"""
hamming_window(window_length, periodic=True, alpha=0.54, beta=0.46, *, dtype=None, layout=torch.strided, device=None, requires_grad=False) -> Tensor
Hamming window function.
.. math::
w[n] = \alpha - \beta\ \cos \left( \frac{2 \pi n}{N - 1} \right),
where :math:`N` is the full window size.
The input :attr:`window_length` is a positive integer controlling the
returned window size. :attr:`periodic` flag determines whether the returned
window trims off the last duplicate value from the symmetric window and is
ready to be used as a periodic window with functions like
:meth:`torch.stft`. Therefore, if :attr:`periodic` is true, the :math:`N` in
above formula is in fact :math:`\text{window\_length} + 1`. Also, we always have
``torch.hamming_window(L, periodic=True)`` equal to
``torch.hamming_window(L + 1, periodic=False)[:-1]``.
.. note::
If :attr:`window_length` :math:`=1`, the returned window contains a single value 1.
.. note::
This is a generalized version of :meth:`torch.hann_window`.
Arguments:
window_length (int): the size of returned window
periodic (bool, optional): If True, returns a window to be used as periodic
function. If False, return a symmetric window.
alpha (float, optional): The coefficient :math:`\alpha` in the equation above
beta (float, optional): The coefficient :math:`\beta` in the equation above
Keyword args:
dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.
Default: if ``None``, uses a global default (see :func:`torch.set_default_dtype`). Only floating point types are supported.
layout (:class:`torch.layout`, optional): the desired layout of returned window tensor. Only
``torch.strided`` (dense layout) is supported.
device (:class:`torch.device`, optional): the desired device of returned tensor.
Default: if ``None``, uses the current device for the default tensor type
(see :func:`torch.set_default_device`). :attr:`device` will be the CPU
for CPU tensor types and the current CUDA device for CUDA tensor types.
requires_grad (bool, optional): If autograd should record operations on the
returned tensor. Default: ``False``.
Returns:
Tensor: A 1-D tensor of size :math:`(\text{window\_length},)` containing the window.
"""
...
@overload
def hamming_window(window_length: _int, periodic: _bool, alpha: _float, *, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor:
r"""
hamming_window(window_length, periodic=True, alpha=0.54, beta=0.46, *, dtype=None, layout=torch.strided, device=None, requires_grad=False) -> Tensor
Hamming window function.
.. math::
w[n] = \alpha - \beta\ \cos \left( \frac{2 \pi n}{N - 1} \right),
where :math:`N` is the full window size.
The input :attr:`window_length` is a positive integer controlling the
returned window size. :attr:`periodic` flag determines whether the returned
window trims off the last duplicate value from the symmetric window and is
ready to be used as a periodic window with functions like
:meth:`torch.stft`. Therefore, if :attr:`periodic` is true, the :math:`N` in
above formula is in fact :math:`\text{window\_length} + 1`. Also, we always have
``torch.hamming_window(L, periodic=True)`` equal to
``torch.hamming_window(L + 1, periodic=False)[:-1]``.
.. note::
If :attr:`window_length` :math:`=1`, the returned window contains a single value 1.
.. note::
This is a generalized version of :meth:`torch.hann_window`.
Arguments:
window_length (int): the size of returned window
periodic (bool, optional): If True, returns a window to be used as periodic
function. If False, return a symmetric window.
alpha (float, optional): The coefficient :math:`\alpha` in the equation above
beta (float, optional): The coefficient :math:`\beta` in the equation above
Keyword args:
dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.
Default: if ``None``, uses a global default (see :func:`torch.set_default_dtype`). Only floating point types are supported.
layout (:class:`torch.layout`, optional): the desired layout of returned window tensor. Only
``torch.strided`` (dense layout) is supported.
device (:class:`torch.device`, optional): the desired device of returned tensor.
Default: if ``None``, uses the current device for the default tensor type
(see :func:`torch.set_default_device`). :attr:`device` will be the CPU
for CPU tensor types and the current CUDA device for CUDA tensor types.
requires_grad (bool, optional): If autograd should record operations on the
returned tensor. Default: ``False``.
Returns:
Tensor: A 1-D tensor of size :math:`(\text{window\_length},)` containing the window.
"""
...
@overload
def hamming_window(window_length: _int, periodic: _bool, alpha: _float, beta: _float, *, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor:
r"""
hamming_window(window_length, periodic=True, alpha=0.54, beta=0.46, *, dtype=None, layout=torch.strided, device=None, requires_grad=False) -> Tensor
Hamming window function.
.. math::
w[n] = \alpha - \beta\ \cos \left( \frac{2 \pi n}{N - 1} \right),
where :math:`N` is the full window size.
The input :attr:`window_length` is a positive integer controlling the
returned window size. :attr:`periodic` flag determines whether the returned
window trims off the last duplicate value from the symmetric window and is
ready to be used as a periodic window with functions like
:meth:`torch.stft`. Therefore, if :attr:`periodic` is true, the :math:`N` in
above formula is in fact :math:`\text{window\_length} + 1`. Also, we always have
``torch.hamming_window(L, periodic=True)`` equal to
``torch.hamming_window(L + 1, periodic=False)[:-1]``.
.. note::
If :attr:`window_length` :math:`=1`, the returned window contains a single value 1.
.. note::
This is a generalized version of :meth:`torch.hann_window`.
Arguments:
window_length (int): the size of returned window
periodic (bool, optional): If True, returns a window to be used as periodic
function. If False, return a symmetric window.
alpha (float, optional): The coefficient :math:`\alpha` in the equation above
beta (float, optional): The coefficient :math:`\beta` in the equation above
Keyword args:
dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.
Default: if ``None``, uses a global default (see :func:`torch.set_default_dtype`). Only floating point types are supported.
layout (:class:`torch.layout`, optional): the desired layout of returned window tensor. Only
``torch.strided`` (dense layout) is supported.
device (:class:`torch.device`, optional): the desired device of returned tensor.
Default: if ``None``, uses the current device for the default tensor type
(see :func:`torch.set_default_device`). :attr:`device` will be the CPU
for CPU tensor types and the current CUDA device for CUDA tensor types.
requires_grad (bool, optional): If autograd should record operations on the
returned tensor. Default: ``False``.
Returns:
Tensor: A 1-D tensor of size :math:`(\text{window\_length},)` containing the window.
"""
...
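# A quick numerical check of the periodic/symmetric identity quoted above:
# >>> torch.allclose(torch.hamming_window(8, periodic=True),
# ...                torch.hamming_window(9, periodic=False)[:-1])
# True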
@overload
def hann_window(window_length: _int, *, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor:
r"""
hann_window(window_length, periodic=True, *, dtype=None, layout=torch.strided, device=None, requires_grad=False) -> Tensor
Hann window function.
.. math::
w[n] = \frac{1}{2}\ \left[1 - \cos \left( \frac{2 \pi n}{N - 1} \right)\right] =
\sin^2 \left( \frac{\pi n}{N - 1} \right),
where :math:`N` is the full window size.
The input :attr:`window_length` is a positive integer controlling the
returned window size. :attr:`periodic` flag determines whether the returned
window trims off the last duplicate value from the symmetric window and is
ready to be used as a periodic window with functions like
:meth:`torch.stft`. Therefore, if :attr:`periodic` is true, the :math:`N` in
above formula is in fact :math:`\text{window\_length} + 1`. Also, we always have
``torch.hann_window(L, periodic=True)`` equal to
``torch.hann_window(L + 1, periodic=False)[:-1]``.
.. note::
If :attr:`window_length` :math:`=1`, the returned window contains a single value 1.
Arguments:
window_length (int): the size of returned window
periodic (bool, optional): If True, returns a window to be used as periodic
function. If False, return a symmetric window.
Keyword args:
dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.
Default: if ``None``, uses a global default (see :func:`torch.set_default_dtype`). Only floating point types are supported.
layout (:class:`torch.layout`, optional): the desired layout of returned window tensor. Only
``torch.strided`` (dense layout) is supported.
device (:class:`torch.device`, optional): the desired device of returned tensor.
Default: if ``None``, uses the current device for the default tensor type
(see :func:`torch.set_default_device`). :attr:`device` will be the CPU
for CPU tensor types and the current CUDA device for CUDA tensor types.
requires_grad (bool, optional): If autograd should record operations on the
returned tensor. Default: ``False``.
Returns:
Tensor: A 1-D tensor of size :math:`(\text{window\_length},)` containing the window
"""
...
@overload
def hann_window(window_length: _int, periodic: _bool, *, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor:
r"""
hann_window(window_length, periodic=True, *, dtype=None, layout=torch.strided, device=None, requires_grad=False) -> Tensor
Hann window function.
.. math::
w[n] = \frac{1}{2}\ \left[1 - \cos \left( \frac{2 \pi n}{N - 1} \right)\right] =
\sin^2 \left( \frac{\pi n}{N - 1} \right),
where :math:`N` is the full window size.
The input :attr:`window_length` is a positive integer controlling the
returned window size. :attr:`periodic` flag determines whether the returned
window trims off the last duplicate value from the symmetric window and is
ready to be used as a periodic window with functions like
:meth:`torch.stft`. Therefore, if :attr:`periodic` is true, the :math:`N` in
above formula is in fact :math:`\text{window\_length} + 1`. Also, we always have
``torch.hann_window(L, periodic=True)`` equal to
``torch.hann_window(L + 1, periodic=False)[:-1]``.
.. note::
If :attr:`window_length` :math:`=1`, the returned window contains a single value 1.
Arguments:
window_length (int): the size of returned window
periodic (bool, optional): If True, returns a window to be used as periodic
function. If False, return a symmetric window.
Keyword args:
dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.
Default: if ``None``, uses a global default (see :func:`torch.set_default_dtype`). Only floating point types are supported.
layout (:class:`torch.layout`, optional): the desired layout of returned window tensor. Only
``torch.strided`` (dense layout) is supported.
device (:class:`torch.device`, optional): the desired device of returned tensor.
Default: if ``None``, uses the current device for the default tensor type
(see :func:`torch.set_default_device`). :attr:`device` will be the CPU
for CPU tensor types and the current CUDA device for CUDA tensor types.
requires_grad (bool, optional): If autograd should record operations on the
returned tensor. Default: ``False``.
Returns:
Tensor: A 1-D tensor of size :math:`(\text{window\_length},)` containing the window
"""
...
def hardshrink(input: Tensor, lambd: Union[Number, _complex] = 0.5, *, out: Optional[Tensor] = None) -> Tensor: ...
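# hardshrink zeroes entries whose magnitude is at most `lambd` and passes the
# rest through unchanged, i.e. out = x when |x| > lambd, else 0:
# >>> torch.hardshrink(torch.tensor([-1.0, -0.25, 0.25, 1.0]), 0.5)
# tensor([-1.,  0.,  0.,  1.])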
def heaviside(input: Tensor, values: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
r"""
heaviside(input, values, *, out=None) -> Tensor
Computes the Heaviside step function for each element in :attr:`input`.
The Heaviside step function is defined as:
.. math::
\text{heaviside}(input, values) = \begin{cases}
0, & \text{if input < 0}\\
values, & \text{if input == 0}\\
1, & \text{if input > 0}
\end{cases}
Args:
input (Tensor): the input tensor.
values (Tensor): The values to use where :attr:`input` is zero.
Keyword arguments:
out (Tensor, optional): the output tensor.
Example::
>>> input = torch.tensor([-1.5, 0, 2.0])
>>> values = torch.tensor([0.5])
>>> torch.heaviside(input, values)
tensor([0.0000, 0.5000, 1.0000])
>>> values = torch.tensor([1.2, -2.0, 3.5])
>>> torch.heaviside(input, values)
tensor([0., -2., 1.])
"""
...
def hinge_embedding_loss(input: Tensor, target: Tensor, margin: _float = 1.0, reduction: _int = 1) -> Tensor: ...
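# A sketch of hinge_embedding_loss; the integer `reduction` is assumed to use
# the usual ATen enum (0=none, 1=mean, 2=sum). Per element the loss is x when
# target == 1 and max(0, margin - x) when target == -1.
# >>> x = torch.tensor([0.2, 0.8])
# >>> y = torch.tensor([1., -1.])
# >>> torch.hinge_embedding_loss(x, y, margin=1.0, reduction=0)
# tensor([0.2000, 0.2000])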
def histc(input: Tensor, bins: _int = 100, min: Union[Number, _complex] = 0, max: Union[Number, _complex] = 0, *, out: Optional[Tensor] = None) -> Tensor:
r"""
histc(input, bins=100, min=0, max=0, *, out=None) -> Tensor
Computes the histogram of a tensor.
The elements are sorted into equal width bins between :attr:`min` and
:attr:`max`. If :attr:`min` and :attr:`max` are both zero, the minimum and
maximum values of the data are used.
Elements lower than min and higher than max and ``NaN`` elements are ignored.
Args:
input (Tensor): the input tensor.
bins (int): number of histogram bins
min (Scalar): lower end of the range (inclusive)
max (Scalar): upper end of the range (inclusive)
Keyword args:
out (Tensor, optional): the output tensor.
Returns:
Tensor: Histogram represented as a tensor
Example::
>>> torch.histc(torch.tensor([1., 2, 1]), bins=4, min=0, max=3)
tensor([ 0., 2., 1., 0.])
"""
...
@overload
def histogram(input: Tensor, bins: Tensor, *, weight: Optional[Tensor] = None, density: _bool = False, out: Union[Tensor, Tuple[Tensor, ...], List[Tensor], None] = None) -> torch.return_types.histogram:
r"""
histogram(input, bins, *, range=None, weight=None, density=False, out=None) -> (Tensor, Tensor)
Computes a histogram of the values in a tensor.
:attr:`bins` can be an integer or a 1D tensor.
If :attr:`bins` is an int, it specifies the number of equal-width bins.
By default, the lower and upper range of the bins is determined by the
minimum and maximum elements of the input tensor. The :attr:`range`
argument can be provided to specify a range for the bins.
If :attr:`bins` is a 1D tensor, it specifies the sequence of bin edges
including the rightmost edge. It should contain at least 2 elements
and its elements should be increasing.
Args:
input (Tensor): the input tensor.
bins: int or 1D Tensor. If int, defines the number of equal-width bins. If tensor,
defines the sequence of bin edges including the rightmost edge.
Keyword args:
range (tuple of float): Defines the range of the bins.
weight (Tensor): If provided, weight should have the same shape as input. Each value in
input contributes its associated weight towards its bin's result.
density (bool): If False, the result will contain the count (or total weight) in each bin.
If True, the result is the value of the probability density function over the bins,
normalized such that the integral over the range of the bins is 1.
out (tuple, optional): The result tuple of two output tensors (hist, bin_edges).
Returns:
hist (Tensor): 1D Tensor containing the values of the histogram.
bin_edges(Tensor): 1D Tensor containing the edges of the histogram bins.
Example::
>>> torch.histogram(torch.tensor([1., 2, 1]), bins=4, range=(0., 3.), weight=torch.tensor([1., 2., 4.]))
(tensor([ 0., 5., 2., 0.]), tensor([0., 0.75, 1.5, 2.25, 3.]))
>>> torch.histogram(torch.tensor([1., 2, 1]), bins=4, range=(0., 3.), weight=torch.tensor([1., 2., 4.]), density=True)
(tensor([ 0., 0.9524, 0.3810, 0.]), tensor([0., 0.75, 1.5, 2.25, 3.]))
"""
...
@overload
def histogram(input: Tensor, bins: _int = 100, *, range: Optional[Sequence[_float]] = None, weight: Optional[Tensor] = None, density: _bool = False, out: Union[Tensor, Tuple[Tensor, ...], List[Tensor], None] = None) -> torch.return_types.histogram:
r"""
histogram(input, bins, *, range=None, weight=None, density=False, out=None) -> (Tensor, Tensor)
Computes a histogram of the values in a tensor.
:attr:`bins` can be an integer or a 1D tensor.
If :attr:`bins` is an int, it specifies the number of equal-width bins.
By default, the lower and upper range of the bins is determined by the
minimum and maximum elements of the input tensor. The :attr:`range`
argument can be provided to specify a range for the bins.
If :attr:`bins` is a 1D tensor, it specifies the sequence of bin edges
including the rightmost edge. It should contain at least 2 elements
and its elements should be increasing.
Args:
input (Tensor): the input tensor.
bins: int or 1D Tensor. If int, defines the number of equal-width bins. If tensor,
defines the sequence of bin edges including the rightmost edge.
Keyword args:
range (tuple of float): Defines the range of the bins.
weight (Tensor): If provided, weight should have the same shape as input. Each value in
input contributes its associated weight towards its bin's result.
density (bool): If False, the result will contain the count (or total weight) in each bin.
If True, the result is the value of the probability density function over the bins,
normalized such that the integral over the range of the bins is 1.
out (tuple, optional): The result tuple of two output tensors (hist, bin_edges).
Returns:
hist (Tensor): 1D Tensor containing the values of the histogram.
bin_edges(Tensor): 1D Tensor containing the edges of the histogram bins.
Example::
>>> torch.histogram(torch.tensor([1., 2, 1]), bins=4, range=(0., 3.), weight=torch.tensor([1., 2., 4.]))
(tensor([ 0., 5., 2., 0.]), tensor([0., 0.75, 1.5, 2.25, 3.]))
>>> torch.histogram(torch.tensor([1., 2, 1]), bins=4, range=(0., 3.), weight=torch.tensor([1., 2., 4.]), density=True)
(tensor([ 0., 0.9524, 0.3810, 0.]), tensor([0., 0.75, 1.5, 2.25, 3.]))
"""
...
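# With density=True the histogram integrates to 1 over the binned range, which
# can be verified with the bin widths:
# >>> hist, edges = torch.histogram(torch.randn(100), bins=5, density=True)
# >>> torch.isclose((hist * edges.diff()).sum(), torch.tensor(1.))
# tensor(True)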
@overload
def histogramdd(input: Tensor, bins: _int, range: Optional[Sequence[_float]] = None, weight: Optional[Tensor] = None, density: _bool = False) -> torch.return_types.histogramdd:
r"""
histogramdd(input, bins, *, range=None, weight=None, density=False, out=None) -> (Tensor, Tensor[])
Computes a multi-dimensional histogram of the values in a tensor.
Interprets the elements of an input tensor whose innermost dimension has size N
as a collection of N-dimensional points. Maps each of the points into a set of
N-dimensional bins and returns the number of points (or total weight) in each bin.
:attr:`input` must be a tensor with at least 2 dimensions.
If input has shape (M, N), each of its M rows defines a point in N-dimensional space.
If input has three or more dimensions, all but the last dimension are flattened.
Each dimension is independently associated with its own strictly increasing sequence
of bin edges. Bin edges may be specified explicitly by passing a sequence of 1D
tensors. Alternatively, bin edges may be constructed automatically by passing a
sequence of integers specifying the number of equal-width bins in each dimension.
For each N-dimensional point in input:
- Each of its coordinates is binned independently among the bin edges
corresponding to its dimension
- Binning results are combined to identify the N-dimensional bin (if any)
into which the point falls
- If the point falls into a bin, the bin's count (or total weight) is incremented
- Points which do not fall into any bin do not contribute to the output
:attr:`bins` can be a sequence of N 1D tensors, a sequence of N ints, or a single int.
If :attr:`bins` is a sequence of N 1D tensors, it explicitly specifies the N sequences
of bin edges. Each 1D tensor should contain a strictly increasing sequence with at
least one element. A sequence of K bin edges defines K-1 bins, explicitly specifying
the left and right edges of all bins. Every bin is inclusive of its left edge. Only
the rightmost bin is inclusive of its right edge.
If :attr:`bins` is a sequence of N ints, it specifies the number of equal-width bins
in each dimension. By default, the leftmost and rightmost bin edges in each dimension
are determined by the minimum and maximum elements of the input tensor in the
corresponding dimension. The :attr:`range` argument can be provided to manually
specify the leftmost and rightmost bin edges in each dimension.
If :attr:`bins` is an int, it specifies the number of equal-width bins for all dimensions.
.. note::
See also :func:`torch.histogram`, which specifically computes 1D histograms.
While :func:`torch.histogramdd` infers the dimensionality of its bins and
binned values from the shape of :attr:`input`, :func:`torch.histogram`
accepts and flattens :attr:`input` of any shape.
Args:
input (Tensor): the input tensor.
bins: Tensor[], int[], or int.
If Tensor[], defines the sequences of bin edges.
If int[], defines the number of equal-width bins in each dimension.
If int, defines the number of equal-width bins for all dimensions.
Keyword args:
range (sequence of float): Defines the leftmost and rightmost bin edges
in each dimension.
weight (Tensor): By default, each value in the input has weight 1. If a weight
tensor is passed, each N-dimensional coordinate in input
contributes its associated weight towards its bin's result.
The weight tensor should have the same shape as the :attr:`input`
tensor excluding its innermost dimension N.
density (bool): If False (default), the result will contain the count (or total weight)
in each bin. If True, each count (weight) is divided by the total count
(total weight), then divided by the volume of its associated bin.
Returns:
hist (Tensor): N-dimensional Tensor containing the values of the histogram.
bin_edges(Tensor[]): sequence of N 1D Tensors containing the bin edges.
Example::
>>> torch.histogramdd(torch.tensor([[0., 1.], [1., 0.], [2., 0.], [2., 2.]]), bins=[3, 3],
... weight=torch.tensor([1., 2., 4., 8.]))
torch.return_types.histogramdd(
hist=tensor([[0., 1., 0.],
[2., 0., 0.],
[4., 0., 8.]]),
bin_edges=(tensor([0.0000, 0.6667, 1.3333, 2.0000]),
tensor([0.0000, 0.6667, 1.3333, 2.0000])))
>>> torch.histogramdd(torch.tensor([[0., 0.], [1., 1.], [2., 2.]]), bins=[2, 2],
... range=[0., 1., 0., 1.], density=True)
torch.return_types.histogramdd(
hist=tensor([[2., 0.],
[0., 2.]]),
bin_edges=(tensor([0.0000, 0.5000, 1.0000]),
tensor([0.0000, 0.5000, 1.0000])))
"""
...
@overload
def histogramdd(input: Tensor, bins: _size, range: Optional[Sequence[_float]] = None, weight: Optional[Tensor] = None, density: _bool = False) -> torch.return_types.histogramdd:
r"""
histogramdd(input, bins, *, range=None, weight=None, density=False, out=None) -> (Tensor, Tensor[])
Computes a multi-dimensional histogram of the values in a tensor.
Interprets the elements of an input tensor whose innermost dimension has size N
as a collection of N-dimensional points. Maps each of the points into a set of
N-dimensional bins and returns the number of points (or total weight) in each bin.
:attr:`input` must be a tensor with at least 2 dimensions.
If input has shape (M, N), each of its M rows defines a point in N-dimensional space.
If input has three or more dimensions, all but the last dimension are flattened.
Each dimension is independently associated with its own strictly increasing sequence
of bin edges. Bin edges may be specified explicitly by passing a sequence of 1D
tensors. Alternatively, bin edges may be constructed automatically by passing a
sequence of integers specifying the number of equal-width bins in each dimension.
For each N-dimensional point in input:
- Each of its coordinates is binned independently among the bin edges
corresponding to its dimension
- Binning results are combined to identify the N-dimensional bin (if any)
into which the point falls
- If the point falls into a bin, the bin's count (or total weight) is incremented
- Points which do not fall into any bin do not contribute to the output
:attr:`bins` can be a sequence of N 1D tensors, a sequence of N ints, or a single int.
If :attr:`bins` is a sequence of N 1D tensors, it explicitly specifies the N sequences
of bin edges. Each 1D tensor should contain a strictly increasing sequence with at
least one element. A sequence of K bin edges defines K-1 bins, explicitly specifying
the left and right edges of all bins. Every bin is inclusive of its left edge. Only
the rightmost bin is inclusive of its right edge.
If :attr:`bins` is a sequence of N ints, it specifies the number of equal-width bins
in each dimension. By default, the leftmost and rightmost bin edges in each dimension
are determined by the minimum and maximum elements of the input tensor in the
corresponding dimension. The :attr:`range` argument can be provided to manually
specify the leftmost and rightmost bin edges in each dimension.
If :attr:`bins` is an int, it specifies the number of equal-width bins for all dimensions.
.. note::
See also :func:`torch.histogram`, which specifically computes 1D histograms.
While :func:`torch.histogramdd` infers the dimensionality of its bins and
binned values from the shape of :attr:`input`, :func:`torch.histogram`
accepts and flattens :attr:`input` of any shape.
Args:
input (Tensor): the input tensor.
bins: Tensor[], int[], or int.
If Tensor[], defines the sequences of bin edges.
If int[], defines the number of equal-width bins in each dimension.
If int, defines the number of equal-width bins for all dimensions.
Keyword args:
range (sequence of float): Defines the leftmost and rightmost bin edges
in each dimension.
weight (Tensor): By default, each value in the input has weight 1. If a weight
tensor is passed, each N-dimensional coordinate in input
contributes its associated weight towards its bin's result.
The weight tensor should have the same shape as the :attr:`input`
tensor excluding its innermost dimension N.
density (bool): If False (default), the result will contain the count (or total weight)
in each bin. If True, each count (weight) is divided by the total count
(total weight), then divided by the volume of its associated bin.
Returns:
hist (Tensor): N-dimensional Tensor containing the values of the histogram.
bin_edges(Tensor[]): sequence of N 1D Tensors containing the bin edges.
Example::
>>> torch.histogramdd(torch.tensor([[0., 1.], [1., 0.], [2., 0.], [2., 2.]]), bins=[3, 3],
... weight=torch.tensor([1., 2., 4., 8.]))
torch.return_types.histogramdd(
hist=tensor([[0., 1., 0.],
[2., 0., 0.],
[4., 0., 8.]]),
bin_edges=(tensor([0.0000, 0.6667, 1.3333, 2.0000]),
tensor([0.0000, 0.6667, 1.3333, 2.0000])))
>>> torch.histogramdd(torch.tensor([[0., 0.], [1., 1.], [2., 2.]]), bins=[2, 2],
... range=[0., 1., 0., 1.], density=True)
torch.return_types.histogramdd(
hist=tensor([[2., 0.],
[0., 2.]]),
bin_edges=(tensor([0.0000, 0.5000, 1.0000]),
tensor([0.0000, 0.5000, 1.0000])))
"""
...
@overload
def histogramdd(input: Tensor, bins: Union[Tuple[Tensor, ...], List[Tensor]], range: Optional[Sequence[_float]] = None, weight: Optional[Tensor] = None, density: _bool = False) -> torch.return_types.histogramdd:
r"""
histogramdd(input, bins, *, range=None, weight=None, density=False, out=None) -> (Tensor, Tensor[])
Computes a multi-dimensional histogram of the values in a tensor.
Interprets the elements of an input tensor whose innermost dimension has size N
as a collection of N-dimensional points. Maps each of the points into a set of
N-dimensional bins and returns the number of points (or total weight) in each bin.
:attr:`input` must be a tensor with at least 2 dimensions.
If input has shape (M, N), each of its M rows defines a point in N-dimensional space.
If input has three or more dimensions, all but the last dimension are flattened.
Each dimension is independently associated with its own strictly increasing sequence
of bin edges. Bin edges may be specified explicitly by passing a sequence of 1D
tensors. Alternatively, bin edges may be constructed automatically by passing a
sequence of integers specifying the number of equal-width bins in each dimension.
For each N-dimensional point in input:
- Each of its coordinates is binned independently among the bin edges
corresponding to its dimension
- Binning results are combined to identify the N-dimensional bin (if any)
into which the point falls
- If the point falls into a bin, the bin's count (or total weight) is incremented
- Points which do not fall into any bin do not contribute to the output
:attr:`bins` can be a sequence of N 1D tensors, a sequence of N ints, or a single int.
If :attr:`bins` is a sequence of N 1D tensors, it explicitly specifies the N sequences
of bin edges. Each 1D tensor should contain a strictly increasing sequence with at
least one element. A sequence of K bin edges defines K-1 bins, explicitly specifying
the left and right edges of all bins. Every bin is inclusive of its left edge. Only
the rightmost bin is inclusive of its right edge.
If :attr:`bins` is a sequence of N ints, it specifies the number of equal-width bins
in each dimension. By default, the leftmost and rightmost bin edges in each dimension
are determined by the minimum and maximum elements of the input tensor in the
corresponding dimension. The :attr:`range` argument can be provided to manually
specify the leftmost and rightmost bin edges in each dimension.
If :attr:`bins` is an int, it specifies the number of equal-width bins for all dimensions.
.. note::
See also :func:`torch.histogram`, which specifically computes 1D histograms.
While :func:`torch.histogramdd` infers the dimensionality of its bins and
binned values from the shape of :attr:`input`, :func:`torch.histogram`
accepts and flattens :attr:`input` of any shape.
Args:
input (Tensor): the input tensor.
bins: Tensor[], int[], or int.
If Tensor[], defines the sequences of bin edges.
If int[], defines the number of equal-width bins in each dimension.
If int, defines the number of equal-width bins for all dimensions.
Keyword args:
range (sequence of float): Defines the leftmost and rightmost bin edges
in each dimension.
weight (Tensor): By default, each value in the input has weight 1. If a weight
tensor is passed, each N-dimensional coordinate in input
contributes its associated weight towards its bin's result.
The weight tensor should have the same shape as the :attr:`input`
tensor excluding its innermost dimension N.
density (bool): If False (default), the result will contain the count (or total weight)
in each bin. If True, each count (weight) is divided by the total count
(total weight), then divided by the volume of its associated bin.
Returns:
hist (Tensor): N-dimensional Tensor containing the values of the histogram.
bin_edges(Tensor[]): sequence of N 1D Tensors containing the bin edges.
Example::
>>> torch.histogramdd(torch.tensor([[0., 1.], [1., 0.], [2., 0.], [2., 2.]]), bins=[3, 3],
... weight=torch.tensor([1., 2., 4., 8.]))
torch.return_types.histogramdd(
hist=tensor([[0., 1., 0.],
[2., 0., 0.],
[4., 0., 8.]]),
bin_edges=(tensor([0.0000, 0.6667, 1.3333, 2.0000]),
tensor([0.0000, 0.6667, 1.3333, 2.0000])))
>>> torch.histogramdd(torch.tensor([[0., 0.], [1., 1.], [2., 2.]]), bins=[2, 2],
... range=[0., 1., 0., 1.], density=True)
torch.return_types.histogramdd(
hist=tensor([[2., 0.],
[0., 2.]]),
bin_edges=(tensor([0.0000, 0.5000, 1.0000]),
tensor([0.0000, 0.5000, 1.0000])))
"""
...
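# As the docstring notes, an int `bins` applies the same number of equal-width
# bins to every dimension, so for a 2-column input bins=3 behaves like
# bins=[3, 3]:
# >>> pts = torch.rand(10, 2)
# >>> torch.equal(torch.histogramdd(pts, bins=3).hist,
# ...             torch.histogramdd(pts, bins=[3, 3]).hist)
# True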
@overload
def hsplit(input: Tensor, sections: _int) -> Tuple[Tensor, ...]:
r"""
hsplit(input, indices_or_sections) -> List of Tensors
Splits :attr:`input`, a tensor with one or more dimensions, into multiple tensors
horizontally according to :attr:`indices_or_sections`. Each split is a view of
:attr:`input`.
If :attr:`input` is one dimensional this is equivalent to calling
torch.tensor_split(input, indices_or_sections, dim=0) (the split dimension is
zero), and if :attr:`input` has two or more dimensions it's equivalent to calling
torch.tensor_split(input, indices_or_sections, dim=1) (the split dimension is 1),
except that if :attr:`indices_or_sections` is an integer it must evenly divide
the split dimension or a runtime error will be thrown.
This function is based on NumPy's :func:`numpy.hsplit`.
Args:
input (Tensor): tensor to split.
indices_or_sections (int or list or tuple of ints): See argument in :func:`torch.tensor_split`.
Example::
>>> t = torch.arange(16.0).reshape(4,4)
>>> t
tensor([[ 0., 1., 2., 3.],
[ 4., 5., 6., 7.],
[ 8., 9., 10., 11.],
[12., 13., 14., 15.]])
>>> torch.hsplit(t, 2)
(tensor([[ 0., 1.],
[ 4., 5.],
[ 8., 9.],
[12., 13.]]),
tensor([[ 2., 3.],
[ 6., 7.],
[10., 11.],
[14., 15.]]))
>>> torch.hsplit(t, [3, 6])
(tensor([[ 0., 1., 2.],
[ 4., 5., 6.],
[ 8., 9., 10.],
[12., 13., 14.]]),
tensor([[ 3.],
[ 7.],
[11.],
[15.]]),
tensor([], size=(4, 0)))
"""
...
@overload
def hsplit(input: Tensor, indices: _size) -> Tuple[Tensor, ...]:
r"""
hsplit(input, indices_or_sections) -> List of Tensors
Splits :attr:`input`, a tensor with one or more dimensions, into multiple tensors
horizontally according to :attr:`indices_or_sections`. Each split is a view of
:attr:`input`.
If :attr:`input` is one dimensional this is equivalent to calling
torch.tensor_split(input, indices_or_sections, dim=0) (the split dimension is
zero), and if :attr:`input` has two or more dimensions it's equivalent to calling
torch.tensor_split(input, indices_or_sections, dim=1) (the split dimension is 1),
except that if :attr:`indices_or_sections` is an integer it must evenly divide
the split dimension or a runtime error will be thrown.
This function is based on NumPy's :func:`numpy.hsplit`.
Args:
input (Tensor): tensor to split.
indices_or_sections (int or list or tuple of ints): See argument in :func:`torch.tensor_split`.
Example::
>>> t = torch.arange(16.0).reshape(4,4)
>>> t
tensor([[ 0., 1., 2., 3.],
[ 4., 5., 6., 7.],
[ 8., 9., 10., 11.],
[12., 13., 14., 15.]])
>>> torch.hsplit(t, 2)
(tensor([[ 0., 1.],
[ 4., 5.],
[ 8., 9.],
[12., 13.]]),
tensor([[ 2., 3.],
[ 6., 7.],
[10., 11.],
[14., 15.]]))
>>> torch.hsplit(t, [3, 6])
(tensor([[ 0., 1., 2.],
[ 4., 5., 6.],
[ 8., 9., 10.],
[12., 13., 14.]]),
tensor([[ 3.],
[ 7.],
[11.],
[15.]]),
tensor([], size=(4, 0)))
"""
...
def hspmm(mat1: Tensor, mat2: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
r"""
hspmm(mat1, mat2, *, out=None) -> Tensor
Performs a matrix multiplication of a :ref:`sparse COO matrix
<sparse-coo-docs>` :attr:`mat1` and a strided matrix :attr:`mat2`. The
result is a (1 + 1)-dimensional :ref:`hybrid COO matrix
<sparse-hybrid-coo-docs>`.
Args:
mat1 (Tensor): the first sparse matrix to be matrix multiplied
mat2 (Tensor): the second strided matrix to be matrix multiplied
Keyword args:
out (Tensor, optional): the output tensor.
"""
...
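# A minimal consistency sketch (assuming a CPU sparse COO input): densifying
# the hybrid result of hspmm should match the ordinary matrix product of the
# densified operands.
# >>> i = torch.tensor([[0, 1], [1, 0]])
# >>> s = torch.sparse_coo_tensor(i, torch.tensor([2., 3.]), (2, 2))
# >>> d = torch.eye(2)
# >>> torch.allclose(torch.hspmm(s, d).to_dense(), torch.mm(s.to_dense(), d))
# True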
def hstack(tensors: Union[Tuple[Tensor, ...], List[Tensor]], *, out: Optional[Tensor] = None) -> Tensor:
r"""
hstack(tensors, *, out=None) -> Tensor
Stack tensors in sequence horizontally (column wise).
This is equivalent to concatenation along the first axis for 1-D tensors, and along the second axis for all other tensors.
Args:
tensors (sequence of Tensors): sequence of tensors to concatenate
Keyword args:
out (Tensor, optional): the output tensor.
Example::
>>> a = torch.tensor([1, 2, 3])
>>> b = torch.tensor([4, 5, 6])
>>> torch.hstack((a,b))
tensor([1, 2, 3, 4, 5, 6])
>>> a = torch.tensor([[1],[2],[3]])
>>> b = torch.tensor([[4],[5],[6]])
>>> torch.hstack((a,b))
tensor([[1, 4],
[2, 5],
[3, 6]])
"""
...
def hypot(input: Tensor, other: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
r"""
hypot(input, other, *, out=None) -> Tensor
Given the legs of a right triangle, return its hypotenuse.
.. math::
\text{out}_{i} = \sqrt{\text{input}_{i}^{2} + \text{other}_{i}^{2}}
The shapes of ``input`` and ``other`` must be
:ref:`broadcastable <broadcasting-semantics>`.
Args:
input (Tensor): the first input tensor
other (Tensor): the second input tensor
Keyword args:
out (Tensor, optional): the output tensor.
Example::
>>> torch.hypot(torch.tensor([4.0]), torch.tensor([3.0, 4.0, 5.0]))
tensor([5.0000, 5.6569, 6.4031])
"""
...
def i0(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
r"""
i0(input, *, out=None) -> Tensor
Alias for :func:`torch.special.i0`.
"""
...
def i0_(input: Tensor) -> Tensor: ...
def igamma(input: Tensor, other: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
r"""
igamma(input, other, *, out=None) -> Tensor
Alias for :func:`torch.special.gammainc`.
"""
...
def igammac(input: Tensor, other: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
r"""
igammac(input, other, *, out=None) -> Tensor
Alias for :func:`torch.special.gammaincc`.
"""
...
def imag(input: Tensor) -> Tensor:
r"""
imag(input) -> Tensor
Returns a new tensor containing imaginary values of the :attr:`self` tensor.
The returned tensor and :attr:`self` share the same underlying storage.
.. warning::
:func:`imag` is only supported for tensors with complex dtypes.
Args:
input (Tensor): the input tensor.
Example::
>>> x=torch.randn(4, dtype=torch.cfloat)
>>> x
tensor([(0.3100+0.3553j), (-0.5445-0.7896j), (-1.6492-0.0633j), (-0.0638-0.8119j)])
>>> x.imag
tensor([ 0.3553, -0.7896, -0.0633, -0.8119])
"""
...
@overload
def index_add(input: Tensor, dim: _int, index: Tensor, source: Tensor, *, alpha: Union[Number, _complex] = 1, out: Optional[Tensor] = None) -> Tensor:
r"""
index_add(input, dim, index, source, *, alpha=1, out=None) -> Tensor
See :meth:`~Tensor.index_add_` for function description.
"""
...
@overload
def index_add(input: Tensor, dim: Union[str, ellipsis, None], index: Tensor, source: Tensor, *, alpha: Union[Number, _complex] = 1) -> Tensor:
r"""
index_add(input, dim, index, source, *, alpha=1, out=None) -> Tensor
See :meth:`~Tensor.index_add_` for function description.
"""
...
@overload
def index_copy(input: Tensor, dim: _int, index: Tensor, source: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
r"""
index_copy(input, dim, index, source, *, out=None) -> Tensor
See :meth:`~Tensor.index_copy_` for function description.
"""
...
@overload
def index_copy(input: Tensor, dim: Union[str, ellipsis, None], index: Tensor, source: Tensor) -> Tensor:
r"""
index_copy(input, dim, index, source, *, out=None) -> Tensor
See :meth:`~Tensor.index_copy_` for function description.
"""
...
@overload
def index_fill(input: Tensor, dim: _int, index: Tensor, value: Tensor) -> Tensor: ...
@overload
def index_fill(input: Tensor, dim: Union[str, ellipsis, None], index: Tensor, value: Tensor) -> Tensor: ...
@overload
def index_fill(input: Tensor, dim: _int, index: Tensor, value: Union[Number, _complex]) -> Tensor: ...
@overload
def index_fill(input: Tensor, dim: Union[str, ellipsis, None], index: Tensor, value: Union[Number, _complex]) -> Tensor: ...
def index_put(input: Tensor, indices: Optional[Union[Tuple[Tensor, ...], List[Tensor]]], values: Tensor, accumulate: _bool = False) -> Tensor: ...
def index_put_(input: Tensor, indices: Optional[Union[Tuple[Tensor, ...], List[Tensor]]], values: Tensor, accumulate: _bool = False) -> Tensor: ...
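# index_put is the functional form of `x[indices] = values`; with
# accumulate=True the values are added to the existing entries instead:
# >>> x = torch.zeros(5)
# >>> torch.index_put(x, (torch.tensor([0, 2]),), torch.tensor([1., 2.]))
# tensor([1., 0., 2., 0., 0.])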
def index_reduce(input: Tensor, dim: _int, index: Tensor, source: Tensor, reduce: str, *, include_self: _bool = True, out: Optional[Tensor] = None) -> Tensor:
r"""
index_reduce(input, dim, index, source, reduce, *, include_self=True, out=None) -> Tensor
See :meth:`~Tensor.index_reduce_` for function description.
"""
...
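# A sketch of index_reduce with reduce="prod": rows of `source` are folded
# into `input` at the positions in `index`; include_self=True (the default)
# means the existing values of `input` participate in the reduction.
# >>> x = torch.ones(3)
# >>> torch.index_reduce(x, 0, torch.tensor([0, 0, 1]), torch.tensor([2., 3., 4.]), "prod")
# tensor([6., 4., 1.])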
@overload
def index_select(input: Tensor, dim: _int, index: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
r"""
index_select(input, dim, index, *, out=None) -> Tensor
Returns a new tensor which indexes the :attr:`input` tensor along dimension
:attr:`dim` using the entries in :attr:`index` which is a `LongTensor`.
The returned tensor has the same number of dimensions as the original tensor
(:attr:`input`). The :attr:`dim`\ th dimension has the same size as the length
of :attr:`index`; other dimensions have the same size as in the original tensor.
.. note:: The returned tensor does **not** use the same storage as the original
tensor. If :attr:`out` has a different shape than expected, we
silently change it to the correct shape, reallocating the underlying
storage if necessary.
Args:
input (Tensor): the input tensor.
dim (int): the dimension in which we index
index (IntTensor or LongTensor): the 1-D tensor containing the indices to index
Keyword args:
out (Tensor, optional): the output tensor.
Example::
>>> x = torch.randn(3, 4)
>>> x
tensor([[ 0.1427, 0.0231, -0.5414, -1.0009],
[-0.4664, 0.2647, -0.1228, -1.1068],
[-1.1734, -0.6571, 0.7230, -0.6004]])
>>> indices = torch.tensor([0, 2])
>>> torch.index_select(x, 0, indices)
tensor([[ 0.1427, 0.0231, -0.5414, -1.0009],
[-1.1734, -0.6571, 0.7230, -0.6004]])
>>> torch.index_select(x, 1, indices)
tensor([[ 0.1427, -0.5414],
[-0.4664, -0.1228],
[-1.1734, 0.7230]])
"""
...
@overload
def index_select(input: Tensor, dim: Union[str, ellipsis, None], index: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
r"""
index_select(input, dim, index, *, out=None) -> Tensor
Returns a new tensor which indexes the :attr:`input` tensor along dimension
:attr:`dim` using the entries in :attr:`index` which is a `LongTensor`.
The returned tensor has the same number of dimensions as the original tensor
(:attr:`input`). The :attr:`dim`\ th dimension has the same size as the length
of :attr:`index`; other dimensions have the same size as in the original tensor.
.. note:: The returned tensor does **not** use the same storage as the original
tensor. If :attr:`out` has a different shape than expected, we
silently change it to the correct shape, reallocating the underlying
storage if necessary.
Args:
input (Tensor): the input tensor.
dim (int): the dimension in which we index
index (IntTensor or LongTensor): the 1-D tensor containing the indices to index
Keyword args:
out (Tensor, optional): the output tensor.
Example::
>>> x = torch.randn(3, 4)
>>> x
tensor([[ 0.1427, 0.0231, -0.5414, -1.0009],
[-0.4664, 0.2647, -0.1228, -1.1068],
[-1.1734, -0.6571, 0.7230, -0.6004]])
>>> indices = torch.tensor([0, 2])
>>> torch.index_select(x, 0, indices)
tensor([[ 0.1427, 0.0231, -0.5414, -1.0009],
[-1.1734, -0.6571, 0.7230, -0.6004]])
>>> torch.index_select(x, 1, indices)
tensor([[ 0.1427, -0.5414],
[-0.4664, -0.1228],
[-1.1734, 0.7230]])
"""
...
def indices_copy(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
r"""
Performs the same operation as :func:`torch.indices`, but all output tensors
are freshly created instead of aliasing the input.
"""
...
def init_num_threads() -> None: ...
def inner(input: Tensor, other: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
r"""
inner(input, other, *, out=None) -> Tensor
Computes the dot product for 1D tensors. For higher dimensions, sums the product
of elements from :attr:`input` and :attr:`other` along their last dimension.
.. note::
If either :attr:`input` or :attr:`other` is a scalar, the result is equivalent
to `torch.mul(input, other)`.
If both :attr:`input` and :attr:`other` are non-scalars, the size of their last
dimension must match and the result is equivalent to `torch.tensordot(input,
other, dims=([-1], [-1]))`
Args:
input (Tensor): First input tensor
other (Tensor): Second input tensor
Keyword args:
out (Tensor, optional): Optional output tensor to write result into. The output
shape is `input.shape[:-1] + other.shape[:-1]`.
Example::
# Dot product
>>> torch.inner(torch.tensor([1, 2, 3]), torch.tensor([0, 2, 1]))
tensor(7)
# Multidimensional input tensors
>>> a = torch.randn(2, 3)
>>> a
tensor([[0.8173, 1.0874, 1.1784],
[0.3279, 0.1234, 2.7894]])
>>> b = torch.randn(2, 4, 3)
>>> b
tensor([[[-0.4682, -0.7159, 0.1506],
[ 0.4034, -0.3657, 1.0387],
[ 0.9892, -0.6684, 0.1774],
[ 0.9482, 1.3261, 0.3917]],
[[ 0.4537, 0.7493, 1.1724],
[ 0.2291, 0.5749, -0.2267],
[-0.7920, 0.3607, -0.3701],
[ 1.3666, -0.5850, -1.7242]]])
>>> torch.inner(a, b)
tensor([[[-0.9837, 1.1560, 0.2907, 2.6785],
[ 2.5671, 0.5452, -0.6912, -1.5509]],
[[ 0.1782, 2.9843, 0.7366, 1.5672],
[ 3.5115, -0.4864, -1.2476, -4.4337]]])
# Scalar input
>>> torch.inner(a, torch.tensor(2))
tensor([[1.6347, 2.1748, 2.3567],
[0.6558, 0.2469, 5.5787]])
"""
...
def instance_norm(input: Tensor, weight: Optional[Tensor], bias: Optional[Tensor], running_mean: Optional[Tensor], running_var: Optional[Tensor], use_input_stats: _bool, momentum: _float, eps: _float, cudnn_enabled: _bool) -> Tensor: ...
def int_repr(input: Tensor) -> Tensor: ...
def inverse(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
r"""
inverse(input, *, out=None) -> Tensor
Alias for :func:`torch.linalg.inv`
"""
...
def is_complex(input: Tensor) -> _bool:
r"""
is_complex(input) -> (bool)
Returns True if the data type of :attr:`input` is a complex data type, i.e.,
one of ``torch.complex64`` and ``torch.complex128``.
Args:
input (Tensor): the input tensor.
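A minimal check on a complex and a real tensor:
Example::
>>> torch.is_complex(torch.tensor([1 + 2j]))
True
>>> torch.is_complex(torch.tensor([1.0]))
False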
"""
...
def is_conj(input: Tensor) -> _bool:
r"""
is_conj(input) -> (bool)
Returns True if the :attr:`input` is a conjugated tensor, i.e. its conjugate bit is set to `True`.
Args:
input (Tensor): the input tensor.
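A minimal check; :func:`torch.conj` returns a view with the conjugate bit set:
Example::
>>> x = torch.tensor([1 + 2j])
>>> torch.is_conj(x)
False
>>> torch.is_conj(torch.conj(x))
True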
"""
...
def is_distributed(input: Tensor) -> _bool: ...
def is_floating_point(input: Tensor) -> _bool:
r"""
is_floating_point(input) -> (bool)
Returns True if the data type of :attr:`input` is a floating point data type, i.e.,
one of ``torch.float64``, ``torch.float32``, ``torch.float16``, and ``torch.bfloat16``.
Args:
input (Tensor): the input tensor.
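A minimal check:
Example::
>>> torch.is_floating_point(torch.tensor([1.0]))
True
>>> torch.is_floating_point(torch.tensor([1]))
False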
"""
...
def is_grad_enabled() -> _bool:
r"""
is_grad_enabled() -> (bool)
Returns True if grad mode is currently enabled.
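A minimal sketch, assuming grad mode is enabled at the top level (the default):
Example::
>>> torch.is_grad_enabled()
True
>>> with torch.no_grad():
...     torch.is_grad_enabled()
False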
"""
...
def is_inference(input: Tensor) -> _bool:
r"""
is_inference(input) -> (bool)
Returns True if :attr:`input` is an inference tensor.
A non-view tensor is an inference tensor if and only if it was
allocated during inference mode. A view tensor is an inference
tensor if and only if the tensor it is a view of is an inference tensor.
For details on inference mode please see
`Inference Mode <https://pytorch.org/cppdocs/notes/inference_mode.html>`_.
Args:
input (Tensor): the input tensor.
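A minimal sketch:
Example::
>>> with torch.inference_mode():
...     x = torch.ones(2)
>>> torch.is_inference(x)
True
>>> torch.is_inference(torch.ones(2))
False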
"""
...
def is_inference_mode_enabled() -> _bool:
r"""
is_inference_mode_enabled() -> (bool)
Returns True if inference mode is currently enabled.
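A minimal sketch, assuming inference mode is off at the top level (the default):
Example::
>>> torch.is_inference_mode_enabled()
False
>>> with torch.inference_mode():
...     torch.is_inference_mode_enabled()
True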
"""
...
def is_neg(input: Tensor) -> _bool: ...
def is_nonzero(input: Tensor) -> _bool:
r"""
is_nonzero(input) -> (bool)
Returns True if :attr:`input` is a single-element tensor which is not equal to zero
after type conversions, i.e., not equal to ``torch.tensor([0.])``,
``torch.tensor([0])``, or ``torch.tensor([False])``.
Throws a ``RuntimeError`` if ``input.numel() != 1`` (even in the case
of sparse tensors).
Args:
input (Tensor): the input tensor.
Examples::
>>> torch.is_nonzero(torch.tensor([0.]))
False
>>> torch.is_nonzero(torch.tensor([1.5]))
True
>>> torch.is_nonzero(torch.tensor([False]))
False
>>> torch.is_nonzero(torch.tensor([3]))
True
>>> torch.is_nonzero(torch.tensor([1, 3, 5]))
Traceback (most recent call last):
...
RuntimeError: bool value of Tensor with more than one value is ambiguous
>>> torch.is_nonzero(torch.tensor([]))
Traceback (most recent call last):
...
RuntimeError: bool value of Tensor with no values is ambiguous
"""
...
def is_same_size(input: Tensor, other: Tensor) -> _bool: ...
def is_signed(input: Tensor) -> _bool: ...
def is_vulkan_available() -> _bool: ...
def isclose(input: Tensor, other: Tensor, rtol: _float = 1e-05, atol: _float = 1e-08, equal_nan: _bool = False) -> Tensor:
r"""
isclose(input, other, rtol=1e-05, atol=1e-08, equal_nan=False) -> Tensor
Returns a new tensor with boolean elements representing if each element of
:attr:`input` is "close" to the corresponding element of :attr:`other`.
Closeness is defined as:
.. math::
\lvert \text{input} - \text{other} \rvert \leq \texttt{atol} + \texttt{rtol} \times \lvert \text{other} \rvert
where :attr:`input` and :attr:`other` are finite. Where :attr:`input`
and/or :attr:`other` are nonfinite they are close if and only if
they are equal, with NaNs being considered equal to each other when
:attr:`equal_nan` is True.
Args:
input (Tensor): first tensor to compare
other (Tensor): second tensor to compare
atol (float, optional): absolute tolerance. Default: 1e-08
rtol (float, optional): relative tolerance. Default: 1e-05
equal_nan (bool, optional): if ``True``, then two ``NaN`` s will be considered equal. Default: ``False``
Examples::
>>> torch.isclose(torch.tensor((1., 2, 3)), torch.tensor((1 + 1e-10, 3, 4)))
tensor([ True, False, False])
>>> torch.isclose(torch.tensor((float('inf'), 4)), torch.tensor((float('inf'), 6)), rtol=.5)
tensor([True, True])
"""
...
def isfinite(input: Tensor) -> Tensor:
r"""
isfinite(input) -> Tensor
Returns a new tensor with boolean elements representing if each element is `finite` or not.
Real values are finite when they are not NaN, negative infinity, or infinity.
Complex values are finite when both their real and imaginary parts are finite.
Args:
input (Tensor): the input tensor.
Returns:
A boolean tensor that is True where :attr:`input` is finite and False elsewhere
Example::
>>> torch.isfinite(torch.tensor([1, float('inf'), 2, float('-inf'), float('nan')]))
tensor([True, False, True, False, False])
"""
...
@overload
def isin(elements: Tensor, test_elements: Tensor, *, assume_unique: _bool = False, invert: _bool = False, out: Optional[Tensor] = None) -> Tensor:
r"""
isin(elements, test_elements, *, assume_unique=False, invert=False) -> Tensor
Tests if each element of :attr:`elements` is in :attr:`test_elements`. Returns
a boolean tensor of the same shape as :attr:`elements` that is True for elements
in :attr:`test_elements` and False otherwise.
.. note::
One of :attr:`elements` or :attr:`test_elements` can be a scalar, but not both.
Args:
elements (Tensor or Scalar): Input elements
test_elements (Tensor or Scalar): Values against which to test for each input element
assume_unique (bool, optional): If True, assumes both :attr:`elements` and
:attr:`test_elements` contain unique elements, which can speed up the
calculation. Default: False
invert (bool, optional): If True, inverts the boolean return tensor, resulting in True
values for elements *not* in :attr:`test_elements`. Default: False
Returns:
A boolean tensor of the same shape as :attr:`elements` that is True for elements in
:attr:`test_elements` and False otherwise
Example::
>>> torch.isin(torch.tensor([[1, 2], [3, 4]]), torch.tensor([2, 3]))
tensor([[False, True],
[ True, False]])
"""
...
@overload
def isin(element: Union[Number, _complex], test_elements: Tensor, *, assume_unique: _bool = False, invert: _bool = False, out: Optional[Tensor] = None) -> Tensor:
r"""
isin(elements, test_elements, *, assume_unique=False, invert=False) -> Tensor
Tests if each element of :attr:`elements` is in :attr:`test_elements`. Returns
a boolean tensor of the same shape as :attr:`elements` that is True for elements
in :attr:`test_elements` and False otherwise.
.. note::
One of :attr:`elements` or :attr:`test_elements` can be a scalar, but not both.
Args:
elements (Tensor or Scalar): Input elements
test_elements (Tensor or Scalar): Values against which to test for each input element
assume_unique (bool, optional): If True, assumes both :attr:`elements` and
:attr:`test_elements` contain unique elements, which can speed up the
calculation. Default: False
invert (bool, optional): If True, inverts the boolean return tensor, resulting in True
values for elements *not* in :attr:`test_elements`. Default: False
Returns:
A boolean tensor of the same shape as :attr:`elements` that is True for elements in
:attr:`test_elements` and False otherwise
Example::
>>> torch.isin(torch.tensor([[1, 2], [3, 4]]), torch.tensor([2, 3]))
tensor([[False, True],
[ True, False]])
"""
...
@overload
def isin(elements: Tensor, test_element: Union[Number, _complex], *, assume_unique: _bool = False, invert: _bool = False, out: Optional[Tensor] = None) -> Tensor:
r"""
isin(elements, test_elements, *, assume_unique=False, invert=False) -> Tensor
Tests if each element of :attr:`elements` is in :attr:`test_elements`. Returns
a boolean tensor of the same shape as :attr:`elements` that is True for elements
in :attr:`test_elements` and False otherwise.
.. note::
One of :attr:`elements` or :attr:`test_elements` can be a scalar, but not both.
Args:
elements (Tensor or Scalar): Input elements
test_elements (Tensor or Scalar): Values against which to test for each input element
assume_unique (bool, optional): If True, assumes both :attr:`elements` and
:attr:`test_elements` contain unique elements, which can speed up the
calculation. Default: False
invert (bool, optional): If True, inverts the boolean return tensor, resulting in True
values for elements *not* in :attr:`test_elements`. Default: False
Returns:
A boolean tensor of the same shape as :attr:`elements` that is True for elements in
:attr:`test_elements` and False otherwise
Example::
>>> torch.isin(torch.tensor([[1, 2], [3, 4]]), torch.tensor([2, 3]))
tensor([[False, True],
[ True, False]])
"""
...
def isinf(input: Tensor) -> Tensor:
r"""
isinf(input) -> Tensor
Tests if each element of :attr:`input` is infinite
(positive or negative infinity) or not.
.. note::
Complex values are infinite when their real or imaginary part is
infinite.
Args:
input (Tensor): the input tensor.
Returns:
A boolean tensor that is True where :attr:`input` is infinite and False elsewhere
Example::
>>> torch.isinf(torch.tensor([1, float('inf'), 2, float('-inf'), float('nan')]))
tensor([False, True, False, True, False])
"""
...
def isnan(input: Tensor) -> Tensor:
r"""
isnan(input) -> Tensor
Returns a new tensor with boolean elements representing if each element of :attr:`input`
is NaN or not. Complex values are considered NaN when either their real
and/or imaginary part is NaN.
Arguments:
input (Tensor): the input tensor.
Returns:
A boolean tensor that is True where :attr:`input` is NaN and False elsewhere
Example::
>>> torch.isnan(torch.tensor([1, float('nan'), 2]))
tensor([False, True, False])
"""
...
def isneginf(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
r"""
isneginf(input, *, out=None) -> Tensor
Tests if each element of :attr:`input` is negative infinity or not.
Args:
input (Tensor): the input tensor.
Keyword args:
out (Tensor, optional): the output tensor.
Example::
>>> a = torch.tensor([-float('inf'), float('inf'), 1.2])
>>> torch.isneginf(a)
tensor([ True, False, False])
"""
...
def isposinf(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
r"""
isposinf(input, *, out=None) -> Tensor
Tests if each element of :attr:`input` is positive infinity or not.
Args:
input (Tensor): the input tensor.
Keyword args:
out (Tensor, optional): the output tensor.
Example::
>>> a = torch.tensor([-float('inf'), float('inf'), 1.2])
>>> torch.isposinf(a)
tensor([False, True, False])
"""
...
def isreal(input: Tensor) -> Tensor:
r"""
isreal(input) -> Tensor
Returns a new tensor with boolean elements representing if each element of :attr:`input` is real-valued or not.
All real-valued types are considered real. Complex values are considered real when their imaginary part is 0.
Arguments:
input (Tensor): the input tensor.
Returns:
A boolean tensor that is True where :attr:`input` is real and False elsewhere
Example::
>>> torch.isreal(torch.tensor([1, 1+1j, 2+0j]))
tensor([True, False, True])
"""
...
def istft(input: Tensor, n_fft: _int, hop_length: Optional[_int] = None, win_length: Optional[_int] = None, window: Optional[Tensor] = None, center: _bool = True, normalized: _bool = False, onesided: Optional[_bool] = None, length: Optional[_int] = None, return_complex: _bool = False) -> Tensor: ...
@overload
def kaiser_window(window_length: _int, *, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor:
r"""
kaiser_window(window_length, periodic=True, beta=12.0, *, dtype=None, layout=torch.strided, device=None, requires_grad=False) -> Tensor
Computes the Kaiser window with window length :attr:`window_length` and shape parameter :attr:`beta`.
Let I_0 be the zeroth order modified Bessel function of the first kind (see :func:`torch.i0`) and
``N = L - 1`` if :attr:`periodic` is False and ``L`` if :attr:`periodic` is True,
where ``L`` is the :attr:`window_length`. This function computes:
.. math::
out_i = I_0 \left( \beta \sqrt{1 - \left( {\frac{i - N/2}{N/2}} \right) ^2 } \right) / I_0( \beta )
Calling ``torch.kaiser_window(L, B, periodic=True)`` is equivalent to calling
``torch.kaiser_window(L + 1, B, periodic=False)[:-1]``.
The :attr:`periodic` argument is intended as a helpful shorthand
to produce a periodic window as input to functions like :func:`torch.stft`.
.. note::
If :attr:`window_length` is one, then the returned window is a single element tensor containing a one.
Args:
window_length (int): length of the window.
periodic (bool, optional): If True, returns a periodic window suitable for use in spectral analysis.
If False, returns a symmetric window suitable for use in filter design.
beta (float, optional): shape parameter for the window.
Keyword args:
dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.
Default: if ``None``, uses a global default (see :func:`torch.set_default_dtype`).
layout (:class:`torch.layout`, optional): the desired layout of returned window tensor. Only
``torch.strided`` (dense layout) is supported.
device (:class:`torch.device`, optional): the desired device of returned tensor.
Default: if ``None``, uses the current device for the default tensor type
(see :func:`torch.set_default_device`). :attr:`device` will be the CPU
for CPU tensor types and the current CUDA device for CUDA tensor types.
requires_grad (bool, optional): If autograd should record operations on the
returned tensor. Default: ``False``.
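A small sketch; the single-element case follows the note above, and the
second call just illustrates the returned shape:
Example::
>>> torch.kaiser_window(1)
tensor([1.])
>>> torch.kaiser_window(8).shape
torch.Size([8])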
"""
...
@overload
def kaiser_window(window_length: _int, periodic: _bool, *, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor:
r"""
kaiser_window(window_length, periodic=True, beta=12.0, *, dtype=None, layout=torch.strided, device=None, requires_grad=False) -> Tensor
Computes the Kaiser window with window length :attr:`window_length` and shape parameter :attr:`beta`.
Let I_0 be the zeroth order modified Bessel function of the first kind (see :func:`torch.i0`) and
``N = L - 1`` if :attr:`periodic` is False and ``L`` if :attr:`periodic` is True,
where ``L`` is the :attr:`window_length`. This function computes:
.. math::
out_i = I_0 \left( \beta \sqrt{1 - \left( {\frac{i - N/2}{N/2}} \right) ^2 } \right) / I_0( \beta )
Calling ``torch.kaiser_window(L, B, periodic=True)`` is equivalent to calling
``torch.kaiser_window(L + 1, B, periodic=False)[:-1]``.
The :attr:`periodic` argument is intended as a helpful shorthand
to produce a periodic window as input to functions like :func:`torch.stft`.
.. note::
If :attr:`window_length` is one, then the returned window is a single element tensor containing a one.
Args:
window_length (int): length of the window.
periodic (bool, optional): If True, returns a periodic window suitable for use in spectral analysis.
If False, returns a symmetric window suitable for use in filter design.
beta (float, optional): shape parameter for the window.
Keyword args:
dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.
Default: if ``None``, uses a global default (see :func:`torch.set_default_dtype`).
layout (:class:`torch.layout`, optional): the desired layout of returned window tensor. Only
``torch.strided`` (dense layout) is supported.
device (:class:`torch.device`, optional): the desired device of returned tensor.
Default: if ``None``, uses the current device for the default tensor type
(see :func:`torch.set_default_device`). :attr:`device` will be the CPU
for CPU tensor types and the current CUDA device for CUDA tensor types.
requires_grad (bool, optional): If autograd should record operations on the
returned tensor. Default: ``False``.
"""
...
@overload
def kaiser_window(window_length: _int, periodic: _bool, beta: _float, *, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor:
r"""
kaiser_window(window_length, periodic=True, beta=12.0, *, dtype=None, layout=torch.strided, device=None, requires_grad=False) -> Tensor
Computes the Kaiser window with window length :attr:`window_length` and shape parameter :attr:`beta`.
Let I_0 be the zeroth order modified Bessel function of the first kind (see :func:`torch.i0`) and
``N = L - 1`` if :attr:`periodic` is False and ``L`` if :attr:`periodic` is True,
where ``L`` is the :attr:`window_length`. This function computes:
.. math::
out_i = I_0 \left( \beta \sqrt{1 - \left( {\frac{i - N/2}{N/2}} \right) ^2 } \right) / I_0( \beta )
Calling ``torch.kaiser_window(L, B, periodic=True)`` is equivalent to calling
``torch.kaiser_window(L + 1, B, periodic=False)[:-1]``.
The :attr:`periodic` argument is intended as a helpful shorthand
to produce a periodic window as input to functions like :func:`torch.stft`.
.. note::
If :attr:`window_length` is one, then the returned window is a single element tensor containing a one.
Args:
window_length (int): length of the window.
periodic (bool, optional): If True, returns a periodic window suitable for use in spectral analysis.
If False, returns a symmetric window suitable for use in filter design.
beta (float, optional): shape parameter for the window.
Keyword args:
dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.
Default: if ``None``, uses a global default (see :func:`torch.set_default_dtype`).
layout (:class:`torch.layout`, optional): the desired layout of returned window tensor. Only
``torch.strided`` (dense layout) is supported.
device (:class:`torch.device`, optional): the desired device of returned tensor.
Default: if ``None``, uses the current device for the default tensor type
(see :func:`torch.set_default_device`). :attr:`device` will be the CPU
for CPU tensor types and the current CUDA device for CUDA tensor types.
requires_grad (bool, optional): If autograd should record operations on the
returned tensor. Default: ``False``.
"""
...
def kl_div(input: Tensor, target: Tensor, reduction: _int = 1, *, log_target: _bool = False) -> Tensor: ...
def kron(input: Tensor, other: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
r"""
kron(input, other, *, out=None) -> Tensor
Computes the Kronecker product, denoted by :math:`\otimes`, of :attr:`input` and :attr:`other`.
If :attr:`input` is a :math:`(a_0 \times a_1 \times \dots \times a_n)` tensor and :attr:`other` is a
:math:`(b_0 \times b_1 \times \dots \times b_n)` tensor, the result will be a
:math:`(a_0*b_0 \times a_1*b_1 \times \dots \times a_n*b_n)` tensor with the following entries:
.. math::
(\text{input} \otimes \text{other})_{k_0, k_1, \dots, k_n} =
\text{input}_{i_0, i_1, \dots, i_n} * \text{other}_{j_0, j_1, \dots, j_n},
where :math:`k_t = i_t * b_t + j_t` for :math:`0 \leq t \leq n`.
If one tensor has fewer dimensions than the other it is unsqueezed until it has the same number of dimensions.
Supports real-valued and complex-valued inputs.
.. note::
This function generalizes the typical definition of the Kronecker product for two matrices to two tensors,
as described above. When :attr:`input` is a :math:`(m \times n)` matrix and :attr:`other` is a
:math:`(p \times q)` matrix, the result will be a :math:`(p*m \times q*n)` block matrix:
.. math::
\mathbf{A} \otimes \mathbf{B}=\begin{bmatrix}
a_{11} \mathbf{B} & \cdots & a_{1 n} \mathbf{B} \\
\vdots & \ddots & \vdots \\
a_{m 1} \mathbf{B} & \cdots & a_{m n} \mathbf{B} \end{bmatrix}
where :attr:`input` is :math:`\mathbf{A}` and :attr:`other` is :math:`\mathbf{B}`.
Arguments:
input (Tensor)
other (Tensor)
Keyword args:
out (Tensor, optional): The output tensor. Ignored if ``None``. Default: ``None``
Examples::
>>> mat1 = torch.eye(2)
>>> mat2 = torch.ones(2, 2)
>>> torch.kron(mat1, mat2)
tensor([[1., 1., 0., 0.],
[1., 1., 0., 0.],
[0., 0., 1., 1.],
[0., 0., 1., 1.]])
>>> mat1 = torch.eye(2)
>>> mat2 = torch.arange(1, 5).reshape(2, 2)
>>> torch.kron(mat1, mat2)
tensor([[1., 2., 0., 0.],
[3., 4., 0., 0.],
[0., 0., 1., 2.],
[0., 0., 3., 4.]])
"""
...
@overload
def kthvalue(input: Tensor, k: _int, dim: _int = -1, keepdim: _bool = False, *, out: Union[Tensor, Tuple[Tensor, ...], List[Tensor], None] = None) -> torch.return_types.kthvalue:
r"""
kthvalue(input, k, dim=None, keepdim=False, *, out=None) -> (Tensor, LongTensor)
Returns a namedtuple ``(values, indices)`` where ``values`` is the :attr:`k` th
smallest element of each row of the :attr:`input` tensor in the given dimension
:attr:`dim`, and ``indices`` is the index location of each element found.
If :attr:`dim` is not given, the last dimension of the `input` is chosen.
If :attr:`keepdim` is ``True``, both the :attr:`values` and :attr:`indices` tensors
are the same size as :attr:`input`, except in the dimension :attr:`dim` where
they are of size 1. Otherwise, :attr:`dim` is squeezed
(see :func:`torch.squeeze`), resulting in both the :attr:`values` and
:attr:`indices` tensors having 1 fewer dimension than the :attr:`input` tensor.
.. note::
When :attr:`input` is a CUDA tensor and there are multiple valid
:attr:`k` th values, this function may nondeterministically return
:attr:`indices` for any of them.
Args:
input (Tensor): the input tensor.
k (int): k for the k-th smallest element
dim (int, optional): the dimension to find the kth value along
keepdim (bool): whether the output tensor has :attr:`dim` retained or not.
Keyword args:
out (tuple, optional): the output tuple of (Tensor, LongTensor)
can be optionally given to be used as output buffers
Example::
>>> x = torch.arange(1., 6.)
>>> x
tensor([ 1., 2., 3., 4., 5.])
>>> torch.kthvalue(x, 4)
torch.return_types.kthvalue(values=tensor(4.), indices=tensor(3))
>>> x = torch.arange(1., 7.).resize_(2, 3)
>>> x
tensor([[ 1., 2., 3.],
[ 4., 5., 6.]])
>>> torch.kthvalue(x, 2, 0, True)
torch.return_types.kthvalue(values=tensor([[4., 5., 6.]]), indices=tensor([[1, 1, 1]]))
"""
...
@overload
def kthvalue(input: Tensor, k: _int, dim: Union[str, ellipsis, None], keepdim: _bool = False, *, out: Union[Tensor, Tuple[Tensor, ...], List[Tensor], None] = None) -> torch.return_types.kthvalue:
r"""
kthvalue(input, k, dim=None, keepdim=False, *, out=None) -> (Tensor, LongTensor)
Returns a namedtuple ``(values, indices)`` where ``values`` is the :attr:`k` th
smallest element of each row of the :attr:`input` tensor in the given dimension
:attr:`dim`, and ``indices`` is the index location of each element found.
If :attr:`dim` is not given, the last dimension of the `input` is chosen.
If :attr:`keepdim` is ``True``, both the :attr:`values` and :attr:`indices` tensors
are the same size as :attr:`input`, except in the dimension :attr:`dim` where
they are of size 1. Otherwise, :attr:`dim` is squeezed
(see :func:`torch.squeeze`), resulting in both the :attr:`values` and
:attr:`indices` tensors having 1 fewer dimension than the :attr:`input` tensor.
.. note::
When :attr:`input` is a CUDA tensor and there are multiple valid
:attr:`k` th values, this function may nondeterministically return
:attr:`indices` for any of them.
Args:
input (Tensor): the input tensor.
k (int): k for the k-th smallest element
dim (int, optional): the dimension to find the kth value along
keepdim (bool): whether the output tensor has :attr:`dim` retained or not.
Keyword args:
out (tuple, optional): the output tuple of (Tensor, LongTensor)
can be optionally given to be used as output buffers
Example::
>>> x = torch.arange(1., 6.)
>>> x
tensor([ 1., 2., 3., 4., 5.])
>>> torch.kthvalue(x, 4)
torch.return_types.kthvalue(values=tensor(4.), indices=tensor(3))
>>> x = torch.arange(1., 7.).resize_(2, 3)
>>> x
tensor([[ 1., 2., 3.],
[ 4., 5., 6.]])
>>> torch.kthvalue(x, 2, 0, True)
torch.return_types.kthvalue(values=tensor([[4., 5., 6.]]), indices=tensor([[1, 1, 1]]))
"""
...
def layer_norm(input: Tensor, normalized_shape: Sequence[Union[_int, SymInt]], weight: Optional[Tensor] = None, bias: Optional[Tensor] = None, eps: _float = 1e-05, cudnn_enable: _bool = True) -> Tensor: ...
def lcm(input: Tensor, other: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
r"""
lcm(input, other, *, out=None) -> Tensor
Computes the element-wise least common multiple (LCM) of :attr:`input` and :attr:`other`.
Both :attr:`input` and :attr:`other` must have integer types.
.. note::
This defines :math:`lcm(0, 0) = 0` and :math:`lcm(0, a) = 0`.
Args:
input (Tensor): the input tensor.
other (Tensor): the second input tensor
Keyword arguments:
out (Tensor, optional): the output tensor.
Example::
>>> a = torch.tensor([5, 10, 15])
>>> b = torch.tensor([3, 4, 5])
>>> torch.lcm(a, b)
tensor([15, 20, 15])
>>> c = torch.tensor([3])
>>> torch.lcm(a, c)
tensor([15, 30, 15])
"""
...
def lcm_(input: Tensor, other: Tensor) -> Tensor: ...
def ldexp(input: Tensor, other: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
r"""
ldexp(input, other, *, out=None) -> Tensor
Multiplies :attr:`input` by 2 ** :attr:`other`.
.. math::
\text{out}_i = \text{input}_i * 2^{\text{other}_i}
Typically this function is used to construct floating point numbers by multiplying
mantissas in :attr:`input` with integral powers of two created from the exponents
in :attr:`other`.
Args:
input (Tensor): the input tensor.
other (Tensor): a tensor of exponents, typically integers.
Keyword args:
out (Tensor, optional): the output tensor.
Example::
>>> torch.ldexp(torch.tensor([1.]), torch.tensor([1]))
tensor([2.])
>>> torch.ldexp(torch.tensor([1.0]), torch.tensor([1, 2, 3, 4]))
tensor([ 2., 4., 8., 16.])
"""
...
def ldexp_(input: Tensor, other: Tensor) -> Tensor: ...
@overload
def le(input: Tensor, other: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
r"""
le(input, other, *, out=None) -> Tensor
Computes :math:`\text{input} \leq \text{other}` element-wise.
The second argument can be a number or a tensor whose shape is
:ref:`broadcastable <broadcasting-semantics>` with the first argument.
Args:
input (Tensor): the tensor to compare
other (Tensor or Scalar): the tensor or value to compare
Keyword args:
out (Tensor, optional): the output tensor.
Returns:
A boolean tensor that is True where :attr:`input` is less than or equal to
:attr:`other` and False elsewhere
Example::
>>> torch.le(torch.tensor([[1, 2], [3, 4]]), torch.tensor([[1, 1], [4, 4]]))
tensor([[True, False], [True, True]])
"""
...
@overload
def le(input: Tensor, other: Union[Number, _complex], *, out: Optional[Tensor] = None) -> Tensor:
r"""
le(input, other, *, out=None) -> Tensor
Computes :math:`\text{input} \leq \text{other}` element-wise.
The second argument can be a number or a tensor whose shape is
:ref:`broadcastable <broadcasting-semantics>` with the first argument.
Args:
input (Tensor): the tensor to compare
other (Tensor or Scalar): the tensor or value to compare
Keyword args:
out (Tensor, optional): the output tensor.
Returns:
A boolean tensor that is True where :attr:`input` is less than or equal to
:attr:`other` and False elsewhere
Example::
>>> torch.le(torch.tensor([[1, 2], [3, 4]]), torch.tensor([[1, 1], [4, 4]]))
tensor([[True, False], [True, True]])
"""
...
@overload
def lerp(input: Tensor, end: Tensor, weight: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
r"""
lerp(input, end, weight, *, out=None)
Does a linear interpolation of two tensors :attr:`start` (given by :attr:`input`) and :attr:`end` based
on a scalar or tensor :attr:`weight` and returns the resulting :attr:`out` tensor.
.. math::
\text{out}_i = \text{start}_i + \text{weight}_i \times (\text{end}_i - \text{start}_i)
The shapes of :attr:`start` and :attr:`end` must be
:ref:`broadcastable <broadcasting-semantics>`. If :attr:`weight` is a tensor, then
the shapes of :attr:`weight`, :attr:`start`, and :attr:`end` must be :ref:`broadcastable <broadcasting-semantics>`.
Args:
input (Tensor): the tensor with the starting points
end (Tensor): the tensor with the ending points
weight (float or tensor): the weight for the interpolation formula
Keyword args:
out (Tensor, optional): the output tensor.
Example::
>>> start = torch.arange(1., 5.)
>>> end = torch.empty(4).fill_(10)
>>> start
tensor([ 1., 2., 3., 4.])
>>> end
tensor([ 10., 10., 10., 10.])
>>> torch.lerp(start, end, 0.5)
tensor([ 5.5000, 6.0000, 6.5000, 7.0000])
>>> torch.lerp(start, end, torch.full_like(start, 0.5))
tensor([ 5.5000, 6.0000, 6.5000, 7.0000])
"""
...
@overload
def lerp(input: Tensor, end: Tensor, weight: Union[Number, _complex], *, out: Optional[Tensor] = None) -> Tensor:
r"""
lerp(input, end, weight, *, out=None)
Does a linear interpolation of two tensors :attr:`start` (given by :attr:`input`) and :attr:`end` based
on a scalar or tensor :attr:`weight` and returns the resulting :attr:`out` tensor.
.. math::
\text{out}_i = \text{start}_i + \text{weight}_i \times (\text{end}_i - \text{start}_i)
The shapes of :attr:`start` and :attr:`end` must be
:ref:`broadcastable <broadcasting-semantics>`. If :attr:`weight` is a tensor, then
the shapes of :attr:`weight`, :attr:`start`, and :attr:`end` must be :ref:`broadcastable <broadcasting-semantics>`.
Args:
input (Tensor): the tensor with the starting points
end (Tensor): the tensor with the ending points
weight (float or tensor): the weight for the interpolation formula
Keyword args:
out (Tensor, optional): the output tensor.
Example::
>>> start = torch.arange(1., 5.)
>>> end = torch.empty(4).fill_(10)
>>> start
tensor([ 1., 2., 3., 4.])
>>> end
tensor([ 10., 10., 10., 10.])
>>> torch.lerp(start, end, 0.5)
tensor([ 5.5000, 6.0000, 6.5000, 7.0000])
>>> torch.lerp(start, end, torch.full_like(start, 0.5))
tensor([ 5.5000, 6.0000, 6.5000, 7.0000])
"""
...
@overload
def less(input: Tensor, other: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
r"""
less(input, other, *, out=None) -> Tensor
Alias for :func:`torch.lt`.
"""
...
@overload
def less(input: Tensor, other: Union[Number, _complex], *, out: Optional[Tensor] = None) -> Tensor:
r"""
less(input, other, *, out=None) -> Tensor
Alias for :func:`torch.lt`.
"""
...
@overload
def less_equal(input: Tensor, other: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
r"""
less_equal(input, other, *, out=None) -> Tensor
Alias for :func:`torch.le`.
"""
...
@overload
def less_equal(input: Tensor, other: Union[Number, _complex], *, out: Optional[Tensor] = None) -> Tensor:
r"""
less_equal(input, other, *, out=None) -> Tensor
Alias for :func:`torch.le`.
"""
...
def lgamma(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
r"""
lgamma(input, *, out=None) -> Tensor
Computes the natural logarithm of the absolute value of the gamma function on :attr:`input`.
.. math::
\text{out}_{i} = \ln |\Gamma(\text{input}_{i})|
Args:
input (Tensor): the input tensor.
Keyword args:
out (Tensor, optional): the output tensor.
Example::
>>> a = torch.arange(0.5, 2, 0.5)
>>> torch.lgamma(a)
tensor([ 0.5724, 0.0000, -0.1208])
"""
...
@overload
def linspace(start: Number, end: Number, steps: Optional[_int] = None, *, out: Optional[Tensor] = None, dtype: Optional[_dtype] = None, device: Optional[DeviceLikeType] = None, requires_grad: _bool = False, pin_memory: _bool = False) -> Tensor:
r"""
linspace(start, end, steps, *, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False) -> Tensor
Creates a one-dimensional tensor of size :attr:`steps` whose values are evenly
spaced from :attr:`start` to :attr:`end`, inclusive. That is, the values are:
.. math::
(\text{start},
\text{start} + \frac{\text{end} - \text{start}}{\text{steps} - 1},
\ldots,
\text{start} + (\text{steps} - 2) * \frac{\text{end} - \text{start}}{\text{steps} - 1},
\text{end})
From PyTorch 1.11, linspace requires the steps argument. Use steps=100 to restore the previous behavior.
Args:
start (float or Tensor): the starting value for the set of points. If `Tensor`, it must be 0-dimensional
end (float or Tensor): the ending value for the set of points. If `Tensor`, it must be 0-dimensional
steps (int): size of the constructed tensor
Keyword arguments:
out (Tensor, optional): the output tensor.
dtype (torch.dtype, optional): the data type to perform the computation in.
Default: if None, uses the global default dtype (see torch.get_default_dtype())
when both :attr:`start` and :attr:`end` are real,
and corresponding complex dtype when either is complex.
layout (:class:`torch.layout`, optional): the desired layout of returned Tensor.
Default: ``torch.strided``.
device (:class:`torch.device`, optional): the desired device of returned tensor.
Default: if ``None``, uses the current device for the default tensor type
(see :func:`torch.set_default_device`). :attr:`device` will be the CPU
for CPU tensor types and the current CUDA device for CUDA tensor types.
requires_grad (bool, optional): If autograd should record operations on the
returned tensor. Default: ``False``.
Example::
>>> torch.linspace(3, 10, steps=5)
tensor([ 3.0000, 4.7500, 6.5000, 8.2500, 10.0000])
>>> torch.linspace(-10, 10, steps=5)
tensor([-10., -5., 0., 5., 10.])
>>> torch.linspace(start=-10, end=10, steps=5)
tensor([-10., -5., 0., 5., 10.])
>>> torch.linspace(start=-10, end=10, steps=1)
tensor([-10.])
"""
...
@overload
def linspace(start: Tensor, end: Tensor, steps: _int, *, out: Optional[Tensor] = None, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor:
r"""
linspace(start, end, steps, *, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False) -> Tensor
Creates a one-dimensional tensor of size :attr:`steps` whose values are evenly
spaced from :attr:`start` to :attr:`end`, inclusive. That is, the values are:
.. math::
(\text{start},
\text{start} + \frac{\text{end} - \text{start}}{\text{steps} - 1},
\ldots,
\text{start} + (\text{steps} - 2) * \frac{\text{end} - \text{start}}{\text{steps} - 1},
\text{end})
From PyTorch 1.11, linspace requires the steps argument. Use steps=100 to restore the previous behavior.
Args:
start (float or Tensor): the starting value for the set of points. If `Tensor`, it must be 0-dimensional
end (float or Tensor): the ending value for the set of points. If `Tensor`, it must be 0-dimensional
steps (int): size of the constructed tensor
Keyword arguments:
out (Tensor, optional): the output tensor.
dtype (torch.dtype, optional): the data type to perform the computation in.
Default: if None, uses the global default dtype (see torch.get_default_dtype())
when both :attr:`start` and :attr:`end` are real,
and corresponding complex dtype when either is complex.
layout (:class:`torch.layout`, optional): the desired layout of returned Tensor.
Default: ``torch.strided``.
device (:class:`torch.device`, optional): the desired device of returned tensor.
Default: if ``None``, uses the current device for the default tensor type
(see :func:`torch.set_default_device`). :attr:`device` will be the CPU
for CPU tensor types and the current CUDA device for CUDA tensor types.
requires_grad (bool, optional): If autograd should record operations on the
returned tensor. Default: ``False``.
Example::
>>> torch.linspace(3, 10, steps=5)
tensor([ 3.0000, 4.7500, 6.5000, 8.2500, 10.0000])
>>> torch.linspace(-10, 10, steps=5)
tensor([-10., -5., 0., 5., 10.])
>>> torch.linspace(start=-10, end=10, steps=5)
tensor([-10., -5., 0., 5., 10.])
>>> torch.linspace(start=-10, end=10, steps=1)
tensor([-10.])
"""
...
@overload
def linspace(start: Union[Number, _complex], end: Tensor, steps: _int, *, out: Optional[Tensor] = None, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor:
r"""
linspace(start, end, steps, *, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False) -> Tensor
Creates a one-dimensional tensor of size :attr:`steps` whose values are evenly
spaced from :attr:`start` to :attr:`end`, inclusive. That is, the values are:
.. math::
(\text{start},
\text{start} + \frac{\text{end} - \text{start}}{\text{steps} - 1},
\ldots,
\text{start} + (\text{steps} - 2) * \frac{\text{end} - \text{start}}{\text{steps} - 1},
\text{end})
From PyTorch 1.11, linspace requires the steps argument. Use steps=100 to restore the previous behavior.
Args:
start (float or Tensor): the starting value for the set of points. If `Tensor`, it must be 0-dimensional
end (float or Tensor): the ending value for the set of points. If `Tensor`, it must be 0-dimensional
steps (int): size of the constructed tensor
Keyword arguments:
out (Tensor, optional): the output tensor.
dtype (torch.dtype, optional): the data type to perform the computation in.
Default: if None, uses the global default dtype (see torch.get_default_dtype())
when both :attr:`start` and :attr:`end` are real,
and corresponding complex dtype when either is complex.
layout (:class:`torch.layout`, optional): the desired layout of returned Tensor.
Default: ``torch.strided``.
device (:class:`torch.device`, optional): the desired device of returned tensor.
Default: if ``None``, uses the current device for the default tensor type
(see :func:`torch.set_default_device`). :attr:`device` will be the CPU
for CPU tensor types and the current CUDA device for CUDA tensor types.
requires_grad (bool, optional): If autograd should record operations on the
returned tensor. Default: ``False``.
Example::
>>> torch.linspace(3, 10, steps=5)
tensor([ 3.0000, 4.7500, 6.5000, 8.2500, 10.0000])
>>> torch.linspace(-10, 10, steps=5)
tensor([-10., -5., 0., 5., 10.])
>>> torch.linspace(start=-10, end=10, steps=5)
tensor([-10., -5., 0., 5., 10.])
>>> torch.linspace(start=-10, end=10, steps=1)
tensor([-10.])
"""
...
@overload
def linspace(start: Tensor, end: Union[Number, _complex], steps: _int, *, out: Optional[Tensor] = None, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor:
r"""
linspace(start, end, steps, *, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False) -> Tensor
Creates a one-dimensional tensor of size :attr:`steps` whose values are evenly
spaced from :attr:`start` to :attr:`end`, inclusive. That is, the values are:
.. math::
(\text{start},
\text{start} + \frac{\text{end} - \text{start}}{\text{steps} - 1},
\ldots,
\text{start} + (\text{steps} - 2) * \frac{\text{end} - \text{start}}{\text{steps} - 1},
\text{end})
From PyTorch 1.11, linspace requires the steps argument. Use steps=100 to restore the previous behavior.
Args:
start (float or Tensor): the starting value for the set of points. If `Tensor`, it must be 0-dimensional
end (float or Tensor): the ending value for the set of points. If `Tensor`, it must be 0-dimensional
steps (int): size of the constructed tensor
Keyword arguments:
out (Tensor, optional): the output tensor.
dtype (torch.dtype, optional): the data type to perform the computation in.
Default: if None, uses the global default dtype (see torch.get_default_dtype())
when both :attr:`start` and :attr:`end` are real,
and corresponding complex dtype when either is complex.
layout (:class:`torch.layout`, optional): the desired layout of returned Tensor.
Default: ``torch.strided``.
device (:class:`torch.device`, optional): the desired device of returned tensor.
Default: if ``None``, uses the current device for the default tensor type
(see :func:`torch.set_default_device`). :attr:`device` will be the CPU
for CPU tensor types and the current CUDA device for CUDA tensor types.
requires_grad (bool, optional): If autograd should record operations on the
returned tensor. Default: ``False``.
Example::
>>> torch.linspace(3, 10, steps=5)
tensor([ 3.0000, 4.7500, 6.5000, 8.2500, 10.0000])
>>> torch.linspace(-10, 10, steps=5)
tensor([-10., -5., 0., 5., 10.])
>>> torch.linspace(start=-10, end=10, steps=5)
tensor([-10., -5., 0., 5., 10.])
>>> torch.linspace(start=-10, end=10, steps=1)
tensor([-10.])
"""
...
@overload
def linspace(start: Union[Number, _complex], end: Union[Number, _complex], steps: _int, *, out: Optional[Tensor] = None, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor:
r"""
linspace(start, end, steps, *, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False) -> Tensor
Creates a one-dimensional tensor of size :attr:`steps` whose values are evenly
spaced from :attr:`start` to :attr:`end`, inclusive. That is, the values are:
.. math::
(\text{start},
\text{start} + \frac{\text{end} - \text{start}}{\text{steps} - 1},
\ldots,
\text{start} + (\text{steps} - 2) * \frac{\text{end} - \text{start}}{\text{steps} - 1},
\text{end})
From PyTorch 1.11, linspace requires the steps argument. Use steps=100 to restore the previous behavior.
Args:
start (float or Tensor): the starting value for the set of points. If `Tensor`, it must be 0-dimensional
end (float or Tensor): the ending value for the set of points. If `Tensor`, it must be 0-dimensional
steps (int): size of the constructed tensor
Keyword arguments:
out (Tensor, optional): the output tensor.
dtype (torch.dtype, optional): the data type to perform the computation in.
Default: if None, uses the global default dtype (see torch.get_default_dtype())
when both :attr:`start` and :attr:`end` are real,
and corresponding complex dtype when either is complex.
layout (:class:`torch.layout`, optional): the desired layout of returned Tensor.
Default: ``torch.strided``.
device (:class:`torch.device`, optional): the desired device of returned tensor.
Default: if ``None``, uses the current device for the default tensor type
(see :func:`torch.set_default_device`). :attr:`device` will be the CPU
for CPU tensor types and the current CUDA device for CUDA tensor types.
requires_grad (bool, optional): If autograd should record operations on the
returned tensor. Default: ``False``.
Example::
>>> torch.linspace(3, 10, steps=5)
tensor([ 3.0000, 4.7500, 6.5000, 8.2500, 10.0000])
>>> torch.linspace(-10, 10, steps=5)
tensor([-10., -5., 0., 5., 10.])
>>> torch.linspace(start=-10, end=10, steps=5)
tensor([-10., -5., 0., 5., 10.])
>>> torch.linspace(start=-10, end=10, steps=1)
tensor([-10.])
"""
...
def log(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
r"""
log(input, *, out=None) -> Tensor
Returns a new tensor with the natural logarithm of the elements
of :attr:`input`.
.. math::
y_{i} = \log_{e} (x_{i})
Args:
input (Tensor): the input tensor.
Keyword args:
out (Tensor, optional): the output tensor.
Example::
>>> a = torch.rand(5) * 5
>>> a
tensor([4.7767, 4.3234, 1.2156, 0.2411, 4.5739])
>>> torch.log(a)
tensor([ 1.5637, 1.4640, 0.1952, -1.4226, 1.5204])
"""
...
def log10(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
r"""
log10(input, *, out=None) -> Tensor
Returns a new tensor with the logarithm to the base 10 of the elements
of :attr:`input`.
.. math::
y_{i} = \log_{10} (x_{i})
Args:
input (Tensor): the input tensor.
Keyword args:
out (Tensor, optional): the output tensor.
Example::
>>> a = torch.rand(5)
>>> a
tensor([ 0.5224, 0.9354, 0.7257, 0.1301, 0.2251])
>>> torch.log10(a)
tensor([-0.2820, -0.0290, -0.1392, -0.8857, -0.6476])
"""
...
def log10_(input: Tensor) -> Tensor: ...
def log1p(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
r"""
log1p(input, *, out=None) -> Tensor
Returns a new tensor with the natural logarithm of (1 + :attr:`input`).
.. math::
y_i = \log_{e} (x_i + 1)
.. note:: This function is more accurate than :func:`torch.log` for small
values of :attr:`input`
Args:
input (Tensor): the input tensor.
Keyword args:
out (Tensor, optional): the output tensor.
Example::
>>> a = torch.randn(5)
>>> a
tensor([-1.0090, -0.9923, 1.0249, -0.5372, 0.2492])
>>> torch.log1p(a)
tensor([ nan, -4.8653, 0.7055, -0.7705, 0.2225])
"""
...
def log1p_(input: Tensor) -> Tensor: ...
def log2(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
r"""
log2(input, *, out=None) -> Tensor
Returns a new tensor with the logarithm to the base 2 of the elements
of :attr:`input`.
.. math::
y_{i} = \log_{2} (x_{i})
Args:
input (Tensor): the input tensor.
Keyword args:
out (Tensor, optional): the output tensor.
Example::
>>> a = torch.rand(5)
>>> a
tensor([ 0.8419, 0.8003, 0.9971, 0.5287, 0.0490])
>>> torch.log2(a)
tensor([-0.2483, -0.3213, -0.0042, -0.9196, -4.3504])
"""
...
def log2_(input: Tensor) -> Tensor: ...
def log_(input: Tensor) -> Tensor: ...
@overload
def log_softmax(input: Tensor, dim: _int, dtype: Optional[_dtype] = None, *, out: Optional[Tensor] = None) -> Tensor: ...
@overload
def log_softmax(input: Tensor, dim: Union[str, ellipsis, None], *, dtype: Optional[_dtype] = None) -> Tensor: ...
def logaddexp(input: Tensor, other: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
r"""
logaddexp(input, other, *, out=None) -> Tensor
Logarithm of the sum of exponentiations of the inputs.
Calculates pointwise :math:`\log\left(e^x + e^y\right)`. This function is useful
in statistics, where the calculated probabilities of events may be so small that
they fall below the range of normal floating point numbers. In such cases the
logarithm of the calculated probability is stored. This function allows adding
probabilities stored in such a fashion.
Not to be confused with :func:`torch.logsumexp`, which performs a
reduction over a single tensor.
Args:
input (Tensor): the input tensor.
other (Tensor): the second input tensor
Keyword arguments:
out (Tensor, optional): the output tensor.
Example::
>>> torch.logaddexp(torch.tensor([-1.0]), torch.tensor([-1.0, -2, -3]))
tensor([-0.3069, -0.6867, -0.8731])
>>> torch.logaddexp(torch.tensor([-100.0, -200, -300]), torch.tensor([-1.0, -2, -3]))
tensor([-1., -2., -3.])
>>> torch.logaddexp(torch.tensor([1.0, 2000, 30000]), torch.tensor([-1.0, -2, -3]))
tensor([1.1269e+00, 2.0000e+03, 3.0000e+04])
"""
...
def logaddexp2(input: Tensor, other: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
r"""
logaddexp2(input, other, *, out=None) -> Tensor
Logarithm of the sum of exponentiations of the inputs in base-2.
Calculates pointwise :math:`\log_2\left(2^x + 2^y\right)`. See
:func:`torch.logaddexp` for more details.
Args:
input (Tensor): the input tensor.
other (Tensor): the second input tensor
Keyword arguments:
out (Tensor, optional): the output tensor.
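A minimal sketch using the identity :math:`\log_2(2^x + 2^x) = x + 1`:
Example::
>>> a = torch.tensor([1.0, 2.0])
>>> torch.allclose(torch.logaddexp2(a, a), a + 1)
True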
"""
...
@overload
def logcumsumexp(input: Tensor, dim: _int, *, out: Optional[Tensor] = None) -> Tensor:
r"""
logcumsumexp(input, dim, *, out=None) -> Tensor
Returns the logarithm of the cumulative summation of the exponentiation of
elements of :attr:`input` in the dimension :attr:`dim`.
For summation index :math:`j` given by `dim` and other indices :math:`i`, the result is
.. math::
\text{logcumsumexp}(x)_{ij} = \log \sum\limits_{j=0}^{i} \exp(x_{ij})
Args:
input (Tensor): the input tensor.
dim (int): the dimension to do the operation over
Keyword args:
out (Tensor, optional): the output tensor.
Example::
>>> a = torch.randn(10)
>>> torch.logcumsumexp(a, dim=0)
tensor([-0.42296738, -0.04462666, 0.86278635, 0.94622083, 1.05277811,
1.39202815, 1.83525007, 1.84492621, 2.06084887, 2.06844475])
"""
...
@overload
def logcumsumexp(input: Tensor, dim: Union[str, ellipsis, None], *, out: Optional[Tensor] = None) -> Tensor:
r"""
logcumsumexp(input, dim, *, out=None) -> Tensor
Returns the logarithm of the cumulative summation of the exponentiation of
elements of :attr:`input` in the dimension :attr:`dim`.
For summation index :math:`j` given by `dim` and other indices :math:`i`, the result is
.. math::
\text{logcumsumexp}(x)_{ij} = \log \sum\limits_{j=0}^{i} \exp(x_{ij})
Args:
input (Tensor): the input tensor.
dim (int): the dimension to do the operation over
Keyword args:
out (Tensor, optional): the output tensor.
Example::
>>> a = torch.randn(10)
>>> torch.logcumsumexp(a, dim=0)
tensor([-0.42296738, -0.04462666, 0.86278635, 0.94622083, 1.05277811,
1.39202815, 1.83525007, 1.84492621, 2.06084887, 2.06844475])
"""
...
def logdet(input: Tensor) -> Tensor:
r"""
logdet(input) -> Tensor
Calculates the log determinant of a square matrix or batches of square matrices.
It returns ``-inf`` if the input has a determinant of zero, and ``NaN`` if it has
a negative determinant.
.. note::
Backward through :meth:`logdet` internally uses SVD results when :attr:`input`
is not invertible. In this case, double backward through :meth:`logdet` will
be unstable when :attr:`input` doesn't have distinct singular values. See
:func:`torch.linalg.svd` for details.
.. seealso::
:func:`torch.linalg.slogdet` computes the sign (resp. angle) and natural logarithm of the
absolute value of the determinant of real-valued (resp. complex) square matrices.
Arguments:
input (Tensor): the input tensor of size ``(*, n, n)`` where ``*`` is zero or more
batch dimensions.
Example::
>>> A = torch.randn(3, 3)
>>> torch.det(A)
tensor(0.2611)
>>> torch.logdet(A)
tensor(-1.3430)
>>> A = torch.randn(3, 2, 2)
>>> A
tensor([[[ 0.9254, -0.6213],
[-0.5787, 1.6843]],
[[ 0.3242, -0.9665],
[ 0.4539, -0.0887]],
[[ 1.1336, -0.4025],
[-0.7089, 0.9032]]])
>>> A.det()
tensor([1.1990, 0.4099, 0.7386])
>>> A.det().log()
tensor([ 0.1815, -0.8917, -0.3031])
"""
...
def logical_and(input: Tensor, other: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
r"""
logical_and(input, other, *, out=None) -> Tensor
Computes the element-wise logical AND of the given input tensors. Zeros are treated as ``False`` and nonzeros are
treated as ``True``.
Args:
input (Tensor): the input tensor.
other (Tensor): the tensor to compute AND with
Keyword args:
out (Tensor, optional): the output tensor.
Example::
>>> torch.logical_and(torch.tensor([True, False, True]), torch.tensor([True, False, False]))
tensor([ True, False, False])
>>> a = torch.tensor([0, 1, 10, 0], dtype=torch.int8)
>>> b = torch.tensor([4, 0, 1, 0], dtype=torch.int8)
>>> torch.logical_and(a, b)
tensor([False, False, True, False])
>>> torch.logical_and(a.double(), b.double())
tensor([False, False, True, False])
>>> torch.logical_and(a.double(), b)
tensor([False, False, True, False])
>>> torch.logical_and(a, b, out=torch.empty(4, dtype=torch.bool))
tensor([False, False, True, False])
"""
...
def logical_not(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
r"""
logical_not(input, *, out=None) -> Tensor
Computes the element-wise logical NOT of the given input tensor. If not specified, the output tensor will have the bool
dtype. If the input tensor is not a bool tensor, zeros are treated as ``False`` and non-zeros are treated as ``True``.
Args:
input (Tensor): the input tensor.
Keyword args:
out (Tensor, optional): the output tensor.
Example::
>>> torch.logical_not(torch.tensor([True, False]))
tensor([False, True])
>>> torch.logical_not(torch.tensor([0, 1, -10], dtype=torch.int8))
tensor([ True, False, False])
>>> torch.logical_not(torch.tensor([0., 1.5, -10.], dtype=torch.double))
tensor([ True, False, False])
>>> torch.logical_not(torch.tensor([0., 1., -10.], dtype=torch.double), out=torch.empty(3, dtype=torch.int16))
tensor([1, 0, 0], dtype=torch.int16)
"""
...
def logical_or(input: Tensor, other: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
r"""
logical_or(input, other, *, out=None) -> Tensor
Computes the element-wise logical OR of the given input tensors. Zeros are treated as ``False`` and nonzeros are
treated as ``True``.
Args:
input (Tensor): the input tensor.
other (Tensor): the tensor to compute OR with
Keyword args:
out (Tensor, optional): the output tensor.
Example::
>>> torch.logical_or(torch.tensor([True, False, True]), torch.tensor([True, False, False]))
tensor([ True, False, True])
>>> a = torch.tensor([0, 1, 10, 0], dtype=torch.int8)
>>> b = torch.tensor([4, 0, 1, 0], dtype=torch.int8)
>>> torch.logical_or(a, b)
tensor([ True, True, True, False])
>>> torch.logical_or(a.double(), b.double())
tensor([ True, True, True, False])
>>> torch.logical_or(a.double(), b)
tensor([ True, True, True, False])
>>> torch.logical_or(a, b, out=torch.empty(4, dtype=torch.bool))
tensor([ True, True, True, False])
"""
...
def logical_xor(input: Tensor, other: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
r"""
logical_xor(input, other, *, out=None) -> Tensor
Computes the element-wise logical XOR of the given input tensors. Zeros are treated as ``False`` and nonzeros are
treated as ``True``.
Args:
input (Tensor): the input tensor.
other (Tensor): the tensor to compute XOR with
Keyword args:
out (Tensor, optional): the output tensor.
Example::
>>> torch.logical_xor(torch.tensor([True, False, True]), torch.tensor([True, False, False]))
tensor([False, False, True])
>>> a = torch.tensor([0, 1, 10, 0], dtype=torch.int8)
>>> b = torch.tensor([4, 0, 1, 0], dtype=torch.int8)
>>> torch.logical_xor(a, b)
tensor([ True, True, False, False])
>>> torch.logical_xor(a.double(), b.double())
tensor([ True, True, False, False])
>>> torch.logical_xor(a.double(), b)
tensor([ True, True, False, False])
>>> torch.logical_xor(a, b, out=torch.empty(4, dtype=torch.bool))
tensor([ True, True, False, False])
"""
...
def logit(input: Tensor, eps: Optional[_float] = None, *, out: Optional[Tensor] = None) -> Tensor:
r"""
logit(input, eps=None, *, out=None) -> Tensor
Alias for :func:`torch.special.logit`.
"""
...
def logit_(input: Tensor, eps: Optional[_float] = None) -> Tensor: ...
@overload
def logspace(start: Number, end: Number, steps: Optional[_int] = None, base: _float = 10.0, *, out: Optional[Tensor] = None, dtype: Optional[_dtype] = None, device: Optional[DeviceLikeType] = None, requires_grad: _bool = False, pin_memory: _bool = False) -> Tensor:
r"""
logspace(start, end, steps, base=10.0, *, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False) -> Tensor
Creates a one-dimensional tensor of size :attr:`steps` whose values are evenly
spaced from :math:`{\text{base}}^{\text{start}}` to
:math:`{\text{base}}^{\text{end}}`, inclusive, on a logarithmic scale
with base :attr:`base`. That is, the values are:
.. math::
(\text{base}^{\text{start}},
\text{base}^{(\text{start} + \frac{\text{end} - \text{start}}{ \text{steps} - 1})},
\ldots,
\text{base}^{(\text{start} + (\text{steps} - 2) * \frac{\text{end} - \text{start}}{ \text{steps} - 1})},
\text{base}^{\text{end}})
Since PyTorch 1.11, :func:`logspace` requires the :attr:`steps` argument. Use ``steps=100`` to restore the previous behavior.
Args:
start (float or Tensor): the starting value for the set of points. If `Tensor`, it must be 0-dimensional
end (float or Tensor): the ending value for the set of points. If `Tensor`, it must be 0-dimensional
steps (int): size of the constructed tensor
base (float, optional): base of the logarithm function. Default: ``10.0``.
Keyword arguments:
out (Tensor, optional): the output tensor.
dtype (torch.dtype, optional): the data type to perform the computation in.
Default: if None, uses the global default dtype (see torch.get_default_dtype())
when both :attr:`start` and :attr:`end` are real,
and corresponding complex dtype when either is complex.
layout (:class:`torch.layout`, optional): the desired layout of returned Tensor.
Default: ``torch.strided``.
device (:class:`torch.device`, optional): the desired device of returned tensor.
Default: if ``None``, uses the current device for the default tensor type
(see :func:`torch.set_default_device`). :attr:`device` will be the CPU
for CPU tensor types and the current CUDA device for CUDA tensor types.
requires_grad (bool, optional): If autograd should record operations on the
returned tensor. Default: ``False``.
Example::
>>> torch.logspace(start=-10, end=10, steps=5)
tensor([ 1.0000e-10, 1.0000e-05, 1.0000e+00, 1.0000e+05, 1.0000e+10])
>>> torch.logspace(start=0.1, end=1.0, steps=5)
tensor([ 1.2589, 2.1135, 3.5481, 5.9566, 10.0000])
>>> torch.logspace(start=0.1, end=1.0, steps=1)
tensor([1.2589])
>>> torch.logspace(start=2, end=2, steps=1, base=2)
tensor([4.])
"""
...
@overload
def logspace(start: Tensor, end: Tensor, steps: _int, base: _float = 10.0, *, out: Optional[Tensor] = None, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor:
r"""
logspace(start, end, steps, base=10.0, *, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False) -> Tensor
Creates a one-dimensional tensor of size :attr:`steps` whose values are evenly
spaced from :math:`{\text{base}}^{\text{start}}` to
:math:`{\text{base}}^{\text{end}}`, inclusive, on a logarithmic scale
with base :attr:`base`. That is, the values are:
.. math::
(\text{base}^{\text{start}},
\text{base}^{(\text{start} + \frac{\text{end} - \text{start}}{ \text{steps} - 1})},
\ldots,
\text{base}^{(\text{start} + (\text{steps} - 2) * \frac{\text{end} - \text{start}}{ \text{steps} - 1})},
\text{base}^{\text{end}})
Since PyTorch 1.11, :func:`logspace` requires the :attr:`steps` argument. Use ``steps=100`` to restore the previous behavior.
Args:
start (float or Tensor): the starting value for the set of points. If `Tensor`, it must be 0-dimensional
end (float or Tensor): the ending value for the set of points. If `Tensor`, it must be 0-dimensional
steps (int): size of the constructed tensor
base (float, optional): base of the logarithm function. Default: ``10.0``.
Keyword arguments:
out (Tensor, optional): the output tensor.
dtype (torch.dtype, optional): the data type to perform the computation in.
Default: if None, uses the global default dtype (see torch.get_default_dtype())
when both :attr:`start` and :attr:`end` are real,
and corresponding complex dtype when either is complex.
layout (:class:`torch.layout`, optional): the desired layout of returned Tensor.
Default: ``torch.strided``.
device (:class:`torch.device`, optional): the desired device of returned tensor.
Default: if ``None``, uses the current device for the default tensor type
(see :func:`torch.set_default_device`). :attr:`device` will be the CPU
for CPU tensor types and the current CUDA device for CUDA tensor types.
requires_grad (bool, optional): If autograd should record operations on the
returned tensor. Default: ``False``.
Example::
>>> torch.logspace(start=-10, end=10, steps=5)
tensor([ 1.0000e-10, 1.0000e-05, 1.0000e+00, 1.0000e+05, 1.0000e+10])
>>> torch.logspace(start=0.1, end=1.0, steps=5)
tensor([ 1.2589, 2.1135, 3.5481, 5.9566, 10.0000])
>>> torch.logspace(start=0.1, end=1.0, steps=1)
tensor([1.2589])
>>> torch.logspace(start=2, end=2, steps=1, base=2)
tensor([4.])
"""
...
@overload
def logspace(start: Union[Number, _complex], end: Tensor, steps: _int, base: _float = 10.0, *, out: Optional[Tensor] = None, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor:
r"""
logspace(start, end, steps, base=10.0, *, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False) -> Tensor
Creates a one-dimensional tensor of size :attr:`steps` whose values are evenly
spaced from :math:`{\text{base}}^{\text{start}}` to
:math:`{\text{base}}^{\text{end}}`, inclusive, on a logarithmic scale
with base :attr:`base`. That is, the values are:
.. math::
(\text{base}^{\text{start}},
\text{base}^{(\text{start} + \frac{\text{end} - \text{start}}{ \text{steps} - 1})},
\ldots,
\text{base}^{(\text{start} + (\text{steps} - 2) * \frac{\text{end} - \text{start}}{ \text{steps} - 1})},
\text{base}^{\text{end}})
Since PyTorch 1.11, :func:`logspace` requires the :attr:`steps` argument. Use ``steps=100`` to restore the previous behavior.
Args:
start (float or Tensor): the starting value for the set of points. If `Tensor`, it must be 0-dimensional
end (float or Tensor): the ending value for the set of points. If `Tensor`, it must be 0-dimensional
steps (int): size of the constructed tensor
base (float, optional): base of the logarithm function. Default: ``10.0``.
Keyword arguments:
out (Tensor, optional): the output tensor.
dtype (torch.dtype, optional): the data type to perform the computation in.
Default: if None, uses the global default dtype (see torch.get_default_dtype())
when both :attr:`start` and :attr:`end` are real,
and corresponding complex dtype when either is complex.
layout (:class:`torch.layout`, optional): the desired layout of returned Tensor.
Default: ``torch.strided``.
device (:class:`torch.device`, optional): the desired device of returned tensor.
Default: if ``None``, uses the current device for the default tensor type
(see :func:`torch.set_default_device`). :attr:`device` will be the CPU
for CPU tensor types and the current CUDA device for CUDA tensor types.
requires_grad (bool, optional): If autograd should record operations on the
returned tensor. Default: ``False``.
Example::
>>> torch.logspace(start=-10, end=10, steps=5)
tensor([ 1.0000e-10, 1.0000e-05, 1.0000e+00, 1.0000e+05, 1.0000e+10])
>>> torch.logspace(start=0.1, end=1.0, steps=5)
tensor([ 1.2589, 2.1135, 3.5481, 5.9566, 10.0000])
>>> torch.logspace(start=0.1, end=1.0, steps=1)
tensor([1.2589])
>>> torch.logspace(start=2, end=2, steps=1, base=2)
tensor([4.])
"""
...
@overload
def logspace(start: Tensor, end: Union[Number, _complex], steps: _int, base: _float = 10.0, *, out: Optional[Tensor] = None, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor:
r"""
logspace(start, end, steps, base=10.0, *, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False) -> Tensor
Creates a one-dimensional tensor of size :attr:`steps` whose values are evenly
spaced from :math:`{\text{base}}^{\text{start}}` to
:math:`{\text{base}}^{\text{end}}`, inclusive, on a logarithmic scale
with base :attr:`base`. That is, the values are:
.. math::
(\text{base}^{\text{start}},
\text{base}^{(\text{start} + \frac{\text{end} - \text{start}}{ \text{steps} - 1})},
\ldots,
\text{base}^{(\text{start} + (\text{steps} - 2) * \frac{\text{end} - \text{start}}{ \text{steps} - 1})},
\text{base}^{\text{end}})
Since PyTorch 1.11, :func:`logspace` requires the :attr:`steps` argument. Use ``steps=100`` to restore the previous behavior.
Args:
start (float or Tensor): the starting value for the set of points. If `Tensor`, it must be 0-dimensional
end (float or Tensor): the ending value for the set of points. If `Tensor`, it must be 0-dimensional
steps (int): size of the constructed tensor
base (float, optional): base of the logarithm function. Default: ``10.0``.
Keyword arguments:
out (Tensor, optional): the output tensor.
dtype (torch.dtype, optional): the data type to perform the computation in.
Default: if None, uses the global default dtype (see torch.get_default_dtype())
when both :attr:`start` and :attr:`end` are real,
and corresponding complex dtype when either is complex.
layout (:class:`torch.layout`, optional): the desired layout of returned Tensor.
Default: ``torch.strided``.
device (:class:`torch.device`, optional): the desired device of returned tensor.
Default: if ``None``, uses the current device for the default tensor type
(see :func:`torch.set_default_device`). :attr:`device` will be the CPU
for CPU tensor types and the current CUDA device for CUDA tensor types.
requires_grad (bool, optional): If autograd should record operations on the
returned tensor. Default: ``False``.
Example::
>>> torch.logspace(start=-10, end=10, steps=5)
tensor([ 1.0000e-10, 1.0000e-05, 1.0000e+00, 1.0000e+05, 1.0000e+10])
>>> torch.logspace(start=0.1, end=1.0, steps=5)
tensor([ 1.2589, 2.1135, 3.5481, 5.9566, 10.0000])
>>> torch.logspace(start=0.1, end=1.0, steps=1)
tensor([1.2589])
>>> torch.logspace(start=2, end=2, steps=1, base=2)
tensor([4.])
"""
...
@overload
def logspace(start: Union[Number, _complex], end: Union[Number, _complex], steps: _int, base: _float = 10.0, *, out: Optional[Tensor] = None, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor:
r"""
logspace(start, end, steps, base=10.0, *, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False) -> Tensor
Creates a one-dimensional tensor of size :attr:`steps` whose values are evenly
spaced from :math:`{\text{base}}^{\text{start}}` to
:math:`{\text{base}}^{\text{end}}`, inclusive, on a logarithmic scale
with base :attr:`base`. That is, the values are:
.. math::
(\text{base}^{\text{start}},
\text{base}^{(\text{start} + \frac{\text{end} - \text{start}}{ \text{steps} - 1})},
\ldots,
\text{base}^{(\text{start} + (\text{steps} - 2) * \frac{\text{end} - \text{start}}{ \text{steps} - 1})},
\text{base}^{\text{end}})
Since PyTorch 1.11, :func:`logspace` requires the :attr:`steps` argument. Use ``steps=100`` to restore the previous behavior.
Args:
start (float or Tensor): the starting value for the set of points. If `Tensor`, it must be 0-dimensional
end (float or Tensor): the ending value for the set of points. If `Tensor`, it must be 0-dimensional
steps (int): size of the constructed tensor
base (float, optional): base of the logarithm function. Default: ``10.0``.
Keyword arguments:
out (Tensor, optional): the output tensor.
dtype (torch.dtype, optional): the data type to perform the computation in.
Default: if None, uses the global default dtype (see torch.get_default_dtype())
when both :attr:`start` and :attr:`end` are real,
and corresponding complex dtype when either is complex.
layout (:class:`torch.layout`, optional): the desired layout of returned Tensor.
Default: ``torch.strided``.
device (:class:`torch.device`, optional): the desired device of returned tensor.
Default: if ``None``, uses the current device for the default tensor type
(see :func:`torch.set_default_device`). :attr:`device` will be the CPU
for CPU tensor types and the current CUDA device for CUDA tensor types.
requires_grad (bool, optional): If autograd should record operations on the
returned tensor. Default: ``False``.
Example::
>>> torch.logspace(start=-10, end=10, steps=5)
tensor([ 1.0000e-10, 1.0000e-05, 1.0000e+00, 1.0000e+05, 1.0000e+10])
>>> torch.logspace(start=0.1, end=1.0, steps=5)
tensor([ 1.2589, 2.1135, 3.5481, 5.9566, 10.0000])
>>> torch.logspace(start=0.1, end=1.0, steps=1)
tensor([1.2589])
>>> torch.logspace(start=2, end=2, steps=1, base=2)
tensor([4.])
"""
...
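# Illustrative check of the formula above: logspace(start, end, steps, base)
# matches base ** linspace(start, end, steps) elementwise.
#   >>> torch.allclose(torch.logspace(0.0, 3.0, steps=4, base=2.0),
#   ...                2.0 ** torch.linspace(0.0, 3.0, steps=4))
#   True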
@overload
def logsumexp(input: Tensor, dim: Union[_int, _size], keepdim: _bool = False, *, out: Optional[Tensor] = None) -> Tensor:
r"""
logsumexp(input, dim, keepdim=False, *, out=None) -> Tensor
Returns the log of summed exponentials of each row of the :attr:`input`
tensor in the given dimension :attr:`dim`. The computation is numerically
stabilized.
For summation index :math:`j` given by `dim` and other indices :math:`i`, the result is
.. math::
\text{logsumexp}(x)_{i} = \log \sum_j \exp(x_{ij})
If :attr:`keepdim` is ``True``, the output tensor is of the same size
as :attr:`input` except in the dimension(s) :attr:`dim` where it is of size 1.
Otherwise, :attr:`dim` is squeezed (see :func:`torch.squeeze`), resulting in the
output tensor having 1 (or ``len(dim)``) fewer dimension(s).
Args:
input (Tensor): the input tensor.
dim (int or tuple of ints, optional): the dimension or dimensions to reduce.
If ``None``, all dimensions are reduced.
keepdim (bool): whether the output tensor has :attr:`dim` retained or not.
Keyword args:
out (Tensor, optional): the output tensor.
Example::
>>> a = torch.randn(3, 3)
>>> torch.logsumexp(a, 1)
tensor([1.4907, 1.0593, 1.5696])
>>> torch.dist(torch.logsumexp(a, 1), torch.log(torch.sum(torch.exp(a), 1)))
tensor(1.6859e-07)
"""
...
@overload
def logsumexp(input: Tensor, dim: Sequence[Union[str, ellipsis, None]], keepdim: _bool = False, *, out: Optional[Tensor] = None) -> Tensor:
r"""
logsumexp(input, dim, keepdim=False, *, out=None) -> Tensor
Returns the log of summed exponentials of each row of the :attr:`input`
tensor in the given dimension :attr:`dim`. The computation is numerically
stabilized.
For summation index :math:`j` given by `dim` and other indices :math:`i`, the result is
.. math::
\text{logsumexp}(x)_{i} = \log \sum_j \exp(x_{ij})
If :attr:`keepdim` is ``True``, the output tensor is of the same size
as :attr:`input` except in the dimension(s) :attr:`dim` where it is of size 1.
Otherwise, :attr:`dim` is squeezed (see :func:`torch.squeeze`), resulting in the
output tensor having 1 (or ``len(dim)``) fewer dimension(s).
Args:
input (Tensor): the input tensor.
dim (int or tuple of ints, optional): the dimension or dimensions to reduce.
If ``None``, all dimensions are reduced.
keepdim (bool): whether the output tensor has :attr:`dim` retained or not.
Keyword args:
out (Tensor, optional): the output tensor.
Example::
>>> a = torch.randn(3, 3)
>>> torch.logsumexp(a, 1)
tensor([1.4907, 1.0593, 1.5696])
>>> torch.dist(torch.logsumexp(a, 1), torch.log(torch.sum(torch.exp(a), 1)))
tensor(1.6859e-07)
"""
...
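# The "numerically stabilized" note above is, in effect, the classic
# max-subtraction trick; a sketch of the equivalent computation by hand:
#   >>> x = torch.tensor([1000.0, 1000.0])
#   >>> torch.log(torch.exp(x).sum())             # naive form overflows
#   tensor(inf)
#   >>> m = x.max()
#   >>> m + torch.log(torch.exp(x - m).sum())     # stabilized form
#   tensor(1000.6931)
#   >>> torch.logsumexp(x, dim=0)
#   tensor(1000.6931)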
@overload
def lstm(data: Tensor, batch_sizes: Tensor, hx: Union[Tuple[Tensor, ...], List[Tensor]], params: Union[Tuple[Tensor, ...], List[Tensor]], has_biases: _bool, num_layers: _int, dropout: _float, train: _bool, bidirectional: _bool) -> Tuple[Tensor, Tensor, Tensor]: ...
@overload
def lstm(input: Tensor, hx: Union[Tuple[Tensor, ...], List[Tensor]], params: Union[Tuple[Tensor, ...], List[Tensor]], has_biases: _bool, num_layers: _int, dropout: _float, train: _bool, bidirectional: _bool, batch_first: _bool) -> Tuple[Tensor, Tensor, Tensor]: ...
def lstm_cell(input: Tensor, hx: Union[Tuple[Tensor, ...], List[Tensor]], w_ih: Tensor, w_hh: Tensor, b_ih: Optional[Tensor] = None, b_hh: Optional[Tensor] = None) -> Tuple[Tensor, Tensor]: ...
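# Usage sketch for the undocumented functional form of lstm_cell, assuming the
# weight layout used by torch.nn.LSTMCell (w_ih: (4*hidden, input_size),
# w_hh: (4*hidden, hidden)); it returns the next hidden and cell states.
#   >>> input_size, hidden_size, batch = 10, 20, 3
#   >>> x = torch.randn(batch, input_size)
#   >>> h0, c0 = torch.zeros(batch, hidden_size), torch.zeros(batch, hidden_size)
#   >>> w_ih = torch.randn(4 * hidden_size, input_size)
#   >>> w_hh = torch.randn(4 * hidden_size, hidden_size)
#   >>> h1, c1 = torch.lstm_cell(x, (h0, c0), w_ih, w_hh)
#   >>> h1.shape, c1.shape
#   (torch.Size([3, 20]), torch.Size([3, 20]))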
@overload
def lt(input: Tensor, other: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
r"""
lt(input, other, *, out=None) -> Tensor
Computes :math:`\text{input} < \text{other}` element-wise.
The second argument can be a number or a tensor whose shape is
:ref:`broadcastable <broadcasting-semantics>` with the first argument.
Args:
input (Tensor): the tensor to compare
other (Tensor or float): the tensor or value to compare
Keyword args:
out (Tensor, optional): the output tensor.
Returns:
A boolean tensor that is True where :attr:`input` is less than :attr:`other` and False elsewhere
Example::
>>> torch.lt(torch.tensor([[1, 2], [3, 4]]), torch.tensor([[1, 1], [4, 4]]))
tensor([[False, False], [True, False]])
"""
...
@overload
def lt(input: Tensor, other: Union[Number, _complex], *, out: Optional[Tensor] = None) -> Tensor:
r"""
lt(input, other, *, out=None) -> Tensor
Computes :math:`\text{input} < \text{other}` element-wise.
The second argument can be a number or a tensor whose shape is
:ref:`broadcastable <broadcasting-semantics>` with the first argument.
Args:
input (Tensor): the tensor to compare
other (Tensor or float): the tensor or value to compare
Keyword args:
out (Tensor, optional): the output tensor.
Returns:
A boolean tensor that is True where :attr:`input` is less than :attr:`other` and False elsewhere
Example::
>>> torch.lt(torch.tensor([[1, 2], [3, 4]]), torch.tensor([[1, 1], [4, 4]]))
tensor([[False, False], [True, False]])
"""
...
def lu_solve(input: Tensor, LU_data: Tensor, LU_pivots: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
r"""
lu_solve(b, LU_data, LU_pivots, *, out=None) -> Tensor
Returns the solution of the linear system :math:`Ax = b` using the partially pivoted
LU factorization of :math:`A` from :func:`~linalg.lu_factor`.
This function supports ``float``, ``double``, ``cfloat`` and ``cdouble`` dtypes for :attr:`input`.
.. warning::
:func:`torch.lu_solve` is deprecated in favor of :func:`torch.linalg.lu_solve`.
:func:`torch.lu_solve` will be removed in a future PyTorch release.
``X = torch.lu_solve(B, LU, pivots)`` should be replaced with
.. code:: python
X = linalg.lu_solve(LU, pivots, B)
Arguments:
b (Tensor): the RHS tensor of size :math:`(*, m, k)`, where :math:`*`
is zero or more batch dimensions.
LU_data (Tensor): the pivoted LU factorization of A from :meth:`~linalg.lu_factor` of size :math:`(*, m, m)`,
where :math:`*` is zero or more batch dimensions.
LU_pivots (IntTensor): the pivots of the LU factorization from :meth:`~linalg.lu_factor` of size :math:`(*, m)`,
where :math:`*` is zero or more batch dimensions.
The batch dimensions of :attr:`LU_pivots` must be equal to the batch dimensions of
:attr:`LU_data`.
Keyword args:
out (Tensor, optional): the output tensor.
Example::
>>> A = torch.randn(2, 3, 3)
>>> b = torch.randn(2, 3, 1)
>>> LU, pivots = torch.linalg.lu_factor(A)
>>> x = torch.lu_solve(b, LU, pivots)
>>> torch.dist(A @ x, b)
tensor(1.00000e-07 *
2.8312)
"""
...
def lu_unpack(LU_data: Tensor, LU_pivots: Tensor, unpack_data: _bool = True, unpack_pivots: _bool = True, *, out: Union[Tensor, Tuple[Tensor, ...], List[Tensor], None] = None) -> torch.return_types.lu_unpack:
r"""
lu_unpack(LU_data, LU_pivots, unpack_data=True, unpack_pivots=True, *, out=None) -> (Tensor, Tensor, Tensor)
Unpacks the LU decomposition returned by :func:`~linalg.lu_factor` into the `P, L, U` matrices.
.. seealso::
:func:`~linalg.lu` returns the matrices from the LU decomposition. Its gradient formula is more efficient
than that of doing :func:`~linalg.lu_factor` followed by :func:`~linalg.lu_unpack`.
Args:
LU_data (Tensor): the packed LU factorization data
LU_pivots (Tensor): the packed LU factorization pivots
unpack_data (bool): flag indicating if the data should be unpacked.
If ``False``, then the returned ``L`` and ``U`` are empty tensors.
Default: ``True``
unpack_pivots (bool): flag indicating if the pivots should be unpacked into a permutation matrix ``P``.
If ``False``, then the returned ``P`` is an empty tensor.
Default: ``True``
Keyword args:
out (tuple, optional): output tuple of three tensors. Ignored if `None`.
Returns:
A namedtuple ``(P, L, U)``
Examples::
>>> A = torch.randn(2, 3, 3)
>>> LU, pivots = torch.linalg.lu_factor(A)
>>> P, L, U = torch.lu_unpack(LU, pivots)
>>> # We can recover A from the factorization
>>> A_ = P @ L @ U
>>> torch.allclose(A, A_)
True
>>> # LU factorization of a rectangular matrix:
>>> A = torch.randn(2, 3, 2)
>>> LU, pivots = torch.linalg.lu_factor(A)
>>> P, L, U = torch.lu_unpack(LU, pivots)
>>> # P, L, U are the same as returned by linalg.lu
>>> P_, L_, U_ = torch.linalg.lu(A)
>>> torch.allclose(P, P_) and torch.allclose(L, L_) and torch.allclose(U, U_)
True
"""
...
def margin_ranking_loss(input1: Tensor, input2: Tensor, target: Tensor, margin: _float = 0.0, reduction: _int = 1) -> Tensor: ...
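# Sketch of the functional margin_ranking_loss above, which mirrors
# torch.nn.functional.margin_ranking_loss; `reduction` is the internal enum
# (0 = none, 1 = mean, 2 = sum), so the default 1 averages the loss.
#   >>> x1 = torch.tensor([1.0, 2.0])
#   >>> x2 = torch.tensor([2.0, 1.0])
#   >>> y = torch.tensor([1.0, 1.0])   # x1 should rank higher than x2
#   >>> torch.margin_ranking_loss(x1, x2, y)   # mean of max(0, -y*(x1-x2))
#   tensor(0.5000)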
@overload
def masked_fill(input: Tensor, mask: Tensor, value: Tensor) -> Tensor: ...
@overload
def masked_fill(input: Tensor, mask: Tensor, value: Union[Number, _complex]) -> Tensor: ...
def masked_scatter(input: Tensor, mask: Tensor, source: Tensor) -> Tensor: ...
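# A short sketch contrasting the two undocumented masked ops above:
# masked_fill writes one value at every True position, while masked_scatter
# consumes the elements of `source` in order, one per True position.
#   >>> t = torch.zeros(2, 3)
#   >>> mask = torch.tensor([[True, False, True], [False, True, False]])
#   >>> torch.masked_fill(t, mask, 7.0)
#   tensor([[7., 0., 7.],
#           [0., 7., 0.]])
#   >>> torch.masked_scatter(t, mask, torch.tensor([1.0, 2.0, 3.0]))
#   tensor([[1., 0., 2.],
#           [0., 3., 0.]])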
def masked_select(input: Tensor, mask: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
r"""
masked_select(input, mask, *, out=None) -> Tensor
Returns a new 1-D tensor which indexes the :attr:`input` tensor according to
the boolean mask :attr:`mask` which is a `BoolTensor`.
The shapes of the :attr:`mask` tensor and the :attr:`input` tensor don't need
to match, but they must be :ref:`broadcastable <broadcasting-semantics>`.
.. note:: The returned tensor does **not** use the same storage
as the original tensor
Args:
input (Tensor): the input tensor.
mask (BoolTensor): the tensor containing the binary mask to index with
Keyword args:
out (Tensor, optional): the output tensor.
Example::
>>> x = torch.randn(3, 4)
>>> x
tensor([[ 0.3552, -2.3825, -0.8297, 0.3477],
[-1.2035, 1.2252, 0.5002, 0.6248],
[ 0.1307, -2.0608, 0.1244, 2.0139]])
>>> mask = x.ge(0.5)
>>> mask
tensor([[False, False, False, False],
[False, True, True, True],
[False, False, False, True]])
>>> torch.masked_select(x, mask)
tensor([ 1.2252, 0.5002, 0.6248, 2.0139])
"""
...
def matmul(input: Tensor, other: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
r"""
matmul(input, other, *, out=None) -> Tensor
Matrix product of two tensors.
The behavior depends on the dimensionality of the tensors as follows:
- If both tensors are 1-dimensional, the dot product (scalar) is returned.
- If both arguments are 2-dimensional, the matrix-matrix product is returned.
- If the first argument is 1-dimensional and the second argument is 2-dimensional,
a 1 is prepended to its dimension for the purpose of the matrix multiply.
After the matrix multiply, the prepended dimension is removed.
- If the first argument is 2-dimensional and the second argument is 1-dimensional,
the matrix-vector product is returned.
- If both arguments are at least 1-dimensional and at least one argument is
N-dimensional (where N > 2), then a batched matrix multiply is returned. If the first
argument is 1-dimensional, a 1 is prepended to its dimension for the purpose of the
batched matrix multiply and removed after. If the second argument is 1-dimensional, a
1 is appended to its dimension for the purpose of the batched matrix multiply and removed after.
The non-matrix (i.e. batch) dimensions are :ref:`broadcasted <broadcasting-semantics>` (and thus
must be broadcastable). For example, if :attr:`input` is a
:math:`(j \times 1 \times n \times n)` tensor and :attr:`other` is a :math:`(k \times n \times n)`
tensor, :attr:`out` will be a :math:`(j \times k \times n \times n)` tensor.
Note that the broadcasting logic only looks at the batch dimensions when determining if the inputs
are broadcastable, and not the matrix dimensions. For example, if :attr:`input` is a
:math:`(j \times 1 \times n \times m)` tensor and :attr:`other` is a :math:`(k \times m \times p)`
tensor, these inputs are valid for broadcasting even though the final two dimensions (i.e. the
matrix dimensions) are different. :attr:`out` will be a :math:`(j \times k \times n \times p)` tensor.
This operation has support for arguments with :ref:`sparse layouts<sparse-docs>`. In particular, the
matrix-matrix product (both arguments 2-dimensional) supports sparse arguments with the same restrictions
as :func:`torch.mm`.
.. warning::
Sparse support is a beta feature and some layout(s)/dtype/device combinations may not be supported,
or may not have autograd support. If you notice missing functionality please
open a feature request.
This operator supports :ref:`TensorFloat32<tf32_on_ampere>`.
On certain ROCm devices, when using float16 inputs this module will use :ref:`different precision<fp16_on_mi200>` for backward.
.. note::
The 1-dimensional dot product version of this function does not support an :attr:`out` parameter.
Arguments:
input (Tensor): the first tensor to be multiplied
other (Tensor): the second tensor to be multiplied
Keyword args:
out (Tensor, optional): the output tensor.
Example::
>>> # vector x vector
>>> tensor1 = torch.randn(3)
>>> tensor2 = torch.randn(3)
>>> torch.matmul(tensor1, tensor2).size()
torch.Size([])
>>> # matrix x vector
>>> tensor1 = torch.randn(3, 4)
>>> tensor2 = torch.randn(4)
>>> torch.matmul(tensor1, tensor2).size()
torch.Size([3])
>>> # batched matrix x broadcasted vector
>>> tensor1 = torch.randn(10, 3, 4)
>>> tensor2 = torch.randn(4)
>>> torch.matmul(tensor1, tensor2).size()
torch.Size([10, 3])
>>> # batched matrix x batched matrix
>>> tensor1 = torch.randn(10, 3, 4)
>>> tensor2 = torch.randn(10, 4, 5)
>>> torch.matmul(tensor1, tensor2).size()
torch.Size([10, 3, 5])
>>> # batched matrix x broadcasted matrix
>>> tensor1 = torch.randn(10, 3, 4)
>>> tensor2 = torch.randn(4, 5)
>>> torch.matmul(tensor1, tensor2).size()
torch.Size([10, 3, 5])
"""
...
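# A sketch of the batch-broadcasting rule described above: only the batch
# dimensions broadcast, while the trailing matrix dimensions must be
# multiplication-compatible.
#   >>> a = torch.randn(5, 1, 3, 4)   # (j=5, 1, n=3, m=4)
#   >>> b = torch.randn(2, 4, 6)      # (k=2, m=4, p=6)
#   >>> torch.matmul(a, b).shape
#   torch.Size([5, 2, 3, 6])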
def matrix_exp(input: Tensor) -> Tensor:
r"""
matrix_exp(A) -> Tensor
Alias for :func:`torch.linalg.matrix_exp`.
"""
...
def matrix_power(input: Tensor, n: _int, *, out: Optional[Tensor] = None) -> Tensor:
r"""
matrix_power(input, n, *, out=None) -> Tensor
Alias for :func:`torch.linalg.matrix_power`
"""
...
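# Quick sketch of the matrix_power alias: n = 0 yields the identity and, for
# invertible inputs, negative n applies the inverse that many times.
#   >>> A = torch.tensor([[2.0, 0.0], [0.0, 4.0]])
#   >>> torch.matrix_power(A, 0)
#   tensor([[1., 0.],
#           [0., 1.]])
#   >>> torch.matrix_power(A, -1)
#   tensor([[0.5000, 0.0000],
#           [0.0000, 0.2500]])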
@overload
def max(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
r"""
max(input) -> Tensor
Returns the maximum value of all elements in the ``input`` tensor.
.. warning::
This function produces deterministic (sub)gradients unlike ``max(dim=0)``
Args:
input (Tensor): the input tensor.
Example::
>>> a = torch.randn(1, 3)
>>> a
tensor([[ 0.6763, 0.7445, -2.2369]])
>>> torch.max(a)
tensor(0.7445)
.. function:: max(input, dim, keepdim=False, *, out=None) -> (Tensor, LongTensor)
:noindex:
Returns a namedtuple ``(values, indices)`` where ``values`` is the maximum
value of each row of the :attr:`input` tensor in the given dimension
:attr:`dim`, and ``indices`` is the index location of each maximum value found
(argmax).
If ``keepdim`` is ``True``, the output tensors are of the same size
as ``input`` except in the dimension ``dim`` where they are of size 1.
Otherwise, ``dim`` is squeezed (see :func:`torch.squeeze`), resulting
in the output tensors having 1 fewer dimension than ``input``.
.. note:: If there are multiple maximal values in a reduced row then
the indices of the first maximal value are returned.
Args:
input (Tensor): the input tensor.
dim (int): the dimension to reduce.
keepdim (bool): whether the output tensor has :attr:`dim` retained or not. Default: ``False``.
Keyword args:
out (tuple, optional): the result tuple of two output tensors (max, max_indices)
Example::
>>> a = torch.randn(4, 4)
>>> a
tensor([[-1.2360, -0.2942, -0.1222, 0.8475],
[ 1.1949, -1.1127, -2.2379, -0.6702],
[ 1.5717, -0.9207, 0.1297, -1.8768],
[-0.6172, 1.0036, -0.6060, -0.2432]])
>>> torch.max(a, 1)
torch.return_types.max(values=tensor([0.8475, 1.1949, 1.5717, 1.0036]), indices=tensor([3, 0, 0, 1]))
.. function:: max(input, other, *, out=None) -> Tensor
:noindex:
See :func:`torch.maximum`.
"""
...
@overload
def max(input: Tensor, other: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
r"""
max(input) -> Tensor
Returns the maximum value of all elements in the ``input`` tensor.
.. warning::
This function produces deterministic (sub)gradients unlike ``max(dim=0)``
Args:
input (Tensor): the input tensor.
Example::
>>> a = torch.randn(1, 3)
>>> a
tensor([[ 0.6763, 0.7445, -2.2369]])
>>> torch.max(a)
tensor(0.7445)
.. function:: max(input, dim, keepdim=False, *, out=None) -> (Tensor, LongTensor)
:noindex:
Returns a namedtuple ``(values, indices)`` where ``values`` is the maximum
value of each row of the :attr:`input` tensor in the given dimension
:attr:`dim`, and ``indices`` is the index location of each maximum value found
(argmax).
If ``keepdim`` is ``True``, the output tensors are of the same size
as ``input`` except in the dimension ``dim`` where they are of size 1.
Otherwise, ``dim`` is squeezed (see :func:`torch.squeeze`), resulting
in the output tensors having 1 fewer dimension than ``input``.
.. note:: If there are multiple maximal values in a reduced row then
the indices of the first maximal value are returned.
Args:
input (Tensor): the input tensor.
dim (int): the dimension to reduce.
keepdim (bool): whether the output tensor has :attr:`dim` retained or not. Default: ``False``.
Keyword args:
out (tuple, optional): the result tuple of two output tensors (max, max_indices)
Example::
>>> a = torch.randn(4, 4)
>>> a
tensor([[-1.2360, -0.2942, -0.1222, 0.8475],
[ 1.1949, -1.1127, -2.2379, -0.6702],
[ 1.5717, -0.9207, 0.1297, -1.8768],
[-0.6172, 1.0036, -0.6060, -0.2432]])
>>> torch.max(a, 1)
torch.return_types.max(values=tensor([0.8475, 1.1949, 1.5717, 1.0036]), indices=tensor([3, 0, 0, 1]))
.. function:: max(input, other, *, out=None) -> Tensor
:noindex:
See :func:`torch.maximum`.
"""
...
@overload
def max(input: Tensor, dim: _int, keepdim: _bool = False, *, out: Union[Tensor, Tuple[Tensor, ...], List[Tensor], None] = None) -> torch.return_types.max:
r"""
max(input) -> Tensor
Returns the maximum value of all elements in the ``input`` tensor.
.. warning::
This function produces deterministic (sub)gradients unlike ``max(dim=0)``
Args:
input (Tensor): the input tensor.
Example::
>>> a = torch.randn(1, 3)
>>> a
tensor([[ 0.6763, 0.7445, -2.2369]])
>>> torch.max(a)
tensor(0.7445)
.. function:: max(input, dim, keepdim=False, *, out=None) -> (Tensor, LongTensor)
:noindex:
Returns a namedtuple ``(values, indices)`` where ``values`` is the maximum
value of each row of the :attr:`input` tensor in the given dimension
:attr:`dim`, and ``indices`` is the index location of each maximum value found
(argmax).
If ``keepdim`` is ``True``, the output tensors are of the same size
as ``input`` except in the dimension ``dim`` where they are of size 1.
Otherwise, ``dim`` is squeezed (see :func:`torch.squeeze`), resulting
in the output tensors having 1 fewer dimension than ``input``.
.. note:: If there are multiple maximal values in a reduced row then
the indices of the first maximal value are returned.
Args:
input (Tensor): the input tensor.
dim (int): the dimension to reduce.
keepdim (bool): whether the output tensor has :attr:`dim` retained or not. Default: ``False``.
Keyword args:
out (tuple, optional): the result tuple of two output tensors (max, max_indices)
Example::
>>> a = torch.randn(4, 4)
>>> a
tensor([[-1.2360, -0.2942, -0.1222, 0.8475],
[ 1.1949, -1.1127, -2.2379, -0.6702],
[ 1.5717, -0.9207, 0.1297, -1.8768],
[-0.6172, 1.0036, -0.6060, -0.2432]])
>>> torch.max(a, 1)
torch.return_types.max(values=tensor([0.8475, 1.1949, 1.5717, 1.0036]), indices=tensor([3, 0, 0, 1]))
.. function:: max(input, other, *, out=None) -> Tensor
:noindex:
See :func:`torch.maximum`.
"""
...
@overload
def max(input: Tensor, dim: Union[str, ellipsis, None], keepdim: _bool = False, *, out: Union[Tensor, Tuple[Tensor, ...], List[Tensor], None] = None) -> torch.return_types.max:
r"""
max(input) -> Tensor
Returns the maximum value of all elements in the ``input`` tensor.
.. warning::
This function produces deterministic (sub)gradients unlike ``max(dim=0)``
Args:
input (Tensor): the input tensor.
Example::
>>> a = torch.randn(1, 3)
>>> a
tensor([[ 0.6763, 0.7445, -2.2369]])
>>> torch.max(a)
tensor(0.7445)
.. function:: max(input, dim, keepdim=False, *, out=None) -> (Tensor, LongTensor)
:noindex:
Returns a namedtuple ``(values, indices)`` where ``values`` is the maximum
value of each row of the :attr:`input` tensor in the given dimension
:attr:`dim`, and ``indices`` is the index location of each maximum value found
(argmax).
If ``keepdim`` is ``True``, the output tensors are of the same size
as ``input`` except in the dimension ``dim`` where they are of size 1.
Otherwise, ``dim`` is squeezed (see :func:`torch.squeeze`), resulting
in the output tensors having 1 fewer dimension than ``input``.
.. note:: If there are multiple maximal values in a reduced row then
the indices of the first maximal value are returned.
Args:
input (Tensor): the input tensor.
dim (int): the dimension to reduce.
keepdim (bool): whether the output tensor has :attr:`dim` retained or not. Default: ``False``.
Keyword args:
out (tuple, optional): the result tuple of two output tensors (max, max_indices)
Example::
>>> a = torch.randn(4, 4)
>>> a
tensor([[-1.2360, -0.2942, -0.1222, 0.8475],
[ 1.1949, -1.1127, -2.2379, -0.6702],
[ 1.5717, -0.9207, 0.1297, -1.8768],
[-0.6172, 1.0036, -0.6060, -0.2432]])
>>> torch.max(a, 1)
torch.return_types.max(values=tensor([0.8475, 1.1949, 1.5717, 1.0036]), indices=tensor([3, 0, 0, 1]))
.. function:: max(input, other, *, out=None) -> Tensor
:noindex:
See :func:`torch.maximum`.
"""
...
def max_pool1d(input: Tensor, kernel_size: Union[_int, _size], stride: Union[_int, _size] = (), padding: Union[_int, _size] = 0, dilation: Union[_int, _size] = 1, ceil_mode: _bool = False) -> Tensor: ...
def max_pool1d_with_indices(input: Tensor, kernel_size: Union[_int, _size], stride: Union[_int, _size] = (), padding: Union[_int, _size] = 0, dilation: Union[_int, _size] = 1, ceil_mode: _bool = False) -> Tuple[Tensor, Tensor]: ...
def max_pool2d(input: Tensor, kernel_size: Union[_int, _size], stride: Union[_int, _size] = (), padding: Union[_int, _size] = 0, dilation: Union[_int, _size] = 1, ceil_mode: _bool = False) -> Tensor: ...
def max_pool3d(input: Tensor, kernel_size: Union[_int, _size], stride: Union[_int, _size] = (), padding: Union[_int, _size] = 0, dilation: Union[_int, _size] = 1, ceil_mode: _bool = False) -> Tensor: ...
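# Usage sketch for the undocumented functional pooling ops above; they follow
# the same semantics as torch.nn.functional.max_pool1d/2d/3d, and the
# *_with_indices variant additionally returns the argmax locations.
#   >>> x = torch.randn(1, 4, 8)                   # (N, C, L)
#   >>> torch.max_pool1d(x, kernel_size=2).shape   # stride defaults to kernel_size
#   torch.Size([1, 4, 4])
#   >>> values, indices = torch.max_pool1d_with_indices(x, kernel_size=2)
#   >>> values.shape, indices.shape
#   (torch.Size([1, 4, 4]), torch.Size([1, 4, 4]))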
def maximum(input: Tensor, other: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
r"""
maximum(input, other, *, out=None) -> Tensor
Computes the element-wise maximum of :attr:`input` and :attr:`other`.
.. note::
If one of the elements being compared is a NaN, then that element is returned.
:func:`maximum` is not supported for tensors with complex dtypes.
Args:
input (Tensor): the input tensor.
other (Tensor): the second input tensor
Keyword args:
out (Tensor, optional): the output tensor.
Example::
>>> a = torch.tensor((1, 2, -1))
>>> b = torch.tensor((3, 0, 4))
>>> torch.maximum(a, b)
tensor([3, 2, 4])
"""
...
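# The NaN note above in practice: NaNs propagate through maximum, unlike
# torch.fmax, which prefers the non-NaN operand.
#   >>> a = torch.tensor([1.0, float('nan')])
#   >>> b = torch.tensor([0.0, 2.0])
#   >>> torch.maximum(a, b)
#   tensor([1., nan])
#   >>> torch.fmax(a, b)
#   tensor([1., 2.])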
@overload
def mean(input: Tensor, *, dtype: Optional[_dtype] = None) -> Tensor:
r"""
mean(input, *, dtype=None) -> Tensor
Returns the mean value of all elements in the :attr:`input` tensor. Input must be floating point or complex.
Args:
input (Tensor):
the input tensor, either of floating point or complex dtype
Keyword args:
dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.
If specified, the input tensor is cast to :attr:`dtype` before the operation
is performed. This is useful for preventing data type overflows. Default: None.
Example::
>>> a = torch.randn(1, 3)
>>> a
tensor([[ 0.2294, -0.5481, 1.3288]])
>>> torch.mean(a)
tensor(0.3367)
.. function:: mean(input, dim, keepdim=False, *, dtype=None, out=None) -> Tensor
:noindex:
Returns the mean value of each row of the :attr:`input` tensor in the given
dimension :attr:`dim`. If :attr:`dim` is a list of dimensions,
reduce over all of them.
If :attr:`keepdim` is ``True``, the output tensor is of the same size
as :attr:`input` except in the dimension(s) :attr:`dim` where it is of size 1.
Otherwise, :attr:`dim` is squeezed (see :func:`torch.squeeze`), resulting in the
output tensor having 1 (or ``len(dim)``) fewer dimension(s).
Args:
input (Tensor): the input tensor.
dim (int or tuple of ints): the dimension or dimensions to reduce.
keepdim (bool): whether the output tensor has :attr:`dim` retained or not.
Keyword args:
dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.
If specified, the input tensor is cast to :attr:`dtype` before the operation
is performed. This is useful for preventing data type overflows. Default: None.
out (Tensor, optional): the output tensor.
.. seealso::
:func:`torch.nanmean` computes the mean value of `non-NaN` elements.
Example::
>>> a = torch.randn(4, 4)
>>> a
tensor([[-0.3841, 0.6320, 0.4254, -0.7384],
[-0.9644, 1.0131, -0.6549, -1.4279],
[-0.2951, -1.3350, -0.7694, 0.5600],
[ 1.0842, -0.9580, 0.3623, 0.2343]])
>>> torch.mean(a, 1)
tensor([-0.0163, -0.5085, -0.4599, 0.1807])
>>> torch.mean(a, 1, True)
tensor([[-0.0163],
[-0.5085],
[-0.4599],
[ 0.1807]])
"""
...
@overload
def mean(input: Tensor, dim: Optional[Union[_int, _size]], keepdim: _bool = False, *, dtype: Optional[_dtype] = None, out: Optional[Tensor] = None) -> Tensor:
r"""
mean(input, *, dtype=None) -> Tensor
Returns the mean value of all elements in the :attr:`input` tensor. Input must be floating point or complex.
Args:
input (Tensor):
the input tensor, either of floating point or complex dtype
Keyword args:
dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.
If specified, the input tensor is cast to :attr:`dtype` before the operation
is performed. This is useful for preventing data type overflows. Default: None.
Example::
>>> a = torch.randn(1, 3)
>>> a
tensor([[ 0.2294, -0.5481, 1.3288]])
>>> torch.mean(a)
tensor(0.3367)
.. function:: mean(input, dim, keepdim=False, *, dtype=None, out=None) -> Tensor
:noindex:
Returns the mean value of each row of the :attr:`input` tensor in the given
dimension :attr:`dim`. If :attr:`dim` is a list of dimensions,
reduce over all of them.
If :attr:`keepdim` is ``True``, the output tensor is of the same size
as :attr:`input` except in the dimension(s) :attr:`dim` where it is of size 1.
Otherwise, :attr:`dim` is squeezed (see :func:`torch.squeeze`), resulting in the
output tensor having 1 (or ``len(dim)``) fewer dimension(s).
Args:
input (Tensor): the input tensor.
dim (int or tuple of ints): the dimension or dimensions to reduce.
keepdim (bool): whether the output tensor has :attr:`dim` retained or not.
Keyword args:
dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.
If specified, the input tensor is cast to :attr:`dtype` before the operation
is performed. This is useful for preventing data type overflows. Default: None.
out (Tensor, optional): the output tensor.
.. seealso::
:func:`torch.nanmean` computes the mean value of `non-NaN` elements.
Example::
>>> a = torch.randn(4, 4)
>>> a
tensor([[-0.3841, 0.6320, 0.4254, -0.7384],
[-0.9644, 1.0131, -0.6549, -1.4279],
[-0.2951, -1.3350, -0.7694, 0.5600],
[ 1.0842, -0.9580, 0.3623, 0.2343]])
>>> torch.mean(a, 1)
tensor([-0.0163, -0.5085, -0.4599, 0.1807])
>>> torch.mean(a, 1, True)
tensor([[-0.0163],
[-0.5085],
[-0.4599],
[ 0.1807]])
"""
...
@overload
def mean(input: Tensor, dim: Sequence[Union[str, ellipsis, None]], keepdim: _bool = False, *, dtype: Optional[_dtype] = None, out: Optional[Tensor] = None) -> Tensor:
r"""
mean(input, *, dtype=None) -> Tensor
Returns the mean value of all elements in the :attr:`input` tensor. Input must be floating point or complex.
Args:
input (Tensor):
the input tensor, either of floating point or complex dtype
Keyword args:
dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.
If specified, the input tensor is cast to :attr:`dtype` before the operation
is performed. This is useful for preventing data type overflows. Default: None.
Example::
>>> a = torch.randn(1, 3)
>>> a
tensor([[ 0.2294, -0.5481, 1.3288]])
>>> torch.mean(a)
tensor(0.3367)
.. function:: mean(input, dim, keepdim=False, *, dtype=None, out=None) -> Tensor
:noindex:
Returns the mean value of each row of the :attr:`input` tensor in the given
dimension :attr:`dim`. If :attr:`dim` is a list of dimensions,
reduce over all of them.
If :attr:`keepdim` is ``True``, the output tensor is of the same size
as :attr:`input` except in the dimension(s) :attr:`dim` where it is of size 1.
Otherwise, :attr:`dim` is squeezed (see :func:`torch.squeeze`), resulting in the
output tensor having 1 (or ``len(dim)``) fewer dimension(s).
Args:
input (Tensor): the input tensor.
dim (int or tuple of ints): the dimension or dimensions to reduce.
keepdim (bool): whether the output tensor has :attr:`dim` retained or not.
Keyword args:
dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.
If specified, the input tensor is cast to :attr:`dtype` before the operation
is performed. This is useful for preventing data type overflows. Default: None.
out (Tensor, optional): the output tensor.
.. seealso::
:func:`torch.nanmean` computes the mean value of `non-NaN` elements.
Example::
>>> a = torch.randn(4, 4)
>>> a
tensor([[-0.3841, 0.6320, 0.4254, -0.7384],
[-0.9644, 1.0131, -0.6549, -1.4279],
[-0.2951, -1.3350, -0.7694, 0.5600],
[ 1.0842, -0.9580, 0.3623, 0.2343]])
>>> torch.mean(a, 1)
tensor([-0.0163, -0.5085, -0.4599, 0.1807])
>>> torch.mean(a, 1, True)
tensor([[-0.0163],
[-0.5085],
[-0.4599],
[ 0.1807]])
"""
...
@overload
def median(input: Tensor) -> Tensor:
r"""
median(input) -> Tensor
Returns the median of the values in :attr:`input`.
.. note::
The median is not unique for :attr:`input` tensors with an even number
of elements. In this case the lower of the two medians is returned. To
compute the mean of both medians, use :func:`torch.quantile` with ``q=0.5`` instead.
.. warning::
This function produces deterministic (sub)gradients unlike ``median(dim=0)``
Args:
input (Tensor): the input tensor.
Example::
>>> a = torch.randn(1, 3)
>>> a
tensor([[ 1.5219, -1.5212, 0.2202]])
>>> torch.median(a)
tensor(0.2202)
.. function:: median(input, dim=-1, keepdim=False, *, out=None) -> (Tensor, LongTensor)
:noindex:
Returns a namedtuple ``(values, indices)`` where ``values`` contains the median of each row of :attr:`input`
in the dimension :attr:`dim`, and ``indices`` contains the index of the median values found in the dimension :attr:`dim`.
By default, :attr:`dim` is the last dimension of the :attr:`input` tensor.
If :attr:`keepdim` is ``True``, the output tensors are of the same size
as :attr:`input` except in the dimension :attr:`dim` where they are of size 1.
Otherwise, :attr:`dim` is squeezed (see :func:`torch.squeeze`), resulting in
the output tensor having 1 fewer dimension than :attr:`input`.
.. note::
The median is not unique for :attr:`input` tensors with an even number
of elements in the dimension :attr:`dim`. In this case the lower of the
two medians is returned. To compute the mean of both medians in
:attr:`input`, use :func:`torch.quantile` with ``q=0.5`` instead.
.. warning::
``indices`` does not necessarily contain the first occurrence of each
median value found, unless it is unique.
The exact implementation details are device-specific.
Do not expect the same result when run on CPU and GPU in general.
For the same reason do not expect the gradients to be deterministic.
Args:
input (Tensor): the input tensor.
dim (int): the dimension to reduce.
keepdim (bool): whether the output tensor has :attr:`dim` retained or not.
Keyword args:
out ((Tensor, Tensor), optional): The first tensor will be populated with the median values and the second
tensor, which must have dtype long, with their indices in the dimension
:attr:`dim` of :attr:`input`.
Example::
>>> a = torch.randn(4, 5)
>>> a
tensor([[ 0.2505, -0.3982, -0.9948, 0.3518, -1.3131],
[ 0.3180, -0.6993, 1.0436, 0.0438, 0.2270],
[-0.2751, 0.7303, 0.2192, 0.3321, 0.2488],
[ 1.0778, -1.9510, 0.7048, 0.4742, -0.7125]])
>>> torch.median(a, 1)
torch.return_types.median(values=tensor([-0.3982, 0.2270, 0.2488, 0.4742]), indices=tensor([1, 4, 4, 3]))
"""
...
@overload
def median(input: Tensor, dim: _int, keepdim: _bool = False, *, out: Union[Tensor, Tuple[Tensor, ...], List[Tensor], None] = None) -> torch.return_types.median:
r"""
median(input) -> Tensor
Returns the median of the values in :attr:`input`.
.. note::
The median is not unique for :attr:`input` tensors with an even number
of elements. In this case the lower of the two medians is returned. To
compute the mean of both medians, use :func:`torch.quantile` with ``q=0.5`` instead.
.. warning::
This function produces deterministic (sub)gradients unlike ``median(dim=0)``
Args:
input (Tensor): the input tensor.
Example::
>>> a = torch.randn(1, 3)
>>> a
tensor([[ 1.5219, -1.5212, 0.2202]])
>>> torch.median(a)
tensor(0.2202)
.. function:: median(input, dim=-1, keepdim=False, *, out=None) -> (Tensor, LongTensor)
:noindex:
Returns a namedtuple ``(values, indices)`` where ``values`` contains the median of each row of :attr:`input`
in the dimension :attr:`dim`, and ``indices`` contains the index of the median values found in the dimension :attr:`dim`.
By default, :attr:`dim` is the last dimension of the :attr:`input` tensor.
If :attr:`keepdim` is ``True``, the output tensors are of the same size
as :attr:`input` except in the dimension :attr:`dim` where they are of size 1.
Otherwise, :attr:`dim` is squeezed (see :func:`torch.squeeze`), resulting in
the output tensor having 1 fewer dimension than :attr:`input`.
.. note::
The median is not unique for :attr:`input` tensors with an even number
of elements in the dimension :attr:`dim`. In this case the lower of the
two medians is returned. To compute the mean of both medians in
:attr:`input`, use :func:`torch.quantile` with ``q=0.5`` instead.
.. warning::
``indices`` does not necessarily contain the first occurrence of each
median value found, unless it is unique.
The exact implementation details are device-specific.
Do not expect the same result when run on CPU and GPU in general.
For the same reason do not expect the gradients to be deterministic.
Args:
input (Tensor): the input tensor.
dim (int): the dimension to reduce.
keepdim (bool): whether the output tensor has :attr:`dim` retained or not.
Keyword args:
out ((Tensor, Tensor), optional): The first tensor will be populated with the median values and the second
tensor, which must have dtype long, with their indices in the dimension
:attr:`dim` of :attr:`input`.
Example::
>>> a = torch.randn(4, 5)
>>> a
tensor([[ 0.2505, -0.3982, -0.9948, 0.3518, -1.3131],
[ 0.3180, -0.6993, 1.0436, 0.0438, 0.2270],
[-0.2751, 0.7303, 0.2192, 0.3321, 0.2488],
[ 1.0778, -1.9510, 0.7048, 0.4742, -0.7125]])
>>> torch.median(a, 1)
torch.return_types.median(values=tensor([-0.3982, 0.2270, 0.2488, 0.4742]), indices=tensor([1, 4, 4, 3]))
"""
...
@overload
def median(input: Tensor, dim: Union[str, ellipsis, None], keepdim: _bool = False, *, out: Union[Tensor, Tuple[Tensor, ...], List[Tensor], None] = None) -> torch.return_types.median:
r"""
median(input) -> Tensor
Returns the median of the values in :attr:`input`.
.. note::
The median is not unique for :attr:`input` tensors with an even number
of elements. In this case the lower of the two medians is returned. To
compute the mean of both medians, use :func:`torch.quantile` with ``q=0.5`` instead.
.. warning::
This function produces deterministic (sub)gradients unlike ``median(dim=0)``
Args:
input (Tensor): the input tensor.
Example::
>>> a = torch.randn(1, 3)
>>> a
tensor([[ 1.5219, -1.5212, 0.2202]])
>>> torch.median(a)
tensor(0.2202)
.. function:: median(input, dim=-1, keepdim=False, *, out=None) -> (Tensor, LongTensor)
:noindex:
Returns a namedtuple ``(values, indices)`` where ``values`` contains the median of each row of :attr:`input`
in the dimension :attr:`dim`, and ``indices`` contains the index of the median values found in the dimension :attr:`dim`.
By default, :attr:`dim` is the last dimension of the :attr:`input` tensor.
If :attr:`keepdim` is ``True``, the output tensors are of the same size
as :attr:`input` except in the dimension :attr:`dim` where they are of size 1.
Otherwise, :attr:`dim` is squeezed (see :func:`torch.squeeze`), resulting in
the output tensor having 1 fewer dimension than :attr:`input`.
.. note::
The median is not unique for :attr:`input` tensors with an even number
of elements in the dimension :attr:`dim`. In this case the lower of the
two medians is returned. To compute the mean of both medians in
:attr:`input`, use :func:`torch.quantile` with ``q=0.5`` instead.
.. warning::
``indices`` does not necessarily contain the first occurrence of each
median value found, unless it is unique.
The exact implementation details are device-specific.
Do not expect the same result when run on CPU and GPU in general.
For the same reason do not expect the gradients to be deterministic.
Args:
input (Tensor): the input tensor.
dim (int): the dimension to reduce.
keepdim (bool): whether the output tensor has :attr:`dim` retained or not.
Keyword args:
out ((Tensor, Tensor), optional): The first tensor will be populated with the median values and the second
tensor, which must have dtype long, with their indices in the dimension
:attr:`dim` of :attr:`input`.
Example::
>>> a = torch.randn(4, 5)
>>> a
tensor([[ 0.2505, -0.3982, -0.9948, 0.3518, -1.3131],
[ 0.3180, -0.6993, 1.0436, 0.0438, 0.2270],
[-0.2751, 0.7303, 0.2192, 0.3321, 0.2488],
[ 1.0778, -1.9510, 0.7048, 0.4742, -0.7125]])
>>> torch.median(a, 1)
torch.return_types.median(values=tensor([-0.3982, 0.2270, 0.2488, 0.4742]), indices=tensor([1, 4, 4, 3]))
"""
...
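# Sketch of the even-length caveat above: median returns the lower of the two
# middle values, whereas quantile(q=0.5) interpolates between them.
#   >>> t = torch.tensor([1.0, 2.0, 3.0, 4.0])
#   >>> torch.median(t)
#   tensor(2.)
#   >>> torch.quantile(t, 0.5)
#   tensor(2.5000)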
@overload
def min(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
r"""
min(input) -> Tensor
Returns the minimum value of all elements in the :attr:`input` tensor.
.. warning::
This function produces deterministic (sub)gradients unlike ``min(dim=0)``
Args:
input (Tensor): the input tensor.
Example::
>>> a = torch.randn(1, 3)
>>> a
tensor([[ 0.6750, 1.0857, 1.7197]])
>>> torch.min(a)
tensor(0.6750)
.. function:: min(input, dim, keepdim=False, *, out=None) -> (Tensor, LongTensor)
:noindex:
Returns a namedtuple ``(values, indices)`` where ``values`` is the minimum
value of each row of the :attr:`input` tensor in the given dimension
:attr:`dim`, and ``indices`` is the index location of each minimum value found
(argmin).
If :attr:`keepdim` is ``True``, the output tensors are of the same size as
:attr:`input` except in the dimension :attr:`dim` where they are of size 1.
Otherwise, :attr:`dim` is squeezed (see :func:`torch.squeeze`), resulting in
the output tensors having 1 fewer dimension than :attr:`input`.
.. note:: If there are multiple minimal values in a reduced row then
the indices of the first minimal value are returned.
Args:
input (Tensor): the input tensor.
dim (int): the dimension to reduce.
keepdim (bool): whether the output tensor has :attr:`dim` retained or not.
Keyword args:
out (tuple, optional): the tuple of two output tensors (min, min_indices)
Example::
>>> a = torch.randn(4, 4)
>>> a
tensor([[-0.6248, 1.1334, -1.1899, -0.2803],
[-1.4644, -0.2635, -0.3651, 0.6134],
[ 0.2457, 0.0384, 1.0128, 0.7015],
[-0.1153, 2.9849, 2.1458, 0.5788]])
>>> torch.min(a, 1)
torch.return_types.min(values=tensor([-1.1899, -1.4644, 0.0384, -0.1153]), indices=tensor([2, 0, 1, 0]))
.. function:: min(input, other, *, out=None) -> Tensor
:noindex:
See :func:`torch.minimum`.
"""
...
@overload
def min(input: Tensor, other: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
r"""
min(input) -> Tensor
Returns the minimum value of all elements in the :attr:`input` tensor.
.. warning::
This function produces deterministic (sub)gradients unlike ``min(dim=0)``
Args:
input (Tensor): the input tensor.
Example::
>>> a = torch.randn(1, 3)
>>> a
tensor([[ 0.6750, 1.0857, 1.7197]])
>>> torch.min(a)
tensor(0.6750)
.. function:: min(input, dim, keepdim=False, *, out=None) -> (Tensor, LongTensor)
:noindex:
Returns a namedtuple ``(values, indices)`` where ``values`` is the minimum
value of each row of the :attr:`input` tensor in the given dimension
:attr:`dim`, and ``indices`` is the index location of each minimum value found
(argmin).
If :attr:`keepdim` is ``True``, the output tensors are of the same size as
:attr:`input` except in the dimension :attr:`dim` where they are of size 1.
Otherwise, :attr:`dim` is squeezed (see :func:`torch.squeeze`), resulting in
the output tensors having 1 fewer dimension than :attr:`input`.
.. note:: If there are multiple minimal values in a reduced row then
the indices of the first minimal value are returned.
Args:
input (Tensor): the input tensor.
dim (int): the dimension to reduce.
keepdim (bool): whether the output tensor has :attr:`dim` retained or not.
Keyword args:
out (tuple, optional): the tuple of two output tensors (min, min_indices)
Example::
>>> a = torch.randn(4, 4)
>>> a
tensor([[-0.6248, 1.1334, -1.1899, -0.2803],
[-1.4644, -0.2635, -0.3651, 0.6134],
[ 0.2457, 0.0384, 1.0128, 0.7015],
[-0.1153, 2.9849, 2.1458, 0.5788]])
>>> torch.min(a, 1)
torch.return_types.min(values=tensor([-1.1899, -1.4644, 0.0384, -0.1153]), indices=tensor([2, 0, 1, 0]))
.. function:: min(input, other, *, out=None) -> Tensor
:noindex:
See :func:`torch.minimum`.
"""
...
@overload
def min(input: Tensor, dim: _int, keepdim: _bool = False, *, out: Union[Tensor, Tuple[Tensor, ...], List[Tensor], None] = None) -> torch.return_types.min:
r"""
min(input) -> Tensor
Returns the minimum value of all elements in the :attr:`input` tensor.
.. warning::
This function produces deterministic (sub)gradients unlike ``min(dim=0)``
Args:
input (Tensor): the input tensor.
Example::
>>> a = torch.randn(1, 3)
>>> a
tensor([[ 0.6750, 1.0857, 1.7197]])
>>> torch.min(a)
tensor(0.6750)
.. function:: min(input, dim, keepdim=False, *, out=None) -> (Tensor, LongTensor)
:noindex:
Returns a namedtuple ``(values, indices)`` where ``values`` is the minimum
value of each row of the :attr:`input` tensor in the given dimension
:attr:`dim`, and ``indices`` is the index location of each minimum value found
(argmin).
If :attr:`keepdim` is ``True``, the output tensors are of the same size as
:attr:`input` except in the dimension :attr:`dim` where they are of size 1.
Otherwise, :attr:`dim` is squeezed (see :func:`torch.squeeze`), resulting in
the output tensors having 1 fewer dimension than :attr:`input`.
.. note:: If there are multiple minimal values in a reduced row then
the indices of the first minimal value are returned.
Args:
input (Tensor): the input tensor.
dim (int): the dimension to reduce.
keepdim (bool): whether the output tensor has :attr:`dim` retained or not.
Keyword args:
out (tuple, optional): the tuple of two output tensors (min, min_indices)
Example::
>>> a = torch.randn(4, 4)
>>> a
tensor([[-0.6248, 1.1334, -1.1899, -0.2803],
[-1.4644, -0.2635, -0.3651, 0.6134],
[ 0.2457, 0.0384, 1.0128, 0.7015],
[-0.1153, 2.9849, 2.1458, 0.5788]])
>>> torch.min(a, 1)
torch.return_types.min(values=tensor([-1.1899, -1.4644, 0.0384, -0.1153]), indices=tensor([2, 0, 1, 0]))
.. function:: min(input, other, *, out=None) -> Tensor
:noindex:
See :func:`torch.minimum`.
"""
...
@overload
def min(input: Tensor, dim: Union[str, ellipsis, None], keepdim: _bool = False, *, out: Union[Tensor, Tuple[Tensor, ...], List[Tensor], None] = None) -> torch.return_types.min:
r"""
min(input) -> Tensor
Returns the minimum value of all elements in the :attr:`input` tensor.
.. warning::
This function produces deterministic (sub)gradients unlike ``min(dim=0)``
Args:
input (Tensor): the input tensor.
Example::
>>> a = torch.randn(1, 3)
>>> a
tensor([[ 0.6750, 1.0857, 1.7197]])
>>> torch.min(a)
tensor(0.6750)
.. function:: min(input, dim, keepdim=False, *, out=None) -> (Tensor, LongTensor)
:noindex:
Returns a namedtuple ``(values, indices)`` where ``values`` is the minimum
value of each row of the :attr:`input` tensor in the given dimension
:attr:`dim`, and ``indices`` is the index location of each minimum value found
(argmin).
If :attr:`keepdim` is ``True``, the output tensors are of the same size as
:attr:`input` except in the dimension :attr:`dim` where they are of size 1.
Otherwise, :attr:`dim` is squeezed (see :func:`torch.squeeze`), resulting in
the output tensors having 1 fewer dimension than :attr:`input`.
.. note:: If there are multiple minimal values in a reduced row then
the indices of the first minimal value are returned.
Args:
input (Tensor): the input tensor.
dim (int): the dimension to reduce.
keepdim (bool): whether the output tensor has :attr:`dim` retained or not.
Keyword args:
out (tuple, optional): the tuple of two output tensors (min, min_indices)
Example::
>>> a = torch.randn(4, 4)
>>> a
tensor([[-0.6248, 1.1334, -1.1899, -0.2803],
[-1.4644, -0.2635, -0.3651, 0.6134],
[ 0.2457, 0.0384, 1.0128, 0.7015],
[-0.1153, 2.9849, 2.1458, 0.5788]])
>>> torch.min(a, 1)
torch.return_types.min(values=tensor([-1.1899, -1.4644, 0.0384, -0.1153]), indices=tensor([2, 0, 1, 0]))
.. function:: min(input, other, *, out=None) -> Tensor
:noindex:
See :func:`torch.minimum`.
"""
...
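# A minimal sketch of the ``keepdim`` behavior described above: the reduced
# dimension is retained with size 1 instead of being squeezed, so the result
# broadcasts against the original tensor.
#
#   >>> a = torch.randn(4, 4)
#   >>> values, indices = torch.min(a, 1, keepdim=True)
#   >>> values.shape
#   torch.Size([4, 1])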
def minimum(input: Tensor, other: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
r"""
minimum(input, other, *, out=None) -> Tensor
Computes the element-wise minimum of :attr:`input` and :attr:`other`.
.. note::
If one of the elements being compared is a NaN, then that element is returned.
:func:`minimum` is not supported for tensors with complex dtypes.
Args:
input (Tensor): the input tensor.
other (Tensor): the second input tensor
Keyword args:
out (Tensor, optional): the output tensor.
Example::
>>> a = torch.tensor((1, 2, -1))
>>> b = torch.tensor((3, 0, 4))
>>> torch.minimum(a, b)
tensor([1, 0, -1])
"""
...
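# A minimal sketch of the NaN propagation noted above: when either compared
# element is NaN, that NaN is returned.
#
#   >>> torch.minimum(torch.tensor([1., float('nan')]), torch.tensor([2., 0.]))
#   tensor([1., nan])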
def miopen_batch_norm(input: Tensor, weight: Tensor, bias: Optional[Tensor], running_mean: Optional[Tensor], running_var: Optional[Tensor], training: _bool, exponential_average_factor: _float, epsilon: _float) -> Tuple[Tensor, Tensor, Tensor]: ...
def miopen_convolution(input: Tensor, weight: Tensor, bias: Optional[Tensor], padding: Sequence[Union[_int, SymInt]], stride: Sequence[Union[_int, SymInt]], dilation: Sequence[Union[_int, SymInt]], groups: Union[_int, SymInt], benchmark: _bool, deterministic: _bool) -> Tensor: ...
def miopen_convolution_add_relu(input: Tensor, weight: Tensor, z: Tensor, alpha: Optional[Union[Number, _complex]], bias: Optional[Tensor], stride: Sequence[Union[_int, SymInt]], padding: Sequence[Union[_int, SymInt]], dilation: Sequence[Union[_int, SymInt]], groups: Union[_int, SymInt]) -> Tensor: ...
def miopen_convolution_relu(input: Tensor, weight: Tensor, bias: Optional[Tensor], stride: Sequence[Union[_int, SymInt]], padding: Sequence[Union[_int, SymInt]], dilation: Sequence[Union[_int, SymInt]], groups: Union[_int, SymInt]) -> Tensor: ...
def miopen_convolution_transpose(input: Tensor, weight: Tensor, bias: Optional[Tensor], padding: Sequence[Union[_int, SymInt]], output_padding: Sequence[Union[_int, SymInt]], stride: Sequence[Union[_int, SymInt]], dilation: Sequence[Union[_int, SymInt]], groups: Union[_int, SymInt], benchmark: _bool, deterministic: _bool) -> Tensor: ...
def miopen_depthwise_convolution(input: Tensor, weight: Tensor, bias: Optional[Tensor], padding: Sequence[Union[_int, SymInt]], stride: Sequence[Union[_int, SymInt]], dilation: Sequence[Union[_int, SymInt]], groups: Union[_int, SymInt], benchmark: _bool, deterministic: _bool) -> Tensor: ...
def miopen_rnn(input: Tensor, weight: Union[Tuple[Tensor, ...], List[Tensor]], weight_stride0: _int, hx: Tensor, cx: Optional[Tensor], mode: _int, hidden_size: _int, num_layers: _int, batch_first: _bool, dropout: _float, train: _bool, bidirectional: _bool, batch_sizes: _size, dropout_state: Optional[Tensor]) -> Tuple[Tensor, Tensor, Tensor, Tensor, Tensor]: ...
def mkldnn_adaptive_avg_pool2d(input: Tensor, output_size: Union[_int, _size], *, out: Optional[Tensor] = None) -> Tensor: ...
def mkldnn_convolution(input: Tensor, weight: Tensor, bias: Optional[Tensor], padding: Sequence[Union[_int, SymInt]], stride: Sequence[Union[_int, SymInt]], dilation: Sequence[Union[_int, SymInt]], groups: Union[_int, SymInt]) -> Tensor: ...
def mkldnn_linear_backward_weights(grad_output: Tensor, input: Tensor, weight: Tensor, bias_defined: _bool) -> Tuple[Tensor, Tensor]: ...
def mkldnn_max_pool2d(input: Tensor, kernel_size: Union[_int, _size], stride: Union[_int, _size] = (), padding: Union[_int, _size] = 0, dilation: Union[_int, _size] = 1, ceil_mode: _bool = False) -> Tensor: ...
def mkldnn_max_pool3d(input: Tensor, kernel_size: Union[_int, _size], stride: Union[_int, _size] = (), padding: Union[_int, _size] = 0, dilation: Union[_int, _size] = 1, ceil_mode: _bool = False) -> Tensor: ...
def mkldnn_rnn_layer(input: Tensor, weight0: Tensor, weight1: Tensor, weight2: Tensor, weight3: Tensor, hx_: Tensor, cx_: Tensor, reverse: _bool, batch_sizes: _size, mode: _int, hidden_size: _int, num_layers: _int, has_biases: _bool, bidirectional: _bool, batch_first: _bool, train: _bool) -> Tuple[Tensor, Tensor, Tensor, Tensor]: ...
def mm(input: Tensor, mat2: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
r"""
mm(input, mat2, *, out=None) -> Tensor
Performs a matrix multiplication of the matrices :attr:`input` and :attr:`mat2`.
If :attr:`input` is a :math:`(n \times m)` tensor, :attr:`mat2` is a
:math:`(m \times p)` tensor, :attr:`out` will be a :math:`(n \times p)` tensor.
.. note:: This function does not :ref:`broadcast <broadcasting-semantics>`.
For broadcasting matrix products, see :func:`torch.matmul`.
Supports strided and sparse 2-D tensors as inputs, autograd with
respect to strided inputs.
This operation has support for arguments with :ref:`sparse layouts<sparse-docs>`.
If :attr:`out` is provided, its layout will be used. Otherwise, the result
layout will be deduced from that of :attr:`input`.
.. warning::
Sparse support is a beta feature and some layout(s)/dtype/device combinations may not be supported,
or may not have autograd support. If you notice missing functionality please
open a feature request.
This operator supports :ref:`TensorFloat32<tf32_on_ampere>`.
On certain ROCm devices, when using float16 inputs this module will use :ref:`different precision<fp16_on_mi200>` for backward.
Args:
input (Tensor): the first matrix to be matrix multiplied
mat2 (Tensor): the second matrix to be matrix multiplied
Keyword args:
out (Tensor, optional): the output tensor.
Example::
>>> mat1 = torch.randn(2, 3)
>>> mat2 = torch.randn(3, 3)
>>> torch.mm(mat1, mat2)
tensor([[ 0.4851, 0.5037, -0.3633],
[-0.0760, -3.6705, 2.4784]])
"""
...
@overload
def mode(input: Tensor, dim: _int = -1, keepdim: _bool = False, *, out: Union[Tensor, Tuple[Tensor, ...], List[Tensor], None] = None) -> torch.return_types.mode:
r"""
mode(input, dim=-1, keepdim=False, *, out=None) -> (Tensor, LongTensor)
Returns a namedtuple ``(values, indices)`` where ``values`` is the mode
value of each row of the :attr:`input` tensor in the given dimension
:attr:`dim`, i.e. a value which appears most often
in that row, and ``indices`` is the index location of each mode value found.
By default, :attr:`dim` is the last dimension of the :attr:`input` tensor.
If :attr:`keepdim` is ``True``, the output tensors are of the same size as
:attr:`input` except in the dimension :attr:`dim` where they are of size 1.
Otherwise, :attr:`dim` is squeezed (see :func:`torch.squeeze`), resulting
in the output tensors having 1 fewer dimension than :attr:`input`.
.. note:: This function is not defined for ``torch.cuda.Tensor`` yet.
Args:
input (Tensor): the input tensor.
dim (int): the dimension to reduce.
keepdim (bool): whether the output tensor has :attr:`dim` retained or not.
Keyword args:
out (tuple, optional): the result tuple of two output tensors (values, indices)
Example::
>>> b = torch.tensor(
[[0, 0, 0, 2, 0, 0, 2],
[0, 3, 0, 0, 2, 0, 1],
[2, 2, 2, 0, 0, 0, 3],
[2, 2, 3, 0, 1, 1, 0],
[1, 1, 0, 0, 2, 0, 2]])
>>> torch.mode(b, 0)
torch.return_types.mode(
values=tensor([0, 2, 0, 0, 0, 0, 2]),
indices=tensor([1, 3, 4, 4, 2, 4, 4]))
"""
...
@overload
def mode(input: Tensor, dim: Union[str, ellipsis, None], keepdim: _bool = False, *, out: Union[Tensor, Tuple[Tensor, ...], List[Tensor], None] = None) -> torch.return_types.mode:
r"""
mode(input, dim=-1, keepdim=False, *, out=None) -> (Tensor, LongTensor)
Returns a namedtuple ``(values, indices)`` where ``values`` is the mode
value of each row of the :attr:`input` tensor in the given dimension
:attr:`dim`, i.e. a value which appears most often
in that row, and ``indices`` is the index location of each mode value found.
By default, :attr:`dim` is the last dimension of the :attr:`input` tensor.
If :attr:`keepdim` is ``True``, the output tensors are of the same size as
:attr:`input` except in the dimension :attr:`dim` where they are of size 1.
Otherwise, :attr:`dim` is squeezed (see :func:`torch.squeeze`), resulting
in the output tensors having 1 fewer dimension than :attr:`input`.
.. note:: This function is not defined for ``torch.cuda.Tensor`` yet.
Args:
input (Tensor): the input tensor.
dim (int): the dimension to reduce.
keepdim (bool): whether the output tensor has :attr:`dim` retained or not.
Keyword args:
out (tuple, optional): the result tuple of two output tensors (values, indices)
Example::
>>> b = torch.tensor(
[[0, 0, 0, 2, 0, 0, 2],
[0, 3, 0, 0, 2, 0, 1],
[2, 2, 2, 0, 0, 0, 3],
[2, 2, 3, 0, 1, 1, 0],
[1, 1, 0, 0, 2, 0, 2]])
>>> torch.mode(b, 0)
torch.return_types.mode(
values=tensor([0, 2, 0, 0, 0, 0, 2]),
indices=tensor([1, 3, 4, 4, 2, 4, 4]))
"""
...
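# A minimal sketch of the ``keepdim`` behavior described above, using the same
# ``b`` as the docstring example: the reduced dimension 0 is kept with size 1.
#
#   >>> b = torch.tensor([[0, 0, 0, 2, 0, 0, 2],
#   ...                   [0, 3, 0, 0, 2, 0, 1],
#   ...                   [2, 2, 2, 0, 0, 0, 3],
#   ...                   [2, 2, 3, 0, 1, 1, 0],
#   ...                   [1, 1, 0, 0, 2, 0, 2]])
#   >>> torch.mode(b, 0, keepdim=True).values
#   tensor([[0, 2, 0, 0, 0, 0, 2]])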
@overload
def moveaxis(input: Tensor, source: _int, destination: _int) -> Tensor:
r"""
moveaxis(input, source, destination) -> Tensor
Alias for :func:`torch.movedim`.
This function is equivalent to NumPy's moveaxis function.
Examples::
>>> t = torch.randn(3,2,1)
>>> t
tensor([[[-0.3362],
[-0.8437]],
[[-0.9627],
[ 0.1727]],
[[ 0.5173],
[-0.1398]]])
>>> torch.moveaxis(t, 1, 0).shape
torch.Size([2, 3, 1])
>>> torch.moveaxis(t, 1, 0)
tensor([[[-0.3362],
[-0.9627],
[ 0.5173]],
[[-0.8437],
[ 0.1727],
[-0.1398]]])
>>> torch.moveaxis(t, (1, 2), (0, 1)).shape
torch.Size([2, 1, 3])
>>> torch.moveaxis(t, (1, 2), (0, 1))
tensor([[[-0.3362, -0.9627, 0.5173]],
[[-0.8437, 0.1727, -0.1398]]])
"""
...
@overload
def moveaxis(input: Tensor, source: _size, destination: _size) -> Tensor:
r"""
moveaxis(input, source, destination) -> Tensor
Alias for :func:`torch.movedim`.
This function is equivalent to NumPy's moveaxis function.
Examples::
>>> t = torch.randn(3,2,1)
>>> t
tensor([[[-0.3362],
[-0.8437]],
[[-0.9627],
[ 0.1727]],
[[ 0.5173],
[-0.1398]]])
>>> torch.moveaxis(t, 1, 0).shape
torch.Size([2, 3, 1])
>>> torch.moveaxis(t, 1, 0)
tensor([[[-0.3362],
[-0.9627],
[ 0.5173]],
[[-0.8437],
[ 0.1727],
[-0.1398]]])
>>> torch.moveaxis(t, (1, 2), (0, 1)).shape
torch.Size([2, 1, 3])
>>> torch.moveaxis(t, (1, 2), (0, 1))
tensor([[[-0.3362, -0.9627, 0.5173]],
[[-0.8437, 0.1727, -0.1398]]])
"""
...
@overload
def movedim(input: Tensor, source: _int, destination: _int) -> Tensor:
r"""
movedim(input, source, destination) -> Tensor
Moves the dimension(s) of :attr:`input` at the position(s) in :attr:`source`
to the position(s) in :attr:`destination`.
Other dimensions of :attr:`input` that are not explicitly moved remain in
their original order and appear at the positions not specified in :attr:`destination`.
Args:
input (Tensor): the input tensor.
source (int or tuple of ints): Original positions of the dims to move. These must be unique.
destination (int or tuple of ints): Destination positions for each of the original dims. These must also be unique.
Examples::
>>> t = torch.randn(3,2,1)
>>> t
tensor([[[-0.3362],
[-0.8437]],
[[-0.9627],
[ 0.1727]],
[[ 0.5173],
[-0.1398]]])
>>> torch.movedim(t, 1, 0).shape
torch.Size([2, 3, 1])
>>> torch.movedim(t, 1, 0)
tensor([[[-0.3362],
[-0.9627],
[ 0.5173]],
[[-0.8437],
[ 0.1727],
[-0.1398]]])
>>> torch.movedim(t, (1, 2), (0, 1)).shape
torch.Size([2, 1, 3])
>>> torch.movedim(t, (1, 2), (0, 1))
tensor([[[-0.3362, -0.9627, 0.5173]],
[[-0.8437, 0.1727, -0.1398]]])
"""
...
@overload
def movedim(input: Tensor, source: _size, destination: _size) -> Tensor:
r"""
movedim(input, source, destination) -> Tensor
Moves the dimension(s) of :attr:`input` at the position(s) in :attr:`source`
to the position(s) in :attr:`destination`.
Other dimensions of :attr:`input` that are not explicitly moved remain in
their original order and appear at the positions not specified in :attr:`destination`.
Args:
input (Tensor): the input tensor.
source (int or tuple of ints): Original positions of the dims to move. These must be unique.
destination (int or tuple of ints): Destination positions for each of the original dims. These must also be unique.
Examples::
>>> t = torch.randn(3,2,1)
>>> t
tensor([[[-0.3362],
[-0.8437]],
[[-0.9627],
[ 0.1727]],
[[ 0.5173],
[-0.1398]]])
>>> torch.movedim(t, 1, 0).shape
torch.Size([2, 3, 1])
>>> torch.movedim(t, 1, 0)
tensor([[[-0.3362],
[-0.9627],
[ 0.5173]],
[[-0.8437],
[ 0.1727],
[-0.1398]]])
>>> torch.movedim(t, (1, 2), (0, 1)).shape
torch.Size([2, 1, 3])
>>> torch.movedim(t, (1, 2), (0, 1))
tensor([[[-0.3362, -0.9627, 0.5173]],
[[-0.8437, 0.1727, -0.1398]]])
"""
...
def msort(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
r"""
msort(input, *, out=None) -> Tensor
Sorts the elements of the :attr:`input` tensor along its first dimension
in ascending order by value.
.. note:: `torch.msort(t)` is equivalent to `torch.sort(t, dim=0)[0]`.
See also :func:`torch.sort`.
Args:
input (Tensor): the input tensor.
Keyword args:
out (Tensor, optional): the output tensor.
Example::
>>> t = torch.randn(3, 4)
>>> t
tensor([[-0.1321, 0.4370, -1.2631, -1.1289],
[-2.0527, -1.1250, 0.2275, 0.3077],
[-0.0881, -0.1259, -0.5495, 1.0284]])
>>> torch.msort(t)
tensor([[-2.0527, -1.1250, -1.2631, -1.1289],
[-0.1321, -0.1259, -0.5495, 0.3077],
[-0.0881, 0.4370, 0.2275, 1.0284]])
"""
...
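# A minimal sketch verifying the equivalence noted above between
# ``torch.msort(t)`` and ``torch.sort(t, dim=0)[0]``:
#
#   >>> t = torch.randn(3, 4)
#   >>> torch.equal(torch.msort(t), torch.sort(t, dim=0).values)
#   True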
def mul(input: Union[Tensor, Number, _complex], other: Union[Tensor, Number, _complex], *, out: Optional[Tensor] = None) -> Tensor:
r"""
mul(input, other, *, out=None) -> Tensor
Multiplies :attr:`input` by :attr:`other`.
.. math::
\text{out}_i = \text{input}_i \times \text{other}_i
Supports :ref:`broadcasting to a common shape <broadcasting-semantics>`,
:ref:`type promotion <type-promotion-doc>`, and integer, float, and complex inputs.
Args:
input (Tensor): the input tensor.
other (Tensor or Number): the tensor or number to multiply :attr:`input` by.
Keyword args:
out (Tensor, optional): the output tensor.
Examples::
>>> a = torch.randn(3)
>>> a
tensor([ 0.2015, -0.4255, 2.6087])
>>> torch.mul(a, 100)
tensor([ 20.1494, -42.5491, 260.8663])
>>> b = torch.randn(4, 1)
>>> b
tensor([[ 1.1207],
[-0.3137],
[ 0.0700],
[ 0.8378]])
>>> c = torch.randn(1, 4)
>>> c
tensor([[ 0.5146, 0.1216, -0.5244, 2.2382]])
>>> torch.mul(b, c)
tensor([[ 0.5767, 0.1363, -0.5877, 2.5083],
[-0.1614, -0.0382, 0.1645, -0.7021],
[ 0.0360, 0.0085, -0.0367, 0.1567],
[ 0.4312, 0.1019, -0.4394, 1.8753]])
"""
...
def multinomial(input: Tensor, num_samples: _int, replacement: _bool = False, *, generator: Optional[Generator] = None, out: Optional[Tensor] = None) -> Tensor:
r"""
multinomial(input, num_samples, replacement=False, *, generator=None, out=None) -> LongTensor
Returns a tensor where each row contains :attr:`num_samples` indices sampled
from the multinomial (a stricter definition would be multivariate,
refer to torch.distributions.multinomial.Multinomial for more details)
probability distribution located in the corresponding row
of tensor :attr:`input`.
.. note::
The rows of :attr:`input` do not need to sum to one (in which case we use
the values as weights), but must be non-negative, finite and have
a non-zero sum.
Indices are ordered from left to right according to when each was sampled
(first samples are placed in first column).
If :attr:`input` is a vector, :attr:`out` is a vector of size :attr:`num_samples`.
If :attr:`input` is a matrix with `m` rows, :attr:`out` is a matrix of shape
:math:`(m \times \text{num\_samples})`.
If replacement is ``True``, samples are drawn with replacement.
If not, they are drawn without replacement, which means that when a
sample index is drawn for a row, it cannot be drawn again for that row.
.. note::
When drawn without replacement, :attr:`num_samples` must be lower than
the number of non-zero elements in :attr:`input` (or the min number of non-zero
elements in each row of :attr:`input` if it is a matrix).
Args:
input (Tensor): the input tensor containing probabilities
num_samples (int): number of samples to draw
replacement (bool, optional): whether to draw with replacement or not
Keyword args:
generator (:class:`torch.Generator`, optional): a pseudorandom number generator for sampling
out (Tensor, optional): the output tensor.
Example::
>>> weights = torch.tensor([0, 10, 3, 0], dtype=torch.float) # create a tensor of weights
>>> torch.multinomial(weights, 2)
tensor([1, 2])
>>> torch.multinomial(weights, 4) # ERROR!
RuntimeError: invalid argument 2: invalid multinomial distribution (with replacement=False,
not enough non-negative category to sample) at ../aten/src/TH/generic/THTensorRandom.cpp:320
>>> torch.multinomial(weights, 4, replacement=True)
tensor([ 2, 1, 1, 1])
"""
...
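# A minimal sketch of the ``generator`` keyword documented above: seeding a
# dedicated generator makes the draws reproducible without touching the global
# RNG state.
#
#   >>> g = torch.Generator().manual_seed(0)
#   >>> weights = torch.tensor([0., 10., 3., 0.])
#   >>> torch.multinomial(weights, 2, generator=g)  # same indices on every run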
@overload
def multiply(input: Tensor, other: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
r"""
multiply(input, other, *, out=None)
Alias for :func:`torch.mul`.
"""
...
@overload
def multiply(input: Tensor, other: Union[Number, _complex]) -> Tensor:
r"""
multiply(input, other, *, out=None)
Alias for :func:`torch.mul`.
"""
...
def mv(input: Tensor, vec: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
r"""
mv(input, vec, *, out=None) -> Tensor
Performs a matrix-vector product of the matrix :attr:`input` and the vector
:attr:`vec`.
If :attr:`input` is a :math:`(n \times m)` tensor, :attr:`vec` is a 1-D tensor of
size :math:`m`, :attr:`out` will be 1-D of size :math:`n`.
.. note:: This function does not :ref:`broadcast <broadcasting-semantics>`.
Args:
input (Tensor): matrix to be multiplied
vec (Tensor): vector to be multiplied
Keyword args:
out (Tensor, optional): the output tensor.
Example::
>>> mat = torch.randn(2, 3)
>>> vec = torch.randn(3)
>>> torch.mv(mat, vec)
tensor([ 1.0404, -0.6361])
"""
...
def mvlgamma(input: Tensor, p: _int, *, out: Optional[Tensor] = None) -> Tensor:
r"""
mvlgamma(input, p, *, out=None) -> Tensor
Alias for :func:`torch.special.multigammaln`.
"""
...
def nan_to_num(input: Tensor, nan: Optional[_float] = None, posinf: Optional[_float] = None, neginf: Optional[_float] = None, *, out: Optional[Tensor] = None) -> Tensor:
r"""
nan_to_num(input, nan=0.0, posinf=None, neginf=None, *, out=None) -> Tensor
Replaces :literal:`NaN`, positive infinity, and negative infinity values in :attr:`input`
with the values specified by :attr:`nan`, :attr:`posinf`, and :attr:`neginf`, respectively.
By default, :literal:`NaN`\ s are replaced with zero, positive infinity is replaced with the
greatest finite value representable by :attr:`input`'s dtype, and negative infinity
is replaced with the least finite value representable by :attr:`input`'s dtype.
Args:
input (Tensor): the input tensor.
nan (Number, optional): the value to replace :literal:`NaN`\s with. Default is zero.
posinf (Number, optional): if a Number, the value to replace positive infinity values with.
If None, positive infinity values are replaced with the greatest finite value representable by :attr:`input`'s dtype.
Default is None.
neginf (Number, optional): if a Number, the value to replace negative infinity values with.
If None, negative infinity values are replaced with the lowest finite value representable by :attr:`input`'s dtype.
Default is None.
Keyword args:
out (Tensor, optional): the output tensor.
Example::
>>> x = torch.tensor([float('nan'), float('inf'), -float('inf'), 3.14])
>>> torch.nan_to_num(x)
tensor([ 0.0000e+00, 3.4028e+38, -3.4028e+38, 3.1400e+00])
>>> torch.nan_to_num(x, nan=2.0)
tensor([ 2.0000e+00, 3.4028e+38, -3.4028e+38, 3.1400e+00])
>>> torch.nan_to_num(x, nan=2.0, posinf=1.0)
tensor([ 2.0000e+00, 1.0000e+00, -3.4028e+38, 3.1400e+00])
"""
...
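# A minimal sketch rounding out the docstring example above with ``neginf``:
#
#   >>> x = torch.tensor([float('nan'), float('inf'), -float('inf'), 3.14])
#   >>> torch.nan_to_num(x, nan=2.0, posinf=1.0, neginf=-1.0)
#   tensor([ 2.0000,  1.0000, -1.0000,  3.1400])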
def nan_to_num_(input: Tensor, nan: Optional[_float] = None, posinf: Optional[_float] = None, neginf: Optional[_float] = None) -> Tensor: ...
def nanmean(input: Tensor, dim: Optional[Union[_int, _size]] = None, keepdim: _bool = False, *, dtype: Optional[_dtype] = None, out: Optional[Tensor] = None) -> Tensor:
r"""
nanmean(input, dim=None, keepdim=False, *, dtype=None, out=None) -> Tensor
Computes the mean of all `non-NaN` elements along the specified dimensions.
This function is identical to :func:`torch.mean` when there are no `NaN` values
in the :attr:`input` tensor. In the presence of `NaN`, :func:`torch.mean` will
propagate the `NaN` to the output whereas :func:`torch.nanmean` will ignore the
`NaN` values (`torch.nanmean(a)` is equivalent to `torch.mean(a[~a.isnan()])`).
If :attr:`keepdim` is ``True``, the output tensor is of the same size
as :attr:`input` except in the dimension(s) :attr:`dim` where it is of size 1.
Otherwise, :attr:`dim` is squeezed (see :func:`torch.squeeze`), resulting in the
output tensor having 1 (or ``len(dim)``) fewer dimension(s).
Args:
input (Tensor): the input tensor.
dim (int or tuple of ints, optional): the dimension or dimensions to reduce.
If ``None``, all dimensions are reduced.
keepdim (bool): whether the output tensor has :attr:`dim` retained or not.
Keyword args:
dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.
If specified, the input tensor is cast to :attr:`dtype` before the operation
is performed. This is useful for preventing data type overflows. Default: None.
out (Tensor, optional): the output tensor.
.. seealso::
:func:`torch.mean` computes the mean value, propagating `NaN`.
Example::
>>> x = torch.tensor([[torch.nan, 1, 2], [1, 2, 3]])
>>> x.mean()
tensor(nan)
>>> x.nanmean()
tensor(1.8000)
>>> x.mean(dim=0)
tensor([ nan, 1.5000, 2.5000])
>>> x.nanmean(dim=0)
tensor([1.0000, 1.5000, 2.5000])
# If all elements in the reduced dimensions are NaN then the result is NaN
>>> torch.tensor([torch.nan]).nanmean()
tensor(nan)
"""
...
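# A minimal sketch verifying the equivalence stated above between ``nanmean``
# and masking out NaNs before ``mean``:
#
#   >>> a = torch.tensor([torch.nan, 1., 2., 3.])
#   >>> torch.allclose(a.nanmean(), a[~a.isnan()].mean())
#   True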
@overload
def nanmedian(input: Tensor) -> Tensor:
r"""
nanmedian(input) -> Tensor
Returns the median of the values in :attr:`input`, ignoring ``NaN`` values.
This function is identical to :func:`torch.median` when there are no ``NaN`` values in :attr:`input`.
When :attr:`input` has one or more ``NaN`` values, :func:`torch.median` will always return ``NaN``,
while this function will return the median of the non-``NaN`` elements in :attr:`input`.
If all the elements in :attr:`input` are ``NaN`` it will also return ``NaN``.
Args:
input (Tensor): the input tensor.
Example::
>>> a = torch.tensor([1, float('nan'), 3, 2])
>>> a.median()
tensor(nan)
>>> a.nanmedian()
tensor(2.)
.. function:: nanmedian(input, dim=-1, keepdim=False, *, out=None) -> (Tensor, LongTensor)
:noindex:
Returns a namedtuple ``(values, indices)`` where ``values`` contains the median of each row of :attr:`input`
in the dimension :attr:`dim`, ignoring ``NaN`` values, and ``indices`` contains the index of the median values
found in the dimension :attr:`dim`.
This function is identical to :func:`torch.median` when there are no ``NaN`` values in a reduced row. When a reduced row has
one or more ``NaN`` values, :func:`torch.median` will always reduce it to ``NaN``, while this function will reduce it to the
median of the non-``NaN`` elements. If all the elements in a reduced row are ``NaN`` then it will be reduced to ``NaN``, too.
Args:
input (Tensor): the input tensor.
dim (int): the dimension to reduce.
keepdim (bool): whether the output tensor has :attr:`dim` retained or not.
Keyword args:
out ((Tensor, Tensor), optional): The first tensor will be populated with the median values and the second
tensor, which must have dtype long, with their indices in the dimension
:attr:`dim` of :attr:`input`.
Example::
>>> a = torch.tensor([[2, 3, 1], [float('nan'), 1, float('nan')]])
>>> a
tensor([[2., 3., 1.],
[nan, 1., nan]])
>>> a.median(0)
torch.return_types.median(values=tensor([nan, 1., nan]), indices=tensor([1, 1, 1]))
>>> a.nanmedian(0)
torch.return_types.nanmedian(values=tensor([2., 1., 1.]), indices=tensor([0, 1, 0]))
"""
...
@overload
def nanmedian(input: Tensor, dim: _int, keepdim: _bool = False, *, out: Union[Tensor, Tuple[Tensor, ...], List[Tensor], None] = None) -> torch.return_types.nanmedian:
r"""
nanmedian(input) -> Tensor
Returns the median of the values in :attr:`input`, ignoring ``NaN`` values.
This function is identical to :func:`torch.median` when there are no ``NaN`` values in :attr:`input`.
When :attr:`input` has one or more ``NaN`` values, :func:`torch.median` will always return ``NaN``,
while this function will return the median of the non-``NaN`` elements in :attr:`input`.
If all the elements in :attr:`input` are ``NaN`` it will also return ``NaN``.
Args:
input (Tensor): the input tensor.
Example::
>>> a = torch.tensor([1, float('nan'), 3, 2])
>>> a.median()
tensor(nan)
>>> a.nanmedian()
tensor(2.)
.. function:: nanmedian(input, dim=-1, keepdim=False, *, out=None) -> (Tensor, LongTensor)
:noindex:
Returns a namedtuple ``(values, indices)`` where ``values`` contains the median of each row of :attr:`input`
in the dimension :attr:`dim`, ignoring ``NaN`` values, and ``indices`` contains the index of the median values
found in the dimension :attr:`dim`.
This function is identical to :func:`torch.median` when there are no ``NaN`` values in a reduced row. When a reduced row has
one or more ``NaN`` values, :func:`torch.median` will always reduce it to ``NaN``, while this function will reduce it to the
median of the non-``NaN`` elements. If all the elements in a reduced row are ``NaN`` then it will be reduced to ``NaN``, too.
Args:
input (Tensor): the input tensor.
dim (int): the dimension to reduce.
keepdim (bool): whether the output tensor has :attr:`dim` retained or not.
Keyword args:
out ((Tensor, Tensor), optional): The first tensor will be populated with the median values and the second
tensor, which must have dtype long, with their indices in the dimension
:attr:`dim` of :attr:`input`.
Example::
>>> a = torch.tensor([[2, 3, 1], [float('nan'), 1, float('nan')]])
>>> a
tensor([[2., 3., 1.],
[nan, 1., nan]])
>>> a.median(0)
torch.return_types.median(values=tensor([nan, 1., nan]), indices=tensor([1, 1, 1]))
>>> a.nanmedian(0)
torch.return_types.nanmedian(values=tensor([2., 1., 1.]), indices=tensor([0, 1, 0]))
"""
...
@overload
def nanmedian(input: Tensor, dim: Union[str, ellipsis, None], keepdim: _bool = False, *, out: Union[Tensor, Tuple[Tensor, ...], List[Tensor], None] = None) -> torch.return_types.nanmedian:
r"""
nanmedian(input) -> Tensor
Returns the median of the values in :attr:`input`, ignoring ``NaN`` values.
This function is identical to :func:`torch.median` when there are no ``NaN`` values in :attr:`input`.
When :attr:`input` has one or more ``NaN`` values, :func:`torch.median` will always return ``NaN``,
while this function will return the median of the non-``NaN`` elements in :attr:`input`.
If all the elements in :attr:`input` are ``NaN`` it will also return ``NaN``.
Args:
input (Tensor): the input tensor.
Example::
>>> a = torch.tensor([1, float('nan'), 3, 2])
>>> a.median()
tensor(nan)
>>> a.nanmedian()
tensor(2.)
.. function:: nanmedian(input, dim=-1, keepdim=False, *, out=None) -> (Tensor, LongTensor)
:noindex:
Returns a namedtuple ``(values, indices)`` where ``values`` contains the median of each row of :attr:`input`
in the dimension :attr:`dim`, ignoring ``NaN`` values, and ``indices`` contains the index of the median values
found in the dimension :attr:`dim`.
This function is identical to :func:`torch.median` when there are no ``NaN`` values in a reduced row. When a reduced row has
one or more ``NaN`` values, :func:`torch.median` will always reduce it to ``NaN``, while this function will reduce it to the
median of the non-``NaN`` elements. If all the elements in a reduced row are ``NaN`` then it will be reduced to ``NaN``, too.
Args:
input (Tensor): the input tensor.
dim (int): the dimension to reduce.
keepdim (bool): whether the output tensor has :attr:`dim` retained or not.
Keyword args:
out ((Tensor, Tensor), optional): The first tensor will be populated with the median values and the second
tensor, which must have dtype long, with their indices in the dimension
:attr:`dim` of :attr:`input`.
Example::
>>> a = torch.tensor([[2, 3, 1], [float('nan'), 1, float('nan')]])
>>> a
tensor([[2., 3., 1.],
[nan, 1., nan]])
>>> a.median(0)
torch.return_types.median(values=tensor([nan, 1., nan]), indices=tensor([1, 1, 1]))
>>> a.nanmedian(0)
torch.return_types.nanmedian(values=tensor([2., 1., 1.]), indices=tensor([0, 1, 0]))
"""
...
@overload
def nanquantile(input: Tensor, q: Tensor, dim: Optional[_int] = None, keepdim: _bool = False, *, interpolation: str = "linear", out: Optional[Tensor] = None) -> Tensor:
r"""
nanquantile(input, q, dim=None, keepdim=False, *, interpolation='linear', out=None) -> Tensor
This is a variant of :func:`torch.quantile` that "ignores" ``NaN`` values,
computing the quantiles :attr:`q` as if ``NaN`` values in :attr:`input` did
not exist. If all values in a reduced row are ``NaN`` then the quantiles for
that reduction will be ``NaN``. See the documentation for :func:`torch.quantile`.
Args:
input (Tensor): the input tensor.
q (float or Tensor): a scalar or 1D tensor of quantile values in the range [0, 1]
dim (int): the dimension to reduce.
keepdim (bool): whether the output tensor has :attr:`dim` retained or not.
Keyword arguments:
interpolation (str): interpolation method to use when the desired quantile lies between two data points.
Can be ``linear``, ``lower``, ``higher``, ``midpoint`` and ``nearest``.
Default is ``linear``.
out (Tensor, optional): the output tensor.
Example::
>>> t = torch.tensor([float('nan'), 1, 2])
>>> t.quantile(0.5)
tensor(nan)
>>> t.nanquantile(0.5)
tensor(1.5000)
>>> t = torch.tensor([[float('nan'), float('nan')], [1, 2]])
>>> t
tensor([[nan, nan],
[1., 2.]])
>>> t.nanquantile(0.5, dim=0)
tensor([1., 2.])
>>> t.nanquantile(0.5, dim=1)
tensor([ nan, 1.5000])
"""
...
@overload
def nanquantile(input: Tensor, q: _float, dim: Optional[_int] = None, keepdim: _bool = False, *, interpolation: str = "linear", out: Optional[Tensor] = None) -> Tensor:
r"""
nanquantile(input, q, dim=None, keepdim=False, *, interpolation='linear', out=None) -> Tensor
This is a variant of :func:`torch.quantile` that "ignores" ``NaN`` values,
computing the quantiles :attr:`q` as if ``NaN`` values in :attr:`input` did
not exist. If all values in a reduced row are ``NaN`` then the quantiles for
that reduction will be ``NaN``. See the documentation for :func:`torch.quantile`.
Args:
input (Tensor): the input tensor.
q (float or Tensor): a scalar or 1D tensor of quantile values in the range [0, 1]
dim (int): the dimension to reduce.
keepdim (bool): whether the output tensor has :attr:`dim` retained or not.
Keyword arguments:
interpolation (str): interpolation method to use when the desired quantile lies between two data points.
Can be ``linear``, ``lower``, ``higher``, ``midpoint`` and ``nearest``.
Default is ``linear``.
out (Tensor, optional): the output tensor.
Example::
>>> t = torch.tensor([float('nan'), 1, 2])
>>> t.quantile(0.5)
tensor(nan)
>>> t.nanquantile(0.5)
tensor(1.5000)
>>> t = torch.tensor([[float('nan'), float('nan')], [1, 2]])
>>> t
tensor([[nan, nan],
[1., 2.]])
>>> t.nanquantile(0.5, dim=0)
tensor([1., 2.])
>>> t.nanquantile(0.5, dim=1)
tensor([ nan, 1.5000])
"""
...
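# A minimal sketch of the ``interpolation`` keyword documented above: for
# q=0.4 over three points, the default ``linear`` estimate falls between the
# ``lower`` and ``higher`` data points.
#
#   >>> t = torch.tensor([1., 2., 3.])
#   >>> t.nanquantile(0.4)
#   tensor(1.8000)
#   >>> t.nanquantile(0.4, interpolation='lower')
#   tensor(1.)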
def nansum(input: Tensor, dim: Optional[Union[_int, _size]] = None, keepdim: _bool = False, *, dtype: Optional[_dtype] = None, out: Optional[Tensor] = None) -> Tensor:
r"""
nansum(input, *, dtype=None) -> Tensor
Returns the sum of all elements, treating Not a Number (NaN) values as zero.
Args:
input (Tensor): the input tensor.
Keyword args:
dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.
If specified, the input tensor is cast to :attr:`dtype` before the operation
is performed. This is useful for preventing data type overflows. Default: None.
Example::
>>> a = torch.tensor([1., 2., float('nan'), 4.])
>>> torch.nansum(a)
tensor(7.)
.. function:: nansum(input, dim, keepdim=False, *, dtype=None) -> Tensor
:noindex:
Returns the sum of each row of the :attr:`input` tensor in the given
dimension :attr:`dim`, treating Not a Number (NaN) values as zero.
If :attr:`dim` is a list of dimensions, reduce over all of them.
If :attr:`keepdim` is ``True``, the output tensor is of the same size
as :attr:`input` except in the dimension(s) :attr:`dim` where it is of size 1.
Otherwise, :attr:`dim` is squeezed (see :func:`torch.squeeze`), resulting in the
output tensor having 1 (or ``len(dim)``) fewer dimension(s).
Args:
input (Tensor): the input tensor.
dim (int or tuple of ints, optional): the dimension or dimensions to reduce.
If ``None``, all dimensions are reduced.
keepdim (bool): whether the output tensor has :attr:`dim` retained or not.
Keyword args:
dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.
If specified, the input tensor is cast to :attr:`dtype` before the operation
is performed. This is useful for preventing data type overflows. Default: None.
Example::
>>> torch.nansum(torch.tensor([1., float("nan")]))
tensor(1.)
>>> a = torch.tensor([[1, 2], [3., float("nan")]])
>>> torch.nansum(a)
tensor(6.)
>>> torch.nansum(a, dim=0)
tensor([4., 2.])
>>> torch.nansum(a, dim=1)
tensor([3., 3.])
"""
...
@overload
def narrow(input: Tensor, dim: _int, start: Tensor, length: Union[_int, SymInt]) -> Tensor:
r"""
narrow(input, dim, start, length) -> Tensor
Returns a new tensor that is a narrowed version of the :attr:`input` tensor. Along
dimension :attr:`dim`, the result spans the elements from :attr:`start` to
``start + length`` (exclusive). The returned tensor and the :attr:`input` tensor share the same underlying storage.
Args:
input (Tensor): the tensor to narrow
dim (int): the dimension along which to narrow
start (int or Tensor): index of the element to start the narrowed dimension
from. Can be negative, which means indexing from the end of `dim`. If
`Tensor`, it must be a 0-dim integral `Tensor` (bools not allowed)
length (int): length of the narrowed dimension, must be weakly positive
Example::
>>> x = torch.tensor([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
>>> torch.narrow(x, 0, 0, 2)
tensor([[ 1, 2, 3],
[ 4, 5, 6]])
>>> torch.narrow(x, 1, 1, 2)
tensor([[ 2, 3],
[ 5, 6],
[ 8, 9]])
>>> torch.narrow(x, -1, torch.tensor(-1), 1)
tensor([[3],
[6],
[9]])
"""
...
@overload
def narrow(input: Tensor, dim: _int, start: Union[_int, SymInt], length: Union[_int, SymInt]) -> Tensor:
r"""
narrow(input, dim, start, length) -> Tensor
Returns a new tensor that is a narrowed version of the :attr:`input` tensor. Along
dimension :attr:`dim`, the result spans the elements from :attr:`start` to
``start + length`` (exclusive). The returned tensor and the :attr:`input` tensor share the same underlying storage.
Args:
input (Tensor): the tensor to narrow
dim (int): the dimension along which to narrow
start (int or Tensor): index of the element to start the narrowed dimension
from. Can be negative, which means indexing from the end of `dim`. If
`Tensor`, it must be a 0-dim integral `Tensor` (bools not allowed)
length (int): length of the narrowed dimension, must be weakly positive
Example::
>>> x = torch.tensor([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
>>> torch.narrow(x, 0, 0, 2)
tensor([[ 1, 2, 3],
[ 4, 5, 6]])
>>> torch.narrow(x, 1, 1, 2)
tensor([[ 2, 3],
[ 5, 6],
[ 8, 9]])
>>> torch.narrow(x, -1, torch.tensor(-1), 1)
tensor([[3],
[6],
[9]])
"""
...
def narrow_copy(input: Tensor, dim: _int, start: Union[_int, SymInt], length: Union[_int, SymInt], *, out: Optional[Tensor] = None) -> Tensor:
r"""
narrow_copy(input, dim, start, length, *, out=None) -> Tensor
Same as :meth:`Tensor.narrow` except this returns a copy rather
than shared storage. This is primarily for sparse tensors, which
do not have a shared-storage narrow method.
Args:
input (Tensor): the tensor to narrow
dim (int): the dimension along which to narrow
start (int): index of the element to start the narrowed dimension from. Can
be negative, which means indexing from the end of `dim`
length (int): length of the narrowed dimension, must be weakly positive
Keyword args:
out (Tensor, optional): the output tensor.
Example::
>>> x = torch.tensor([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
>>> torch.narrow_copy(x, 0, 0, 2)
tensor([[ 1, 2, 3],
[ 4, 5, 6]])
>>> torch.narrow_copy(x, 1, 1, 2)
tensor([[ 2, 3],
[ 5, 6],
[ 8, 9]])
>>> s = torch.arange(16).reshape(2, 2, 2, 2).to_sparse(2)
>>> torch.narrow_copy(s, 0, 0, 1)
tensor(indices=tensor([[0, 0],
[0, 1]]),
values=tensor([[[0, 1],
[2, 3]],
[[4, 5],
[6, 7]]]),
size=(1, 2, 2, 2), nnz=2, layout=torch.sparse_coo)
.. seealso::
:func:`torch.narrow` for a non-copy variant
"""
...
def native_batch_norm(input: Tensor, weight: Optional[Tensor], bias: Optional[Tensor], running_mean: Optional[Tensor], running_var: Optional[Tensor], training: _bool, momentum: _float, eps: _float, *, out: Union[Tensor, Tuple[Tensor, ...], List[Tensor], None] = None) -> Tuple[Tensor, Tensor, Tensor]: ...
def native_channel_shuffle(input: Tensor, groups: Union[_int, SymInt]) -> Tensor: ...
def native_dropout(input: Tensor, p: _float, train: Optional[_bool]) -> Tuple[Tensor, Tensor]: ...
def native_group_norm(input: Tensor, weight: Optional[Tensor], bias: Optional[Tensor], N: Union[_int, SymInt], C: Union[_int, SymInt], HxW: Union[_int, SymInt], group: _int, eps: _float) -> Tuple[Tensor, Tensor, Tensor]: ...
def native_layer_norm(input: Tensor, normalized_shape: Sequence[Union[_int, SymInt]], weight: Optional[Tensor], bias: Optional[Tensor], eps: _float) -> Tuple[Tensor, Tensor, Tensor]: ...
@overload
def native_norm(input: Tensor, p: Optional[Union[Number, _complex]], dim: Union[_int, _size], keepdim: _bool, dtype: Optional[_dtype]) -> Tensor: ...
@overload
def native_norm(input: Tensor, p: Union[Number, _complex] = 2) -> Tensor: ...
@overload
def ne(input: Tensor, other: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
r"""
ne(input, other, *, out=None) -> Tensor
Computes :math:`\text{input} \neq \text{other}` element-wise.
The second argument can be a number or a tensor whose shape is
:ref:`broadcastable <broadcasting-semantics>` with the first argument.
Args:
input (Tensor): the tensor to compare
other (Tensor or float): the tensor or value to compare
Keyword args:
out (Tensor, optional): the output tensor.
Returns:
A boolean tensor that is True where :attr:`input` is not equal to :attr:`other` and False elsewhere
Example::
>>> torch.ne(torch.tensor([[1, 2], [3, 4]]), torch.tensor([[1, 1], [4, 4]]))
tensor([[False, True], [True, False]])
"""
...
@overload
def ne(input: Tensor, other: Union[Number, _complex], *, out: Optional[Tensor] = None) -> Tensor:
r"""
ne(input, other, *, out=None) -> Tensor
Computes :math:`\text{input} \neq \text{other}` element-wise.
The second argument can be a number or a tensor whose shape is
:ref:`broadcastable <broadcasting-semantics>` with the first argument.
Args:
input (Tensor): the tensor to compare
other (Tensor or float): the tensor or value to compare
Keyword args:
out (Tensor, optional): the output tensor.
Returns:
A boolean tensor that is True where :attr:`input` is not equal to :attr:`other` and False elsewhere
Example::
>>> torch.ne(torch.tensor([[1, 2], [3, 4]]), torch.tensor([[1, 1], [4, 4]]))
tensor([[False, True], [True, False]])
"""
...
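# A minimal sketch of the scalar form described above: the second argument may
# be a number, which is broadcast against ``input``.
#
#   >>> torch.ne(torch.tensor([1, 2, 3]), 2)
#   tensor([ True, False,  True])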
def neg(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
r"""
neg(input, *, out=None) -> Tensor
Returns a new tensor with the negative of the elements of :attr:`input`.
.. math::
\text{out} = -1 \times \text{input}
Args:
input (Tensor): the input tensor.
Keyword args:
out (Tensor, optional): the output tensor.
Example::
>>> a = torch.randn(5)
>>> a
tensor([ 0.0090, -0.2262, -0.0682, -0.2866, 0.3940])
>>> torch.neg(a)
tensor([-0.0090, 0.2262, 0.0682, 0.2866, -0.3940])
"""
...
def neg_(input: Tensor) -> Tensor: ...
def negative(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
r"""
negative(input, *, out=None) -> Tensor
Alias for :func:`torch.neg`
"""
...
def negative_(input: Tensor) -> Tensor: ...
def nextafter(input: Tensor, other: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
r"""
nextafter(input, other, *, out=None) -> Tensor
Return the next floating-point value after :attr:`input` towards :attr:`other`, elementwise.
The shapes of ``input`` and ``other`` must be
:ref:`broadcastable <broadcasting-semantics>`.
Args:
input (Tensor): the first input tensor
other (Tensor): the second input tensor
Keyword args:
out (Tensor, optional): the output tensor.
Example::
>>> eps = torch.finfo(torch.float32).eps
>>> torch.nextafter(torch.tensor([1.0, 2.0]), torch.tensor([2.0, 1.0])) == torch.tensor([eps + 1, 2 - eps])
tensor([True, True])
"""
...
@overload
def nonzero(input: Tensor, *, as_tuple: Literal[False] = False, out: Optional[Tensor] = None) -> Tensor:
r"""
nonzero(input, *, out=None, as_tuple=False) -> LongTensor or tuple of LongTensors
.. note::
:func:`torch.nonzero(..., as_tuple=False) <torch.nonzero>` (default) returns a
2-D tensor where each row is the index for a nonzero value.
:func:`torch.nonzero(..., as_tuple=True) <torch.nonzero>` returns a tuple of 1-D
index tensors, allowing for advanced indexing, so ``x[x.nonzero(as_tuple=True)]``
gives all nonzero values of tensor ``x``. Of the returned tuple, each index tensor
contains nonzero indices for a certain dimension.
See below for more details on the two behaviors.
When :attr:`input` is on CUDA, :func:`torch.nonzero() <torch.nonzero>` causes
host-device synchronization.
**When** :attr:`as_tuple` **is** ``False`` **(default)**:
Returns a tensor containing the indices of all non-zero elements of
:attr:`input`. Each row in the result contains the indices of a non-zero
element in :attr:`input`. The result is sorted lexicographically, with
the last index changing the fastest (C-style).
If :attr:`input` has :math:`n` dimensions, then the resulting indices tensor
:attr:`out` is of size :math:`(z \times n)`, where :math:`z` is the total number of
non-zero elements in the :attr:`input` tensor.
**When** :attr:`as_tuple` **is** ``True``:
Returns a tuple of 1-D tensors, one for each dimension in :attr:`input`,
each containing the indices (in that dimension) of all non-zero elements of
:attr:`input` .
If :attr:`input` has :math:`n` dimensions, then the resulting tuple contains :math:`n`
tensors of size :math:`z`, where :math:`z` is the total number of
non-zero elements in the :attr:`input` tensor.
As a special case, when :attr:`input` has zero dimensions and a nonzero scalar
value, it is treated as a one-dimensional tensor with one element.
Args:
input (Tensor): the input tensor.
Keyword args:
out (LongTensor, optional): the output tensor containing indices
Returns:
LongTensor or tuple of LongTensor: If :attr:`as_tuple` is ``False``, the output
tensor containing indices. If :attr:`as_tuple` is ``True``, one 1-D tensor for
each dimension, containing the indices of each nonzero element along that
dimension.
Example::
>>> torch.nonzero(torch.tensor([1, 1, 1, 0, 1]))
tensor([[ 0],
[ 1],
[ 2],
[ 4]])
>>> torch.nonzero(torch.tensor([[0.6, 0.0, 0.0, 0.0],
... [0.0, 0.4, 0.0, 0.0],
... [0.0, 0.0, 1.2, 0.0],
... [0.0, 0.0, 0.0,-0.4]]))
tensor([[ 0, 0],
[ 1, 1],
[ 2, 2],
[ 3, 3]])
>>> torch.nonzero(torch.tensor([1, 1, 1, 0, 1]), as_tuple=True)
(tensor([0, 1, 2, 4]),)
>>> torch.nonzero(torch.tensor([[0.6, 0.0, 0.0, 0.0],
... [0.0, 0.4, 0.0, 0.0],
... [0.0, 0.0, 1.2, 0.0],
... [0.0, 0.0, 0.0,-0.4]]), as_tuple=True)
(tensor([0, 1, 2, 3]), tensor([0, 1, 2, 3]))
>>> torch.nonzero(torch.tensor(5), as_tuple=True)
(tensor([0]),)
"""
...
@overload
def nonzero(input: Tensor, *, as_tuple: Literal[True]) -> Tuple[Tensor, ...]:
r"""
nonzero(input, *, out=None, as_tuple=False) -> LongTensor or tuple of LongTensors
.. note::
:func:`torch.nonzero(..., as_tuple=False) <torch.nonzero>` (default) returns a
2-D tensor where each row is the index for a nonzero value.
:func:`torch.nonzero(..., as_tuple=True) <torch.nonzero>` returns a tuple of 1-D
index tensors, allowing for advanced indexing, so ``x[x.nonzero(as_tuple=True)]``
gives all nonzero values of tensor ``x``. Of the returned tuple, each index tensor
contains nonzero indices for a certain dimension.
See below for more details on the two behaviors.
When :attr:`input` is on CUDA, :func:`torch.nonzero() <torch.nonzero>` causes
host-device synchronization.
**When** :attr:`as_tuple` **is** ``False`` **(default)**:
Returns a tensor containing the indices of all non-zero elements of
:attr:`input`. Each row in the result contains the indices of a non-zero
element in :attr:`input`. The result is sorted lexicographically, with
the last index changing the fastest (C-style).
If :attr:`input` has :math:`n` dimensions, then the resulting indices tensor
:attr:`out` is of size :math:`(z \times n)`, where :math:`z` is the total number of
non-zero elements in the :attr:`input` tensor.
**When** :attr:`as_tuple` **is** ``True``:
Returns a tuple of 1-D tensors, one for each dimension in :attr:`input`,
each containing the indices (in that dimension) of all non-zero elements of
:attr:`input` .
If :attr:`input` has :math:`n` dimensions, then the resulting tuple contains :math:`n`
tensors of size :math:`z`, where :math:`z` is the total number of
non-zero elements in the :attr:`input` tensor.
As a special case, when :attr:`input` has zero dimensions and a nonzero scalar
value, it is treated as a one-dimensional tensor with one element.
Args:
input (Tensor): the input tensor.
Keyword args:
out (LongTensor, optional): the output tensor containing indices
Returns:
LongTensor or tuple of LongTensor: If :attr:`as_tuple` is ``False``, the output
tensor containing indices. If :attr:`as_tuple` is ``True``, one 1-D tensor for
each dimension, containing the indices of each nonzero element along that
dimension.
Example::
>>> torch.nonzero(torch.tensor([1, 1, 1, 0, 1]))
tensor([[ 0],
[ 1],
[ 2],
[ 4]])
>>> torch.nonzero(torch.tensor([[0.6, 0.0, 0.0, 0.0],
... [0.0, 0.4, 0.0, 0.0],
... [0.0, 0.0, 1.2, 0.0],
... [0.0, 0.0, 0.0,-0.4]]))
tensor([[ 0, 0],
[ 1, 1],
[ 2, 2],
[ 3, 3]])
>>> torch.nonzero(torch.tensor([1, 1, 1, 0, 1]), as_tuple=True)
(tensor([0, 1, 2, 4]),)
>>> torch.nonzero(torch.tensor([[0.6, 0.0, 0.0, 0.0],
... [0.0, 0.4, 0.0, 0.0],
... [0.0, 0.0, 1.2, 0.0],
... [0.0, 0.0, 0.0,-0.4]]), as_tuple=True)
(tensor([0, 1, 2, 3]), tensor([0, 1, 2, 3]))
>>> torch.nonzero(torch.tensor(5), as_tuple=True)
(tensor([0]),)
"""
...
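# A minimal sketch of the advanced-indexing use noted above: indexing with the
# ``as_tuple=True`` result recovers exactly the nonzero values.
#
#   >>> x = torch.tensor([[1, 0], [0, 3]])
#   >>> x[x.nonzero(as_tuple=True)]
#   tensor([1, 3])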
def nonzero_static(input: Tensor, *, size: _int, fill_value: _int = -1, out: Optional[Tensor] = None) -> Tensor: ...
def norm_except_dim(v: Tensor, pow: _int = 2, dim: _int = 0) -> Tensor: ...
@overload
def normal(mean: Tensor, std: Tensor, *, generator: Optional[Generator] = None, out: Optional[Tensor] = None) -> Tensor:
r"""
normal(mean, std, *, generator=None, out=None) -> Tensor
Returns a tensor of random numbers drawn from separate normal distributions
whose mean and standard deviation are given.
The :attr:`mean` is a tensor with the mean of
each output element's normal distribution
The :attr:`std` is a tensor with the standard deviation of
each output element's normal distribution
The shapes of :attr:`mean` and :attr:`std` don't need to match, but the
total number of elements in each tensor needs to be the same.
.. note:: When the shapes do not match, the shape of :attr:`mean`
is used as the shape for the returned output tensor
.. note:: When :attr:`std` is a CUDA tensor, this function synchronizes
its device with the CPU.
Args:
mean (Tensor): the tensor of per-element means
std (Tensor): the tensor of per-element standard deviations
Keyword args:
generator (:class:`torch.Generator`, optional): a pseudorandom number generator for sampling
out (Tensor, optional): the output tensor.
Example::
>>> torch.normal(mean=torch.arange(1., 11.), std=torch.arange(1, 0, -0.1))
tensor([ 1.0425, 3.5672, 2.7969, 4.2925, 4.7229, 6.2134,
8.0505, 8.1408, 9.0563, 10.0566])
.. function:: normal(mean=0.0, std, *, out=None) -> Tensor
:noindex:
Similar to the function above, but the means are shared among all drawn
elements.
Args:
mean (float, optional): the mean for all distributions
std (Tensor): the tensor of per-element standard deviations
Keyword args:
out (Tensor, optional): the output tensor.
Example::
>>> torch.normal(mean=0.5, std=torch.arange(1., 6.))
tensor([-1.2793, -1.0732, -2.0687, 5.1177, -1.2303])
.. function:: normal(mean, std=1.0, *, out=None) -> Tensor
:noindex:
Similar to the function above, but the standard deviations are shared among
all drawn elements.
Args:
mean (Tensor): the tensor of per-element means
std (float, optional): the standard deviation for all distributions
Keyword args:
out (Tensor, optional): the output tensor
Example::
>>> torch.normal(mean=torch.arange(1., 6.))
tensor([ 1.1552, 2.6148, 2.6535, 5.8318, 4.2361])
.. function:: normal(mean, std, size, *, out=None) -> Tensor
:noindex:
Similar to the function above, but the means and standard deviations are shared
among all drawn elements. The resulting tensor has size given by :attr:`size`.
Args:
mean (float): the mean for all distributions
std (float): the standard deviation for all distributions
size (int...): a sequence of integers defining the shape of the output tensor.
Keyword args:
out (Tensor, optional): the output tensor.
Example::
>>> torch.normal(2, 3, size=(1, 4))
tensor([[-1.3987, -1.9544, 3.6048, 0.7909]])
"""
...
@overload
def normal(mean: Tensor, std: _float = 1, *, generator: Optional[Generator] = None, out: Optional[Tensor] = None) -> Tensor:
r"""
normal(mean, std, *, generator=None, out=None) -> Tensor
Returns a tensor of random numbers drawn from separate normal distributions
whose mean and standard deviation are given.
The :attr:`mean` is a tensor with the mean of
each output element's normal distribution
The :attr:`std` is a tensor with the standard deviation of
each output element's normal distribution
The shapes of :attr:`mean` and :attr:`std` don't need to match, but the
total number of elements in each tensor needs to be the same.
.. note:: When the shapes do not match, the shape of :attr:`mean`
is used as the shape for the returned output tensor
.. note:: When :attr:`std` is a CUDA tensor, this function synchronizes
its device with the CPU.
Args:
mean (Tensor): the tensor of per-element means
std (Tensor): the tensor of per-element standard deviations
Keyword args:
generator (:class:`torch.Generator`, optional): a pseudorandom number generator for sampling
out (Tensor, optional): the output tensor.
Example::
>>> torch.normal(mean=torch.arange(1., 11.), std=torch.arange(1, 0, -0.1))
tensor([ 1.0425, 3.5672, 2.7969, 4.2925, 4.7229, 6.2134,
8.0505, 8.1408, 9.0563, 10.0566])
.. function:: normal(mean=0.0, std, *, out=None) -> Tensor
:noindex:
Similar to the function above, but the means are shared among all drawn
elements.
Args:
mean (float, optional): the mean for all distributions
std (Tensor): the tensor of per-element standard deviations
Keyword args:
out (Tensor, optional): the output tensor.
Example::
>>> torch.normal(mean=0.5, std=torch.arange(1., 6.))
tensor([-1.2793, -1.0732, -2.0687, 5.1177, -1.2303])
.. function:: normal(mean, std=1.0, *, out=None) -> Tensor
:noindex:
Similar to the function above, but the standard deviations are shared among
all drawn elements.
Args:
mean (Tensor): the tensor of per-element means
std (float, optional): the standard deviation for all distributions
Keyword args:
out (Tensor, optional): the output tensor
Example::
>>> torch.normal(mean=torch.arange(1., 6.))
tensor([ 1.1552, 2.6148, 2.6535, 5.8318, 4.2361])
.. function:: normal(mean, std, size, *, out=None) -> Tensor
:noindex:
Similar to the function above, but the means and standard deviations are shared
among all drawn elements. The resulting tensor has size given by :attr:`size`.
Args:
mean (float): the mean for all distributions
std (float): the standard deviation for all distributions
size (int...): a sequence of integers defining the shape of the output tensor.
Keyword args:
out (Tensor, optional): the output tensor.
Example::
>>> torch.normal(2, 3, size=(1, 4))
tensor([[-1.3987, -1.9544, 3.6048, 0.7909]])
"""
...
@overload
def normal(mean: _float, std: Tensor, *, generator: Optional[Generator] = None, out: Optional[Tensor] = None) -> Tensor:
r"""
normal(mean, std, *, generator=None, out=None) -> Tensor
Returns a tensor of random numbers drawn from separate normal distributions
whose mean and standard deviation are given.
The :attr:`mean` is a tensor with the mean of
each output element's normal distribution
The :attr:`std` is a tensor with the standard deviation of
each output element's normal distribution
The shapes of :attr:`mean` and :attr:`std` don't need to match, but the
total number of elements in each tensor needs to be the same.
.. note:: When the shapes do not match, the shape of :attr:`mean`
is used as the shape for the returned output tensor
.. note:: When :attr:`std` is a CUDA tensor, this function synchronizes
its device with the CPU.
Args:
mean (Tensor): the tensor of per-element means
std (Tensor): the tensor of per-element standard deviations
Keyword args:
generator (:class:`torch.Generator`, optional): a pseudorandom number generator for sampling
out (Tensor, optional): the output tensor.
Example::
>>> torch.normal(mean=torch.arange(1., 11.), std=torch.arange(1, 0, -0.1))
tensor([ 1.0425, 3.5672, 2.7969, 4.2925, 4.7229, 6.2134,
8.0505, 8.1408, 9.0563, 10.0566])
.. function:: normal(mean=0.0, std, *, out=None) -> Tensor
:noindex:
Similar to the function above, but the means are shared among all drawn
elements.
Args:
mean (float, optional): the mean for all distributions
std (Tensor): the tensor of per-element standard deviations
Keyword args:
out (Tensor, optional): the output tensor.
Example::
>>> torch.normal(mean=0.5, std=torch.arange(1., 6.))
tensor([-1.2793, -1.0732, -2.0687, 5.1177, -1.2303])
.. function:: normal(mean, std=1.0, *, out=None) -> Tensor
:noindex:
Similar to the function above, but the standard deviations are shared among
all drawn elements.
Args:
mean (Tensor): the tensor of per-element means
std (float, optional): the standard deviation for all distributions
Keyword args:
out (Tensor, optional): the output tensor
Example::
>>> torch.normal(mean=torch.arange(1., 6.))
tensor([ 1.1552, 2.6148, 2.6535, 5.8318, 4.2361])
.. function:: normal(mean, std, size, *, out=None) -> Tensor
:noindex:
Similar to the function above, but the means and standard deviations are shared
among all drawn elements. The resulting tensor has size given by :attr:`size`.
Args:
mean (float): the mean for all distributions
std (float): the standard deviation for all distributions
size (int...): a sequence of integers defining the shape of the output tensor.
Keyword args:
out (Tensor, optional): the output tensor.
Example::
>>> torch.normal(2, 3, size=(1, 4))
tensor([[-1.3987, -1.9544, 3.6048, 0.7909]])
"""
...
@overload
def normal(mean: _float, std: _float, size: Sequence[Union[_int, SymInt]], *, generator: Optional[Generator] = None, out: Optional[Tensor] = None, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor:
r"""
normal(mean, std, *, generator=None, out=None) -> Tensor
Returns a tensor of random numbers drawn from separate normal distributions
whose mean and standard deviation are given.
The :attr:`mean` is a tensor with the mean of
each output element's normal distribution
The :attr:`std` is a tensor with the standard deviation of
each output element's normal distribution
The shapes of :attr:`mean` and :attr:`std` don't need to match, but the
total number of elements in each tensor needs to be the same.
.. note:: When the shapes do not match, the shape of :attr:`mean`
is used as the shape for the returned output tensor
.. note:: When :attr:`std` is a CUDA tensor, this function synchronizes
its device with the CPU.
Args:
mean (Tensor): the tensor of per-element means
std (Tensor): the tensor of per-element standard deviations
Keyword args:
generator (:class:`torch.Generator`, optional): a pseudorandom number generator for sampling
out (Tensor, optional): the output tensor.
Example::
>>> torch.normal(mean=torch.arange(1., 11.), std=torch.arange(1, 0, -0.1))
tensor([ 1.0425, 3.5672, 2.7969, 4.2925, 4.7229, 6.2134,
8.0505, 8.1408, 9.0563, 10.0566])
.. function:: normal(mean=0.0, std, *, out=None) -> Tensor
:noindex:
Similar to the function above, but the means are shared among all drawn
elements.
Args:
mean (float, optional): the mean for all distributions
std (Tensor): the tensor of per-element standard deviations
Keyword args:
out (Tensor, optional): the output tensor.
Example::
>>> torch.normal(mean=0.5, std=torch.arange(1., 6.))
tensor([-1.2793, -1.0732, -2.0687, 5.1177, -1.2303])
.. function:: normal(mean, std=1.0, *, out=None) -> Tensor
:noindex:
Similar to the function above, but the standard deviations are shared among
all drawn elements.
Args:
mean (Tensor): the tensor of per-element means
std (float, optional): the standard deviation for all distributions
Keyword args:
out (Tensor, optional): the output tensor
Example::
>>> torch.normal(mean=torch.arange(1., 6.))
tensor([ 1.1552, 2.6148, 2.6535, 5.8318, 4.2361])
.. function:: normal(mean, std, size, *, out=None) -> Tensor
:noindex:
Similar to the function above, but the means and standard deviations are shared
among all drawn elements. The resulting tensor has size given by :attr:`size`.
Args:
mean (float): the mean for all distributions
std (float): the standard deviation for all distributions
size (int...): a sequence of integers defining the shape of the output tensor.
Keyword args:
out (Tensor, optional): the output tensor.
Example::
>>> torch.normal(2, 3, size=(1, 4))
tensor([[-1.3987, -1.9544, 3.6048, 0.7909]])
"""
...
@overload
def not_equal(input: Tensor, other: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
r"""
not_equal(input, other, *, out=None) -> Tensor
Alias for :func:`torch.ne`.
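A quick sanity check (a minimal sketch; the inputs below are illustrative):
Example::
>>> torch.not_equal(torch.tensor([1, 2, 3]), torch.tensor([1, 3, 3]))
tensor([False,  True, False])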
"""
...
@overload
def not_equal(input: Tensor, other: Union[Number, _complex], *, out: Optional[Tensor] = None) -> Tensor:
r"""
not_equal(input, other, *, out=None) -> Tensor
Alias for :func:`torch.ne`.
"""
...
@overload
def nuclear_norm(input: Tensor, dim: Union[_int, _size], keepdim: _bool = False, *, out: Optional[Tensor] = None) -> Tensor: ...
@overload
def nuclear_norm(input: Tensor, keepdim: _bool = False, *, out: Optional[Tensor] = None) -> Tensor: ...
def numel(self: Tensor) -> _int:
r"""
numel(input) -> int
Returns the total number of elements in the :attr:`input` tensor.
Args:
input (Tensor): the input tensor.
Example::
>>> a = torch.randn(1, 2, 3, 4, 5)
>>> torch.numel(a)
120
>>> a = torch.zeros(4,4)
>>> torch.numel(a)
16
"""
...
@overload
def ones(size: Sequence[Union[_int, SymInt]], *, out: Optional[Tensor] = None, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor:
r"""
ones(*size, *, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False) -> Tensor
Returns a tensor filled with the scalar value `1`, with the shape defined
by the variable argument :attr:`size`.
Args:
size (int...): a sequence of integers defining the shape of the output tensor.
Can be a variable number of arguments or a collection like a list or tuple.
Keyword arguments:
out (Tensor, optional): the output tensor.
dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.
Default: if ``None``, uses a global default (see :func:`torch.set_default_dtype`).
layout (:class:`torch.layout`, optional): the desired layout of returned Tensor.
Default: ``torch.strided``.
device (:class:`torch.device`, optional): the desired device of returned tensor.
Default: if ``None``, uses the current device for the default tensor type
(see :func:`torch.set_default_device`). :attr:`device` will be the CPU
for CPU tensor types and the current CUDA device for CUDA tensor types.
requires_grad (bool, optional): If autograd should record operations on the
returned tensor. Default: ``False``.
Example::
>>> torch.ones(2, 3)
tensor([[ 1., 1., 1.],
[ 1., 1., 1.]])
>>> torch.ones(5)
tensor([ 1., 1., 1., 1., 1.])
"""
...
@overload
def ones(*size: _int, out: Optional[Tensor] = None, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor:
r"""
ones(*size, *, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False) -> Tensor
Returns a tensor filled with the scalar value `1`, with the shape defined
by the variable argument :attr:`size`.
Args:
size (int...): a sequence of integers defining the shape of the output tensor.
Can be a variable number of arguments or a collection like a list or tuple.
Keyword arguments:
out (Tensor, optional): the output tensor.
dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.
Default: if ``None``, uses a global default (see :func:`torch.set_default_dtype`).
layout (:class:`torch.layout`, optional): the desired layout of returned Tensor.
Default: ``torch.strided``.
device (:class:`torch.device`, optional): the desired device of returned tensor.
Default: if ``None``, uses the current device for the default tensor type
(see :func:`torch.set_default_device`). :attr:`device` will be the CPU
for CPU tensor types and the current CUDA device for CUDA tensor types.
requires_grad (bool, optional): If autograd should record operations on the
returned tensor. Default: ``False``.
Example::
>>> torch.ones(2, 3)
tensor([[ 1., 1., 1.],
[ 1., 1., 1.]])
>>> torch.ones(5)
tensor([ 1., 1., 1., 1., 1.])
"""
...
@overload
def ones(size: _size, *, names: Optional[Sequence[Union[str, ellipsis, None]]], dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor:
r"""
ones(*size, *, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False) -> Tensor
Returns a tensor filled with the scalar value `1`, with the shape defined
by the variable argument :attr:`size`.
Args:
size (int...): a sequence of integers defining the shape of the output tensor.
Can be a variable number of arguments or a collection like a list or tuple.
Keyword arguments:
out (Tensor, optional): the output tensor.
dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.
Default: if ``None``, uses a global default (see :func:`torch.set_default_dtype`).
layout (:class:`torch.layout`, optional): the desired layout of returned Tensor.
Default: ``torch.strided``.
device (:class:`torch.device`, optional): the desired device of returned tensor.
Default: if ``None``, uses the current device for the default tensor type
(see :func:`torch.set_default_device`). :attr:`device` will be the CPU
for CPU tensor types and the current CUDA device for CUDA tensor types.
requires_grad (bool, optional): If autograd should record operations on the
returned tensor. Default: ``False``.
Example::
>>> torch.ones(2, 3)
tensor([[ 1., 1., 1.],
[ 1., 1., 1.]])
>>> torch.ones(5)
tensor([ 1., 1., 1., 1., 1.])
"""
...
@overload
def ones(*size: _int, names: Optional[Sequence[Union[str, ellipsis, None]]], dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor:
r"""
ones(*size, *, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False) -> Tensor
Returns a tensor filled with the scalar value `1`, with the shape defined
by the variable argument :attr:`size`.
Args:
size (int...): a sequence of integers defining the shape of the output tensor.
Can be a variable number of arguments or a collection like a list or tuple.
Keyword arguments:
out (Tensor, optional): the output tensor.
dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.
Default: if ``None``, uses a global default (see :func:`torch.set_default_dtype`).
layout (:class:`torch.layout`, optional): the desired layout of returned Tensor.
Default: ``torch.strided``.
device (:class:`torch.device`, optional): the desired device of returned tensor.
Default: if ``None``, uses the current device for the default tensor type
(see :func:`torch.set_default_device`). :attr:`device` will be the CPU
for CPU tensor types and the current CUDA device for CUDA tensor types.
requires_grad (bool, optional): If autograd should record operations on the
returned tensor. Default: ``False``.
Example::
>>> torch.ones(2, 3)
tensor([[ 1., 1., 1.],
[ 1., 1., 1.]])
>>> torch.ones(5)
tensor([ 1., 1., 1., 1., 1.])
"""
...
def ones_like(input: Tensor, *, memory_format: Optional[memory_format] = None, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor:
r"""
ones_like(input, *, dtype=None, layout=None, device=None, requires_grad=False, memory_format=torch.preserve_format) -> Tensor
Returns a tensor filled with the scalar value `1`, with the same size as
:attr:`input`. ``torch.ones_like(input)`` is equivalent to
``torch.ones(input.size(), dtype=input.dtype, layout=input.layout, device=input.device)``.
.. warning::
As of 0.4, this function does not support an :attr:`out` keyword. As an alternative,
the old ``torch.ones_like(input, out=output)`` is equivalent to
``torch.ones(input.size(), out=output)``.
Args:
input (Tensor): the size of :attr:`input` will determine size of the output tensor.
Keyword arguments:
dtype (:class:`torch.dtype`, optional): the desired data type of returned Tensor.
Default: if ``None``, defaults to the dtype of :attr:`input`.
layout (:class:`torch.layout`, optional): the desired layout of returned tensor.
Default: if ``None``, defaults to the layout of :attr:`input`.
device (:class:`torch.device`, optional): the desired device of returned tensor.
Default: if ``None``, defaults to the device of :attr:`input`.
requires_grad (bool, optional): If autograd should record operations on the
returned tensor. Default: ``False``.
memory_format (:class:`torch.memory_format`, optional): the desired memory format of
returned Tensor. Default: ``torch.preserve_format``.
Example::
>>> input = torch.empty(2, 3)
>>> torch.ones_like(input)
tensor([[ 1., 1., 1.],
[ 1., 1., 1.]])
"""
...
def orgqr(input: Tensor, input2: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
r"""
orgqr(input, tau) -> Tensor
Alias for :func:`torch.linalg.householder_product`.
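A minimal round-trip sketch (the input is random, so only the orthogonality check is shown):
Example::
>>> A = torch.randn(3, 3)
>>> h, tau = torch.geqrf(A)
>>> Q = torch.orgqr(h, tau)  # reconstruct Q from its Householder representation
>>> torch.allclose(Q.mT @ Q, torch.eye(3), atol=1e-5)
True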
"""
...
def ormqr(input: Tensor, input2: Tensor, input3: Tensor, left: _bool = True, transpose: _bool = False, *, out: Optional[Tensor] = None) -> Tensor:
r"""
ormqr(input, tau, other, left=True, transpose=False, *, out=None) -> Tensor
Computes the matrix-matrix multiplication of a product of Householder matrices with a general matrix.
Multiplies an :math:`m \times n` matrix `C` (given by :attr:`other`) with a matrix `Q`,
where `Q` is represented using Householder reflectors `(input, tau)`.
See `Representation of Orthogonal or Unitary Matrices`_ for further details.
If :attr:`left` is `True` then `op(Q)` times `C` is computed, otherwise the result is `C` times `op(Q)`.
When :attr:`left` is `True`, the implicit matrix `Q` has size :math:`m \times m`.
It has size :math:`n \times n` otherwise.
If :attr:`transpose` is `True` then `op` is the conjugate transpose operation, otherwise it's a no-op.
Supports inputs of float, double, cfloat and cdouble dtypes.
Also supports batched inputs, and, if the input is batched, the output is batched with the same dimensions.
.. seealso::
:func:`torch.geqrf` can be used to form the Householder representation `(input, tau)` of matrix `Q`
from the QR decomposition.
.. note::
This function supports backward but it is only fast when ``(input, tau)`` do not require gradients
and/or ``tau.size(-1)`` is very small.
Args:
input (Tensor): tensor of shape `(*, mn, k)` where `*` is zero or more batch dimensions
and `mn` equals `m` or `n` depending on :attr:`left`.
tau (Tensor): tensor of shape `(*, min(mn, k))` where `*` is zero or more batch dimensions.
other (Tensor): tensor of shape `(*, m, n)` where `*` is zero or more batch dimensions.
left (bool): controls the order of multiplication.
transpose (bool): controls whether the matrix `Q` is conjugate transposed or not.
Keyword args:
out (Tensor, optional): the output Tensor. Ignored if `None`. Default: `None`.
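A minimal consistency sketch (inputs are random, so only the check against an explicitly
materialized `Q` from :func:`torch.linalg.householder_product` is shown):
Example::
>>> A = torch.randn(3, 3)
>>> h, tau = torch.geqrf(A)
>>> C = torch.randn(3, 2)
>>> out = torch.ormqr(h, tau, C)  # computes Q @ C without forming Q explicitly
>>> Q = torch.linalg.householder_product(h, tau)
>>> torch.allclose(out, Q @ C, atol=1e-5)
True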
.. _Representation of Orthogonal or Unitary Matrices:
https://www.netlib.org/lapack/lug/node128.html
"""
...
def outer(input: Tensor, vec2: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
r"""
outer(input, vec2, *, out=None) -> Tensor
Outer product of :attr:`input` and :attr:`vec2`.
If :attr:`input` is a vector of size :math:`n` and :attr:`vec2` is a vector of
size :math:`m`, then :attr:`out` must be a matrix of size :math:`(n \times m)`.
.. note:: This function does not :ref:`broadcast <broadcasting-semantics>`.
Args:
input (Tensor): 1-D input vector
vec2 (Tensor): 1-D input vector
Keyword args:
out (Tensor, optional): optional output matrix
Example::
>>> v1 = torch.arange(1., 5.)
>>> v2 = torch.arange(1., 4.)
>>> torch.outer(v1, v2)
tensor([[ 1., 2., 3.],
[ 2., 4., 6.],
[ 3., 6., 9.],
[ 4., 8., 12.]])
"""
...
def pairwise_distance(x1: Tensor, x2: Tensor, p: _float = 2, eps: _float = 1e-06, keepdim: _bool = False) -> Tensor: ...
def pdist(input: Tensor, p: _float = 2) -> Tensor: ...
def permute(input: Tensor, dims: _size) -> Tensor:
r"""
permute(input, dims) -> Tensor
Returns a view of the original tensor :attr:`input` with its dimensions permuted.
Args:
input (Tensor): the input tensor.
dims (tuple of int): The desired ordering of dimensions
Example::
>>> x = torch.randn(2, 3, 5)
>>> x.size()
torch.Size([2, 3, 5])
>>> torch.permute(x, (2, 0, 1)).size()
torch.Size([5, 2, 3])
"""
...
def permute_copy(input: Tensor, dims: _size, *, out: Optional[Tensor] = None) -> Tensor:
r"""
Performs the same operation as :func:`torch.permute`, but all output tensors
are freshly created instead of aliasing the input.
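A small sketch of the non-aliasing behavior (deterministic inputs):
Example::
>>> x = torch.zeros(2, 3)
>>> y = torch.permute_copy(x, (1, 0))
>>> x.fill_(1.)  # mutating x leaves y untouched, since y is a copy, not a view
tensor([[1., 1., 1.],
[1., 1., 1.]])
>>> y
tensor([[0., 0.],
[0., 0.],
[0., 0.]])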
"""
...
def pinverse(input: Tensor, rcond: _float = 1e-15) -> Tensor:
r"""
pinverse(input, rcond=1e-15) -> Tensor
Alias for :func:`torch.linalg.pinv`
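A minimal property check (the input is random, so only the Moore-Penrose identity
``A @ pinv(A) @ A == A`` is verified):
Example::
>>> A = torch.randn(3, 2)
>>> P = torch.pinverse(A)
>>> torch.allclose(A @ P @ A, A, atol=1e-5)
True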
"""
...
def pixel_shuffle(input: Tensor, upscale_factor: _int) -> Tensor: ...
def pixel_unshuffle(input: Tensor, downscale_factor: _int) -> Tensor: ...
def poisson(input: Tensor, generator: Optional[Generator] = None) -> Tensor:
r"""
poisson(input, generator=None) -> Tensor
Returns a tensor of the same size as :attr:`input` with each element
sampled from a Poisson distribution with rate parameter given by the corresponding
element in :attr:`input`, i.e.,
.. math::
\text{out}_i \sim \text{Poisson}(\text{input}_i)
:attr:`input` must be non-negative.
Args:
input (Tensor): the input tensor containing the rates of the Poisson distribution
Keyword args:
generator (:class:`torch.Generator`, optional): a pseudorandom number generator for sampling
Example::
>>> rates = torch.rand(4, 4) * 5 # rate parameter between 0 and 5
>>> torch.poisson(rates)
tensor([[9., 1., 3., 5.],
[8., 6., 6., 0.],
[0., 4., 5., 3.],
[2., 1., 4., 2.]])
"""
...
def poisson_nll_loss(input: Tensor, target: Tensor, log_input: _bool, full: _bool, eps: _float, reduction: _int) -> Tensor: ...
def polar(abs: Tensor, angle: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
r"""
polar(abs, angle, *, out=None) -> Tensor
Constructs a complex tensor whose elements are Cartesian coordinates
corresponding to the polar coordinates with absolute value :attr:`abs` and angle
:attr:`angle`.
.. math::
\text{out} = \text{abs} \cdot \cos(\text{angle}) + \text{abs} \cdot \sin(\text{angle}) \cdot j
.. note::
`torch.polar` is similar to
`std::polar <https://en.cppreference.com/w/cpp/numeric/complex/polar>`_
and does not compute the polar decomposition
of a complex tensor like Python's `cmath.polar` and SciPy's `linalg.polar` do.
The behavior of this function is undefined if `abs` is negative or NaN, or if `angle` is
infinite.
Args:
abs (Tensor): The absolute value of the complex tensor. Must be float or double.
angle (Tensor): The angle of the complex tensor. Must be same dtype as
:attr:`abs`.
Keyword args:
out (Tensor): If the inputs are ``torch.float32``, must be
``torch.complex64``. If the inputs are ``torch.float64``, must be
``torch.complex128``.
Example::
>>> import numpy as np
>>> abs = torch.tensor([1, 2], dtype=torch.float64)
>>> angle = torch.tensor([np.pi / 2, 5 * np.pi / 4], dtype=torch.float64)
>>> z = torch.polar(abs, angle)
>>> z
tensor([(0.0000+1.0000j), (-1.4142-1.4142j)], dtype=torch.complex128)
"""
...
def polygamma(n: _int, input: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
r"""
polygamma(n, input, *, out=None) -> Tensor
Alias for :func:`torch.special.polygamma`.
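A quick sketch using known trigamma values (``polygamma(1, 1) == pi**2 / 6`` and
``polygamma(1, 0.5) == pi**2 / 2``):
Example::
>>> torch.polygamma(1, torch.tensor([1., 0.5]))
tensor([1.6449, 4.9348])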
"""
...
def positive(input: Tensor) -> Tensor:
r"""
positive(input) -> Tensor
Returns :attr:`input`.
Throws a runtime error if :attr:`input` is a bool tensor.
Args:
input (Tensor): the input tensor.
Example::
>>> t = torch.randn(5)
>>> t
tensor([ 0.0090, -0.2262, -0.0682, -0.2866, 0.3940])
>>> torch.positive(t)
tensor([ 0.0090, -0.2262, -0.0682, -0.2866, 0.3940])
"""
...
@overload
def pow(input: Tensor, exponent: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
r"""
pow(input, exponent, *, out=None) -> Tensor
Takes the power of each element in :attr:`input` with :attr:`exponent` and
returns a tensor with the result.
:attr:`exponent` can be either a single ``float`` number or a `Tensor`
with the same number of elements as :attr:`input`.
When :attr:`exponent` is a scalar value, the operation applied is:
.. math::
\text{out}_i = x_i ^ \text{exponent}
When :attr:`exponent` is a tensor, the operation applied is:
.. math::
\text{out}_i = x_i ^ {\text{exponent}_i}
When :attr:`exponent` is a tensor, the shapes of :attr:`input`
and :attr:`exponent` must be :ref:`broadcastable <broadcasting-semantics>`.
Args:
input (Tensor): the input tensor.
exponent (float or tensor): the exponent value
Keyword args:
out (Tensor, optional): the output tensor.
Example::
>>> a = torch.randn(4)
>>> a
tensor([ 0.4331, 1.2475, 0.6834, -0.2791])
>>> torch.pow(a, 2)
tensor([ 0.1875, 1.5561, 0.4670, 0.0779])
>>> exp = torch.arange(1., 5.)
>>> a = torch.arange(1., 5.)
>>> a
tensor([ 1., 2., 3., 4.])
>>> exp
tensor([ 1., 2., 3., 4.])
>>> torch.pow(a, exp)
tensor([ 1., 4., 27., 256.])
.. function:: pow(self, exponent, *, out=None) -> Tensor
:noindex:
:attr:`self` is a scalar ``float`` value, and :attr:`exponent` is a tensor.
The returned tensor :attr:`out` is of the same shape as :attr:`exponent`.
The operation applied is:
.. math::
\text{out}_i = \text{self} ^ {\text{exponent}_i}
Args:
self (float): the scalar base value for the power operation
exponent (Tensor): the exponent tensor
Keyword args:
out (Tensor, optional): the output tensor.
Example::
>>> exp = torch.arange(1., 5.)
>>> base = 2
>>> torch.pow(base, exp)
tensor([ 2., 4., 8., 16.])
"""
...
@overload
def pow(self: Union[Number, _complex], exponent: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
r"""
pow(input, exponent, *, out=None) -> Tensor
Takes the power of each element in :attr:`input` with :attr:`exponent` and
returns a tensor with the result.
:attr:`exponent` can be either a single ``float`` number or a `Tensor`
with the same number of elements as :attr:`input`.
When :attr:`exponent` is a scalar value, the operation applied is:
.. math::
\text{out}_i = x_i ^ \text{exponent}
When :attr:`exponent` is a tensor, the operation applied is:
.. math::
\text{out}_i = x_i ^ {\text{exponent}_i}
When :attr:`exponent` is a tensor, the shapes of :attr:`input`
and :attr:`exponent` must be :ref:`broadcastable <broadcasting-semantics>`.
Args:
input (Tensor): the input tensor.
exponent (float or tensor): the exponent value
Keyword args:
out (Tensor, optional): the output tensor.
Example::
>>> a = torch.randn(4)
>>> a
tensor([ 0.4331, 1.2475, 0.6834, -0.2791])
>>> torch.pow(a, 2)
tensor([ 0.1875, 1.5561, 0.4670, 0.0779])
>>> exp = torch.arange(1., 5.)
>>> a = torch.arange(1., 5.)
>>> a
tensor([ 1., 2., 3., 4.])
>>> exp
tensor([ 1., 2., 3., 4.])
>>> torch.pow(a, exp)
tensor([ 1., 4., 27., 256.])
.. function:: pow(self, exponent, *, out=None) -> Tensor
:noindex:
:attr:`self` is a scalar ``float`` value, and :attr:`exponent` is a tensor.
The returned tensor :attr:`out` is of the same shape as :attr:`exponent`.
The operation applied is:
.. math::
\text{out}_i = \text{self} ^ {\text{exponent}_i}
Args:
self (float): the scalar base value for the power operation
exponent (Tensor): the exponent tensor
Keyword args:
out (Tensor, optional): the output tensor.
Example::
>>> exp = torch.arange(1., 5.)
>>> base = 2
>>> torch.pow(base, exp)
tensor([ 2., 4., 8., 16.])
"""
...
@overload
def pow(input: Tensor, exponent: Union[Number, _complex], *, out: Optional[Tensor] = None) -> Tensor:
r"""
pow(input, exponent, *, out=None) -> Tensor
Takes the power of each element in :attr:`input` with :attr:`exponent` and
returns a tensor with the result.
:attr:`exponent` can be either a single ``float`` number or a `Tensor`
with the same number of elements as :attr:`input`.
When :attr:`exponent` is a scalar value, the operation applied is:
.. math::
\text{out}_i = x_i ^ \text{exponent}
When :attr:`exponent` is a tensor, the operation applied is:
.. math::
\text{out}_i = x_i ^ {\text{exponent}_i}
When :attr:`exponent` is a tensor, the shapes of :attr:`input`
and :attr:`exponent` must be :ref:`broadcastable <broadcasting-semantics>`.
Args:
input (Tensor): the input tensor.
exponent (float or tensor): the exponent value
Keyword args:
out (Tensor, optional): the output tensor.
Example::
>>> a = torch.randn(4)
>>> a
tensor([ 0.4331, 1.2475, 0.6834, -0.2791])
>>> torch.pow(a, 2)
tensor([ 0.1875, 1.5561, 0.4670, 0.0779])
>>> exp = torch.arange(1., 5.)
>>> a = torch.arange(1., 5.)
>>> a
tensor([ 1., 2., 3., 4.])
>>> exp
tensor([ 1., 2., 3., 4.])
>>> torch.pow(a, exp)
tensor([ 1., 4., 27., 256.])
.. function:: pow(self, exponent, *, out=None) -> Tensor
:noindex:
:attr:`self` is a scalar ``float`` value, and :attr:`exponent` is a tensor.
The returned tensor :attr:`out` is of the same shape as :attr:`exponent`.
The operation applied is:
.. math::
\text{out}_i = \text{self} ^ {\text{exponent}_i}
Args:
self (float): the scalar base value for the power operation
exponent (Tensor): the exponent tensor
Keyword args:
out (Tensor, optional): the output tensor.
Example::
>>> exp = torch.arange(1., 5.)
>>> base = 2
>>> torch.pow(base, exp)
tensor([ 2., 4., 8., 16.])
"""
...
def prelu(input: Tensor, weight: Tensor) -> Tensor: ...
@overload
def prod(input: Tensor, *, dtype: Optional[_dtype] = None) -> Tensor:
r"""
prod(input, *, dtype=None) -> Tensor
Returns the product of all elements in the :attr:`input` tensor.
Args:
input (Tensor): the input tensor.
Keyword args:
dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.
If specified, the input tensor is cast to :attr:`dtype` before the operation
is performed. This is useful for preventing data type overflows. Default: None.
Example::
>>> a = torch.randn(1, 3)
>>> a
tensor([[-0.8020, 0.5428, -1.5854]])
>>> torch.prod(a)
tensor(0.6902)
.. function:: prod(input, dim, keepdim=False, *, dtype=None) -> Tensor
:noindex:
Returns the product of each row of the :attr:`input` tensor in the given
dimension :attr:`dim`.
If :attr:`keepdim` is ``True``, the output tensor is of the same size
as :attr:`input` except in the dimension :attr:`dim` where it is of size 1.
Otherwise, :attr:`dim` is squeezed (see :func:`torch.squeeze`), resulting in
the output tensor having 1 fewer dimension than :attr:`input`.
Args:
input (Tensor): the input tensor.
dim (int): the dimension to reduce.
keepdim (bool): whether the output tensor has :attr:`dim` retained or not.
Keyword args:
dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.
If specified, the input tensor is cast to :attr:`dtype` before the operation
is performed. This is useful for preventing data type overflows. Default: None.
Example::
>>> a = torch.randn(4, 2)
>>> a
tensor([[ 0.5261, -0.3837],
[ 1.1857, -0.2498],
[-1.1646, 0.0705],
[ 1.1131, -1.0629]])
>>> torch.prod(a, 1)
tensor([-0.2018, -0.2962, -0.0821, -1.1831])
"""
...
@overload
def prod(input: Tensor, dim: _int, keepdim: _bool = False, *, dtype: Optional[_dtype] = None, out: Optional[Tensor] = None) -> Tensor:
r"""
prod(input, *, dtype=None) -> Tensor
Returns the product of all elements in the :attr:`input` tensor.
Args:
input (Tensor): the input tensor.
Keyword args:
dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.
If specified, the input tensor is cast to :attr:`dtype` before the operation
is performed. This is useful for preventing data type overflows. Default: None.
Example::
>>> a = torch.randn(1, 3)
>>> a
tensor([[-0.8020, 0.5428, -1.5854]])
>>> torch.prod(a)
tensor(0.6902)
.. function:: prod(input, dim, keepdim=False, *, dtype=None) -> Tensor
:noindex:
Returns the product of each row of the :attr:`input` tensor in the given
dimension :attr:`dim`.
If :attr:`keepdim` is ``True``, the output tensor is of the same size
as :attr:`input` except in the dimension :attr:`dim` where it is of size 1.
Otherwise, :attr:`dim` is squeezed (see :func:`torch.squeeze`), resulting in
the output tensor having 1 fewer dimension than :attr:`input`.
Args:
input (Tensor): the input tensor.
dim (int): the dimension to reduce.
keepdim (bool): whether the output tensor has :attr:`dim` retained or not.
Keyword args:
dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.
If specified, the input tensor is cast to :attr:`dtype` before the operation
is performed. This is useful for preventing data type overflows. Default: None.
Example::
>>> a = torch.randn(4, 2)
>>> a
tensor([[ 0.5261, -0.3837],
[ 1.1857, -0.2498],
[-1.1646, 0.0705],
[ 1.1131, -1.0629]])
>>> torch.prod(a, 1)
tensor([-0.2018, -0.2962, -0.0821, -1.1831])
"""
...
@overload
def prod(input: Tensor, dim: Union[str, ellipsis, None], keepdim: _bool = False, *, dtype: Optional[_dtype] = None, out: Optional[Tensor] = None) -> Tensor:
r"""
prod(input, *, dtype=None) -> Tensor
Returns the product of all elements in the :attr:`input` tensor.
Args:
input (Tensor): the input tensor.
Keyword args:
dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.
If specified, the input tensor is cast to :attr:`dtype` before the operation
is performed. This is useful for preventing data type overflows. Default: None.
Example::
>>> a = torch.randn(1, 3)
>>> a
tensor([[-0.8020, 0.5428, -1.5854]])
>>> torch.prod(a)
tensor(0.6902)
.. function:: prod(input, dim, keepdim=False, *, dtype=None) -> Tensor
:noindex:
Returns the product of each row of the :attr:`input` tensor in the given
dimension :attr:`dim`.
If :attr:`keepdim` is ``True``, the output tensor is of the same size
as :attr:`input` except in the dimension :attr:`dim` where it is of size 1.
Otherwise, :attr:`dim` is squeezed (see :func:`torch.squeeze`), resulting in
the output tensor having 1 fewer dimension than :attr:`input`.
Args:
input (Tensor): the input tensor.
dim (int): the dimension to reduce.
keepdim (bool): whether the output tensor has :attr:`dim` retained or not.
Keyword args:
dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.
If specified, the input tensor is cast to :attr:`dtype` before the operation
is performed. This is useful for preventing data type overflows. Default: None.
Example::
>>> a = torch.randn(4, 2)
>>> a
tensor([[ 0.5261, -0.3837],
[ 1.1857, -0.2498],
[-1.1646, 0.0705],
[ 1.1131, -1.0629]])
>>> torch.prod(a, 1)
tensor([-0.2018, -0.2962, -0.0821, -1.1831])
"""
...
def promote_types(type1: _dtype, type2: _dtype) -> _dtype:
r"""
promote_types(type1, type2) -> dtype
Returns the :class:`torch.dtype` with the smallest size and scalar kind that is
not smaller nor of lower kind than either `type1` or `type2`. See type promotion
:ref:`documentation <type-promotion-doc>` for more information on the type
promotion logic.
Args:
type1 (:class:`torch.dtype`)
type2 (:class:`torch.dtype`)
Example::
>>> torch.promote_types(torch.int32, torch.float32)
torch.float32
>>> torch.promote_types(torch.uint8, torch.long)
torch.long
"""
...
def put(input: Tensor, index: Tensor, source: Tensor, accumulate: _bool = False) -> Tensor: ...
def q_per_channel_axis(input: Tensor) -> _int: ...
def q_per_channel_scales(input: Tensor) -> Tensor: ...
def q_per_channel_zero_points(input: Tensor) -> Tensor: ...
def q_scale(input: Tensor) -> _float: ...
def q_zero_point(input: Tensor) -> _int: ...
def qr(input: Tensor, some: _bool = True, *, out: Union[Tensor, Tuple[Tensor, ...], List[Tensor], None] = None) -> torch.return_types.qr:
r"""
qr(input, some=True, *, out=None) -> (Tensor, Tensor)
Computes the QR decomposition of a matrix or a batch of matrices :attr:`input`,
and returns a namedtuple (Q, R) of tensors such that :math:`\text{input} = Q R`
with :math:`Q` being an orthogonal matrix or batch of orthogonal matrices and
:math:`R` being an upper triangular matrix or batch of upper triangular matrices.
If :attr:`some` is ``True``, then this function returns the thin (reduced) QR factorization.
Otherwise, if :attr:`some` is ``False``, this function returns the complete QR factorization.
.. warning::
:func:`torch.qr` is deprecated in favor of :func:`torch.linalg.qr`
and will be removed in a future PyTorch release. The boolean parameter :attr:`some` has been
replaced with a string parameter :attr:`mode`.
``Q, R = torch.qr(A)`` should be replaced with
.. code:: python
Q, R = torch.linalg.qr(A)
``Q, R = torch.qr(A, some=False)`` should be replaced with
.. code:: python
Q, R = torch.linalg.qr(A, mode="complete")
.. warning::
If you plan to backpropagate through QR, note that the current backward implementation
is only well-defined when the first :math:`\min(input.size(-1), input.size(-2))`
columns of :attr:`input` are linearly independent.
This behavior will probably change once QR supports pivoting.
.. note:: This function uses LAPACK for CPU inputs and MAGMA for CUDA inputs,
and may produce different (valid) decompositions on different device types
or different platforms.
Args:
input (Tensor): the input tensor of size :math:`(*, m, n)` where `*` is zero or more
batch dimensions consisting of matrices of dimension :math:`m \times n`.
some (bool, optional): Set to ``True`` for reduced QR decomposition and ``False`` for
complete QR decomposition. If `k = min(m, n)` then:
* ``some=True`` : returns `(Q, R)` with dimensions (m, k), (k, n) (default)
* ``some=False``: returns `(Q, R)` with dimensions (m, m), (m, n)
Keyword args:
out (tuple, optional): tuple of `Q` and `R` tensors.
The dimensions of `Q` and `R` are detailed in the description of :attr:`some` above.
Example::
>>> a = torch.tensor([[12., -51, 4], [6, 167, -68], [-4, 24, -41]])
>>> q, r = torch.qr(a)
>>> q
tensor([[-0.8571, 0.3943, 0.3314],
[-0.4286, -0.9029, -0.0343],
[ 0.2857, -0.1714, 0.9429]])
>>> r
tensor([[ -14.0000, -21.0000, 14.0000],
[ 0.0000, -175.0000, 70.0000],
[ 0.0000, 0.0000, -35.0000]])
>>> torch.mm(q, r).round()
tensor([[ 12., -51., 4.],
[ 6., 167., -68.],
[ -4., 24., -41.]])
>>> torch.mm(q.t(), q).round()
tensor([[ 1., 0., 0.],
[ 0., 1., -0.],
[ 0., -0., 1.]])
>>> a = torch.randn(3, 4, 5)
>>> q, r = torch.qr(a, some=False)
>>> torch.allclose(torch.matmul(q, r), a)
True
>>> torch.allclose(torch.matmul(q.mT, q), torch.eye(4))
True
"""
...
@overload
def quantile(input: Tensor, q: Tensor, dim: Optional[_int] = None, keepdim: _bool = False, *, interpolation: str = "linear", out: Optional[Tensor] = None) -> Tensor:
r"""
quantile(input, q, dim=None, keepdim=False, *, interpolation='linear', out=None) -> Tensor
Computes the q-th quantiles of each row of the :attr:`input` tensor along the dimension :attr:`dim`.
To compute the quantile, we map q in [0, 1] to the range of indices [0, n] to find the location
of the quantile in the sorted input. If the quantile lies between two data points ``a < b`` with
indices ``i`` and ``j`` in the sorted order, the result is computed according to the given
:attr:`interpolation` method as follows:
- ``linear``: ``a + (b - a) * fraction``, where ``fraction`` is the fractional part of the computed quantile index.
- ``lower``: ``a``.
- ``higher``: ``b``.
- ``nearest``: ``a`` or ``b``, whichever's index is closer to the computed quantile index (rounding down for .5 fractions).
- ``midpoint``: ``(a + b) / 2``.
If :attr:`q` is a 1D tensor, the first dimension of the output represents the quantiles and has size
equal to the size of :attr:`q`, the remaining dimensions are what remains from the reduction.
.. note::
By default :attr:`dim` is ``None`` resulting in the :attr:`input` tensor being flattened before computation.
Args:
input (Tensor): the input tensor.
q (float or Tensor): a scalar or 1D tensor of values in the range [0, 1].
dim (int): the dimension to reduce.
keepdim (bool): whether the output tensor has :attr:`dim` retained or not.
Keyword arguments:
interpolation (str): interpolation method to use when the desired quantile lies between two data points.
Can be ``linear``, ``lower``, ``higher``, ``midpoint`` and ``nearest``.
Default is ``linear``.
out (Tensor, optional): the output tensor.
Example::
>>> a = torch.randn(2, 3)
>>> a
tensor([[ 0.0795, -1.2117, 0.9765],
[ 1.1707, 0.6706, 0.4884]])
>>> q = torch.tensor([0.25, 0.5, 0.75])
>>> torch.quantile(a, q, dim=1, keepdim=True)
tensor([[[-0.5661],
[ 0.5795]],
[[ 0.0795],
[ 0.6706]],
[[ 0.5280],
[ 0.9206]]])
>>> torch.quantile(a, q, dim=1, keepdim=True).shape
torch.Size([3, 2, 1])
>>> a = torch.arange(4.)
>>> a
tensor([0., 1., 2., 3.])
>>> torch.quantile(a, 0.6, interpolation='linear')
tensor(1.8000)
>>> torch.quantile(a, 0.6, interpolation='lower')
tensor(1.)
>>> torch.quantile(a, 0.6, interpolation='higher')
tensor(2.)
>>> torch.quantile(a, 0.6, interpolation='midpoint')
tensor(1.5000)
>>> torch.quantile(a, 0.6, interpolation='nearest')
tensor(2.)
>>> torch.quantile(a, 0.4, interpolation='nearest')
tensor(1.)
"""
...
@overload
def quantile(input: Tensor, q: _float, dim: Optional[_int] = None, keepdim: _bool = False, *, interpolation: str = "linear", out: Optional[Tensor] = None) -> Tensor:
r"""
quantile(input, q, dim=None, keepdim=False, *, interpolation='linear', out=None) -> Tensor
Computes the q-th quantiles of each row of the :attr:`input` tensor along the dimension :attr:`dim`.
To compute the quantile, we map q in [0, 1] to the range of indices [0, n] to find the location
of the quantile in the sorted input. If the quantile lies between two data points ``a < b`` with
indices ``i`` and ``j`` in the sorted order, the result is computed according to the given
:attr:`interpolation` method as follows:
- ``linear``: ``a + (b - a) * fraction``, where ``fraction`` is the fractional part of the computed quantile index.
- ``lower``: ``a``.
- ``higher``: ``b``.
- ``nearest``: ``a`` or ``b``, whichever's index is closer to the computed quantile index (rounding down for .5 fractions).
- ``midpoint``: ``(a + b) / 2``.
If :attr:`q` is a 1D tensor, the first dimension of the output represents the quantiles and has size
equal to the size of :attr:`q`, the remaining dimensions are what remains from the reduction.
.. note::
By default :attr:`dim` is ``None`` resulting in the :attr:`input` tensor being flattened before computation.
Args:
input (Tensor): the input tensor.
q (float or Tensor): a scalar or 1D tensor of values in the range [0, 1].
dim (int): the dimension to reduce.
keepdim (bool): whether the output tensor has :attr:`dim` retained or not.
Keyword arguments:
interpolation (str): interpolation method to use when the desired quantile lies between two data points.
Can be ``linear``, ``lower``, ``higher``, ``midpoint`` and ``nearest``.
Default is ``linear``.
out (Tensor, optional): the output tensor.
Example::
>>> a = torch.randn(2, 3)
>>> a
tensor([[ 0.0795, -1.2117, 0.9765],
[ 1.1707, 0.6706, 0.4884]])
>>> q = torch.tensor([0.25, 0.5, 0.75])
>>> torch.quantile(a, q, dim=1, keepdim=True)
tensor([[[-0.5661],
[ 0.5795]],
[[ 0.0795],
[ 0.6706]],
[[ 0.5280],
[ 0.9206]]])
>>> torch.quantile(a, q, dim=1, keepdim=True).shape
torch.Size([3, 2, 1])
>>> a = torch.arange(4.)
>>> a
tensor([0., 1., 2., 3.])
>>> torch.quantile(a, 0.6, interpolation='linear')
tensor(1.8000)
>>> torch.quantile(a, 0.6, interpolation='lower')
tensor(1.)
>>> torch.quantile(a, 0.6, interpolation='higher')
tensor(2.)
>>> torch.quantile(a, 0.6, interpolation='midpoint')
tensor(1.5000)
>>> torch.quantile(a, 0.6, interpolation='nearest')
tensor(2.)
>>> torch.quantile(a, 0.4, interpolation='nearest')
tensor(1.)
"""
...
def quantize_per_channel(input: Tensor, scales: Tensor, zero_points: Tensor, axis: _int, dtype: _dtype) -> Tensor:
r"""
quantize_per_channel(input, scales, zero_points, axis, dtype) -> Tensor
Converts a float tensor to a per-channel quantized tensor with given scales and zero points.
Arguments:
input (Tensor): float tensor to quantize
scales (Tensor): float 1D tensor of scales to use, size should match ``input.size(axis)``
zero_points (Tensor): integer 1D tensor of zero points to use, size should match ``input.size(axis)``
axis (int): dimension along which to apply per-channel quantization
dtype (:class:`torch.dtype`): the desired data type of returned tensor.
Has to be one of the quantized dtypes: ``torch.quint8``, ``torch.qint8``, ``torch.qint32``
Returns:
Tensor: A newly quantized tensor
Example::
>>> x = torch.tensor([[-1.0, 0.0], [1.0, 2.0]])
>>> torch.quantize_per_channel(x, torch.tensor([0.1, 0.01]), torch.tensor([10, 0]), 0, torch.quint8)
tensor([[-1., 0.],
[ 1., 2.]], size=(2, 2), dtype=torch.quint8,
quantization_scheme=torch.per_channel_affine,
scale=tensor([0.1000, 0.0100], dtype=torch.float64),
zero_point=tensor([10, 0]), axis=0)
>>> torch.quantize_per_channel(x, torch.tensor([0.1, 0.01]), torch.tensor([10, 0]), 0, torch.quint8).int_repr()
tensor([[ 0, 10],
[100, 200]], dtype=torch.uint8)
"""
...
@overload
def quantize_per_tensor(input: Tensor, scale: Tensor, zero_point: Tensor, dtype: _dtype) -> Tensor:
r"""
quantize_per_tensor(input, scale, zero_point, dtype) -> Tensor
Converts a float tensor to a quantized tensor with given scale and zero point.
Arguments:
input (Tensor): float tensor or list of tensors to quantize
scale (float or Tensor): scale to apply in quantization formula
zero_point (int or Tensor): offset in integer value that maps to float zero
dtype (:class:`torch.dtype`): the desired data type of returned tensor.
Has to be one of the quantized dtypes: ``torch.quint8``, ``torch.qint8``, ``torch.qint32``
Returns:
Tensor: A newly quantized tensor or list of quantized tensors.
Example::
>>> torch.quantize_per_tensor(torch.tensor([-1.0, 0.0, 1.0, 2.0]), 0.1, 10, torch.quint8)
tensor([-1., 0., 1., 2.], size=(4,), dtype=torch.quint8,
quantization_scheme=torch.per_tensor_affine, scale=0.1, zero_point=10)
>>> torch.quantize_per_tensor(torch.tensor([-1.0, 0.0, 1.0, 2.0]), 0.1, 10, torch.quint8).int_repr()
tensor([ 0, 10, 20, 30], dtype=torch.uint8)
>>> torch.quantize_per_tensor([torch.tensor([-1.0, 0.0]), torch.tensor([-2.0, 2.0])],
... torch.tensor([0.1, 0.2]), torch.tensor([10, 20]), torch.quint8)
(tensor([-1., 0.], size=(2,), dtype=torch.quint8,
quantization_scheme=torch.per_tensor_affine, scale=0.1, zero_point=10),
tensor([-2., 2.], size=(2,), dtype=torch.quint8,
quantization_scheme=torch.per_tensor_affine, scale=0.2, zero_point=20))
>>> torch.quantize_per_tensor(torch.tensor([-1.0, 0.0, 1.0, 2.0]), torch.tensor(0.1), torch.tensor(10), torch.quint8)
tensor([-1., 0., 1., 2.], size=(4,), dtype=torch.quint8,
quantization_scheme=torch.per_tensor_affine, scale=0.10, zero_point=10)
"""
...
@overload
def quantize_per_tensor(input: Tensor, scale: _float, zero_point: _int, dtype: _dtype) -> Tensor:
r"""
quantize_per_tensor(input, scale, zero_point, dtype) -> Tensor
Converts a float tensor to a quantized tensor with given scale and zero point.
Arguments:
input (Tensor): float tensor or list of tensors to quantize
scale (float or Tensor): scale to apply in quantization formula
zero_point (int or Tensor): offset in integer value that maps to float zero
dtype (:class:`torch.dtype`): the desired data type of returned tensor.
Has to be one of the quantized dtypes: ``torch.quint8``, ``torch.qint8``, ``torch.qint32``
Returns:
Tensor: A newly quantized tensor or list of quantized tensors.
Example::
>>> torch.quantize_per_tensor(torch.tensor([-1.0, 0.0, 1.0, 2.0]), 0.1, 10, torch.quint8)
tensor([-1., 0., 1., 2.], size=(4,), dtype=torch.quint8,
quantization_scheme=torch.per_tensor_affine, scale=0.1, zero_point=10)
>>> torch.quantize_per_tensor(torch.tensor([-1.0, 0.0, 1.0, 2.0]), 0.1, 10, torch.quint8).int_repr()
tensor([ 0, 10, 20, 30], dtype=torch.uint8)
>>> torch.quantize_per_tensor([torch.tensor([-1.0, 0.0]), torch.tensor([-2.0, 2.0])],
... torch.tensor([0.1, 0.2]), torch.tensor([10, 20]), torch.quint8)
(tensor([-1., 0.], size=(2,), dtype=torch.quint8,
quantization_scheme=torch.per_tensor_affine, scale=0.1, zero_point=10),
tensor([-2., 2.], size=(2,), dtype=torch.quint8,
quantization_scheme=torch.per_tensor_affine, scale=0.2, zero_point=20))
>>> torch.quantize_per_tensor(torch.tensor([-1.0, 0.0, 1.0, 2.0]), torch.tensor(0.1), torch.tensor(10), torch.quint8)
tensor([-1., 0., 1., 2.], size=(4,), dtype=torch.quint8,
quantization_scheme=torch.per_tensor_affine, scale=0.10, zero_point=10)
"""
...
@overload
def quantize_per_tensor(tensors: Union[Tuple[Tensor, ...], List[Tensor]], scales: Tensor, zero_points: Tensor, dtype: _dtype) -> Tuple[Tensor, ...]:
r"""
quantize_per_tensor(input, scale, zero_point, dtype) -> Tensor
Converts a float tensor to a quantized tensor with given scale and zero point.
Arguments:
input (Tensor): float tensor or list of tensors to quantize
scale (float or Tensor): scale to apply in quantization formula
zero_point (int or Tensor): offset in integer value that maps to float zero
dtype (:class:`torch.dtype`): the desired data type of returned tensor.
Has to be one of the quantized dtypes: ``torch.quint8``, ``torch.qint8``, ``torch.qint32``
Returns:
Tensor: A newly quantized tensor or list of quantized tensors.
Example::
>>> torch.quantize_per_tensor(torch.tensor([-1.0, 0.0, 1.0, 2.0]), 0.1, 10, torch.quint8)
tensor([-1., 0., 1., 2.], size=(4,), dtype=torch.quint8,
quantization_scheme=torch.per_tensor_affine, scale=0.1, zero_point=10)
>>> torch.quantize_per_tensor(torch.tensor([-1.0, 0.0, 1.0, 2.0]), 0.1, 10, torch.quint8).int_repr()
tensor([ 0, 10, 20, 30], dtype=torch.uint8)
>>> torch.quantize_per_tensor([torch.tensor([-1.0, 0.0]), torch.tensor([-2.0, 2.0])],
... torch.tensor([0.1, 0.2]), torch.tensor([10, 20]), torch.quint8)
(tensor([-1., 0.], size=(2,), dtype=torch.quint8,
quantization_scheme=torch.per_tensor_affine, scale=0.1, zero_point=10),
tensor([-2., 2.], size=(2,), dtype=torch.quint8,
quantization_scheme=torch.per_tensor_affine, scale=0.2, zero_point=20))
>>> torch.quantize_per_tensor(torch.tensor([-1.0, 0.0, 1.0, 2.0]), torch.tensor(0.1), torch.tensor(10), torch.quint8)
tensor([-1., 0., 1., 2.], size=(4,), dtype=torch.quint8,
quantization_scheme=torch.per_tensor_affine, scale=0.10, zero_point=10)
"""
...
def quantize_per_tensor_dynamic(input: Tensor, dtype: _dtype, reduce_range: _bool) -> Tensor:
r"""
quantize_per_tensor_dynamic(input, dtype, reduce_range) -> Tensor
Converts a float tensor to a quantized tensor with scale and zero_point calculated
dynamically based on the input.
Arguments:
input (Tensor): float tensor or list of tensors to quantize
dtype (:class:`torch.dtype`): the desired data type of returned tensor.
Has to be one of the quantized dtypes: ``torch.quint8``, ``torch.qint8``
reduce_range (bool): a flag to indicate whether to reduce the range of quantized
data by 1 bit; this is required to avoid instruction overflow on some hardware
Returns:
Tensor: A newly (dynamically) quantized tensor
Example::
>>> t = torch.quantize_per_tensor_dynamic(torch.tensor([-1.0, 0.0, 1.0, 2.0]), torch.quint8, False)
>>> print(t)
tensor([-1., 0., 1., 2.], size=(4,), dtype=torch.quint8,
quantization_scheme=torch.per_tensor_affine, scale=0.011764705882352941,
zero_point=85)
>>> t.int_repr()
tensor([ 0, 85, 170, 255], dtype=torch.uint8)
"""
...
def quantized_batch_norm(input: Tensor, weight: Optional[Tensor], bias: Optional[Tensor], mean: Tensor, var: Tensor, eps: _float, output_scale: _float, output_zero_point: _int) -> Tensor:
r"""
quantized_batch_norm(input, weight=None, bias=None, mean, var, eps, output_scale, output_zero_point) -> Tensor
Applies batch normalization on a 4D (NCHW) quantized tensor.
.. math::
y = \frac{x - \mathrm{E}[x]}{\sqrt{\mathrm{Var}[x] + \epsilon}} * \gamma + \beta
Arguments:
input (Tensor): quantized tensor
weight (Tensor): float tensor that corresponds to the gamma, size C
bias (Tensor): float tensor that corresponds to the beta, size C
mean (Tensor): float mean value in batch normalization, size C
var (Tensor): float tensor for variance, size C
eps (float): a value added to the denominator for numerical stability.
output_scale (float): output quantized tensor scale
output_zero_point (int): output quantized tensor zero_point
Returns:
Tensor: A quantized tensor with batch normalization applied.
Example::
>>> qx = torch.quantize_per_tensor(torch.rand(2, 2, 2, 2), 1.5, 3, torch.quint8)
>>> torch.quantized_batch_norm(qx, torch.ones(2), torch.zeros(2), torch.rand(2), torch.rand(2), 0.00001, 0.2, 2)
tensor([[[[-0.2000, -0.2000],
[ 1.6000, -0.2000]],
[[-0.4000, -0.4000],
[-0.4000, 0.6000]]],
[[[-0.2000, -0.2000],
[-0.2000, -0.2000]],
[[ 0.6000, -0.4000],
[ 0.6000, -0.4000]]]], size=(2, 2, 2, 2), dtype=torch.quint8,
quantization_scheme=torch.per_tensor_affine, scale=0.2, zero_point=2)
"""
...
def quantized_gru_cell(input: Tensor, hx: Tensor, w_ih: Tensor, w_hh: Tensor, b_ih: Tensor, b_hh: Tensor, packed_ih: Tensor, packed_hh: Tensor, col_offsets_ih: Tensor, col_offsets_hh: Tensor, scale_ih: Union[Number, _complex], scale_hh: Union[Number, _complex], zero_point_ih: Union[Number, _complex], zero_point_hh: Union[Number, _complex]) -> Tensor: ...
def quantized_lstm_cell(input: Tensor, hx: Union[Tuple[Tensor, ...], List[Tensor]], w_ih: Tensor, w_hh: Tensor, b_ih: Tensor, b_hh: Tensor, packed_ih: Tensor, packed_hh: Tensor, col_offsets_ih: Tensor, col_offsets_hh: Tensor, scale_ih: Union[Number, _complex], scale_hh: Union[Number, _complex], zero_point_ih: Union[Number, _complex], zero_point_hh: Union[Number, _complex]) -> Tuple[Tensor, Tensor]: ...
def quantized_max_pool1d(input: Tensor, kernel_size: Union[_int, _size], stride: Union[_int, _size] = (), padding: Union[_int, _size] = 0, dilation: Union[_int, _size] = 1, ceil_mode: _bool = False) -> Tensor:
r"""
quantized_max_pool1d(input, kernel_size, stride=[], padding=0, dilation=1, ceil_mode=False) -> Tensor
Applies a 1D max pooling over an input quantized tensor composed of several input planes.
Arguments:
input (Tensor): quantized tensor
kernel_size (``list of int``): the size of the sliding window
stride (``list of int``, optional): the stride of the sliding window
padding (``list of int``, optional): padding to be added on both sides, must be >= 0 and <= kernel_size / 2
dilation (``list of int``, optional): The stride between elements within a sliding window, must be > 0. Default 1
ceil_mode (bool, optional): If True, will use ceil instead of floor to compute the output shape.
Defaults to False.
Returns:
Tensor: A quantized tensor with max_pool1d applied.
Example::
>>> qx = torch.quantize_per_tensor(torch.rand(2, 2), 1.5, 3, torch.quint8)
>>> torch.quantized_max_pool1d(qx, [2])
tensor([[0.0000],
[1.5000]], size=(2, 1), dtype=torch.quint8,
quantization_scheme=torch.per_tensor_affine, scale=1.5, zero_point=3)
"""
...
def quantized_max_pool2d(input: Tensor, kernel_size: Union[_int, _size], stride: Union[_int, _size] = (), padding: Union[_int, _size] = 0, dilation: Union[_int, _size] = 1, ceil_mode: _bool = False) -> Tensor:
r"""
quantized_max_pool2d(input, kernel_size, stride=[], padding=0, dilation=1, ceil_mode=False) -> Tensor
Applies a 2D max pooling over an input quantized tensor composed of several input planes.
Arguments:
input (Tensor): quantized tensor
kernel_size (``list of int``): the size of the sliding window
stride (``list of int``, optional): the stride of the sliding window
padding (``list of int``, optional): padding to be added on both sides, must be >= 0 and <= kernel_size / 2
dilation (``list of int``, optional): The stride between elements within a sliding window, must be > 0. Default 1
ceil_mode (bool, optional): If True, will use ceil instead of floor to compute the output shape.
Defaults to False.
Returns:
Tensor: A quantized tensor with max_pool2d applied.
Example::
>>> qx = torch.quantize_per_tensor(torch.rand(2, 2, 2, 2), 1.5, 3, torch.quint8)
>>> torch.quantized_max_pool2d(qx, [2,2])
tensor([[[[1.5000]],
[[1.5000]]],
[[[0.0000]],
[[0.0000]]]], size=(2, 2, 1, 1), dtype=torch.quint8,
quantization_scheme=torch.per_tensor_affine, scale=1.5, zero_point=3)
"""
...
def quantized_max_pool3d(input: Tensor, kernel_size: Union[_int, _size], stride: Union[_int, _size] = (), padding: Union[_int, _size] = 0, dilation: Union[_int, _size] = 1, ceil_mode: _bool = False) -> Tensor: ...
def quantized_rnn_relu_cell(input: Tensor, hx: Tensor, w_ih: Tensor, w_hh: Tensor, b_ih: Tensor, b_hh: Tensor, packed_ih: Tensor, packed_hh: Tensor, col_offsets_ih: Tensor, col_offsets_hh: Tensor, scale_ih: Union[Number, _complex], scale_hh: Union[Number, _complex], zero_point_ih: Union[Number, _complex], zero_point_hh: Union[Number, _complex]) -> Tensor: ...
def quantized_rnn_tanh_cell(input: Tensor, hx: Tensor, w_ih: Tensor, w_hh: Tensor, b_ih: Tensor, b_hh: Tensor, packed_ih: Tensor, packed_hh: Tensor, col_offsets_ih: Tensor, col_offsets_hh: Tensor, scale_ih: Union[Number, _complex], scale_hh: Union[Number, _complex], zero_point_ih: Union[Number, _complex], zero_point_hh: Union[Number, _complex]) -> Tensor: ...
def rad2deg(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
r"""
rad2deg(input, *, out=None) -> Tensor
Returns a new tensor with each of the elements of :attr:`input`
converted from angles in radians to degrees.
Args:
input (Tensor): the input tensor.
Keyword arguments:
out (Tensor, optional): the output tensor.
Example::
>>> a = torch.tensor([[3.142, -3.142], [6.283, -6.283], [1.570, -1.570]])
>>> torch.rad2deg(a)
tensor([[ 180.0233, -180.0233],
[ 359.9894, -359.9894],
[ 89.9544, -89.9544]])
"""
...
def rad2deg_(input: Tensor) -> Tensor: ...
@overload
def rand(size: Sequence[Union[_int, SymInt]], *, generator: Optional[Generator], names: Optional[Sequence[Union[str, ellipsis, None]]], dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor:
r"""
rand(*size, *, generator=None, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False, pin_memory=False) -> Tensor
Returns a tensor filled with random numbers from a uniform distribution
on the interval :math:`[0, 1)`
The shape of the tensor is defined by the variable argument :attr:`size`.
Args:
size (int...): a sequence of integers defining the shape of the output tensor.
Can be a variable number of arguments or a collection like a list or tuple.
Keyword args:
generator (:class:`torch.Generator`, optional): a pseudorandom number generator for sampling
out (Tensor, optional): the output tensor.
dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.
Default: if ``None``, uses a global default (see :func:`torch.set_default_dtype`).
layout (:class:`torch.layout`, optional): the desired layout of returned Tensor.
Default: ``torch.strided``.
device (:class:`torch.device`, optional): the desired device of returned tensor.
Default: if ``None``, uses the current device for the default tensor type
(see :func:`torch.set_default_device`). :attr:`device` will be the CPU
for CPU tensor types and the current CUDA device for CUDA tensor types.
requires_grad (bool, optional): If autograd should record operations on the
returned tensor. Default: ``False``.
pin_memory (bool, optional): If set, returned tensor would be allocated in
the pinned memory. Works only for CPU tensors. Default: ``False``.
Example::
>>> torch.rand(4)
tensor([ 0.5204, 0.2503, 0.3525, 0.5673])
>>> torch.rand(2, 3)
tensor([[ 0.8237, 0.5781, 0.6879],
[ 0.3816, 0.7249, 0.0998]])
"""
...
@overload
def rand(*size: _int, generator: Optional[Generator], names: Optional[Sequence[Union[str, ellipsis, None]]], dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor:
r"""
rand(*size, *, generator=None, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False, pin_memory=False) -> Tensor
Returns a tensor filled with random numbers from a uniform distribution
on the interval :math:`[0, 1)`
The shape of the tensor is defined by the variable argument :attr:`size`.
Args:
size (int...): a sequence of integers defining the shape of the output tensor.
Can be a variable number of arguments or a collection like a list or tuple.
Keyword args:
generator (:class:`torch.Generator`, optional): a pseudorandom number generator for sampling
out (Tensor, optional): the output tensor.
dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.
Default: if ``None``, uses a global default (see :func:`torch.set_default_dtype`).
layout (:class:`torch.layout`, optional): the desired layout of returned Tensor.
Default: ``torch.strided``.
device (:class:`torch.device`, optional): the desired device of returned tensor.
Default: if ``None``, uses the current device for the default tensor type
(see :func:`torch.set_default_device`). :attr:`device` will be the CPU
for CPU tensor types and the current CUDA device for CUDA tensor types.
requires_grad (bool, optional): If autograd should record operations on the
returned tensor. Default: ``False``.
pin_memory (bool, optional): If set, returned tensor would be allocated in
the pinned memory. Works only for CPU tensors. Default: ``False``.
Example::
>>> torch.rand(4)
tensor([ 0.5204, 0.2503, 0.3525, 0.5673])
>>> torch.rand(2, 3)
tensor([[ 0.8237, 0.5781, 0.6879],
[ 0.3816, 0.7249, 0.0998]])
"""
...
@overload
def rand(size: Sequence[Union[_int, SymInt]], *, generator: Optional[Generator], out: Optional[Tensor] = None, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor:
r"""
rand(*size, *, generator=None, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False, pin_memory=False) -> Tensor
Returns a tensor filled with random numbers from a uniform distribution
on the interval :math:`[0, 1)`
The shape of the tensor is defined by the variable argument :attr:`size`.
Args:
size (int...): a sequence of integers defining the shape of the output tensor.
Can be a variable number of arguments or a collection like a list or tuple.
Keyword args:
generator (:class:`torch.Generator`, optional): a pseudorandom number generator for sampling
out (Tensor, optional): the output tensor.
dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.
Default: if ``None``, uses a global default (see :func:`torch.set_default_dtype`).
layout (:class:`torch.layout`, optional): the desired layout of returned Tensor.
Default: ``torch.strided``.
device (:class:`torch.device`, optional): the desired device of returned tensor.
Default: if ``None``, uses the current device for the default tensor type
(see :func:`torch.set_default_device`). :attr:`device` will be the CPU
for CPU tensor types and the current CUDA device for CUDA tensor types.
requires_grad (bool, optional): If autograd should record operations on the
returned tensor. Default: ``False``.
pin_memory (bool, optional): If set, returned tensor would be allocated in
the pinned memory. Works only for CPU tensors. Default: ``False``.
Example::
>>> torch.rand(4)
tensor([ 0.5204, 0.2503, 0.3525, 0.5673])
>>> torch.rand(2, 3)
tensor([[ 0.8237, 0.5781, 0.6879],
[ 0.3816, 0.7249, 0.0998]])
"""
...
@overload
def rand(*size: _int, generator: Optional[Generator], out: Optional[Tensor] = None, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor:
r"""
rand(*size, *, generator=None, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False, pin_memory=False) -> Tensor
Returns a tensor filled with random numbers from a uniform distribution
on the interval :math:`[0, 1)`
The shape of the tensor is defined by the variable argument :attr:`size`.
Args:
size (int...): a sequence of integers defining the shape of the output tensor.
Can be a variable number of arguments or a collection like a list or tuple.
Keyword args:
generator (:class:`torch.Generator`, optional): a pseudorandom number generator for sampling
out (Tensor, optional): the output tensor.
dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.
Default: if ``None``, uses a global default (see :func:`torch.set_default_dtype`).
layout (:class:`torch.layout`, optional): the desired layout of returned Tensor.
Default: ``torch.strided``.
device (:class:`torch.device`, optional): the desired device of returned tensor.
Default: if ``None``, uses the current device for the default tensor type
(see :func:`torch.set_default_device`). :attr:`device` will be the CPU
for CPU tensor types and the current CUDA device for CUDA tensor types.
requires_grad (bool, optional): If autograd should record operations on the
returned tensor. Default: ``False``.
pin_memory (bool, optional): If set, the returned tensor is allocated in
    pinned memory. Works only for CPU tensors. Default: ``False``.
Example::
>>> torch.rand(4)
tensor([ 0.5204, 0.2503, 0.3525, 0.5673])
>>> torch.rand(2, 3)
tensor([[ 0.8237, 0.5781, 0.6879],
[ 0.3816, 0.7249, 0.0998]])
"""
...
@overload
def rand(size: Sequence[Union[_int, SymInt]], *, out: Optional[Tensor] = None, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor:
r"""
rand(*size, *, generator=None, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False, pin_memory=False) -> Tensor
Returns a tensor filled with random numbers from a uniform distribution
on the interval :math:`[0, 1)`
The shape of the tensor is defined by the variable argument :attr:`size`.
Args:
size (int...): a sequence of integers defining the shape of the output tensor.
Can be a variable number of arguments or a collection like a list or tuple.
Keyword args:
generator (:class:`torch.Generator`, optional): a pseudorandom number generator for sampling
out (Tensor, optional): the output tensor.
dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.
Default: if ``None``, uses a global default (see :func:`torch.set_default_dtype`).
layout (:class:`torch.layout`, optional): the desired layout of returned Tensor.
Default: ``torch.strided``.
device (:class:`torch.device`, optional): the desired device of returned tensor.
Default: if ``None``, uses the current device for the default tensor type
(see :func:`torch.set_default_device`). :attr:`device` will be the CPU
for CPU tensor types and the current CUDA device for CUDA tensor types.
requires_grad (bool, optional): If autograd should record operations on the
returned tensor. Default: ``False``.
pin_memory (bool, optional): If set, the returned tensor is allocated in
    pinned memory. Works only for CPU tensors. Default: ``False``.
Example::
>>> torch.rand(4)
tensor([ 0.5204, 0.2503, 0.3525, 0.5673])
>>> torch.rand(2, 3)
tensor([[ 0.8237, 0.5781, 0.6879],
[ 0.3816, 0.7249, 0.0998]])
"""
...
@overload
def rand(*size: _int, out: Optional[Tensor] = None, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor:
r"""
rand(*size, *, generator=None, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False, pin_memory=False) -> Tensor
Returns a tensor filled with random numbers from a uniform distribution
on the interval :math:`[0, 1)`
The shape of the tensor is defined by the variable argument :attr:`size`.
Args:
size (int...): a sequence of integers defining the shape of the output tensor.
Can be a variable number of arguments or a collection like a list or tuple.
Keyword args:
generator (:class:`torch.Generator`, optional): a pseudorandom number generator for sampling
out (Tensor, optional): the output tensor.
dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.
Default: if ``None``, uses a global default (see :func:`torch.set_default_dtype`).
layout (:class:`torch.layout`, optional): the desired layout of returned Tensor.
Default: ``torch.strided``.
device (:class:`torch.device`, optional): the desired device of returned tensor.
Default: if ``None``, uses the current device for the default tensor type
(see :func:`torch.set_default_device`). :attr:`device` will be the CPU
for CPU tensor types and the current CUDA device for CUDA tensor types.
requires_grad (bool, optional): If autograd should record operations on the
returned tensor. Default: ``False``.
pin_memory (bool, optional): If set, the returned tensor is allocated in
    pinned memory. Works only for CPU tensors. Default: ``False``.
Example::
>>> torch.rand(4)
tensor([ 0.5204, 0.2503, 0.3525, 0.5673])
>>> torch.rand(2, 3)
tensor([[ 0.8237, 0.5781, 0.6879],
[ 0.3816, 0.7249, 0.0998]])
"""
...
@overload
def rand(size: Sequence[Union[_int, SymInt]], *, names: Optional[Sequence[Union[str, ellipsis, None]]], dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor:
r"""
rand(*size, *, generator=None, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False, pin_memory=False) -> Tensor
Returns a tensor filled with random numbers from a uniform distribution
on the interval :math:`[0, 1)`
The shape of the tensor is defined by the variable argument :attr:`size`.
Args:
size (int...): a sequence of integers defining the shape of the output tensor.
Can be a variable number of arguments or a collection like a list or tuple.
Keyword args:
generator (:class:`torch.Generator`, optional): a pseudorandom number generator for sampling
out (Tensor, optional): the output tensor.
dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.
Default: if ``None``, uses a global default (see :func:`torch.set_default_dtype`).
layout (:class:`torch.layout`, optional): the desired layout of returned Tensor.
Default: ``torch.strided``.
device (:class:`torch.device`, optional): the desired device of returned tensor.
Default: if ``None``, uses the current device for the default tensor type
(see :func:`torch.set_default_device`). :attr:`device` will be the CPU
for CPU tensor types and the current CUDA device for CUDA tensor types.
requires_grad (bool, optional): If autograd should record operations on the
returned tensor. Default: ``False``.
pin_memory (bool, optional): If set, the returned tensor is allocated in
    pinned memory. Works only for CPU tensors. Default: ``False``.
Example::
>>> torch.rand(4)
tensor([ 0.5204, 0.2503, 0.3525, 0.5673])
>>> torch.rand(2, 3)
tensor([[ 0.8237, 0.5781, 0.6879],
[ 0.3816, 0.7249, 0.0998]])
"""
...
@overload
def rand(*size: _int, names: Optional[Sequence[Union[str, ellipsis, None]]], dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor:
r"""
rand(*size, *, generator=None, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False, pin_memory=False) -> Tensor
Returns a tensor filled with random numbers from a uniform distribution
on the interval :math:`[0, 1)`
The shape of the tensor is defined by the variable argument :attr:`size`.
Args:
size (int...): a sequence of integers defining the shape of the output tensor.
Can be a variable number of arguments or a collection like a list or tuple.
Keyword args:
generator (:class:`torch.Generator`, optional): a pseudorandom number generator for sampling
out (Tensor, optional): the output tensor.
dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.
Default: if ``None``, uses a global default (see :func:`torch.set_default_dtype`).
layout (:class:`torch.layout`, optional): the desired layout of returned Tensor.
Default: ``torch.strided``.
device (:class:`torch.device`, optional): the desired device of returned tensor.
Default: if ``None``, uses the current device for the default tensor type
(see :func:`torch.set_default_device`). :attr:`device` will be the CPU
for CPU tensor types and the current CUDA device for CUDA tensor types.
requires_grad (bool, optional): If autograd should record operations on the
returned tensor. Default: ``False``.
pin_memory (bool, optional): If set, the returned tensor is allocated in
    pinned memory. Works only for CPU tensors. Default: ``False``.
Example::
>>> torch.rand(4)
tensor([ 0.5204, 0.2503, 0.3525, 0.5673])
>>> torch.rand(2, 3)
tensor([[ 0.8237, 0.5781, 0.6879],
[ 0.3816, 0.7249, 0.0998]])
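For reproducible sampling, pass a seeded :class:`torch.Generator`. An
illustrative sketch (equal seeds yield equal draws on the same build and
device)::
    >>> g1 = torch.Generator().manual_seed(0)
    >>> g2 = torch.Generator().manual_seed(0)
    >>> torch.equal(torch.rand(3, generator=g1), torch.rand(3, generator=g2))
    True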
"""
...
def rand_like(input: Tensor, *, memory_format: Optional[memory_format] = None, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor:
r"""
rand_like(input, *, dtype=None, layout=None, device=None, requires_grad=False, memory_format=torch.preserve_format) -> Tensor
Returns a tensor with the same size as :attr:`input` that is filled with
random numbers from a uniform distribution on the interval :math:`[0, 1)`.
``torch.rand_like(input)`` is equivalent to
``torch.rand(input.size(), dtype=input.dtype, layout=input.layout, device=input.device)``.
Args:
input (Tensor): the size of :attr:`input` will determine size of the output tensor.
Keyword args:
dtype (:class:`torch.dtype`, optional): the desired data type of returned Tensor.
Default: if ``None``, defaults to the dtype of :attr:`input`.
layout (:class:`torch.layout`, optional): the desired layout of returned tensor.
Default: if ``None``, defaults to the layout of :attr:`input`.
device (:class:`torch.device`, optional): the desired device of returned tensor.
Default: if ``None``, defaults to the device of :attr:`input`.
requires_grad (bool, optional): If autograd should record operations on the
returned tensor. Default: ``False``.
memory_format (:class:`torch.memory_format`, optional): the desired memory format of
returned Tensor. Default: ``torch.preserve_format``.
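Example (an illustrative sketch: the values are random, so only the shape
is checked)::
    >>> x = torch.empty(2, 3)
    >>> torch.rand_like(x).shape
    torch.Size([2, 3])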
"""
...
@overload
def randint(low: _int, high: _int, size: _size, *, generator: Optional[Generator] = None, dtype: Optional[_dtype] = None, device: Optional[DeviceLikeType] = None, requires_grad: _bool = False, pin_memory: _bool = False) -> Tensor:
r"""
randint(low=0, high, size, \*, generator=None, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False) -> Tensor
Returns a tensor filled with random integers generated uniformly
between :attr:`low` (inclusive) and :attr:`high` (exclusive).
The shape of the tensor is defined by the variable argument :attr:`size`.
.. note::
With the global dtype default (``torch.float32``), this function returns
a tensor with dtype ``torch.int64``.
Args:
low (int, optional): Lowest integer to be drawn from the distribution. Default: 0.
high (int): One above the highest integer to be drawn from the distribution.
size (tuple): a tuple defining the shape of the output tensor.
Keyword args:
generator (:class:`torch.Generator`, optional): a pseudorandom number generator for sampling
out (Tensor, optional): the output tensor.
dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor. Default: if ``None``,
this function returns a tensor with dtype ``torch.int64``.
layout (:class:`torch.layout`, optional): the desired layout of returned Tensor.
Default: ``torch.strided``.
device (:class:`torch.device`, optional): the desired device of returned tensor.
Default: if ``None``, uses the current device for the default tensor type
(see :func:`torch.set_default_device`). :attr:`device` will be the CPU
for CPU tensor types and the current CUDA device for CUDA tensor types.
requires_grad (bool, optional): If autograd should record operations on the
returned tensor. Default: ``False``.
Example::
>>> torch.randint(3, 5, (3,))
tensor([4, 3, 4])
>>> torch.randint(10, (2, 2))
tensor([[0, 2],
[5, 5]])
>>> torch.randint(3, 10, (2, 2))
tensor([[4, 5],
[6, 7]])
"""
...
@overload
def randint(high: _int, size: _size, *, generator: Optional[Generator] = None, dtype: Optional[_dtype] = None, device: Optional[DeviceLikeType] = None, requires_grad: _bool = False, pin_memory: _bool = False) -> Tensor:
r"""
randint(low=0, high, size, \*, generator=None, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False) -> Tensor
Returns a tensor filled with random integers generated uniformly
between :attr:`low` (inclusive) and :attr:`high` (exclusive).
The shape of the tensor is defined by the variable argument :attr:`size`.
.. note::
With the global dtype default (``torch.float32``), this function returns
a tensor with dtype ``torch.int64``.
Args:
low (int, optional): Lowest integer to be drawn from the distribution. Default: 0.
high (int): One above the highest integer to be drawn from the distribution.
size (tuple): a tuple defining the shape of the output tensor.
Keyword args:
generator (:class:`torch.Generator`, optional): a pseudorandom number generator for sampling
out (Tensor, optional): the output tensor.
dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor. Default: if ``None``,
this function returns a tensor with dtype ``torch.int64``.
layout (:class:`torch.layout`, optional): the desired layout of returned Tensor.
Default: ``torch.strided``.
device (:class:`torch.device`, optional): the desired device of returned tensor.
Default: if ``None``, uses the current device for the default tensor type
(see :func:`torch.set_default_device`). :attr:`device` will be the CPU
for CPU tensor types and the current CUDA device for CUDA tensor types.
requires_grad (bool, optional): If autograd should record operations on the
returned tensor. Default: ``False``.
Example::
>>> torch.randint(3, 5, (3,))
tensor([4, 3, 4])
>>> torch.randint(10, (2, 2))
tensor([[0, 2],
[5, 5]])
>>> torch.randint(3, 10, (2, 2))
tensor([[4, 5],
[6, 7]])
"""
...
@overload
def randint(high: Union[_int, SymInt], size: Sequence[Union[_int, SymInt]], *, generator: Optional[Generator], out: Optional[Tensor] = None, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor:
r"""
randint(low=0, high, size, \*, generator=None, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False) -> Tensor
Returns a tensor filled with random integers generated uniformly
between :attr:`low` (inclusive) and :attr:`high` (exclusive).
The shape of the tensor is defined by the variable argument :attr:`size`.
.. note::
With the global dtype default (``torch.float32``), this function returns
a tensor with dtype ``torch.int64``.
Args:
low (int, optional): Lowest integer to be drawn from the distribution. Default: 0.
high (int): One above the highest integer to be drawn from the distribution.
size (tuple): a tuple defining the shape of the output tensor.
Keyword args:
generator (:class:`torch.Generator`, optional): a pseudorandom number generator for sampling
out (Tensor, optional): the output tensor.
dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor. Default: if ``None``,
this function returns a tensor with dtype ``torch.int64``.
layout (:class:`torch.layout`, optional): the desired layout of returned Tensor.
Default: ``torch.strided``.
device (:class:`torch.device`, optional): the desired device of returned tensor.
Default: if ``None``, uses the current device for the default tensor type
(see :func:`torch.set_default_device`). :attr:`device` will be the CPU
for CPU tensor types and the current CUDA device for CUDA tensor types.
requires_grad (bool, optional): If autograd should record operations on the
returned tensor. Default: ``False``.
Example::
>>> torch.randint(3, 5, (3,))
tensor([4, 3, 4])
>>> torch.randint(10, (2, 2))
tensor([[0, 2],
[5, 5]])
>>> torch.randint(3, 10, (2, 2))
tensor([[4, 5],
[6, 7]])
"""
...
@overload
def randint(high: Union[_int, SymInt], size: Sequence[Union[_int, SymInt]], *, out: Optional[Tensor] = None, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor:
r"""
randint(low=0, high, size, \*, generator=None, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False) -> Tensor
Returns a tensor filled with random integers generated uniformly
between :attr:`low` (inclusive) and :attr:`high` (exclusive).
The shape of the tensor is defined by the variable argument :attr:`size`.
.. note::
With the global dtype default (``torch.float32``), this function returns
a tensor with dtype ``torch.int64``.
Args:
low (int, optional): Lowest integer to be drawn from the distribution. Default: 0.
high (int): One above the highest integer to be drawn from the distribution.
size (tuple): a tuple defining the shape of the output tensor.
Keyword args:
generator (:class:`torch.Generator`, optional): a pseudorandom number generator for sampling
out (Tensor, optional): the output tensor.
dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor. Default: if ``None``,
this function returns a tensor with dtype ``torch.int64``.
layout (:class:`torch.layout`, optional): the desired layout of returned Tensor.
Default: ``torch.strided``.
device (:class:`torch.device`, optional): the desired device of returned tensor.
Default: if ``None``, uses the current device for the default tensor type
(see :func:`torch.set_default_device`). :attr:`device` will be the CPU
for CPU tensor types and the current CUDA device for CUDA tensor types.
requires_grad (bool, optional): If autograd should record operations on the
returned tensor. Default: ``False``.
Example::
>>> torch.randint(3, 5, (3,))
tensor([4, 3, 4])
>>> torch.randint(10, (2, 2))
tensor([[0, 2],
[5, 5]])
>>> torch.randint(3, 10, (2, 2))
tensor([[4, 5],
[6, 7]])
"""
...
@overload
def randint(low: Union[_int, SymInt], high: Union[_int, SymInt], size: Sequence[Union[_int, SymInt]], *, generator: Optional[Generator], out: Optional[Tensor] = None, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor:
r"""
randint(low=0, high, size, \*, generator=None, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False) -> Tensor
Returns a tensor filled with random integers generated uniformly
between :attr:`low` (inclusive) and :attr:`high` (exclusive).
The shape of the tensor is defined by the variable argument :attr:`size`.
.. note::
With the global dtype default (``torch.float32``), this function returns
a tensor with dtype ``torch.int64``.
Args:
low (int, optional): Lowest integer to be drawn from the distribution. Default: 0.
high (int): One above the highest integer to be drawn from the distribution.
size (tuple): a tuple defining the shape of the output tensor.
Keyword args:
generator (:class:`torch.Generator`, optional): a pseudorandom number generator for sampling
out (Tensor, optional): the output tensor.
dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor. Default: if ``None``,
this function returns a tensor with dtype ``torch.int64``.
layout (:class:`torch.layout`, optional): the desired layout of returned Tensor.
Default: ``torch.strided``.
device (:class:`torch.device`, optional): the desired device of returned tensor.
Default: if ``None``, uses the current device for the default tensor type
(see :func:`torch.set_default_device`). :attr:`device` will be the CPU
for CPU tensor types and the current CUDA device for CUDA tensor types.
requires_grad (bool, optional): If autograd should record operations on the
returned tensor. Default: ``False``.
Example::
>>> torch.randint(3, 5, (3,))
tensor([4, 3, 4])
>>> torch.randint(10, (2, 2))
tensor([[0, 2],
[5, 5]])
>>> torch.randint(3, 10, (2, 2))
tensor([[4, 5],
[6, 7]])
"""
...
@overload
def randint(low: Union[_int, SymInt], high: Union[_int, SymInt], size: Sequence[Union[_int, SymInt]], *, out: Optional[Tensor] = None, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor:
r"""
randint(low=0, high, size, \*, generator=None, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False) -> Tensor
Returns a tensor filled with random integers generated uniformly
between :attr:`low` (inclusive) and :attr:`high` (exclusive).
The shape of the tensor is defined by the variable argument :attr:`size`.
.. note::
With the global dtype default (``torch.float32``), this function returns
a tensor with dtype ``torch.int64``.
Args:
low (int, optional): Lowest integer to be drawn from the distribution. Default: 0.
high (int): One above the highest integer to be drawn from the distribution.
size (tuple): a tuple defining the shape of the output tensor.
Keyword args:
generator (:class:`torch.Generator`, optional): a pseudorandom number generator for sampling
out (Tensor, optional): the output tensor.
dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor. Default: if ``None``,
this function returns a tensor with dtype ``torch.int64``.
layout (:class:`torch.layout`, optional): the desired layout of returned Tensor.
Default: ``torch.strided``.
device (:class:`torch.device`, optional): the desired device of returned tensor.
Default: if ``None``, uses the current device for the default tensor type
(see :func:`torch.set_default_device`). :attr:`device` will be the CPU
for CPU tensor types and the current CUDA device for CUDA tensor types.
requires_grad (bool, optional): If autograd should record operations on the
returned tensor. Default: ``False``.
Example::
>>> torch.randint(3, 5, (3,))
tensor([4, 3, 4])
>>> torch.randint(10, (2, 2))
tensor([[0, 2],
[5, 5]])
>>> torch.randint(3, 10, (2, 2))
tensor([[4, 5],
[6, 7]])
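For reproducible draws, pass a seeded :class:`torch.Generator`. An
illustrative sketch (equal seeds yield equal results on the same build and
device)::
    >>> g1 = torch.Generator().manual_seed(0)
    >>> g2 = torch.Generator().manual_seed(0)
    >>> torch.equal(torch.randint(3, 10, (2, 2), generator=g1),
    ...             torch.randint(3, 10, (2, 2), generator=g2))
    True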
"""
...
@overload
def randint_like(input: Tensor, high: Union[_int, SymInt], *, memory_format: Optional[memory_format] = None, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor:
r"""
randint_like(input, low=0, high, \*, dtype=None, layout=torch.strided, device=None, requires_grad=False, memory_format=torch.preserve_format) -> Tensor
Returns a tensor with the same shape as Tensor :attr:`input` filled with
random integers generated uniformly between :attr:`low` (inclusive) and
:attr:`high` (exclusive).
.. note::
With the global dtype default (``torch.float32``), this function returns
a tensor with dtype ``torch.int64``.
Args:
input (Tensor): the size of :attr:`input` will determine size of the output tensor.
low (int, optional): Lowest integer to be drawn from the distribution. Default: 0.
high (int): One above the highest integer to be drawn from the distribution.
Keyword args:
dtype (:class:`torch.dtype`, optional): the desired data type of returned Tensor.
Default: if ``None``, defaults to the dtype of :attr:`input`.
layout (:class:`torch.layout`, optional): the desired layout of returned tensor.
Default: if ``None``, defaults to the layout of :attr:`input`.
device (:class:`torch.device`, optional): the desired device of returned tensor.
Default: if ``None``, defaults to the device of :attr:`input`.
requires_grad (bool, optional): If autograd should record operations on the
returned tensor. Default: ``False``.
memory_format (:class:`torch.memory_format`, optional): the desired memory format of
returned Tensor. Default: ``torch.preserve_format``.
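Example (an illustrative sketch: the values are random, so only shape and
dtype are checked)::
    >>> x = torch.zeros(2, 2, dtype=torch.int64)
    >>> y = torch.randint_like(x, 10)
    >>> y.shape, y.dtype
    (torch.Size([2, 2]), torch.int64)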
"""
...
@overload
def randint_like(input: Tensor, low: Union[_int, SymInt], high: Union[_int, SymInt], *, memory_format: Optional[memory_format] = None, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor:
r"""
randint_like(input, low=0, high, \*, dtype=None, layout=torch.strided, device=None, requires_grad=False, memory_format=torch.preserve_format) -> Tensor
Returns a tensor with the same shape as Tensor :attr:`input` filled with
random integers generated uniformly between :attr:`low` (inclusive) and
:attr:`high` (exclusive).
.. note::
With the global dtype default (``torch.float32``), this function returns
a tensor with dtype ``torch.int64``.
Args:
input (Tensor): the size of :attr:`input` will determine size of the output tensor.
low (int, optional): Lowest integer to be drawn from the distribution. Default: 0.
high (int): One above the highest integer to be drawn from the distribution.
Keyword args:
dtype (:class:`torch.dtype`, optional): the desired data type of returned Tensor.
Default: if ``None``, defaults to the dtype of :attr:`input`.
layout (:class:`torch.layout`, optional): the desired layout of returned tensor.
Default: if ``None``, defaults to the layout of :attr:`input`.
device (:class:`torch.device`, optional): the desired device of returned tensor.
Default: if ``None``, defaults to the device of :attr:`input`.
requires_grad (bool, optional): If autograd should record operations on the
returned tensor. Default: ``False``.
memory_format (:class:`torch.memory_format`, optional): the desired memory format of
returned Tensor. Default: ``torch.preserve_format``.
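Example (an illustrative sketch: outputs are random, so only the value
range ``[low, high)`` is checked)::
    >>> x = torch.zeros(2, 2, dtype=torch.int64)
    >>> y = torch.randint_like(x, 3, 10)
    >>> bool(((y >= 3) & (y < 10)).all())
    True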
"""
...
@overload
def randn(size: Sequence[Union[_int, SymInt]], *, generator: Optional[Generator], names: Optional[Sequence[Union[str, ellipsis, None]]], dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor:
r"""
randn(*size, *, generator=None, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False, pin_memory=False) -> Tensor
Returns a tensor filled with random numbers from a normal distribution
with mean `0` and variance `1` (also called the standard normal
distribution).
.. math::
\text{out}_{i} \sim \mathcal{N}(0, 1)
For complex dtypes, the tensor is i.i.d. sampled from a `complex normal distribution`_ with zero mean and
unit variance as
.. math::
\text{out}_{i} \sim \mathcal{CN}(0, 1)
This is equivalent to separately sampling the real :math:`(\operatorname{Re})` and imaginary
:math:`(\operatorname{Im})` part of :math:`\text{out}_i` as
.. math::
\operatorname{Re}(\text{out}_{i}) \sim \mathcal{N}(0, \frac{1}{2}),\quad
\operatorname{Im}(\text{out}_{i}) \sim \mathcal{N}(0, \frac{1}{2})
The shape of the tensor is defined by the variable argument :attr:`size`.
Args:
size (int...): a sequence of integers defining the shape of the output tensor.
Can be a variable number of arguments or a collection like a list or tuple.
Keyword args:
generator (:class:`torch.Generator`, optional): a pseudorandom number generator for sampling
out (Tensor, optional): the output tensor.
dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.
Default: if ``None``, uses a global default (see :func:`torch.set_default_dtype`).
layout (:class:`torch.layout`, optional): the desired layout of returned Tensor.
Default: ``torch.strided``.
device (:class:`torch.device`, optional): the desired device of returned tensor.
Default: if ``None``, uses the current device for the default tensor type
(see :func:`torch.set_default_device`). :attr:`device` will be the CPU
for CPU tensor types and the current CUDA device for CUDA tensor types.
requires_grad (bool, optional): If autograd should record operations on the
returned tensor. Default: ``False``.
pin_memory (bool, optional): If set, the returned tensor is allocated in
    pinned memory. Works only for CPU tensors. Default: ``False``.
Example::
>>> torch.randn(4)
tensor([-2.1436, 0.9966, 2.3426, -0.6366])
>>> torch.randn(2, 3)
tensor([[ 1.5954, 2.8929, -1.0923],
[ 1.1719, -0.4709, -0.1996]])
.. _complex normal distribution: https://en.wikipedia.org/wiki/Complex_normal_distribution
"""
...
@overload
def randn(*size: _int, generator: Optional[Generator], names: Optional[Sequence[Union[str, ellipsis, None]]], dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor:
r"""
randn(*size, *, generator=None, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False, pin_memory=False) -> Tensor
Returns a tensor filled with random numbers from a normal distribution
with mean `0` and variance `1` (also called the standard normal
distribution).
.. math::
\text{out}_{i} \sim \mathcal{N}(0, 1)
For complex dtypes, the tensor is i.i.d. sampled from a `complex normal distribution`_ with zero mean and
unit variance as
.. math::
\text{out}_{i} \sim \mathcal{CN}(0, 1)
This is equivalent to separately sampling the real :math:`(\operatorname{Re})` and imaginary
:math:`(\operatorname{Im})` part of :math:`\text{out}_i` as
.. math::
\operatorname{Re}(\text{out}_{i}) \sim \mathcal{N}(0, \frac{1}{2}),\quad
\operatorname{Im}(\text{out}_{i}) \sim \mathcal{N}(0, \frac{1}{2})
The shape of the tensor is defined by the variable argument :attr:`size`.
Args:
size (int...): a sequence of integers defining the shape of the output tensor.
Can be a variable number of arguments or a collection like a list or tuple.
Keyword args:
generator (:class:`torch.Generator`, optional): a pseudorandom number generator for sampling
out (Tensor, optional): the output tensor.
dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.
Default: if ``None``, uses a global default (see :func:`torch.set_default_dtype`).
layout (:class:`torch.layout`, optional): the desired layout of returned Tensor.
Default: ``torch.strided``.
device (:class:`torch.device`, optional): the desired device of returned tensor.
Default: if ``None``, uses the current device for the default tensor type
(see :func:`torch.set_default_device`). :attr:`device` will be the CPU
for CPU tensor types and the current CUDA device for CUDA tensor types.
requires_grad (bool, optional): If autograd should record operations on the
returned tensor. Default: ``False``.
pin_memory (bool, optional): If set, the returned tensor is allocated in
    pinned memory. Works only for CPU tensors. Default: ``False``.
Example::
>>> torch.randn(4)
tensor([-2.1436, 0.9966, 2.3426, -0.6366])
>>> torch.randn(2, 3)
tensor([[ 1.5954, 2.8929, -1.0923],
[ 1.1719, -0.4709, -0.1996]])
.. _complex normal distribution: https://en.wikipedia.org/wiki/Complex_normal_distribution
"""
...
@overload
def randn(size: Sequence[Union[_int, SymInt]], *, generator: Optional[Generator], out: Optional[Tensor] = None, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor:
r"""
randn(*size, *, generator=None, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False, pin_memory=False) -> Tensor
Returns a tensor filled with random numbers from a normal distribution
with mean `0` and variance `1` (also called the standard normal
distribution).
.. math::
\text{out}_{i} \sim \mathcal{N}(0, 1)
For complex dtypes, the tensor is i.i.d. sampled from a `complex normal distribution`_ with zero mean and
unit variance as
.. math::
\text{out}_{i} \sim \mathcal{CN}(0, 1)
This is equivalent to separately sampling the real :math:`(\operatorname{Re})` and imaginary
:math:`(\operatorname{Im})` part of :math:`\text{out}_i` as
.. math::
\operatorname{Re}(\text{out}_{i}) \sim \mathcal{N}(0, \frac{1}{2}),\quad
\operatorname{Im}(\text{out}_{i}) \sim \mathcal{N}(0, \frac{1}{2})
The shape of the tensor is defined by the variable argument :attr:`size`.
Args:
size (int...): a sequence of integers defining the shape of the output tensor.
Can be a variable number of arguments or a collection like a list or tuple.
Keyword args:
generator (:class:`torch.Generator`, optional): a pseudorandom number generator for sampling
out (Tensor, optional): the output tensor.
dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.
Default: if ``None``, uses a global default (see :func:`torch.set_default_dtype`).
layout (:class:`torch.layout`, optional): the desired layout of returned Tensor.
Default: ``torch.strided``.
device (:class:`torch.device`, optional): the desired device of returned tensor.
Default: if ``None``, uses the current device for the default tensor type
(see :func:`torch.set_default_device`). :attr:`device` will be the CPU
for CPU tensor types and the current CUDA device for CUDA tensor types.
requires_grad (bool, optional): If autograd should record operations on the
returned tensor. Default: ``False``.
pin_memory (bool, optional): If set, the returned tensor is allocated in
    pinned memory. Works only for CPU tensors. Default: ``False``.
Example::
>>> torch.randn(4)
tensor([-2.1436, 0.9966, 2.3426, -0.6366])
>>> torch.randn(2, 3)
tensor([[ 1.5954, 2.8929, -1.0923],
[ 1.1719, -0.4709, -0.1996]])
.. _complex normal distribution: https://en.wikipedia.org/wiki/Complex_normal_distribution
"""
...
@overload
def randn(*size: _int, generator: Optional[Generator], out: Optional[Tensor] = None, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor:
r"""
randn(*size, *, generator=None, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False, pin_memory=False) -> Tensor
Returns a tensor filled with random numbers from a normal distribution
with mean `0` and variance `1` (also called the standard normal
distribution).
.. math::
\text{out}_{i} \sim \mathcal{N}(0, 1)
For complex dtypes, the tensor is i.i.d. sampled from a `complex normal distribution`_ with zero mean and
unit variance as
.. math::
\text{out}_{i} \sim \mathcal{CN}(0, 1)
This is equivalent to separately sampling the real :math:`(\operatorname{Re})` and imaginary
:math:`(\operatorname{Im})` part of :math:`\text{out}_i` as
.. math::
\operatorname{Re}(\text{out}_{i}) \sim \mathcal{N}(0, \frac{1}{2}),\quad
\operatorname{Im}(\text{out}_{i}) \sim \mathcal{N}(0, \frac{1}{2})
The shape of the tensor is defined by the variable argument :attr:`size`.
Args:
size (int...): a sequence of integers defining the shape of the output tensor.
Can be a variable number of arguments or a collection like a list or tuple.
Keyword args:
generator (:class:`torch.Generator`, optional): a pseudorandom number generator for sampling
out (Tensor, optional): the output tensor.
dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.
Default: if ``None``, uses a global default (see :func:`torch.set_default_dtype`).
layout (:class:`torch.layout`, optional): the desired layout of returned Tensor.
Default: ``torch.strided``.
device (:class:`torch.device`, optional): the desired device of returned tensor.
Default: if ``None``, uses the current device for the default tensor type
(see :func:`torch.set_default_device`). :attr:`device` will be the CPU
for CPU tensor types and the current CUDA device for CUDA tensor types.
requires_grad (bool, optional): If autograd should record operations on the
returned tensor. Default: ``False``.
pin_memory (bool, optional): If set, the returned tensor is allocated in
    pinned memory. Works only for CPU tensors. Default: ``False``.
Example::
>>> torch.randn(4)
tensor([-2.1436, 0.9966, 2.3426, -0.6366])
>>> torch.randn(2, 3)
tensor([[ 1.5954, 2.8929, -1.0923],
[ 1.1719, -0.4709, -0.1996]])
.. _complex normal distribution: https://en.wikipedia.org/wiki/Complex_normal_distribution
"""
...
@overload
def randn(size: Sequence[Union[_int, SymInt]], *, out: Optional[Tensor] = None, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor:
r"""
randn(*size, *, generator=None, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False, pin_memory=False) -> Tensor
Returns a tensor filled with random numbers from a normal distribution
with mean `0` and variance `1` (also called the standard normal
distribution).
.. math::
\text{out}_{i} \sim \mathcal{N}(0, 1)
For complex dtypes, the tensor is i.i.d. sampled from a `complex normal distribution`_ with zero mean and
unit variance as
.. math::
\text{out}_{i} \sim \mathcal{CN}(0, 1)
This is equivalent to separately sampling the real :math:`(\operatorname{Re})` and imaginary
:math:`(\operatorname{Im})` part of :math:`\text{out}_i` as
.. math::
\operatorname{Re}(\text{out}_{i}) \sim \mathcal{N}(0, \frac{1}{2}),\quad
\operatorname{Im}(\text{out}_{i}) \sim \mathcal{N}(0, \frac{1}{2})
The shape of the tensor is defined by the variable argument :attr:`size`.
Args:
size (int...): a sequence of integers defining the shape of the output tensor.
Can be a variable number of arguments or a collection like a list or tuple.
Keyword args:
generator (:class:`torch.Generator`, optional): a pseudorandom number generator for sampling
out (Tensor, optional): the output tensor.
dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.
Default: if ``None``, uses a global default (see :func:`torch.set_default_dtype`).
layout (:class:`torch.layout`, optional): the desired layout of returned Tensor.
Default: ``torch.strided``.
device (:class:`torch.device`, optional): the desired device of returned tensor.
Default: if ``None``, uses the current device for the default tensor type
(see :func:`torch.set_default_device`). :attr:`device` will be the CPU
for CPU tensor types and the current CUDA device for CUDA tensor types.
requires_grad (bool, optional): If autograd should record operations on the
returned tensor. Default: ``False``.
pin_memory (bool, optional): If set, the returned tensor is allocated in
    pinned memory. Works only for CPU tensors. Default: ``False``.
Example::
>>> torch.randn(4)
tensor([-2.1436, 0.9966, 2.3426, -0.6366])
>>> torch.randn(2, 3)
tensor([[ 1.5954, 2.8929, -1.0923],
[ 1.1719, -0.4709, -0.1996]])
.. _complex normal distribution: https://en.wikipedia.org/wiki/Complex_normal_distribution
"""
...
@overload
def randn(*size: _int, out: Optional[Tensor] = None, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor:
r"""
randn(*size, *, generator=None, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False, pin_memory=False) -> Tensor
Returns a tensor filled with random numbers from a normal distribution
with mean `0` and variance `1` (also called the standard normal
distribution).
.. math::
\text{out}_{i} \sim \mathcal{N}(0, 1)
For complex dtypes, the tensor is i.i.d. sampled from a `complex normal distribution`_ with zero mean and
unit variance as
.. math::
\text{out}_{i} \sim \mathcal{CN}(0, 1)
This is equivalent to separately sampling the real :math:`(\operatorname{Re})` and imaginary
:math:`(\operatorname{Im})` part of :math:`\text{out}_i` as
.. math::
\operatorname{Re}(\text{out}_{i}) \sim \mathcal{N}(0, \frac{1}{2}),\quad
\operatorname{Im}(\text{out}_{i}) \sim \mathcal{N}(0, \frac{1}{2})
The shape of the tensor is defined by the variable argument :attr:`size`.
Args:
size (int...): a sequence of integers defining the shape of the output tensor.
Can be a variable number of arguments or a collection like a list or tuple.
Keyword args:
generator (:class:`torch.Generator`, optional): a pseudorandom number generator for sampling
out (Tensor, optional): the output tensor.
dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.
Default: if ``None``, uses a global default (see :func:`torch.set_default_dtype`).
layout (:class:`torch.layout`, optional): the desired layout of returned Tensor.
Default: ``torch.strided``.
device (:class:`torch.device`, optional): the desired device of returned tensor.
Default: if ``None``, uses the current device for the default tensor type
(see :func:`torch.set_default_device`). :attr:`device` will be the CPU
for CPU tensor types and the current CUDA device for CUDA tensor types.
requires_grad (bool, optional): If autograd should record operations on the
returned tensor. Default: ``False``.
pin_memory (bool, optional): If set, the returned tensor is allocated in
    pinned memory. Works only for CPU tensors. Default: ``False``.
Example::
>>> torch.randn(4)
tensor([-2.1436, 0.9966, 2.3426, -0.6366])
>>> torch.randn(2, 3)
tensor([[ 1.5954, 2.8929, -1.0923],
[ 1.1719, -0.4709, -0.1996]])
.. _complex normal distribution: https://en.wikipedia.org/wiki/Complex_normal_distribution
"""
...
@overload
def randn(size: Sequence[Union[_int, SymInt]], *, names: Optional[Sequence[Union[str, ellipsis, None]]], dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor:
r"""
randn(*size, *, generator=None, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False, pin_memory=False) -> Tensor
Returns a tensor filled with random numbers from a normal distribution
with mean `0` and variance `1` (also called the standard normal
distribution).
.. math::
\text{out}_{i} \sim \mathcal{N}(0, 1)
For complex dtypes, the tensor is i.i.d. sampled from a `complex normal distribution`_ with zero mean and
unit variance as
.. math::
\text{out}_{i} \sim \mathcal{CN}(0, 1)
This is equivalent to separately sampling the real :math:`(\operatorname{Re})` and imaginary
:math:`(\operatorname{Im})` part of :math:`\text{out}_i` as
.. math::
\operatorname{Re}(\text{out}_{i}) \sim \mathcal{N}(0, \frac{1}{2}),\quad
\operatorname{Im}(\text{out}_{i}) \sim \mathcal{N}(0, \frac{1}{2})
The shape of the tensor is defined by the variable argument :attr:`size`.
Args:
size (int...): a sequence of integers defining the shape of the output tensor.
Can be a variable number of arguments or a collection like a list or tuple.
Keyword args:
generator (:class:`torch.Generator`, optional): a pseudorandom number generator for sampling
out (Tensor, optional): the output tensor.
dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.
Default: if ``None``, uses a global default (see :func:`torch.set_default_dtype`).
layout (:class:`torch.layout`, optional): the desired layout of returned Tensor.
Default: ``torch.strided``.
device (:class:`torch.device`, optional): the desired device of returned tensor.
Default: if ``None``, uses the current device for the default tensor type
(see :func:`torch.set_default_device`). :attr:`device` will be the CPU
for CPU tensor types and the current CUDA device for CUDA tensor types.
requires_grad (bool, optional): If autograd should record operations on the
returned tensor. Default: ``False``.
pin_memory (bool, optional): If set, the returned tensor is allocated in
    pinned memory. Works only for CPU tensors. Default: ``False``.
Example::
>>> torch.randn(4)
tensor([-2.1436, 0.9966, 2.3426, -0.6366])
>>> torch.randn(2, 3)
tensor([[ 1.5954, 2.8929, -1.0923],
[ 1.1719, -0.4709, -0.1996]])
.. _complex normal distribution: https://en.wikipedia.org/wiki/Complex_normal_distribution
"""
...
@overload
def randn(*size: _int, names: Optional[Sequence[Union[str, ellipsis, None]]], dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor:
r"""
randn(*size, *, generator=None, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False, pin_memory=False) -> Tensor
Returns a tensor filled with random numbers from a normal distribution
with mean `0` and variance `1` (also called the standard normal
distribution).
.. math::
\text{out}_{i} \sim \mathcal{N}(0, 1)
For complex dtypes, the tensor is i.i.d. sampled from a `complex normal distribution`_ with zero mean and
unit variance as
.. math::
\text{out}_{i} \sim \mathcal{CN}(0, 1)
This is equivalent to separately sampling the real :math:`(\operatorname{Re})` and imaginary
:math:`(\operatorname{Im})` part of :math:`\text{out}_i` as
.. math::
\operatorname{Re}(\text{out}_{i}) \sim \mathcal{N}(0, \frac{1}{2}),\quad
\operatorname{Im}(\text{out}_{i}) \sim \mathcal{N}(0, \frac{1}{2})
The shape of the tensor is defined by the variable argument :attr:`size`.
Args:
size (int...): a sequence of integers defining the shape of the output tensor.
Can be a variable number of arguments or a collection like a list or tuple.
Keyword args:
generator (:class:`torch.Generator`, optional): a pseudorandom number generator for sampling
out (Tensor, optional): the output tensor.
dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.
Default: if ``None``, uses a global default (see :func:`torch.set_default_dtype`).
layout (:class:`torch.layout`, optional): the desired layout of returned Tensor.
Default: ``torch.strided``.
device (:class:`torch.device`, optional): the desired device of returned tensor.
Default: if ``None``, uses the current device for the default tensor type
(see :func:`torch.set_default_device`). :attr:`device` will be the CPU
for CPU tensor types and the current CUDA device for CUDA tensor types.
requires_grad (bool, optional): If autograd should record operations on the
returned tensor. Default: ``False``.
pin_memory (bool, optional): If set, the returned tensor is allocated in
    pinned memory. Works only for CPU tensors. Default: ``False``.
Example::
>>> torch.randn(4)
tensor([-2.1436, 0.9966, 2.3426, -0.6366])
>>> torch.randn(2, 3)
tensor([[ 1.5954, 2.8929, -1.0923],
[ 1.1719, -0.4709, -0.1996]])
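Seeding makes the draw reproducible. An illustrative sketch (equal seeds
yield equal tensors on the same build and device)::
    >>> g1 = torch.Generator().manual_seed(0)
    >>> g2 = torch.Generator().manual_seed(0)
    >>> torch.equal(torch.randn(2, 3, generator=g1), torch.randn(2, 3, generator=g2))
    True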
.. _complex normal distribution: https://en.wikipedia.org/wiki/Complex_normal_distribution
"""
...
def randn_like(input: Tensor, *, memory_format: Optional[memory_format] = None, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor:
r"""
randn_like(input, *, dtype=None, layout=None, device=None, requires_grad=False, memory_format=torch.preserve_format) -> Tensor
Returns a tensor with the same size as :attr:`input` that is filled with
random numbers from a normal distribution with mean 0 and variance 1. Please refer to :func:`torch.randn` for the
sampling process of complex dtypes. ``torch.randn_like(input)`` is equivalent to
``torch.randn(input.size(), dtype=input.dtype, layout=input.layout, device=input.device)``.
Args:
input (Tensor): the size of :attr:`input` will determine size of the output tensor.
Keyword args:
dtype (:class:`torch.dtype`, optional): the desired data type of returned Tensor.
Default: if ``None``, defaults to the dtype of :attr:`input`.
layout (:class:`torch.layout`, optional): the desired layout of returned tensor.
Default: if ``None``, defaults to the layout of :attr:`input`.
device (:class:`torch.device`, optional): the desired device of returned tensor.
Default: if ``None``, defaults to the device of :attr:`input`.
requires_grad (bool, optional): If autograd should record operations on the
returned tensor. Default: ``False``.
memory_format (:class:`torch.memory_format`, optional): the desired memory format of
returned Tensor. Default: ``torch.preserve_format``.
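Example (an illustrative sketch: the values are random, so only shape and
dtype are checked)::
    >>> x = torch.empty(2, 3, dtype=torch.float64)
    >>> y = torch.randn_like(x)
    >>> y.shape, y.dtype
    (torch.Size([2, 3]), torch.float64)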
"""
...
@overload
def randperm(n: Union[_int, SymInt], *, generator: Optional[Generator], out: Optional[Tensor] = None, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor:
r"""
randperm(n, *, generator=None, out=None, dtype=torch.int64, layout=torch.strided, device=None, requires_grad=False, pin_memory=False) -> Tensor
Returns a random permutation of integers from ``0`` to ``n - 1``.
Args:
n (int): the upper bound (exclusive)
Keyword args:
generator (:class:`torch.Generator`, optional): a pseudorandom number generator for sampling
out (Tensor, optional): the output tensor.
dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.
Default: ``torch.int64``.
layout (:class:`torch.layout`, optional): the desired layout of returned Tensor.
Default: ``torch.strided``.
device (:class:`torch.device`, optional): the desired device of returned tensor.
Default: if ``None``, uses the current device for the default tensor type
(see :func:`torch.set_default_device`). :attr:`device` will be the CPU
for CPU tensor types and the current CUDA device for CUDA tensor types.
requires_grad (bool, optional): If autograd should record operations on the
returned tensor. Default: ``False``.
pin_memory (bool, optional): If set, the returned tensor is allocated in
    pinned memory. Works only for CPU tensors. Default: ``False``.
Example::
>>> torch.randperm(4)
tensor([2, 1, 0, 3])
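A seeded :class:`torch.Generator` makes the permutation reproducible. An
illustrative sketch (equal seeds yield equal permutations on the same build
and device)::
    >>> g1 = torch.Generator().manual_seed(0)
    >>> g2 = torch.Generator().manual_seed(0)
    >>> torch.equal(torch.randperm(4, generator=g1), torch.randperm(4, generator=g2))
    True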
"""
...
@overload
def randperm(n: Union[_int, SymInt], *, out: Optional[Tensor] = None, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor:
r"""
randperm(n, *, generator=None, out=None, dtype=torch.int64, layout=torch.strided, device=None, requires_grad=False, pin_memory=False) -> Tensor
Returns a random permutation of integers from ``0`` to ``n - 1``.
Args:
n (int): the upper bound (exclusive)
Keyword args:
generator (:class:`torch.Generator`, optional): a pseudorandom number generator for sampling
out (Tensor, optional): the output tensor.
dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.
Default: ``torch.int64``.
layout (:class:`torch.layout`, optional): the desired layout of returned Tensor.
Default: ``torch.strided``.
device (:class:`torch.device`, optional): the desired device of returned tensor.
Default: if ``None``, uses the current device for the default tensor type
(see :func:`torch.set_default_device`). :attr:`device` will be the CPU
for CPU tensor types and the current CUDA device for CUDA tensor types.
requires_grad (bool, optional): If autograd should record operations on the
returned tensor. Default: ``False``.
pin_memory (bool, optional): If set, the returned tensor is allocated in
    pinned memory. Works only for CPU tensors. Default: ``False``.
Example::
>>> torch.randperm(4)
tensor([2, 1, 0, 3])
"""
...
def range(start: Number, end: Number, step: Number = 1, *, out: Optional[Tensor] = None, dtype: Optional[_dtype] = None, device: Optional[DeviceLikeType] = None, requires_grad: _bool = False, pin_memory: _bool = False) -> Tensor:
r"""
range(start=0, end, step=1, *, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False) -> Tensor
Returns a 1-D tensor of size :math:`\left\lfloor \frac{\text{end} - \text{start}}{\text{step}} \right\rfloor + 1`
with values from :attr:`start` to :attr:`end` with step :attr:`step`. Step is
the gap between two values in the tensor.
.. math::
\text{out}_{i+1} = \text{out}_i + \text{step}.
.. warning::
This function is deprecated and will be removed in a future release because its behavior is inconsistent with
Python's range builtin. Instead, use :func:`torch.arange`, which produces values in [start, end).
Args:
start (float): the starting value for the set of points. Default: ``0``.
end (float): the ending value for the set of points
step (float): the gap between each pair of adjacent points. Default: ``1``.
Keyword args:
out (Tensor, optional): the output tensor.
dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.
Default: if ``None``, uses a global default (see :func:`torch.set_default_dtype`). If `dtype` is not given, infer the data type from the other input
arguments. If any of `start`, `end`, or `step` are floating-point, the
`dtype` is inferred to be the default dtype, see
:meth:`~torch.get_default_dtype`. Otherwise, the `dtype` is inferred to
be `torch.int64`.
layout (:class:`torch.layout`, optional): the desired layout of returned Tensor.
Default: ``torch.strided``.
device (:class:`torch.device`, optional): the desired device of returned tensor.
Default: if ``None``, uses the current device for the default tensor type
(see :func:`torch.set_default_device`). :attr:`device` will be the CPU
for CPU tensor types and the current CUDA device for CUDA tensor types.
requires_grad (bool, optional): If autograd should record operations on the
returned tensor. Default: ``False``.
Example::
>>> torch.range(1, 4)
tensor([ 1., 2., 3., 4.])
>>> torch.range(1, 4, 0.5)
tensor([ 1.0000, 1.5000, 2.0000, 2.5000, 3.0000, 3.5000, 4.0000])
"""
...
def ravel(input: Tensor) -> Tensor:
r"""
ravel(input) -> Tensor
Return a contiguous flattened tensor. A copy is made only if needed.
Args:
input (Tensor): the input tensor.
Example::
>>> t = torch.tensor([[[1, 2],
... [3, 4]],
... [[5, 6],
... [7, 8]]])
>>> torch.ravel(t)
tensor([1, 2, 3, 4, 5, 6, 7, 8])
"""
...
def real(input: Tensor) -> Tensor:
r"""
real(input) -> Tensor
Returns a new tensor containing real values of the :attr:`self` tensor.
The returned tensor and :attr:`self` share the same underlying storage.
Args:
input (Tensor): the input tensor.
Example::
>>> x = torch.randn(4, dtype=torch.cfloat)
>>> x
tensor([(0.3100+0.3553j), (-0.5445-0.7896j), (-1.6492-0.0633j), (-0.0638-0.8119j)])
>>> x.real
tensor([ 0.3100, -0.5445, -1.6492, -0.0638])
"""
...
def reciprocal(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
r"""
reciprocal(input, *, out=None) -> Tensor
Returns a new tensor with the reciprocal of the elements of :attr:`input`
.. math::
\text{out}_{i} = \frac{1}{\text{input}_{i}}
.. note::
Unlike NumPy's reciprocal, torch.reciprocal supports integral inputs. Integral
inputs to reciprocal are automatically :ref:`promoted <type-promotion-doc>` to
the default scalar type.
Args:
input (Tensor): the input tensor.
Keyword args:
out (Tensor, optional): the output tensor.
Example::
>>> a = torch.randn(4)
>>> a
tensor([-0.4595, -2.1219, -1.4314, 0.7298])
>>> torch.reciprocal(a)
tensor([-2.1763, -0.4713, -0.6986, 1.3702])
"""
...
def reciprocal_(input: Tensor) -> Tensor: ...
def relu(input: Tensor) -> Tensor: ...
def relu_(input: Tensor) -> Tensor: ...
@overload
def remainder(input: Tensor, other: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
r"""
remainder(input, other, *, out=None) -> Tensor
Computes
`Python's modulus operation <https://docs.python.org/3/reference/expressions.html#binary-arithmetic-operations>`_
entrywise. The result has the same sign as the divisor :attr:`other` and its absolute value
is less than that of :attr:`other`.
It may also be defined in terms of :func:`torch.div` as
.. code:: python
torch.remainder(a, b) == a - a.div(b, rounding_mode="floor") * b
Supports :ref:`broadcasting to a common shape <broadcasting-semantics>`,
:ref:`type promotion <type-promotion-doc>`, and integer and float inputs.
.. note::
Complex inputs are not supported. In some cases, it is not mathematically
possible to satisfy the definition of a modulo operation with complex numbers.
See :func:`torch.fmod` for how division by zero is handled.
.. seealso::
:func:`torch.fmod` which implements C++'s `std::fmod <https://en.cppreference.com/w/cpp/numeric/math/fmod>`_.
This one is defined in terms of division rounding towards zero.
Args:
input (Tensor or Scalar): the dividend
other (Tensor or Scalar): the divisor
Keyword args:
out (Tensor, optional): the output tensor.
Example::
>>> torch.remainder(torch.tensor([-3., -2, -1, 1, 2, 3]), 2)
tensor([ 1., 0., 1., 1., 0., 1.])
>>> torch.remainder(torch.tensor([1, 2, 3, 4, 5]), -1.5)
tensor([ -0.5000, -1.0000, 0.0000, -0.5000, -1.0000 ])
"""
...
@overload
def remainder(self: Union[Number, _complex], other: Tensor) -> Tensor:
r"""
remainder(input, other, *, out=None) -> Tensor
Computes
`Python's modulus operation <https://docs.python.org/3/reference/expressions.html#binary-arithmetic-operations>`_
entrywise. The result has the same sign as the divisor :attr:`other` and its absolute value
is less than that of :attr:`other`.
It may also be defined in terms of :func:`torch.div` as
.. code:: python
torch.remainder(a, b) == a - a.div(b, rounding_mode="floor") * b
Supports :ref:`broadcasting to a common shape <broadcasting-semantics>`,
:ref:`type promotion <type-promotion-doc>`, and integer and float inputs.
.. note::
Complex inputs are not supported. In some cases, it is not mathematically
possible to satisfy the definition of a modulo operation with complex numbers.
See :func:`torch.fmod` for how division by zero is handled.
.. seealso::
:func:`torch.fmod` which implements C++'s `std::fmod <https://en.cppreference.com/w/cpp/numeric/math/fmod>`_.
This one is defined in terms of division rounding towards zero.
Args:
input (Tensor or Scalar): the dividend
other (Tensor or Scalar): the divisor
Keyword args:
out (Tensor, optional): the output tensor.
Example::
>>> torch.remainder(torch.tensor([-3., -2, -1, 1, 2, 3]), 2)
tensor([ 1., 0., 1., 1., 0., 1.])
>>> torch.remainder(torch.tensor([1, 2, 3, 4, 5]), -1.5)
tensor([ -0.5000, -1.0000, 0.0000, -0.5000, -1.0000 ])
"""
...
@overload
def remainder(input: Tensor, other: Union[Number, _complex], *, out: Optional[Tensor] = None) -> Tensor:
r"""
remainder(input, other, *, out=None) -> Tensor
Computes
`Python's modulus operation <https://docs.python.org/3/reference/expressions.html#binary-arithmetic-operations>`_
entrywise. The result has the same sign as the divisor :attr:`other` and its absolute value
is less than that of :attr:`other`.
It may also be defined in terms of :func:`torch.div` as
.. code:: python
torch.remainder(a, b) == a - a.div(b, rounding_mode="floor") * b
Supports :ref:`broadcasting to a common shape <broadcasting-semantics>`,
:ref:`type promotion <type-promotion-doc>`, and integer and float inputs.
.. note::
Complex inputs are not supported. In some cases, it is not mathematically
possible to satisfy the definition of a modulo operation with complex numbers.
See :func:`torch.fmod` for how division by zero is handled.
.. seealso::
:func:`torch.fmod` which implements C++'s `std::fmod <https://en.cppreference.com/w/cpp/numeric/math/fmod>`_.
This one is defined in terms of division rounding towards zero.
Args:
input (Tensor or Scalar): the dividend
other (Tensor or Scalar): the divisor
Keyword args:
out (Tensor, optional): the output tensor.
Example::
>>> torch.remainder(torch.tensor([-3., -2, -1, 1, 2, 3]), 2)
tensor([ 1., 0., 1., 1., 0., 1.])
>>> torch.remainder(torch.tensor([1, 2, 3, 4, 5]), -1.5)
tensor([ -0.5000, -1.0000, 0.0000, -0.5000, -1.0000 ])
"""
...
def renorm(input: Tensor, p: Union[Number, _complex], dim: _int, maxnorm: Union[Number, _complex], *, out: Optional[Tensor] = None) -> Tensor:
r"""
renorm(input, p, dim, maxnorm, *, out=None) -> Tensor
Returns a tensor where each sub-tensor of :attr:`input` along dimension
:attr:`dim` is normalized such that the `p`-norm of the sub-tensor is lower
than the value :attr:`maxnorm`
.. note:: If the norm of a row is lower than `maxnorm`, the row is unchanged
Args:
input (Tensor): the input tensor.
p (float): the power for the norm computation
dim (int): the dimension to slice over to get the sub-tensors
maxnorm (float): the maximum norm to keep each sub-tensor under
Keyword args:
out (Tensor, optional): the output tensor.
Example::
>>> x = torch.ones(3, 3)
>>> x[1].fill_(2)
tensor([ 2., 2., 2.])
>>> x[2].fill_(3)
tensor([ 3., 3., 3.])
>>> x
tensor([[ 1., 1., 1.],
[ 2., 2., 2.],
[ 3., 3., 3.]])
>>> torch.renorm(x, 1, 0, 5)
tensor([[ 1.0000, 1.0000, 1.0000],
[ 1.6667, 1.6667, 1.6667],
[ 1.6667, 1.6667, 1.6667]])
"""
...
@overload
def repeat_interleave(input: Tensor, repeats: Tensor, dim: Optional[_int] = None, *, output_size: Optional[Union[_int, SymInt]] = None) -> Tensor:
r"""
repeat_interleave(input, repeats, dim=None, *, output_size=None) -> Tensor
Repeat elements of a tensor.
.. warning::
This is different from :meth:`torch.Tensor.repeat` but similar to ``numpy.repeat``.
Args:
input (Tensor): the input tensor.
repeats (Tensor or int): The number of repetitions for each element.
repeats is broadcasted to fit the shape of the given axis.
dim (int, optional): The dimension along which to repeat values.
By default, use the flattened input array, and return a flat output
array.
Keyword args:
output_size (int, optional): Total output size for the given axis
(e.g. sum of repeats). If given, it will avoid stream synchronization
needed to calculate output shape of the tensor.
Returns:
Tensor: Repeated tensor which has the same shape as input, except along the given axis.
Example::
>>> x = torch.tensor([1, 2, 3])
>>> x.repeat_interleave(2)
tensor([1, 1, 2, 2, 3, 3])
>>> y = torch.tensor([[1, 2], [3, 4]])
>>> torch.repeat_interleave(y, 2)
tensor([1, 1, 2, 2, 3, 3, 4, 4])
>>> torch.repeat_interleave(y, 3, dim=1)
tensor([[1, 1, 1, 2, 2, 2],
[3, 3, 3, 4, 4, 4]])
>>> torch.repeat_interleave(y, torch.tensor([1, 2]), dim=0)
tensor([[1, 2],
[3, 4],
[3, 4]])
>>> torch.repeat_interleave(y, torch.tensor([1, 2]), dim=0, output_size=3)
tensor([[1, 2],
[3, 4],
[3, 4]])
If `repeats` is `tensor([n1, n2, n3, ...])`, then the output will be
`tensor([0, 0, ..., 1, 1, ..., 2, 2, ..., ...])` where `0` appears `n1` times,
`1` appears `n2` times, `2` appears `n3` times, etc.
.. function:: repeat_interleave(repeats, *) -> Tensor
:noindex:
Repeats 0 repeats[0] times, 1 repeats[1] times, 2 repeats[2] times, etc.
Args:
repeats (Tensor): The number of repetitions for each element.
Returns:
Tensor: Repeated tensor of size `sum(repeats)`.
Example::
>>> torch.repeat_interleave(torch.tensor([1, 2, 3]))
tensor([0, 1, 1, 2, 2, 2])
"""
...
@overload
def repeat_interleave(repeats: Tensor, *, output_size: Optional[Union[_int, SymInt]] = None) -> Tensor:
r"""
repeat_interleave(input, repeats, dim=None, *, output_size=None) -> Tensor
Repeat elements of a tensor.
.. warning::
This is different from :meth:`torch.Tensor.repeat` but similar to ``numpy.repeat``.
Args:
input (Tensor): the input tensor.
repeats (Tensor or int): The number of repetitions for each element.
repeats is broadcasted to fit the shape of the given axis.
dim (int, optional): The dimension along which to repeat values.
By default, use the flattened input array, and return a flat output
array.
Keyword args:
output_size (int, optional): Total output size for the given axis
(e.g. sum of repeats). If given, it will avoid stream synchronization
needed to calculate output shape of the tensor.
Returns:
Tensor: Repeated tensor which has the same shape as input, except along the given axis.
Example::
>>> x = torch.tensor([1, 2, 3])
>>> x.repeat_interleave(2)
tensor([1, 1, 2, 2, 3, 3])
>>> y = torch.tensor([[1, 2], [3, 4]])
>>> torch.repeat_interleave(y, 2)
tensor([1, 1, 2, 2, 3, 3, 4, 4])
>>> torch.repeat_interleave(y, 3, dim=1)
tensor([[1, 1, 1, 2, 2, 2],
[3, 3, 3, 4, 4, 4]])
>>> torch.repeat_interleave(y, torch.tensor([1, 2]), dim=0)
tensor([[1, 2],
[3, 4],
[3, 4]])
>>> torch.repeat_interleave(y, torch.tensor([1, 2]), dim=0, output_size=3)
tensor([[1, 2],
[3, 4],
[3, 4]])
If `repeats` is `tensor([n1, n2, n3, ...])`, then the output will be
`tensor([0, 0, ..., 1, 1, ..., 2, 2, ..., ...])` where `0` appears `n1` times,
`1` appears `n2` times, `2` appears `n3` times, etc.
.. function:: repeat_interleave(repeats, *) -> Tensor
:noindex:
Repeats 0 repeats[0] times, 1 repeats[1] times, 2 repeats[2] times, etc.
Args:
repeats (Tensor): The number of repetitions for each element.
Returns:
Tensor: Repeated tensor of size `sum(repeats)`.
Example::
>>> torch.repeat_interleave(torch.tensor([1, 2, 3]))
tensor([0, 1, 1, 2, 2, 2])
"""
...
@overload
def repeat_interleave(input: Tensor, repeats: Union[_int, SymInt], dim: Optional[_int] = None, *, output_size: Optional[Union[_int, SymInt]] = None) -> Tensor:
r"""
repeat_interleave(input, repeats, dim=None, *, output_size=None) -> Tensor
Repeat elements of a tensor.
.. warning::
This is different from :meth:`torch.Tensor.repeat` but similar to ``numpy.repeat``.
Args:
input (Tensor): the input tensor.
repeats (Tensor or int): The number of repetitions for each element.
repeats is broadcasted to fit the shape of the given axis.
dim (int, optional): The dimension along which to repeat values.
By default, use the flattened input array, and return a flat output
array.
Keyword args:
output_size (int, optional): Total output size for the given axis
(e.g. sum of repeats). If given, it will avoid stream synchronization
needed to calculate output shape of the tensor.
Returns:
Tensor: Repeated tensor which has the same shape as input, except along the given axis.
Example::
>>> x = torch.tensor([1, 2, 3])
>>> x.repeat_interleave(2)
tensor([1, 1, 2, 2, 3, 3])
>>> y = torch.tensor([[1, 2], [3, 4]])
>>> torch.repeat_interleave(y, 2)
tensor([1, 1, 2, 2, 3, 3, 4, 4])
>>> torch.repeat_interleave(y, 3, dim=1)
tensor([[1, 1, 1, 2, 2, 2],
[3, 3, 3, 4, 4, 4]])
>>> torch.repeat_interleave(y, torch.tensor([1, 2]), dim=0)
tensor([[1, 2],
[3, 4],
[3, 4]])
>>> torch.repeat_interleave(y, torch.tensor([1, 2]), dim=0, output_size=3)
tensor([[1, 2],
[3, 4],
[3, 4]])
If `repeats` is `tensor([n1, n2, n3, ...])`, then the output will be
`tensor([0, 0, ..., 1, 1, ..., 2, 2, ..., ...])` where `0` appears `n1` times,
`1` appears `n2` times, `2` appears `n3` times, etc.
.. function:: repeat_interleave(repeats, *) -> Tensor
:noindex:
Repeats 0 repeats[0] times, 1 repeats[1] times, 2 repeats[2] times, etc.
Args:
repeats (Tensor): The number of repetitions for each element.
Returns:
Tensor: Repeated tensor of size `sum(repeats)`.
Example::
>>> torch.repeat_interleave(torch.tensor([1, 2, 3]))
tensor([0, 1, 1, 2, 2, 2])
"""
...
def reshape(input: Tensor, shape: Sequence[Union[_int, SymInt]]) -> Tensor:
r"""
reshape(input, shape) -> Tensor
Returns a tensor with the same data and number of elements as :attr:`input`,
but with the specified shape. When possible, the returned tensor will be a view
of :attr:`input`. Otherwise, it will be a copy. Contiguous inputs and inputs
with compatible strides can be reshaped without copying, but you should not
depend on the copying vs. viewing behavior.
See :meth:`torch.Tensor.view` on when it is possible to return a view.
A single dimension may be -1, in which case it's inferred from the remaining
dimensions and the number of elements in :attr:`input`.
Args:
input (Tensor): the tensor to be reshaped
shape (tuple of int): the new shape
Example::
>>> a = torch.arange(4.)
>>> torch.reshape(a, (2, 2))
tensor([[ 0., 1.],
[ 2., 3.]])
>>> b = torch.tensor([[0, 1], [2, 3]])
>>> torch.reshape(b, (-1,))
tensor([ 0, 1, 2, 3])
"""
...
def resize_as_(input: Tensor, the_template: Tensor, *, memory_format: Optional[memory_format] = None) -> Tensor: ...
def resize_as_sparse_(input: Tensor, the_template: Tensor) -> Tensor: ...
def resolve_conj(input: Tensor) -> Tensor:
r"""
resolve_conj(input) -> Tensor
Returns a new tensor with materialized conjugation if :attr:`input`'s conjugate bit is set to `True`,
else returns :attr:`input`. The output tensor will always have its conjugate bit set to `False`.
Args:
input (Tensor): the input tensor.
Example::
>>> x = torch.tensor([-1 + 1j, -2 + 2j, 3 - 3j])
>>> y = x.conj()
>>> y.is_conj()
True
>>> z = y.resolve_conj()
>>> z
tensor([-1 - 1j, -2 - 2j, 3 + 3j])
>>> z.is_conj()
False
"""
...
def resolve_neg(input: Tensor) -> Tensor:
r"""
resolve_neg(input) -> Tensor
Returns a new tensor with materialized negation if :attr:`input`'s negative bit is set to `True`,
else returns :attr:`input`. The output tensor will always have its negative bit set to `False`.
Args:
input (Tensor): the input tensor.
Example::
>>> x = torch.tensor([-1 + 1j, -2 + 2j, 3 - 3j])
>>> y = x.conj()
>>> z = y.imag
>>> z.is_neg()
True
>>> out = z.resolve_neg()
>>> out
tensor([-1., -2., 3.])
>>> out.is_neg()
False
"""
...
@overload
def result_type(tensor: Tensor, other: Tensor) -> _dtype:
r"""
result_type(tensor1, tensor2) -> dtype
Returns the :class:`torch.dtype` that would result from performing an arithmetic
operation on the provided input tensors. See type promotion :ref:`documentation <type-promotion-doc>`
for more information on the type promotion logic.
Args:
tensor1 (Tensor or Number): an input tensor or number
tensor2 (Tensor or Number): an input tensor or number
Example::
>>> torch.result_type(torch.tensor([1, 2], dtype=torch.int), 1.0)
torch.float32
>>> torch.result_type(torch.tensor([1, 2], dtype=torch.uint8), torch.tensor(1))
torch.uint8
"""
...
@overload
def result_type(scalar: Union[Number, _complex], tensor: Tensor) -> _dtype:
r"""
result_type(tensor1, tensor2) -> dtype
Returns the :class:`torch.dtype` that would result from performing an arithmetic
operation on the provided input tensors. See type promotion :ref:`documentation <type-promotion-doc>`
for more information on the type promotion logic.
Args:
tensor1 (Tensor or Number): an input tensor or number
tensor2 (Tensor or Number): an input tensor or number
Example::
>>> torch.result_type(torch.tensor([1, 2], dtype=torch.int), 1.0)
torch.float32
>>> torch.result_type(torch.tensor([1, 2], dtype=torch.uint8), torch.tensor(1))
torch.uint8
"""
...
@overload
def result_type(tensor: Tensor, other: Union[Number, _complex]) -> _dtype:
r"""
result_type(tensor1, tensor2) -> dtype
Returns the :class:`torch.dtype` that would result from performing an arithmetic
operation on the provided input tensors. See type promotion :ref:`documentation <type-promotion-doc>`
for more information on the type promotion logic.
Args:
tensor1 (Tensor or Number): an input tensor or number
tensor2 (Tensor or Number): an input tensor or number
Example::
>>> torch.result_type(torch.tensor([1, 2], dtype=torch.int), 1.0)
torch.float32
>>> torch.result_type(torch.tensor([1, 2], dtype=torch.uint8), torch.tensor(1))
torch.uint8
"""
...
@overload
def result_type(scalar1: Union[Number, _complex], scalar2: Union[Number, _complex]) -> _dtype:
r"""
result_type(tensor1, tensor2) -> dtype
Returns the :class:`torch.dtype` that would result from performing an arithmetic
operation on the provided input tensors. See type promotion :ref:`documentation <type-promotion-doc>`
for more information on the type promotion logic.
Args:
tensor1 (Tensor or Number): an input tensor or number
tensor2 (Tensor or Number): an input tensor or number
Example::
>>> torch.result_type(torch.tensor([1, 2], dtype=torch.int), 1.0)
torch.float32
>>> torch.result_type(torch.tensor([1, 2], dtype=torch.uint8), torch.tensor(1))
torch.uint8
"""
...
@overload
def rnn_relu(data: Tensor, batch_sizes: Tensor, hx: Tensor, params: Union[Tuple[Tensor, ...], List[Tensor]], has_biases: _bool, num_layers: _int, dropout: _float, train: _bool, bidirectional: _bool) -> Tuple[Tensor, Tensor]: ...
@overload
def rnn_relu(input: Tensor, hx: Tensor, params: Union[Tuple[Tensor, ...], List[Tensor]], has_biases: _bool, num_layers: _int, dropout: _float, train: _bool, bidirectional: _bool, batch_first: _bool) -> Tuple[Tensor, Tensor]: ...
def rnn_relu_cell(input: Tensor, hx: Tensor, w_ih: Tensor, w_hh: Tensor, b_ih: Optional[Tensor] = None, b_hh: Optional[Tensor] = None) -> Tensor: ...
@overload
def rnn_tanh(data: Tensor, batch_sizes: Tensor, hx: Tensor, params: Union[Tuple[Tensor, ...], List[Tensor]], has_biases: _bool, num_layers: _int, dropout: _float, train: _bool, bidirectional: _bool) -> Tuple[Tensor, Tensor]: ...
@overload
def rnn_tanh(input: Tensor, hx: Tensor, params: Union[Tuple[Tensor, ...], List[Tensor]], has_biases: _bool, num_layers: _int, dropout: _float, train: _bool, bidirectional: _bool, batch_first: _bool) -> Tuple[Tensor, Tensor]: ...
def rnn_tanh_cell(input: Tensor, hx: Tensor, w_ih: Tensor, w_hh: Tensor, b_ih: Optional[Tensor] = None, b_hh: Optional[Tensor] = None) -> Tensor: ...
def roll(input: Tensor, shifts: Union[Union[_int, SymInt], Sequence[Union[_int, SymInt]]], dims: Union[_int, _size] = ()) -> Tensor:
r"""
roll(input, shifts, dims=None) -> Tensor
Roll the tensor :attr:`input` along the given dimension(s). Elements that are
shifted beyond the last position are re-introduced at the first position. If
:attr:`dims` is `None`, the tensor will be flattened before rolling and then
restored to the original shape.
Args:
input (Tensor): the input tensor.
shifts (int or tuple of ints): The number of places by which the elements
of the tensor are shifted. If shifts is a tuple, dims must be a tuple of
the same size, and each dimension will be rolled by the corresponding
value
dims (int or tuple of ints): Axis along which to roll
Example::
>>> x = torch.tensor([1, 2, 3, 4, 5, 6, 7, 8]).view(4, 2)
>>> x
tensor([[1, 2],
[3, 4],
[5, 6],
[7, 8]])
>>> torch.roll(x, 1)
tensor([[8, 1],
[2, 3],
[4, 5],
[6, 7]])
>>> torch.roll(x, 1, 0)
tensor([[7, 8],
[1, 2],
[3, 4],
[5, 6]])
>>> torch.roll(x, -1, 0)
tensor([[3, 4],
[5, 6],
[7, 8],
[1, 2]])
>>> torch.roll(x, shifts=(2, 1), dims=(0, 1))
tensor([[6, 5],
[8, 7],
[2, 1],
[4, 3]])
"""
...
def rot90(input: Tensor, k: _int = 1, dims: _size = (0,1)) -> Tensor:
r"""
rot90(input, k=1, dims=[0,1]) -> Tensor
Rotates an n-D tensor by 90 degrees in the plane specified by the :attr:`dims` axes.
The rotation direction is from the first towards the second axis if k > 0, and from the second towards the first for k < 0.
Args:
input (Tensor): the input tensor.
k (int): number of times to rotate. Default value is 1
dims (a list or tuple): axes to rotate. Default value is [0, 1]
Example::
>>> x = torch.arange(4).view(2, 2)
>>> x
tensor([[0, 1],
[2, 3]])
>>> torch.rot90(x, 1, [0, 1])
tensor([[1, 3],
[0, 2]])
>>> x = torch.arange(8).view(2, 2, 2)
>>> x
tensor([[[0, 1],
[2, 3]],
[[4, 5],
[6, 7]]])
>>> torch.rot90(x, 1, [1, 2])
tensor([[[1, 3],
[0, 2]],
[[5, 7],
[4, 6]]])
"""
...
@overload
def round(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
r"""
round(input, *, decimals=0, out=None) -> Tensor
Rounds elements of :attr:`input` to the nearest integer.
For integer inputs, follows the array-api convention of returning a
copy of the input tensor.
The output tensor has the same dtype as :attr:`input`.
.. note::
This function implements the "round half to even" rule to
break ties when a number is equidistant from two
integers (e.g. `round(2.5)` is 2).
When the :attr:`decimals` argument is specified, the
algorithm used is similar to NumPy's `around`. This
algorithm is fast but inexact and it can easily
overflow for low precision dtypes.
E.g. `round(tensor([10000], dtype=torch.float16), decimals=3)` is `inf`.
.. seealso::
:func:`torch.ceil`, which rounds up.
:func:`torch.floor`, which rounds down.
:func:`torch.trunc`, which rounds towards zero.
Args:
input (Tensor): the input tensor.
decimals (int): Number of decimal places to round to (default: 0).
If decimals is negative, it specifies the number of positions
to the left of the decimal point.
Keyword args:
out (Tensor, optional): the output tensor.
Example::
>>> torch.round(torch.tensor((4.7, -2.3, 9.1, -7.7)))
tensor([ 5., -2., 9., -8.])
>>> # Values equidistant from two integers are rounded towards
>>> # the nearest even value (zero is treated as even)
>>> torch.round(torch.tensor([-0.5, 0.5, 1.5, 2.5]))
tensor([-0., 0., 2., 2.])
>>> # A positive decimals argument rounds to that decimal place
>>> torch.round(torch.tensor([0.1234567]), decimals=3)
tensor([0.1230])
>>> # A negative decimals argument rounds to the left of the decimal
>>> torch.round(torch.tensor([1200.1234567]), decimals=-3)
tensor([1000.])
"""
...
@overload
def round(input: Tensor, *, decimals: _int, out: Optional[Tensor] = None) -> Tensor:
r"""
round(input, *, decimals=0, out=None) -> Tensor
Rounds elements of :attr:`input` to the nearest integer.
For integer inputs, follows the array-api convention of returning a
copy of the input tensor.
The output tensor has the same dtype as :attr:`input`.
.. note::
This function implements the "round half to even" rule to
break ties when a number is equidistant from two
integers (e.g. `round(2.5)` is 2).
When the :attr:`decimals` argument is specified, the
algorithm used is similar to NumPy's `around`. This
algorithm is fast but inexact and it can easily
overflow for low precision dtypes.
E.g. `round(tensor([10000], dtype=torch.float16), decimals=3)` is `inf`.
.. seealso::
:func:`torch.ceil`, which rounds up.
:func:`torch.floor`, which rounds down.
:func:`torch.trunc`, which rounds towards zero.
Args:
input (Tensor): the input tensor.
decimals (int): Number of decimal places to round to (default: 0).
If decimals is negative, it specifies the number of positions
to the left of the decimal point.
Keyword args:
out (Tensor, optional): the output tensor.
Example::
>>> torch.round(torch.tensor((4.7, -2.3, 9.1, -7.7)))
tensor([ 5., -2., 9., -8.])
>>> # Values equidistant from two integers are rounded towards
>>> # the nearest even value (zero is treated as even)
>>> torch.round(torch.tensor([-0.5, 0.5, 1.5, 2.5]))
tensor([-0., 0., 2., 2.])
>>> # A positive decimals argument rounds to that decimal place
>>> torch.round(torch.tensor([0.1234567]), decimals=3)
tensor([0.1230])
>>> # A negative decimals argument rounds to the left of the decimal
>>> torch.round(torch.tensor([1200.1234567]), decimals=-3)
tensor([1000.])
"""
...
@overload
def round_(input: Tensor) -> Tensor: ...
@overload
def round_(input: Tensor, *, decimals: _int) -> Tensor: ...
def row_indices_copy(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ...
def row_stack(tensors: Union[Tuple[Tensor, ...], List[Tensor]], *, out: Optional[Tensor] = None) -> Tensor:
r"""
row_stack(tensors, *, out=None) -> Tensor
Alias of :func:`torch.vstack`.
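Example (an illustrative sketch; ``row_stack`` behaves exactly like ``vstack``,
so two 1-D tensors become the rows of a 2-D result)::
>>> a = torch.tensor([1, 2, 3])
>>> b = torch.tensor([4, 5, 6])
>>> torch.row_stack((a, b))
tensor([[1, 2, 3],
[4, 5, 6]])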
"""
...
def rrelu(input: Tensor, lower: Union[Number, _complex] = 0.125, upper: Union[Number, _complex] = 0.3333333333333333, training: _bool = False, generator: Optional[Generator] = None) -> Tensor: ...
def rrelu_(input: Tensor, lower: Union[Number, _complex] = 0.125, upper: Union[Number, _complex] = 0.3333333333333333, training: _bool = False, generator: Optional[Generator] = None) -> Tensor: ...
def rsqrt(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
r"""
rsqrt(input, *, out=None) -> Tensor
Returns a new tensor with the reciprocal of the square-root of each of
the elements of :attr:`input`.
.. math::
\text{out}_{i} = \frac{1}{\sqrt{\text{input}_{i}}}
Args:
input (Tensor): the input tensor.
Keyword args:
out (Tensor, optional): the output tensor.
Example::
>>> a = torch.randn(4)
>>> a
tensor([-0.0370, 0.2970, 1.5420, -0.9105])
>>> torch.rsqrt(a)
tensor([ nan, 1.8351, 0.8053, nan])
"""
...
def rsqrt_(input: Tensor) -> Tensor: ...
@overload
def rsub(input: Tensor, other: Tensor, *, alpha: Union[Number, _complex] = 1) -> Tensor: ...
@overload
def rsub(input: Tensor, other: Union[Number, _complex], alpha: Union[Number, _complex] = 1) -> Tensor: ...
def saddmm(input: Tensor, mat1: Tensor, mat2: Tensor, *, beta: Number = 1, alpha: Number = 1, out: Optional[Tensor] = None) -> Tensor: ...
def scalar_tensor(s: Union[Number, _complex], *, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[DeviceLikeType] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: ...
@overload
def scatter(input: Tensor, dim: _int, index: Tensor, src: Tensor, *, reduce: str, out: Optional[Tensor] = None) -> Tensor:
r"""
scatter(input, dim, index, src) -> Tensor
Out-of-place version of :meth:`torch.Tensor.scatter_`
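A minimal illustrative sketch (outputs shown are what these inputs should
produce; see :meth:`torch.Tensor.scatter_` for the full semantics)::
>>> src = torch.arange(1, 11).reshape((2, 5))
>>> index = torch.tensor([[0, 1, 2, 0]])
>>> # for dim=0: out[index[i][j]][j] = src[i][j]
>>> torch.scatter(torch.zeros(3, 5, dtype=src.dtype), 0, index, src)
tensor([[1, 0, 0, 4, 0],
[0, 2, 0, 0, 0],
[0, 0, 3, 0, 0]])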
"""
...
@overload
def scatter(input: Tensor, dim: _int, index: Tensor, src: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
r"""
scatter(input, dim, index, src) -> Tensor
Out-of-place version of :meth:`torch.Tensor.scatter_`
"""
...
@overload
def scatter(input: Tensor, dim: _int, index: Tensor, value: Union[Number, _complex], *, reduce: str, out: Optional[Tensor] = None) -> Tensor:
r"""
scatter(input, dim, index, src) -> Tensor
Out-of-place version of :meth:`torch.Tensor.scatter_`
"""
...
@overload
def scatter(input: Tensor, dim: Union[str, ellipsis, None], index: Tensor, src: Tensor) -> Tensor:
r"""
scatter(input, dim, index, src) -> Tensor
Out-of-place version of :meth:`torch.Tensor.scatter_`
"""
...
@overload
def scatter(input: Tensor, dim: _int, index: Tensor, value: Union[Number, _complex], *, out: Optional[Tensor] = None) -> Tensor:
r"""
scatter(input, dim, index, src) -> Tensor
Out-of-place version of :meth:`torch.Tensor.scatter_`
"""
...
@overload
def scatter(input: Tensor, dim: Union[str, ellipsis, None], index: Tensor, value: Union[Number, _complex]) -> Tensor:
r"""
scatter(input, dim, index, src) -> Tensor
Out-of-place version of :meth:`torch.Tensor.scatter_`
"""
...
@overload
def scatter_add(input: Tensor, dim: _int, index: Tensor, src: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
r"""
scatter_add(input, dim, index, src) -> Tensor
Out-of-place version of :meth:`torch.Tensor.scatter_add_`
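A minimal illustrative sketch (outputs shown are what these inputs should
produce; see :meth:`torch.Tensor.scatter_add_` for the full semantics)::
>>> src = torch.ones((2, 5))
>>> index = torch.tensor([[0, 1, 2, 0, 0]])
>>> # for dim=0: out[index[i][j]][j] += src[i][j]
>>> torch.scatter_add(torch.zeros(3, 5), 0, index, src)
tensor([[1., 0., 0., 1., 1.],
[0., 1., 0., 0., 0.],
[0., 0., 1., 0., 0.]])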
"""
...
@overload
def scatter_add(input: Tensor, dim: Union[str, ellipsis, None], index: Tensor, src: Tensor) -> Tensor:
r"""
scatter_add(input, dim, index, src) -> Tensor
Out-of-place version of :meth:`torch.Tensor.scatter_add_`
"""
...
def scatter_reduce(input: Tensor, dim: _int, index: Tensor, src: Tensor, reduce: str, *, include_self: _bool = True, out: Optional[Tensor] = None) -> Tensor:
r"""
scatter_reduce(input, dim, index, src, reduce, *, include_self=True) -> Tensor
Out-of-place version of :meth:`torch.Tensor.scatter_reduce_`
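A minimal illustrative sketch (outputs shown are what these inputs should
produce; with the default ``include_self=True`` the values already in
``input`` take part in the reduction)::
>>> src = torch.tensor([1., 2., 3., 4., 5., 6.])
>>> index = torch.tensor([0, 1, 0, 1, 2, 1])
>>> input = torch.tensor([1., 2., 3., 4.])
>>> # position 0 reduces 1 (self) + 1 + 3; position 1 reduces 2 (self) + 2 + 4 + 6
>>> torch.scatter_reduce(input, 0, index, src, reduce="sum")
tensor([5., 14., 8., 4.])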
"""
...
@overload
def searchsorted(sorted_sequence: Tensor, input: Tensor, *, out_int32: _bool = False, right: _bool = False, side: Optional[str] = None, sorter: Optional[Tensor] = None, out: Optional[Tensor] = None) -> Tensor:
r"""
searchsorted(sorted_sequence, values, *, out_int32=False, right=False, side=None, out=None, sorter=None) -> Tensor
Find the indices from the *innermost* dimension of :attr:`sorted_sequence` such that, if the
corresponding values in :attr:`values` were inserted before the indices, when sorted, the order
of the corresponding *innermost* dimension within :attr:`sorted_sequence` would be preserved.
Return a new tensor with the same size as :attr:`values`. More formally,
the returned index satisfies the following rules:
.. list-table::
:widths: 12 10 78
:header-rows: 1
* - :attr:`sorted_sequence`
- :attr:`right`
- *returned index satisfies*
* - 1-D
- False
- ``sorted_sequence[i-1] < values[m][n]...[l][x] <= sorted_sequence[i]``
* - 1-D
- True
- ``sorted_sequence[i-1] <= values[m][n]...[l][x] < sorted_sequence[i]``
* - N-D
- False
- ``sorted_sequence[m][n]...[l][i-1] < values[m][n]...[l][x] <= sorted_sequence[m][n]...[l][i]``
* - N-D
- True
- ``sorted_sequence[m][n]...[l][i-1] <= values[m][n]...[l][x] < sorted_sequence[m][n]...[l][i]``
Args:
sorted_sequence (Tensor): N-D or 1-D tensor, containing a monotonically increasing sequence on the *innermost*
dimension unless :attr:`sorter` is provided, in which case the sequence does not
need to be sorted
values (Tensor or Scalar): N-D tensor or a Scalar containing the search value(s).
Keyword args:
out_int32 (bool, optional): indicate the output data type. torch.int32 if True, torch.int64 otherwise.
Default value is False, i.e. default output data type is torch.int64.
right (bool, optional): if False, return the first suitable location that is found. If True, return the
last such index. If no suitable index is found, return 0 for non-numerical values
(e.g. nan, inf) or the size of the *innermost* dimension within :attr:`sorted_sequence`
(one past the last index of the *innermost* dimension). In other words, if False,
gets the lower bound index for each value in :attr:`values` on the corresponding
*innermost* dimension of the :attr:`sorted_sequence`. If True, gets the upper
bound index instead. Default value is False. :attr:`side` does the same and is
preferred. It will error if :attr:`side` is set to "left" while this is True.
side (str, optional): the same as :attr:`right` but preferred. "left" corresponds to False for :attr:`right`
and "right" corresponds to True for :attr:`right`. It will error if this is set to
"left" while :attr:`right` is True. Default value is None.
out (Tensor, optional): the output tensor, must be the same size as :attr:`values` if provided.
sorter (LongTensor, optional): if provided, a tensor matching the shape of the unsorted
:attr:`sorted_sequence` containing a sequence of indices that sort it in the
ascending order on the innermost dimension
Example::
>>> sorted_sequence = torch.tensor([[1, 3, 5, 7, 9], [2, 4, 6, 8, 10]])
>>> sorted_sequence
tensor([[ 1, 3, 5, 7, 9],
[ 2, 4, 6, 8, 10]])
>>> values = torch.tensor([[3, 6, 9], [3, 6, 9]])
>>> values
tensor([[3, 6, 9],
[3, 6, 9]])
>>> torch.searchsorted(sorted_sequence, values)
tensor([[1, 3, 4],
[1, 2, 4]])
>>> torch.searchsorted(sorted_sequence, values, side='right')
tensor([[2, 3, 5],
[1, 3, 4]])
>>> sorted_sequence_1d = torch.tensor([1, 3, 5, 7, 9])
>>> sorted_sequence_1d
tensor([1, 3, 5, 7, 9])
>>> torch.searchsorted(sorted_sequence_1d, values)
tensor([[1, 3, 4],
[1, 3, 4]])
"""
...
@overload
def searchsorted(sorted_sequence: Tensor, self: Union[Number, _complex], *, out_int32: _bool = False, right: _bool = False, side: Optional[str] = None, sorter: Optional[Tensor] = None, out: Optional[Tensor] = None) -> Tensor:
r"""
searchsorted(sorted_sequence, values, *, out_int32=False, right=False, side=None, out=None, sorter=None) -> Tensor
Find the indices from the *innermost* dimension of :attr:`sorted_sequence` such that, if the
corresponding values in :attr:`values` were inserted before the indices, when sorted, the order
of the corresponding *innermost* dimension within :attr:`sorted_sequence` would be preserved.
Return a new tensor with the same size as :attr:`values`. More formally,
the returned index satisfies the following rules:
.. list-table::
:widths: 12 10 78
:header-rows: 1
* - :attr:`sorted_sequence`
- :attr:`right`
- *returned index satisfies*
* - 1-D
- False
- ``sorted_sequence[i-1] < values[m][n]...[l][x] <= sorted_sequence[i]``
* - 1-D
- True
- ``sorted_sequence[i-1] <= values[m][n]...[l][x] < sorted_sequence[i]``
* - N-D
- False
- ``sorted_sequence[m][n]...[l][i-1] < values[m][n]...[l][x] <= sorted_sequence[m][n]...[l][i]``
* - N-D
- True
- ``sorted_sequence[m][n]...[l][i-1] <= values[m][n]...[l][x] < sorted_sequence[m][n]...[l][i]``
Args:
sorted_sequence (Tensor): N-D or 1-D tensor, containing a monotonically increasing sequence on the *innermost*
dimension unless :attr:`sorter` is provided, in which case the sequence does not
need to be sorted
values (Tensor or Scalar): N-D tensor or a Scalar containing the search value(s).
Keyword args:
out_int32 (bool, optional): indicate the output data type. torch.int32 if True, torch.int64 otherwise.
Default value is False, i.e. default output data type is torch.int64.
right (bool, optional): if False, return the first suitable location that is found. If True, return the
last such index. If no suitable index is found, return 0 for non-numerical values
(e.g. nan, inf) or the size of the *innermost* dimension within :attr:`sorted_sequence`
(one past the last index of the *innermost* dimension). In other words, if False,
gets the lower bound index for each value in :attr:`values` on the corresponding
*innermost* dimension of the :attr:`sorted_sequence`. If True, gets the upper
bound index instead. Default value is False. :attr:`side` does the same and is
preferred. It will error if :attr:`side` is set to "left" while this is True.
side (str, optional): the same as :attr:`right` but preferred. "left" corresponds to False for :attr:`right`
and "right" corresponds to True for :attr:`right`. It will error if this is set to
"left" while :attr:`right` is True. Default value is None.
out (Tensor, optional): the output tensor, must be the same size as :attr:`values` if provided.
sorter (LongTensor, optional): if provided, a tensor matching the shape of the unsorted
:attr:`sorted_sequence` containing a sequence of indices that sort it in the
ascending order on the innermost dimension
Example::
>>> sorted_sequence = torch.tensor([[1, 3, 5, 7, 9], [2, 4, 6, 8, 10]])
>>> sorted_sequence
tensor([[ 1, 3, 5, 7, 9],
[ 2, 4, 6, 8, 10]])
>>> values = torch.tensor([[3, 6, 9], [3, 6, 9]])
>>> values
tensor([[3, 6, 9],
[3, 6, 9]])
>>> torch.searchsorted(sorted_sequence, values)
tensor([[1, 3, 4],
[1, 2, 4]])
>>> torch.searchsorted(sorted_sequence, values, side='right')
tensor([[2, 3, 5],
[1, 3, 4]])
>>> sorted_sequence_1d = torch.tensor([1, 3, 5, 7, 9])
>>> sorted_sequence_1d
tensor([1, 3, 5, 7, 9])
>>> torch.searchsorted(sorted_sequence_1d, values)
tensor([[1, 3, 4],
[1, 3, 4]])
"""
...
def segment_reduce(data: Tensor, reduce: str, *, lengths: Optional[Tensor] = None, indices: Optional[Tensor] = None, offsets: Optional[Tensor] = None, axis: _int = 0, unsafe: _bool = False, initial: Optional[Union[Number, _complex]] = None) -> Tensor: ...
@overload
def select(input: Tensor, dim: _int, index: Union[_int, SymInt]) -> Tensor:
r"""
select(input, dim, index) -> Tensor
Slices the :attr:`input` tensor along the selected dimension at the given index.
This function returns a view of the original tensor with the given dimension removed.
.. note:: If :attr:`input` is a sparse tensor and returning a view of
the tensor is not possible, a RuntimeError exception is
raised. If this is the case, consider using the
:func:`torch.select_copy` function.
Args:
input (Tensor): the input tensor.
dim (int): the dimension to slice
index (int): the index to select with
.. note::
:meth:`select` is equivalent to slicing. For example,
``tensor.select(0, index)`` is equivalent to ``tensor[index]`` and
``tensor.select(2, index)`` is equivalent to ``tensor[:,:,index]``.
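Example (an illustrative sketch; outputs shown are what these inputs
should produce)::
>>> x = torch.arange(6).reshape(2, 3)
>>> x
tensor([[0, 1, 2],
[3, 4, 5]])
>>> torch.select(x, 0, 1)  # same as x[1]
tensor([3, 4, 5])
>>> torch.select(x, 1, 2)  # same as x[:, 2]
tensor([2, 5])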
"""
...
@overload
def select(input: Tensor, dim: Union[str, ellipsis, None], index: _int) -> Tensor:
r"""
select(input, dim, index) -> Tensor
Slices the :attr:`input` tensor along the selected dimension at the given index.
This function returns a view of the original tensor with the given dimension removed.
.. note:: If :attr:`input` is a sparse tensor and returning a view of
the tensor is not possible, a RuntimeError exception is
raised. If this is the case, consider using the
:func:`torch.select_copy` function.
Args:
input (Tensor): the input tensor.
dim (int): the dimension to slice
index (int): the index to select with
.. note::
:meth:`select` is equivalent to slicing. For example,
``tensor.select(0, index)`` is equivalent to ``tensor[index]`` and
``tensor.select(2, index)`` is equivalent to ``tensor[:,:,index]``.
"""
...
def select_copy(input: Tensor, dim: _int, index: Union[_int, SymInt], *, out: Optional[Tensor] = None) -> Tensor:
r"""
Performs the same operation as :func:`torch.select`, but all output tensors
are freshly created instead of aliasing the input.
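Example (an illustrative sketch; the result has the same values as
:func:`torch.select` but fresh storage)::
>>> x = torch.tensor([[1, 2], [3, 4]])
>>> torch.select_copy(x, 0, 1)  # a copy, not a view of x
tensor([3, 4])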
"""
...
def select_scatter(input: Tensor, src: Tensor, dim: _int, index: Union[_int, SymInt]) -> Tensor:
r"""
select_scatter(input, src, dim, index) -> Tensor
Embeds the values of the :attr:`src` tensor into :attr:`input` at the given index.
This function returns a tensor with fresh storage; it does not create a view.
Args:
input (Tensor): the input tensor.
src (Tensor): The tensor to embed into :attr:`input`
dim (int): the dimension to insert the slice into.
index (int): the index to select with
.. note::
:attr:`src` must be of the proper size in order to be embedded
into :attr:`input`. Specifically, it should have the same shape as
``torch.select(input, dim, index)``
Example::
>>> a = torch.zeros(2, 2)
>>> b = torch.ones(2)
>>> a.select_scatter(b, 0, 0)
tensor([[1., 1.],
[0., 0.]])
"""
...
def selu(input: Tensor) -> Tensor: ...
def selu_(input: Tensor) -> Tensor: ...
def set_flush_denormal(mode: _bool) -> _bool:
r"""
set_flush_denormal(mode) -> bool
Disables denormal floating point numbers on CPU.
Returns ``True`` if your system supports flushing denormal numbers and it
successfully configures flush denormal mode. :meth:`~torch.set_flush_denormal`
is supported on x86 architectures supporting SSE3, and on AArch64.
Args:
mode (bool): Controls whether to enable flush denormal mode or not
Example::
>>> torch.set_flush_denormal(True)
True
>>> torch.tensor([1e-323], dtype=torch.float64)
tensor([ 0.], dtype=torch.float64)
>>> torch.set_flush_denormal(False)
True
>>> torch.tensor([1e-323], dtype=torch.float64)
tensor(9.88131e-324 *
[ 1.0000], dtype=torch.float64)
"""
...
def set_num_interop_threads(num: _int) -> None:
r"""
set_num_interop_threads(int)
Sets the number of threads used for interop parallelism
(e.g. in JIT interpreter) on CPU.
.. warning::
Can only be called once and before any inter-op parallel work
is started (e.g. JIT execution).
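Example (an illustrative sketch; the thread count is an arbitrary choice)::
>>> torch.set_num_interop_threads(4)  # call before any inter-op work starts
>>> torch.get_num_interop_threads()
4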
"""
...
def set_num_threads(num: _int) -> None:
r"""
set_num_threads(int)
Sets the number of threads used for intraop parallelism on CPU.
.. warning::
To ensure that the correct number of threads is used, set_num_threads
must be called before running eager, JIT or autograd code.
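Example (an illustrative sketch; the thread count is an arbitrary choice)::
>>> torch.set_num_threads(4)  # call before running eager, JIT or autograd code
>>> torch.get_num_threads()
4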
"""
...
def sgn(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
r"""
sgn(input, *, out=None) -> Tensor
This function is an extension of torch.sign() to complex tensors.
For complex tensors, it computes a new tensor whose elements have
the same angles as the corresponding elements of :attr:`input` and
absolute values (i.e. magnitudes) of one; for non-complex tensors,
it is equivalent to torch.sign().
.. math::
\text{out}_{i} = \begin{cases}
0 & |\text{input}_i| == 0 \\
\frac{\text{input}_i}{|\text{input}_i|} & \text{otherwise}
\end{cases}
Args:
input (Tensor): the input tensor.
Keyword args:
out (Tensor, optional): the output tensor.
Example::
>>> t = torch.tensor([3+4j, 7-24j, 0, 1+2j])
>>> t.sgn()
tensor([0.6000+0.8000j, 0.2800-0.9600j, 0.0000+0.0000j, 0.4472+0.8944j])
"""
...
def sigmoid(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
r"""
sigmoid(input, *, out=None) -> Tensor
Alias for :func:`torch.special.expit`.
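Example (an illustrative sketch; outputs shown are what these inputs
should produce, since sigmoid(0) = 0.5)::
>>> torch.sigmoid(torch.tensor([-1.0, 0.0, 1.0]))
tensor([0.2689, 0.5000, 0.7311])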
"""
...
def sigmoid_(input: Tensor) -> Tensor: ...
def sign(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
r"""
sign(input, *, out=None) -> Tensor
Returns a new tensor with the signs of the elements of :attr:`input`.
.. math::
\text{out}_{i} = \operatorname{sgn}(\text{input}_{i})
Args:
input (Tensor): the input tensor.
Keyword args:
out (Tensor, optional): the output tensor.
Example::
>>> a = torch.tensor([0.7, -1.2, 0., 2.3])
>>> a
tensor([ 0.7000, -1.2000, 0.0000, 2.3000])
>>> torch.sign(a)
tensor([ 1., -1., 0., 1.])
"""
...
def signbit(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
r"""
signbit(input, *, out=None) -> Tensor
Tests if each element of :attr:`input` has its sign bit set or not.
Args:
input (Tensor): the input tensor.
Keyword args:
out (Tensor, optional): the output tensor.
Example::
>>> a = torch.tensor([0.7, -1.2, 0., 2.3])
>>> torch.signbit(a)
tensor([ False, True, False, False])
>>> a = torch.tensor([-0.0, 0.0])
>>> torch.signbit(a)
tensor([ True, False])
.. note::
signbit handles signed zeros, so negative zero (-0) returns True.
"""
...
def sin(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
r"""
sin(input, *, out=None) -> Tensor
Returns a new tensor with the sine of the elements of :attr:`input`.
.. math::
\text{out}_{i} = \sin(\text{input}_{i})
Args:
input (Tensor): the input tensor.
Keyword args:
out (Tensor, optional): the output tensor.
Example::
>>> a = torch.randn(4)
>>> a
tensor([-0.5461, 0.1347, -2.7266, -0.2746])
>>> torch.sin(a)
tensor([-0.5194, 0.1343, -0.4032, -0.2711])
"""
...
def sin_(input: Tensor) -> Tensor: ...
def sinc(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
r"""
sinc(input, *, out=None) -> Tensor
Alias for :func:`torch.special.sinc`.
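Example (an illustrative sketch; this is the normalized sinc,
``sin(pi*x) / (pi*x)`` with ``sinc(0) = 1``)::
>>> torch.sinc(torch.tensor([0.0, 0.5]))
tensor([1.0000, 0.6366])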
"""
...
def sinc_(input: Tensor) -> Tensor: ...
def sinh(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
r"""
sinh(input, *, out=None) -> Tensor
Returns a new tensor with the hyperbolic sine of the elements of
:attr:`input`.
.. math::
\text{out}_{i} = \sinh(\text{input}_{i})
Args:
input (Tensor): the input tensor.
Keyword args:
out (Tensor, optional): the output tensor.
Example::
>>> a = torch.randn(4)
>>> a
tensor([ 0.5380, -0.8632, -0.1265, 0.9399])
>>> torch.sinh(a)
tensor([ 0.5644, -0.9744, -0.1268, 1.0845])
.. note::
When :attr:`input` is on the CPU, the implementation of torch.sinh may use
the Sleef library, which rounds very large results to infinity or negative
infinity. See `here <https://sleef.org/purec.xhtml>`_ for details.
"""
...
def sinh_(input: Tensor) -> Tensor: ...
def slice_copy(input: Tensor, dim: _int = 0, start: Optional[Union[_int, SymInt]] = None, end: Optional[Union[_int, SymInt]] = None, step: Union[_int, SymInt] = 1, *, out: Optional[Tensor] = None) -> Tensor:
r"""
Performs the same operation as :func:`torch.slice`, but all output tensors
are freshly created instead of aliasing the input.
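Example (an illustrative sketch; the result has the same values as the
corresponding slice but fresh storage)::
>>> x = torch.arange(6).reshape(2, 3)
>>> torch.slice_copy(x, dim=1, start=0, end=2)  # a copy of x[:, 0:2]
tensor([[0, 1],
[3, 4]])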
"""
...
def slice_inverse(input: Tensor, src: Tensor, dim: _int = 0, start: Optional[Union[_int, SymInt]] = None, end: Optional[Union[_int, SymInt]] = None, step: Union[_int, SymInt] = 1) -> Tensor: ...
def slice_scatter(input: Tensor, src: Tensor, dim: _int = 0, start: Optional[Union[_int, SymInt]] = None, end: Optional[Union[_int, SymInt]] = None, step: Union[_int, SymInt] = 1, *, out: Optional[Tensor] = None) -> Tensor:
r"""
slice_scatter(input, src, dim=0, start=None, end=None, step=1) -> Tensor
Embeds the values of the :attr:`src` tensor into :attr:`input` at the given
dimension.
This function returns a tensor with fresh storage; it does not create a view.
Args:
input (Tensor): the input tensor.
src (Tensor): The tensor to embed into :attr:`input`
dim (int): the dimension to insert the slice into
start (Optional[int]): the start index of where to insert the slice
end (Optional[int]): the end index of where to insert the slice
step (int): the step between elements of the slice
Example::
>>> a = torch.zeros(8, 8)
>>> b = torch.ones(2, 8)
>>> a.slice_scatter(b, start=6)
tensor([[0., 0., 0., 0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0., 0., 0., 0.],
[1., 1., 1., 1., 1., 1., 1., 1.],
[1., 1., 1., 1., 1., 1., 1., 1.]])
>>> b = torch.ones(8, 2)
>>> a.slice_scatter(b, dim=1, start=2, end=6, step=2)
tensor([[0., 0., 1., 0., 1., 0., 0., 0.],
[0., 0., 1., 0., 1., 0., 0., 0.],
[0., 0., 1., 0., 1., 0., 0., 0.],
[0., 0., 1., 0., 1., 0., 0., 0.],
[0., 0., 1., 0., 1., 0., 0., 0.],
[0., 0., 1., 0., 1., 0., 0., 0.],
[0., 0., 1., 0., 1., 0., 0., 0.],
[0., 0., 1., 0., 1., 0., 0., 0.]])
"""
...
def slogdet(input: Tensor, *, out: Union[Tensor, Tuple[Tensor, ...], List[Tensor], None] = None) -> torch.return_types.slogdet:
r"""
slogdet(input) -> (Tensor, Tensor)
Alias for :func:`torch.linalg.slogdet`
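Example (an illustrative sketch; the identity matrix has determinant 1,
so the sign is 1 and the log of the absolute determinant is 0)::
>>> sign, logabsdet = torch.slogdet(torch.eye(3))
>>> sign, logabsdet
(tensor(1.), tensor(0.))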
"""
...
def smm(input: Tensor, mat2: Tensor) -> Tensor:
r"""
smm(input, mat) -> Tensor
Performs a matrix multiplication of the sparse matrix :attr:`input`
with the dense matrix :attr:`mat`.
Args:
input (Tensor): a sparse matrix to be matrix multiplied
mat (Tensor): a dense matrix to be matrix multiplied
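Example (an illustrative sketch, assuming a sparse COO first argument;
the result is itself a sparse tensor, densified here for display)::
>>> s = torch.eye(2).to_sparse()
>>> d = torch.ones(2, 2)
>>> torch.smm(s, d).to_dense()  # identity @ ones = ones
tensor([[1., 1.],
[1., 1.]])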
"""
...
@overload
def softmax(input: Tensor, dim: _int, dtype: Optional[_dtype] = None, *, out: Optional[Tensor] = None) -> Tensor:
r"""
softmax(input, dim, *, dtype=None) -> Tensor
Alias for :func:`torch.nn.functional.softmax`.
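Example (an illustrative sketch; outputs shown are what these inputs
should produce, and the result sums to 1 along :attr:`dim`)::
>>> t = torch.tensor([1.0, 2.0, 3.0])
>>> torch.softmax(t, dim=0)
tensor([0.0900, 0.2447, 0.6652])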
"""
...
@overload
def softmax(input: Tensor, dim: Union[str, ellipsis, None], *, dtype: Optional[_dtype] = None) -> Tensor:
r"""
softmax(input, dim, *, dtype=None) -> Tensor
Alias for :func:`torch.nn.functional.softmax`.
"""
...
@overload
def sort(input: Tensor, *, stable: Optional[_bool], dim: _int = -1, descending: _bool = False, out: Union[Tensor, Tuple[Tensor, ...], List[Tensor], None] = None) -> torch.return_types.sort:
r"""
sort(input, dim=-1, descending=False, stable=False, *, out=None) -> (Tensor, LongTensor)
Sorts the elements of the :attr:`input` tensor along a given dimension
in ascending order by value.
If :attr:`dim` is not given, the last dimension of the `input` is chosen.
If :attr:`descending` is ``True`` then the elements are sorted in descending
order by value.
If :attr:`stable` is ``True`` then the sorting routine becomes stable, preserving
the order of equivalent elements.
A namedtuple of (values, indices) is returned, where the `values` are the
sorted values and `indices` are the indices of the elements in the original
`input` tensor.
Args:
input (Tensor): the input tensor.
dim (int, optional): the dimension to sort along
descending (bool, optional): controls the sorting order (ascending or descending)
stable (bool, optional): makes the sorting routine stable, which guarantees that the order
of equivalent elements is preserved.
Keyword args:
out (tuple, optional): the output tuple of (`Tensor`, `LongTensor`) that can
be optionally given to be used as output buffers
Example::
>>> x = torch.randn(3, 4)
>>> sorted, indices = torch.sort(x)
>>> sorted
tensor([[-0.2162, 0.0608, 0.6719, 2.3332],
[-0.5793, 0.0061, 0.6058, 0.9497],
[-0.5071, 0.3343, 0.9553, 1.0960]])
>>> indices
tensor([[ 1, 0, 2, 3],
[ 3, 1, 0, 2],
[ 0, 3, 1, 2]])
>>> sorted, indices = torch.sort(x, 0)
>>> sorted
tensor([[-0.5071, -0.2162, 0.6719, -0.5793],
[ 0.0608, 0.0061, 0.9497, 0.3343],
[ 0.6058, 0.9553, 1.0960, 2.3332]])
>>> indices
tensor([[ 2, 0, 0, 1],
[ 0, 1, 1, 2],
[ 1, 2, 2, 0]])
>>> x = torch.tensor([0, 1] * 9)
>>> x.sort()
torch.return_types.sort(
values=tensor([0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1]),
indices=tensor([ 2, 16, 4, 6, 14, 8, 0, 10, 12, 9, 17, 15, 13, 11, 7, 5, 3, 1]))
>>> x.sort(stable=True)
torch.return_types.sort(
values=tensor([0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1]),
indices=tensor([ 0, 2, 4, 6, 8, 10, 12, 14, 16, 1, 3, 5, 7, 9, 11, 13, 15, 17]))
"""
...
@overload
def sort(input: Tensor, dim: _int = -1, descending: _bool = False, *, out: Union[Tensor, Tuple[Tensor, ...], List[Tensor], None] = None) -> torch.return_types.sort:
r"""
sort(input, dim=-1, descending=False, stable=False, *, out=None) -> (Tensor, LongTensor)
Sorts the elements of the :attr:`input` tensor along a given dimension
in ascending order by value.
If :attr:`dim` is not given, the last dimension of the `input` is chosen.
If :attr:`descending` is ``True`` then the elements are sorted in descending
order by value.
If :attr:`stable` is ``True`` then the sorting routine becomes stable, preserving
the order of equivalent elements.
A namedtuple of (values, indices) is returned, where the `values` are the
sorted values and `indices` are the indices of the elements in the original
`input` tensor.
Args:
input (Tensor): the input tensor.
dim (int, optional): the dimension to sort along
descending (bool, optional): controls the sorting order (ascending or descending)
stable (bool, optional): makes the sorting routine stable, which guarantees that the order
of equivalent elements is preserved.
Keyword args:
out (tuple, optional): the output tuple of (`Tensor`, `LongTensor`) that can
be optionally given to be used as output buffers
Example::
>>> x = torch.randn(3, 4)
>>> sorted, indices = torch.sort(x)
>>> sorted
tensor([[-0.2162, 0.0608, 0.6719, 2.3332],
[-0.5793, 0.0061, 0.6058, 0.9497],
[-0.5071, 0.3343, 0.9553, 1.0960]])
>>> indices
tensor([[ 1, 0, 2, 3],
[ 3, 1, 0, 2],
[ 0, 3, 1, 2]])
>>> sorted, indices = torch.sort(x, 0)
>>> sorted
tensor([[-0.5071, -0.2162, 0.6719, -0.5793],
[ 0.0608, 0.0061, 0.9497, 0.3343],
[ 0.6058, 0.9553, 1.0960, 2.3332]])
>>> indices
tensor([[ 2, 0, 0, 1],
[ 0, 1, 1, 2],
[ 1, 2, 2, 0]])
>>> x = torch.tensor([0, 1] * 9)
>>> x.sort()
torch.return_types.sort(
values=tensor([0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1]),
indices=tensor([ 2, 16, 4, 6, 14, 8, 0, 10, 12, 9, 17, 15, 13, 11, 7, 5, 3, 1]))
>>> x.sort(stable=True)
torch.return_types.sort(
values=tensor([0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1]),
indices=tensor([ 0, 2, 4, 6, 8, 10, 12, 14, 16, 1, 3, 5, 7, 9, 11, 13, 15, 17]))
"""
...
@overload
def sort(input: Tensor, *, stable: Optional[_bool], dim: Union[str, ellipsis, None], descending: _bool = False, out: Union[Tensor, Tuple[Tensor, ...], List[Tensor], None] = None) -> torch.return_types.sort:
r"""
sort(input, dim=-1, descending=False, stable=False, *, out=None) -> (Tensor, LongTensor)
Sorts the elements of the :attr:`input` tensor along a given dimension
in ascending order by value.
If :attr:`dim` is not given, the last dimension of the `input` is chosen.
If :attr:`descending` is ``True`` then the elements are sorted in descending
order by value.
If :attr:`stable` is ``True`` then the sorting routine becomes stable, preserving
the order of equivalent elements.
A namedtuple of (values, indices) is returned, where the `values` are the
sorted values and `indices` are the indices of the elements in the original
`input` tensor.
Args:
input (Tensor): the input tensor.
dim (int, optional): the dimension to sort along
descending (bool, optional): controls the sorting order (ascending or descending)
stable (bool, optional): makes the sorting routine stable, which guarantees that the order
of equivalent elements is preserved.
Keyword args:
out (tuple, optional): the output tuple of (`Tensor`, `LongTensor`) that can
be optionally given to be used as output buffers
Example::
>>> x = torch.randn(3, 4)
>>> sorted, indices = torch.sort(x)
>>> sorted
tensor([[-0.2162, 0.0608, 0.6719, 2.3332],
[-0.5793, 0.0061, 0.6058, 0.9497],
[-0.5071, 0.3343, 0.9553, 1.0960]])
>>> indices
tensor([[ 1, 0, 2, 3],
[ 3, 1, 0, 2],
[ 0, 3, 1, 2]])
>>> sorted, indices = torch.sort(x, 0)
>>> sorted
tensor([[-0.5071, -0.2162, 0.6719, -0.5793],
[ 0.0608, 0.0061, 0.9497, 0.3343],
[ 0.6058, 0.9553, 1.0960, 2.3332]])
>>> indices
tensor([[ 2, 0, 0, 1],
[ 0, 1, 1, 2],
[ 1, 2, 2, 0]])
>>> x = torch.tensor([0, 1] * 9)
>>> x.sort()
torch.return_types.sort(
values=tensor([0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1]),
indices=tensor([ 2, 16, 4, 6, 14, 8, 0, 10, 12, 9, 17, 15, 13, 11, 7, 5, 3, 1]))
>>> x.sort(stable=True)
torch.return_types.sort(
values=tensor([0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1]),
indices=tensor([ 0, 2, 4, 6, 8, 10, 12, 14, 16, 1, 3, 5, 7, 9, 11, 13, 15, 17]))
"""
...
@overload
def sort(input: Tensor, dim: Union[str, ellipsis, None], descending: _bool = False, *, out: Union[Tensor, Tuple[Tensor, ...], List[Tensor], None] = None) -> torch.return_types.sort:
r"""
sort(input, dim=-1, descending=False, stable=False, *, out=None) -> (Tensor, LongTensor)
Sorts the elements of the :attr:`input` tensor along a given dimension
in ascending order by value.
If :attr:`dim` is not given, the last dimension of the `input` is chosen.
If :attr:`descending` is ``True`` then the elements are sorted in descending
order by value.
If :attr:`stable` is ``True`` then the sorting routine becomes stable, preserving
the order of equivalent elements.
A namedtuple of (values, indices) is returned, where the `values` are the
sorted values and `indices` are the indices of the elements in the original
`input` tensor.
Args:
input (Tensor): the input tensor.
dim (int, optional): the dimension to sort along
descending (bool, optional): controls the sorting order (ascending or descending)
stable (bool, optional): makes the sorting routine stable, which guarantees that the order
of equivalent elements is preserved.
Keyword args:
out (tuple, optional): the output tuple of (`Tensor`, `LongTensor`) that can
be optionally given to be used as output buffers
Example::
>>> x = torch.randn(3, 4)
>>> sorted, indices = torch.sort(x)
>>> sorted
tensor([[-0.2162, 0.0608, 0.6719, 2.3332],
[-0.5793, 0.0061, 0.6058, 0.9497],
[-0.5071, 0.3343, 0.9553, 1.0960]])
>>> indices
tensor([[ 1, 0, 2, 3],
[ 3, 1, 0, 2],
[ 0, 3, 1, 2]])
>>> sorted, indices = torch.sort(x, 0)
>>> sorted
tensor([[-0.5071, -0.2162, 0.6719, -0.5793],
[ 0.0608, 0.0061, 0.9497, 0.3343],
[ 0.6058, 0.9553, 1.0960, 2.3332]])
>>> indices
tensor([[ 2, 0, 0, 1],
[ 0, 1, 1, 2],
[ 1, 2, 2, 0]])
>>> x = torch.tensor([0, 1] * 9)
>>> x.sort()
torch.return_types.sort(
values=tensor([0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1]),
indices=tensor([ 2, 16, 4, 6, 14, 8, 0, 10, 12, 9, 17, 15, 13, 11, 7, 5, 3, 1]))
>>> x.sort(stable=True)
torch.return_types.sort(
values=tensor([0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1]),
indices=tensor([ 0, 2, 4, 6, 8, 10, 12, 14, 16, 1, 3, 5, 7, 9, 11, 13, 15, 17]))
"""
...
def sparse_bsc_tensor(ccol_indices: Union[Tensor, List], row_indices: Union[Tensor, List], values: Union[Tensor, List], size: Optional[_size] = None, *, dtype: Optional[_dtype] = None, device: Optional[DeviceLikeType] = None, requires_grad: _bool = False, check_invariants: Optional[_bool] = None) -> Tensor:
r"""
sparse_bsc_tensor(ccol_indices, row_indices, values, size=None, *, dtype=None, device=None, requires_grad=False, check_invariants=None) -> Tensor
Constructs a :ref:`sparse tensor in BSC (Block Compressed Sparse
Column) <sparse-bsc-docs>` with specified 2-dimensional blocks at the
given :attr:`ccol_indices` and :attr:`row_indices`. Sparse matrix
multiplication operations in BSC format are typically faster than those
for sparse tensors in COO format. Make sure you have a look at :ref:`the
note on the data type of the indices <sparse-bsc-docs>`.
.. note::
If the ``device`` argument is not specified the device of the given
:attr:`values` and indices tensor(s) must match. If, however, the
argument is specified the input Tensors will be converted to the
given device and in turn determine the device of the constructed
sparse tensor.
Args:
ccol_indices (array_like): (B+1)-dimensional array of size
``(*batchsize, ncolblocks + 1)``. The last element of each
batch is the number of non-zeros. This tensor encodes the
index in values and row_indices depending on where the given
column starts. Each successive number in the tensor subtracted
by the number before it denotes the number of elements in a
given column.
row_indices (array_like): Row block co-ordinates of each block in
values. (B+1)-dimensional tensor with the same length
as values.
values (array_like): Initial blocks for the tensor. Can be a list,
tuple, NumPy ``ndarray``, or other types that
represent a (1 + 2 + K)-dimensional tensor where ``K`` is the
number of dense dimensions.
size (list, tuple, :class:`torch.Size`, optional): Size of the
sparse tensor: ``(*batchsize, nrows * blocksize[0], ncols *
blocksize[1], *densesize)`` If not provided, the size will be
inferred as the minimum size big enough to hold all non-zero
blocks.
Keyword args:
dtype (:class:`torch.dtype`, optional): the desired data type of
returned tensor. Default: if None, infers data type from
:attr:`values`.
device (:class:`torch.device`, optional): the desired device of
returned tensor. Default: if None, uses the current device
for the default tensor type (see
:func:`torch.set_default_device`). :attr:`device` will be
the CPU for CPU tensor types and the current CUDA device for
CUDA tensor types.
requires_grad (bool, optional): If autograd should record operations on the
returned tensor. Default: ``False``.
check_invariants (bool, optional): If sparse tensor invariants are checked.
Default: as returned by :func:`torch.sparse.check_sparse_tensor_invariants.is_enabled`,
initially False.
Example::
>>> ccol_indices = [0, 1, 2]
>>> row_indices = [0, 1]
>>> values = [[[1, 2], [3, 4]], [[5, 6], [7, 8]]]
>>> torch.sparse_bsc_tensor(torch.tensor(ccol_indices, dtype=torch.int64),
... torch.tensor(row_indices, dtype=torch.int64),
... torch.tensor(values), dtype=torch.double)
tensor(ccol_indices=tensor([0, 1, 2]),
row_indices=tensor([0, 1]),
values=tensor([[[1., 2.],
[3., 4.]],
[[5., 6.],
[7., 8.]]]), size=(2, 2), nnz=2, dtype=torch.float64,
layout=torch.sparse_bsc)
"""
...
def sparse_bsr_tensor(crow_indices: Union[Tensor, List], col_indices: Union[Tensor, List], values: Union[Tensor, List], size: Optional[_size] = None, *, dtype: Optional[_dtype] = None, device: Optional[DeviceLikeType] = None, requires_grad: _bool = False, check_invariants: Optional[_bool] = None) -> Tensor:
r"""
sparse_bsr_tensor(crow_indices, col_indices, values, size=None, *, dtype=None, device=None, requires_grad=False, check_invariants=None) -> Tensor
Constructs a :ref:`sparse tensor in BSR (Block Compressed Sparse Row)
<sparse-bsr-docs>` with specified 2-dimensional blocks at the given
:attr:`crow_indices` and :attr:`col_indices`. Sparse matrix
multiplication operations in BSR format are typically faster than those
for sparse tensors in COO format. Make sure you have a look at :ref:`the
note on the data type of the indices <sparse-bsr-docs>`.
.. note::
If the ``device`` argument is not specified the device of the given
:attr:`values` and indices tensor(s) must match. If, however, the
argument is specified the input Tensors will be converted to the
given device and in turn determine the device of the constructed
sparse tensor.
Args:
crow_indices (array_like): (B+1)-dimensional array of size
``(*batchsize, nrowblocks + 1)``. The last element of each
batch is the number of non-zeros. This tensor encodes the
block index in values and col_indices depending on where the
given row block starts. Each successive number in the tensor
subtracted by the number before it denotes the number of
blocks in a given row.
col_indices (array_like): Column block co-ordinates of each block
in values. (B+1)-dimensional tensor with the same length as
values.
values (array_like): Initial values for the tensor. Can be a list,
tuple, NumPy ``ndarray``, scalar, or other types that
represent a (1 + 2 + K)-dimensional tensor where ``K`` is the
number of dense dimensions.
size (list, tuple, :class:`torch.Size`, optional): Size of the
sparse tensor: ``(*batchsize, nrows * blocksize[0], ncols *
blocksize[1], *densesize)`` where ``blocksize ==
values.shape[1:3]``. If not provided, the size will be
inferred as the minimum size big enough to hold all non-zero
blocks.
Keyword args:
dtype (:class:`torch.dtype`, optional): the desired data type of
returned tensor. Default: if None, infers data type from
:attr:`values`.
device (:class:`torch.device`, optional): the desired device of
returned tensor. Default: if None, uses the current device
for the default tensor type (see
:func:`torch.set_default_device`). :attr:`device` will be
the CPU for CPU tensor types and the current CUDA device for
CUDA tensor types.
requires_grad (bool, optional): If autograd should record operations on the
returned tensor. Default: ``False``.
check_invariants (bool, optional): If sparse tensor invariants are checked.
Default: as returned by :func:`torch.sparse.check_sparse_tensor_invariants.is_enabled`,
initially False.
Example::
>>> crow_indices = [0, 1, 2]
>>> col_indices = [0, 1]
>>> values = [[[1, 2], [3, 4]], [[5, 6], [7, 8]]]
>>> torch.sparse_bsr_tensor(torch.tensor(crow_indices, dtype=torch.int64),
... torch.tensor(col_indices, dtype=torch.int64),
... torch.tensor(values), dtype=torch.double)
tensor(crow_indices=tensor([0, 1, 2]),
col_indices=tensor([0, 1]),
values=tensor([[[1., 2.],
[3., 4.]],
[[5., 6.],
[7., 8.]]]), size=(2, 2), nnz=2, dtype=torch.float64,
layout=torch.sparse_bsr)
"""
...
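# A minimal runnable sketch (values assumed, mirroring the example above):
# the two BSR blocks land on the block diagonal, which ``to_dense()`` makes
# explicit.
import torch

_crow = torch.tensor([0, 1, 2], dtype=torch.int64)
_cols = torch.tensor([0, 1], dtype=torch.int64)
_blocks = torch.tensor([[[1., 2.], [3., 4.]], [[5., 6.], [7., 8.]]])
_bsr = torch.sparse_bsr_tensor(_crow, _cols, _blocks)
# Row block 0 sits in column block 0, row block 1 in column block 1.
_dense_bsr = _bsr.to_dense()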
def sparse_compressed_tensor(compressed_indices: Union[Tensor, List], plain_indices: Union[Tensor, List], values: Union[Tensor, List], size: Optional[_size] = None, *, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[DeviceLikeType] = None, requires_grad: _bool = False, check_invariants: Optional[_bool] = None) -> Tensor:
r"""
sparse_compressed_tensor(compressed_indices, plain_indices, values, size=None, *, dtype=None, layout=None, device=None, requires_grad=False, check_invariants=None) -> Tensor
Constructs a :ref:`sparse tensor in Compressed Sparse format - CSR,
CSC, BSR, or BSC - <sparse-compressed-docs>` with specified values at
the given :attr:`compressed_indices` and :attr:`plain_indices`. Sparse
matrix multiplication operations in Compressed Sparse format are
typically faster than those for sparse tensors in COO format. Make sure
you have a look at :ref:`the note on the data type of the indices
<sparse-compressed-docs>`.
.. note::
If the ``device`` argument is not specified the device of the given
:attr:`values` and indices tensor(s) must match. If, however, the
argument is specified the input Tensors will be converted to the
given device and in turn determine the device of the constructed
sparse tensor.
Args:
compressed_indices (array_like): (B+1)-dimensional array of size
``(*batchsize, compressed_dim_size + 1)``. The last element of
each batch is the number of non-zero elements or blocks. This
tensor encodes the index in ``values`` and ``plain_indices``
at which each row or column of the compressed dimension
starts. The difference between consecutive entries is the
number of elements or blocks in a given row or column.
plain_indices (array_like): Plain dimension (column or row)
co-ordinates of each element or block in values. (B+1)-dimensional
tensor with the same length as values.
values (array_like): Initial values for the tensor. Can be a list,
tuple, NumPy ``ndarray``, scalar, and other types that
represent a (1+K)-dimensional (for CSR and CSC layouts) or
(1+2+K)-dimensional tensor (for BSR and BSC layouts) where
``K`` is the number of dense dimensions.
size (list, tuple, :class:`torch.Size`, optional): Size of the
sparse tensor: ``(*batchsize, nrows * blocksize[0], ncols *
blocksize[1], *densesize)`` where ``blocksize[0] ==
blocksize[1] == 1`` for CSR and CSC formats. If not provided,
the size will be inferred as the minimum size big enough to
hold all non-zero elements or blocks.
Keyword args:
dtype (:class:`torch.dtype`, optional): the desired data type of
returned tensor. Default: if None, infers data type from
:attr:`values`.
layout (:class:`torch.layout`, required): the desired layout of
returned tensor: :attr:`torch.sparse_csr`,
:attr:`torch.sparse_csc`, :attr:`torch.sparse_bsr`, or
:attr:`torch.sparse_bsc`.
device (:class:`torch.device`, optional): the desired device of
returned tensor. Default: if None, uses the current device
for the default tensor type (see
:func:`torch.set_default_device`). :attr:`device` will be
the CPU for CPU tensor types and the current CUDA device for
CUDA tensor types.
requires_grad (bool, optional): If autograd should record operations on the
returned tensor. Default: ``False``.
check_invariants (bool, optional): If sparse tensor invariants are checked.
Default: as returned by :func:`torch.sparse.check_sparse_tensor_invariants.is_enabled`,
initially False.
Example::
>>> compressed_indices = [0, 2, 4]
>>> plain_indices = [0, 1, 0, 1]
>>> values = [1, 2, 3, 4]
>>> torch.sparse_compressed_tensor(torch.tensor(compressed_indices, dtype=torch.int64),
... torch.tensor(plain_indices, dtype=torch.int64),
... torch.tensor(values), dtype=torch.double, layout=torch.sparse_csr)
tensor(crow_indices=tensor([0, 2, 4]),
col_indices=tensor([0, 1, 0, 1]),
values=tensor([1., 2., 3., 4.]), size=(2, 2), nnz=4,
dtype=torch.float64, layout=torch.sparse_csr)
"""
...
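# A minimal runnable sketch: the generic constructor covers all four
# compressed layouts; switching ``layout`` to ``torch.sparse_csc`` reuses the
# same indices as the CSR example above (values assumed for illustration).
import torch

_csc = torch.sparse_compressed_tensor(
    torch.tensor([0, 2, 4], dtype=torch.int64),     # compressed (column) pointers
    torch.tensor([0, 1, 0, 1], dtype=torch.int64),  # plain (row) indices
    torch.tensor([1., 2., 3., 4.]),
    layout=torch.sparse_csc,
)
assert _csc.layout == torch.sparse_csc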
def sparse_coo_tensor(indices: Tensor, values: Union[Tensor, List], size: Optional[_size] = None, *, dtype: Optional[_dtype] = None, device: Optional[DeviceLikeType] = None, requires_grad: _bool = False, check_invariants: Optional[_bool] = None, is_coalesced: Optional[_bool] = None) -> Tensor:
r"""
sparse_coo_tensor(indices, values, size=None, *, dtype=None, device=None, requires_grad=False, check_invariants=None, is_coalesced=None) -> Tensor
Constructs a :ref:`sparse tensor in COO(rdinate) format
<sparse-coo-docs>` with specified values at the given
:attr:`indices`.
.. note::
This function returns an :ref:`uncoalesced tensor
<sparse-uncoalesced-coo-docs>` when :attr:`is_coalesced` is
unspecified or ``None``.
.. note::
If the ``device`` argument is not specified the device of the given
:attr:`values` and indices tensor(s) must match. If, however, the
argument is specified the input Tensors will be converted to the
given device and in turn determine the device of the constructed
sparse tensor.
Args:
indices (array_like): Initial data for the tensor. Can be a list, tuple,
NumPy ``ndarray``, scalar, and other types. Will be cast to a :class:`torch.LongTensor`
internally. The indices are the coordinates of the non-zero values in the matrix, and thus
should be two-dimensional where the first dimension is the number of tensor dimensions and
the second dimension is the number of non-zero values.
values (array_like): Initial values for the tensor. Can be a list, tuple,
NumPy ``ndarray``, scalar, and other types.
size (list, tuple, or :class:`torch.Size`, optional): Size of the sparse tensor. If not
provided the size will be inferred as the minimum size big enough to hold all non-zero
elements.
Keyword args:
dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.
Default: if None, infers data type from :attr:`values`.
device (:class:`torch.device`, optional): the desired device of returned tensor.
Default: if None, uses the current device for the default tensor type
(see :func:`torch.set_default_device`). :attr:`device` will be the CPU
for CPU tensor types and the current CUDA device for CUDA tensor types.
requires_grad (bool, optional): If autograd should record operations on the
returned tensor. Default: ``False``.
check_invariants (bool, optional): If sparse tensor invariants are checked.
Default: as returned by :func:`torch.sparse.check_sparse_tensor_invariants.is_enabled`,
initially False.
is_coalesced (bool, optional): When ``True``, the caller is
responsible for providing tensor indices that correspond to a
coalesced tensor. If the :attr:`check_invariants` flag is
False, no error will be raised if the prerequisites are not
met and this will lead to silently incorrect results. To force
coalescence, please use :meth:`coalesce` on the resulting
Tensor.
Default: ``None``; except for trivial cases (e.g. ``nnz < 2``) the
resulting Tensor has ``is_coalesced`` set to ``False``.
Example::
>>> i = torch.tensor([[0, 1, 1],
... [2, 0, 2]])
>>> v = torch.tensor([3, 4, 5], dtype=torch.float32)
>>> torch.sparse_coo_tensor(i, v, [2, 4])
tensor(indices=tensor([[0, 1, 1],
[2, 0, 2]]),
values=tensor([3., 4., 5.]),
size=(2, 4), nnz=3, layout=torch.sparse_coo)
>>> torch.sparse_coo_tensor(i, v) # Shape inference
tensor(indices=tensor([[0, 1, 1],
[2, 0, 2]]),
values=tensor([3., 4., 5.]),
size=(2, 3), nnz=3, layout=torch.sparse_coo)
>>> torch.sparse_coo_tensor(i, v, [2, 4],
... dtype=torch.float64,
... device=torch.device('cuda:0'))
tensor(indices=tensor([[0, 1, 1],
[2, 0, 2]]),
values=tensor([3., 4., 5.]),
device='cuda:0', size=(2, 4), nnz=3, dtype=torch.float64,
layout=torch.sparse_coo)
# Create an empty sparse tensor with the following invariants:
# 1. sparse_dim + dense_dim = len(SparseTensor.shape)
# 2. SparseTensor._indices().shape = (sparse_dim, nnz)
# 3. SparseTensor._values().shape = (nnz, SparseTensor.shape[sparse_dim:])
#
# For instance, to create an empty sparse tensor with nnz = 0, dense_dim = 0 and
# sparse_dim = 1 (hence indices is a 2D tensor of shape = (1, 0))
>>> torch.sparse_coo_tensor(torch.empty([1, 0]), [], [1])
tensor(indices=tensor([], size=(1, 0)),
values=tensor([], size=(0,)),
size=(1,), nnz=0, layout=torch.sparse_coo)
# and to create an empty sparse tensor with nnz = 0, dense_dim = 1 and
# sparse_dim = 1
>>> torch.sparse_coo_tensor(torch.empty([1, 0]), torch.empty([0, 2]), [1, 2])
tensor(indices=tensor([], size=(1, 0)),
values=tensor([], size=(0, 2)),
size=(1, 2), nnz=0, layout=torch.sparse_coo)
.. _torch.sparse: https://pytorch.org/docs/stable/sparse.html
"""
...
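# A minimal runnable sketch of the uncoalesced/coalesced distinction noted
# above (indices and values are assumptions): duplicate coordinates are kept
# until ``coalesce()`` sums them.
import torch

_i = torch.tensor([[0, 0, 1], [1, 1, 0]])  # coordinate (0, 1) appears twice
_v = torch.tensor([1., 2., 3.])
_s = torch.sparse_coo_tensor(_i, _v, (2, 2))
assert not _s.is_coalesced()
_c = _s.coalesce()  # merges duplicates: the value at (0, 1) becomes 3.0
assert _c._nnz() == 2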
def sparse_csc_tensor(ccol_indices: Union[Tensor, List], row_indices: Union[Tensor, List], values: Union[Tensor, List], size: Optional[_size] = None, *, dtype: Optional[_dtype] = None, device: Optional[DeviceLikeType] = None, requires_grad: _bool = False, check_invariants: Optional[_bool] = None) -> Tensor:
r"""
sparse_csc_tensor(ccol_indices, row_indices, values, size=None, *, dtype=None, device=None, requires_grad=False, check_invariants=None) -> Tensor
Constructs a :ref:`sparse tensor in CSC (Compressed Sparse Column)
<sparse-csc-docs>` with specified values at the given
:attr:`ccol_indices` and :attr:`row_indices`. Sparse matrix
multiplication operations in CSC format are typically faster than those
for sparse tensors in COO format. Make sure you have a look at :ref:`the
note on the data type of the indices <sparse-csc-docs>`.
.. note::
If the ``device`` argument is not specified the device of the given
:attr:`values` and indices tensor(s) must match. If, however, the
argument is specified the input Tensors will be converted to the
given device and in turn determine the device of the constructed
sparse tensor.
Args:
ccol_indices (array_like): (B+1)-dimensional array of size
``(*batchsize, ncols + 1)``. The last element of each batch
is the number of non-zeros. This tensor encodes the index in
``values`` and ``row_indices`` at which each column
starts. The difference between consecutive entries is the
number of elements in the corresponding column.
row_indices (array_like): Row co-ordinates of each element in
values. (B+1)-dimensional tensor with the same length as
values.
values (array_like): Initial values for the tensor. Can be a list,
tuple, NumPy ``ndarray``, scalar, and other types that
represent a (1+K)-dimensional tensor where ``K`` is the number
of dense dimensions.
size (list, tuple, :class:`torch.Size`, optional): Size of the
sparse tensor: ``(*batchsize, nrows, ncols, *densesize)``. If
not provided, the size will be inferred as the minimum size
big enough to hold all non-zero elements.
Keyword args:
dtype (:class:`torch.dtype`, optional): the desired data type of
returned tensor. Default: if None, infers data type from
:attr:`values`.
device (:class:`torch.device`, optional): the desired device of
returned tensor. Default: if None, uses the current device
for the default tensor type (see
:func:`torch.set_default_device`). :attr:`device` will be
the CPU for CPU tensor types and the current CUDA device for
CUDA tensor types.
requires_grad (bool, optional): If autograd should record operations on the
returned tensor. Default: ``False``.
check_invariants (bool, optional): If sparse tensor invariants are checked.
Default: as returned by :func:`torch.sparse.check_sparse_tensor_invariants.is_enabled`,
initially False.
Example::
>>> ccol_indices = [0, 2, 4]
>>> row_indices = [0, 1, 0, 1]
>>> values = [1, 2, 3, 4]
>>> torch.sparse_csc_tensor(torch.tensor(ccol_indices, dtype=torch.int64),
... torch.tensor(row_indices, dtype=torch.int64),
... torch.tensor(values), dtype=torch.double)
tensor(ccol_indices=tensor([0, 2, 4]),
row_indices=tensor([0, 1, 0, 1]),
values=tensor([1., 2., 3., 4.]), size=(2, 2), nnz=4,
dtype=torch.float64, layout=torch.sparse_csc)
"""
...
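# A minimal runnable sketch (matrix assumed): round-trip a small dense matrix
# through CSC via ``to_sparse_csc()`` and inspect the column pointers
# described above.
import torch

_d = torch.tensor([[1., 0.],
                   [2., 3.]])
_csc2 = _d.to_sparse_csc()
assert torch.equal(_csc2.to_dense(), _d)
assert _csc2.ccol_indices().tolist() == [0, 2, 3]  # 2 entries in col 0, 1 in col 1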
def sparse_csr_tensor(crow_indices: Union[Tensor, List], col_indices: Union[Tensor, List], values: Union[Tensor, List], size: Optional[_size] = None, *, dtype: Optional[_dtype] = None, device: Optional[DeviceLikeType] = None, requires_grad: _bool = False, check_invariants: Optional[_bool] = None) -> Tensor:
r"""
sparse_csr_tensor(crow_indices, col_indices, values, size=None, *, dtype=None, device=None, requires_grad=False, check_invariants=None) -> Tensor
Constructs a :ref:`sparse tensor in CSR (Compressed Sparse Row) <sparse-csr-docs>` with specified
values at the given :attr:`crow_indices` and :attr:`col_indices`. Sparse matrix multiplication operations
in CSR format are typically faster than those for sparse tensors in COO format. Make sure you have a look
at :ref:`the note on the data type of the indices <sparse-csr-docs>`.
.. note::
If the ``device`` argument is not specified the device of the given
:attr:`values` and indices tensor(s) must match. If, however, the
argument is specified the input Tensors will be converted to the
given device and in turn determine the device of the constructed
sparse tensor.
Args:
crow_indices (array_like): (B+1)-dimensional array of size
``(*batchsize, nrows + 1)``. The last element of each batch
is the number of non-zeros. This tensor encodes the index in
``values`` and ``col_indices`` at which each row
starts. The difference between consecutive entries is the
number of elements in the corresponding row.
col_indices (array_like): Column co-ordinates of each element in
values. (B+1)-dimensional tensor with the same length
as values.
values (array_like): Initial values for the tensor. Can be a list,
tuple, NumPy ``ndarray``, scalar, and other types that
represent a (1+K)-dimensional tensor where ``K`` is the number
of dense dimensions.
size (list, tuple, :class:`torch.Size`, optional): Size of the
sparse tensor: ``(*batchsize, nrows, ncols, *densesize)``. If
not provided, the size will be inferred as the minimum size
big enough to hold all non-zero elements.
Keyword args:
dtype (:class:`torch.dtype`, optional): the desired data type of
returned tensor. Default: if None, infers data type from
:attr:`values`.
device (:class:`torch.device`, optional): the desired device of
returned tensor. Default: if None, uses the current device
for the default tensor type (see
:func:`torch.set_default_device`). :attr:`device` will be
the CPU for CPU tensor types and the current CUDA device for
CUDA tensor types.
requires_grad (bool, optional): If autograd should record operations on the
returned tensor. Default: ``False``.
check_invariants (bool, optional): If sparse tensor invariants are checked.
Default: as returned by :func:`torch.sparse.check_sparse_tensor_invariants.is_enabled`,
initially False.
Example::
>>> crow_indices = [0, 2, 4]
>>> col_indices = [0, 1, 0, 1]
>>> values = [1, 2, 3, 4]
>>> torch.sparse_csr_tensor(torch.tensor(crow_indices, dtype=torch.int64),
... torch.tensor(col_indices, dtype=torch.int64),
... torch.tensor(values), dtype=torch.double)
tensor(crow_indices=tensor([0, 2, 4]),
col_indices=tensor([0, 1, 0, 1]),
values=tensor([1., 2., 3., 4.]), size=(2, 2), nnz=4,
dtype=torch.float64, layout=torch.sparse_csr)
"""
...
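# A minimal runnable sketch (values assumed): the CSR tensor from the example
# above used in a sparse @ dense product, the case where CSR is typically
# fastest.
import torch

_csr = torch.sparse_csr_tensor(
    torch.tensor([0, 2, 4], dtype=torch.int64),
    torch.tensor([0, 1, 0, 1], dtype=torch.int64),
    torch.tensor([1., 2., 3., 4.]),
)
_y = _csr @ torch.ones(2, 1)  # dense form of _csr is [[1., 2.], [3., 4.]]
assert torch.equal(_y, torch.tensor([[3.], [7.]]))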
def split_copy(input: Tensor, split_size: Union[_int, SymInt], dim: _int = 0, *, out: Union[Tuple[Tensor, ...], List[Tensor], None] = None) -> None:
r"""
Performs the same operation as :func:`torch.split`, but all output tensors
are freshly created instead of aliasing the input.
"""
...
def split_with_sizes(input: Tensor, split_sizes: Sequence[Union[_int, SymInt]], dim: _int = 0) -> Tuple[Tensor, ...]: ...
def split_with_sizes_copy(input: Tensor, split_sizes: Sequence[Union[_int, SymInt]], dim: _int = 0, *, out: Union[Tuple[Tensor, ...], List[Tensor], None] = None) -> None:
r"""
Performs the same operation as :func:`torch.split_with_sizes`, but all output tensors
are freshly created instead of aliasing the input.
"""
...
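# A minimal runnable sketch of the aliasing difference (sizes assumed):
# chunks from ``torch.split`` are views of the input, while the ``*_copy``
# variants return freshly allocated tensors.
import torch

_x = torch.arange(6)
_view_chunk = torch.split(_x, 3)[0]       # shares storage with _x
_copy_chunk = torch.split_copy(_x, 3)[0]  # freshly created
_copy_chunk.fill_(-1)
assert _x[0].item() == 0                  # writing to the copy leaves _x intact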
def spmm(input: Tensor, mat2: Tensor) -> Tensor: ...
def sqrt(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
r"""
sqrt(input, *, out=None) -> Tensor
Returns a new tensor with the square-root of the elements of :attr:`input`.
.. math::
\text{out}_{i} = \sqrt{\text{input}_{i}}
Args:
input (Tensor): the input tensor.
Keyword args:
out (Tensor, optional): the output tensor.
Example::
>>> a = torch.randn(4)
>>> a
tensor([-2.0755, 1.0226, 0.0831, 0.4806])
>>> torch.sqrt(a)
tensor([ nan, 1.0112, 0.2883, 0.6933])
"""
...
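# A minimal runnable sketch (inputs assumed): sqrt of a negative real entry
# is nan, as in the example above; casting to a complex dtype first yields
# the principal complex root instead.
import torch

_a = torch.tensor([-4.0, 9.0])
assert torch.isnan(torch.sqrt(_a)[0])
_z = torch.sqrt(_a.to(torch.complex64))  # _z[0] is 2j, _z[1] is 3+0j (up to rounding)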
def sqrt_(input: Tensor) -> Tensor: ...
def square(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
r"""
square(input, *, out=None) -> Tensor
Returns a new tensor with the square of the elements of :attr:`input`.
Args:
input (Tensor): the input tensor.
Keyword args:
out (Tensor, optional): the output tensor.
Example::
>>> a = torch.randn(4)
>>> a
tensor([-2.0755, 1.0226, 0.0831, 0.4806])
>>> torch.square(a)
tensor([ 4.3077, 1.0457, 0.0069, 0.2310])
"""
...
def square_(input: Tensor) -> Tensor: ...
@overload
def squeeze(input: Tensor) -> Tensor:
r"""
squeeze(input, dim=None) -> Tensor
Returns a tensor with all specified dimensions of :attr:`input` of size `1` removed.
For example, if `input` is of shape:
:math:`(A \times 1 \times B \times C \times 1 \times D)` then the `input.squeeze()`
will be of shape: :math:`(A \times B \times C \times D)`.
When :attr:`dim` is given, a squeeze operation is done only in the given
dimension(s). If `input` is of shape: :math:`(A \times 1 \times B)`,
``squeeze(input, 0)`` leaves the tensor unchanged, but ``squeeze(input, 1)``
will squeeze the tensor to the shape :math:`(A \times B)`.
.. note:: The returned tensor shares the storage with the input tensor,
so changing the contents of one will change the contents of the other.
.. warning:: If the tensor has a batch dimension of size 1, then `squeeze(input)`
will also remove the batch dimension, which can lead to unexpected
errors. Consider specifying only the dims you wish to be squeezed.
Args:
input (Tensor): the input tensor.
dim (int or tuple of ints, optional): if given, the input will be squeezed
only in the specified dimensions.
.. versionchanged:: 2.0
:attr:`dim` now accepts tuples of dimensions.
Example::
>>> x = torch.zeros(2, 1, 2, 1, 2)
>>> x.size()
torch.Size([2, 1, 2, 1, 2])
>>> y = torch.squeeze(x)
>>> y.size()
torch.Size([2, 2, 2])
>>> y = torch.squeeze(x, 0)
>>> y.size()
torch.Size([2, 1, 2, 1, 2])
>>> y = torch.squeeze(x, 1)
>>> y.size()
torch.Size([2, 2, 1, 2])
>>> y = torch.squeeze(x, (1, 2, 3))
>>> y.size()
torch.Size([2, 2, 2])
"""
...
@overload
def squeeze(input: Tensor, dim: _int) -> Tensor:
r"""
squeeze(input, dim=None) -> Tensor
Returns a tensor with all specified dimensions of :attr:`input` of size `1` removed.
For example, if `input` is of shape:
:math:`(A \times 1 \times B \times C \times 1 \times D)` then the `input.squeeze()`
will be of shape: :math:`(A \times B \times C \times D)`.
When :attr:`dim` is given, a squeeze operation is done only in the given
dimension(s). If `input` is of shape: :math:`(A \times 1 \times B)`,
``squeeze(input, 0)`` leaves the tensor unchanged, but ``squeeze(input, 1)``
will squeeze the tensor to the shape :math:`(A \times B)`.
.. note:: The returned tensor shares the storage with the input tensor,
so changing the contents of one will change the contents of the other.
.. warning:: If the tensor has a batch dimension of size 1, then `squeeze(input)`
will also remove the batch dimension, which can lead to unexpected
errors. Consider specifying only the dims you wish to be squeezed.
Args:
input (Tensor): the input tensor.
dim (int or tuple of ints, optional): if given, the input will be squeezed
only in the specified dimensions.
.. versionchanged:: 2.0
:attr:`dim` now accepts tuples of dimensions.
Example::
>>> x = torch.zeros(2, 1, 2, 1, 2)
>>> x.size()
torch.Size([2, 1, 2, 1, 2])
>>> y = torch.squeeze(x)
>>> y.size()
torch.Size([2, 2, 2])
>>> y = torch.squeeze(x, 0)
>>> y.size()
torch.Size([2, 1, 2, 1, 2])
>>> y = torch.squeeze(x, 1)
>>> y.size()
torch.Size([2, 2, 1, 2])
>>> y = torch.squeeze(x, (1, 2, 3))
>>> y.size()
torch.Size([2, 2, 2])
"""
...
@overload
def squeeze(input: Tensor, dim: _size) -> Tensor:
r"""
squeeze(input, dim=None) -> Tensor
Returns a tensor with all specified dimensions of :attr:`input` of size `1` removed.
For example, if `input` is of shape:
:math:`(A \times 1 \times B \times C \times 1 \times D)` then the `input.squeeze()`
will be of shape: :math:`(A \times B \times C \times D)`.
When :attr:`dim` is given, a squeeze operation is done only in the given
dimension(s). If `input` is of shape: :math:`(A \times 1 \times B)`,
``squeeze(input, 0)`` leaves the tensor unchanged, but ``squeeze(input, 1)``
will squeeze the tensor to the shape :math:`(A \times B)`.
.. note:: The returned tensor shares the storage with the input tensor,
so changing the contents of one will change the contents of the other.
.. warning:: If the tensor has a batch dimension of size 1, then `squeeze(input)`
will also remove the batch dimension, which can lead to unexpected
errors. Consider specifying only the dims you wish to be squeezed.
Args:
input (Tensor): the input tensor.
dim (int or tuple of ints, optional): if given, the input will be squeezed
only in the specified dimensions.
.. versionchanged:: 2.0
:attr:`dim` now accepts tuples of dimensions.
Example::
>>> x = torch.zeros(2, 1, 2, 1, 2)
>>> x.size()
torch.Size([2, 1, 2, 1, 2])
>>> y = torch.squeeze(x)
>>> y.size()
torch.Size([2, 2, 2])
>>> y = torch.squeeze(x, 0)
>>> y.size()
torch.Size([2, 1, 2, 1, 2])
>>> y = torch.squeeze(x, 1)
>>> y.size()
torch.Size([2, 2, 1, 2])
>>> y = torch.squeeze(x, (1, 2, 3))
>>> y.size()
torch.Size([2, 2, 2])
"""
...
@overload
def squeeze(input: Tensor, dim: Union[str, ellipsis, None]) -> Tensor:
r"""
squeeze(input, dim=None) -> Tensor
Returns a tensor with all specified dimensions of :attr:`input` of size `1` removed.
For example, if `input` is of shape:
:math:`(A \times 1 \times B \times C \times 1 \times D)` then the `input.squeeze()`
will be of shape: :math:`(A \times B \times C \times D)`.
When :attr:`dim` is given, a squeeze operation is done only in the given
dimension(s). If `input` is of shape: :math:`(A \times 1 \times B)`,
``squeeze(input, 0)`` leaves the tensor unchanged, but ``squeeze(input, 1)``
will squeeze the tensor to the shape :math:`(A \times B)`.
.. note:: The returned tensor shares the storage with the input tensor,
so changing the contents of one will change the contents of the other.
.. warning:: If the tensor has a batch dimension of size 1, then `squeeze(input)`
will also remove the batch dimension, which can lead to unexpected
errors. Consider specifying only the dims you wish to be squeezed.
Args:
input (Tensor): the input tensor.
dim (int or tuple of ints, optional): if given, the input will be squeezed
only in the specified dimensions.
.. versionchanged:: 2.0
:attr:`dim` now accepts tuples of dimensions.
Example::
>>> x = torch.zeros(2, 1, 2, 1, 2)
>>> x.size()
torch.Size([2, 1, 2, 1, 2])
>>> y = torch.squeeze(x)
>>> y.size()
torch.Size([2, 2, 2])
>>> y = torch.squeeze(x, 0)
>>> y.size()
torch.Size([2, 1, 2, 1, 2])
>>> y = torch.squeeze(x, 1)
>>> y.size()
torch.Size([2, 2, 1, 2])
>>> y = torch.squeeze(x, (1, 2, 3))
>>> y.size()
torch.Size([2, 2, 2])
"""
...
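# A minimal runnable sketch of the batch-dimension warning above (shapes
# assumed): squeezing without ``dim`` drops a size-1 batch dimension too.
import torch

_batch = torch.zeros(1, 3, 1)                        # batch of one (3, 1) sample
assert torch.squeeze(_batch).shape == (3,)           # batch dim silently removed
assert torch.squeeze(_batch, dim=2).shape == (1, 3)  # only the intended dim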
@overload
def squeeze_copy(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
r"""
Performs the same operation as :func:`torch.squeeze`, but all output tensors
are freshly created instead of aliasing the input.
"""
...
@overload
def squeeze_copy(input: Tensor, dim: _int, *, out: Optional[Tensor] = None) -> Tensor:
r"""
Performs the same operation as :func:`torch.squeeze`, but all output tensors
are freshly created instead of aliasing the input.
"""
...
@overload
def squeeze_copy(input: Tensor, dim: _size, *, out: Optional[Tensor] = None) -> Tensor:
r"""
Performs the same operation as :func:`torch.squeeze`, but all output tensors
are freshly created instead of aliasing the input.
"""
...
@overload
def sspaddmm(beta: Union[Number, _complex], self: Tensor, alpha: Union[Number, _complex], mat1: Tensor, mat2: Tensor) -> Tensor:
r"""
sspaddmm(input, mat1, mat2, *, beta=1, alpha=1, out=None) -> Tensor
Matrix multiplies a sparse tensor :attr:`mat1` with a dense tensor
:attr:`mat2`, then adds the sparse tensor :attr:`input` to the result.
Note: This function is equivalent to :func:`torch.addmm`, except
:attr:`input` and :attr:`mat1` are sparse.
Args:
input (Tensor): a sparse matrix to be added
mat1 (Tensor): a sparse matrix to be matrix multiplied
mat2 (Tensor): a dense matrix to be matrix multiplied
Keyword args:
beta (Number, optional): multiplier for :attr:`input` (:math:`\beta`)
alpha (Number, optional): multiplier for :math:`mat1 @ mat2` (:math:`\alpha`)
out (Tensor, optional): the output tensor.
"""
...
@overload
def sspaddmm(input: Tensor, mat1: Tensor, mat2: Tensor, *, beta: Union[Number, _complex] = 1, alpha: Union[Number, _complex] = 1, out: Optional[Tensor] = None) -> Tensor:
r"""
sspaddmm(input, mat1, mat2, *, beta=1, alpha=1, out=None) -> Tensor
Matrix multiplies a sparse tensor :attr:`mat1` with a dense tensor
:attr:`mat2`, then adds the sparse tensor :attr:`input` to the result.
Note: This function is equivalent to :func:`torch.addmm`, except
:attr:`input` and :attr:`mat1` are sparse.
Args:
input (Tensor): a sparse matrix to be added
mat1 (Tensor): a sparse matrix to be matrix multiplied
mat2 (Tensor): a dense matrix to be matrix multiplied
Keyword args:
beta (Number, optional): multiplier for :attr:`input` (:math:`\beta`)
alpha (Number, optional): multiplier for :math:`mat1 @ mat2` (:math:`\alpha`)
out (Tensor, optional): the output tensor.
"""
...
@overload
def sspaddmm(beta: Union[Number, _complex], self: Tensor, mat1: Tensor, mat2: Tensor) -> Tensor:
r"""
sspaddmm(input, mat1, mat2, *, beta=1, alpha=1, out=None) -> Tensor
Matrix multiplies a sparse tensor :attr:`mat1` with a dense tensor
:attr:`mat2`, then adds the sparse tensor :attr:`input` to the result.
Note: This function is equivalent to :func:`torch.addmm`, except
:attr:`input` and :attr:`mat1` are sparse.
Args:
input (Tensor): a sparse matrix to be added
mat1 (Tensor): a sparse matrix to be matrix multiplied
mat2 (Tensor): a dense matrix to be matrix multiplied
Keyword args:
beta (Number, optional): multiplier for :attr:`input` (:math:`\beta`)
alpha (Number, optional): multiplier for :math:`mat1 @ mat2` (:math:`\alpha`)
out (Tensor, optional): the output tensor.
"""
...
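# A minimal runnable sketch (matrices assumed): sspaddmm with sparse
# ``input``/``mat1`` and dense ``mat2`` matches the dense
# ``input + mat1 @ mat2`` once the sparse result is densified.
import torch

_idx = torch.tensor([[0, 1], [0, 1]])
_inp = torch.sparse_coo_tensor(_idx, torch.tensor([1., 1.]), (2, 2)).coalesce()
_m1 = torch.sparse_coo_tensor(_idx, torch.tensor([2., 3.]), (2, 2)).coalesce()
_m2 = torch.ones(2, 2)
_res = torch.sspaddmm(_inp, _m1, _m2)  # result is sparse
assert torch.equal(_res.to_dense(), _inp.to_dense() + _m1.to_dense() @ _m2)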
def stack(tensors: Union[Tuple[Tensor, ...], List[Tensor]], dim: _int = 0, *, out: Optional[Tensor] = None) -> Tensor:
r"""
stack(tensors, dim=0, *, out=None) -> Tensor
Concatenates a sequence of tensors along a new dimension.
All tensors need to be of the same size.
.. seealso::
:func:`torch.cat` concatenates the given sequence along an existing dimension.
Arguments:
tensors (sequence of Tensors): sequence of tensors to concatenate
dim (int, optional): dimension to insert. Has to be between 0 and the number
of dimensions of concatenated tensors (inclusive). Default: 0
Keyword args:
out (Tensor, optional): the output tensor.
Example::
>>> x = torch.randn(2, 3)
>>> x
tensor([[ 0.3367, 0.1288, 0.2345],
[ 0.2303, -1.1229, -0.1863]])
>>> torch.stack((x, x)) # same as torch.stack((x, x), dim=0)
tensor([[[ 0.3367, 0.1288, 0.2345],
[ 0.2303, -1.1229, -0.1863]],
[[ 0.3367, 0.1288, 0.2345],
[ 0.2303, -1.1229, -0.1863]]])
>>> torch.stack((x, x)).size()
torch.Size([2, 2, 3])
>>> torch.stack((x, x), dim=1)
tensor([[[ 0.3367, 0.1288, 0.2345],
[ 0.3367, 0.1288, 0.2345]],
[[ 0.2303, -1.1229, -0.1863],
[ 0.2303, -1.1229, -0.1863]]])
>>> torch.stack((x, x), dim=2)
tensor([[[ 0.3367, 0.3367],
[ 0.1288, 0.1288],
[ 0.2345, 0.2345]],
[[ 0.2303, 0.2303],
[-1.1229, -1.1229],
[-0.1863, -0.1863]]])
>>> torch.stack((x, x), dim=-1)
tensor([[[ 0.3367, 0.3367],
[ 0.1288, 0.1288],
[ 0.2345, 0.2345]],
[[ 0.2303, 0.2303],
[-1.1229, -1.1229],
[-0.1863, -0.1863]]])
"""
...
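# A minimal runnable sketch contrasting stack with cat (shapes assumed):
# stack inserts a new dimension, cat joins along an existing one.
import torch

_a, _b = torch.zeros(2, 3), torch.ones(2, 3)
assert torch.stack((_a, _b)).shape == (2, 2, 3)  # new leading dimension
assert torch.cat((_a, _b)).shape == (4, 3)       # existing dim 0 grows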
@overload
def std(input: Tensor, dim: Optional[Union[_int, _size]], unbiased: _bool = True, keepdim: _bool = False, *, out: Optional[Tensor] = None) -> Tensor:
r"""
std(input, dim=None, *, correction=1, keepdim=False, out=None) -> Tensor
Calculates the standard deviation over the dimensions specified by :attr:`dim`.
:attr:`dim` can be a single dimension, list of dimensions, or ``None`` to
reduce over all dimensions.
The standard deviation (:math:`\sigma`) is calculated as
.. math:: \sigma = \sqrt{\frac{1}{\max(0,~N - \delta N)}\sum_{i=0}^{N-1}(x_i-\bar{x})^2}
where :math:`x` is the sample set of elements, :math:`\bar{x}` is the
sample mean, :math:`N` is the number of samples and :math:`\delta N` is
the :attr:`correction`.
If :attr:`keepdim` is ``True``, the output tensor is of the same size
as :attr:`input` except in the dimension(s) :attr:`dim` where it is of size 1.
Otherwise, :attr:`dim` is squeezed (see :func:`torch.squeeze`), resulting in the
output tensor having 1 (or ``len(dim)``) fewer dimension(s).
Args:
input (Tensor): the input tensor.
dim (int or tuple of ints, optional): the dimension or dimensions to reduce.
Keyword args:
correction (int): difference between the sample size and sample degrees of freedom.
Defaults to `Bessel's correction`_, ``correction=1``.
.. versionchanged:: 2.0
Previously this argument was called ``unbiased`` and was a boolean
with ``True`` corresponding to ``correction=1`` and ``False`` being
``correction=0``.
keepdim (bool): whether the output tensor has :attr:`dim` retained or not.
out (Tensor, optional): the output tensor.
Example::
>>> a = torch.tensor(
... [[ 0.2035, 1.2959, 1.8101, -0.4644],
... [ 1.5027, -0.3270, 0.5905, 0.6538],
... [-1.5745, 1.3330, -0.5596, -0.6548],
... [ 0.1264, -0.5080, 1.6420, 0.1992]])
>>> torch.std(a, dim=1, keepdim=True)
tensor([[1.0311],
[0.7477],
[1.2204],
[0.9087]])
.. _Bessel's correction: https://en.wikipedia.org/wiki/Bessel%27s_correction
"""
...
@overload
def std(input: Tensor, dim: Optional[Union[_int, _size]] = None, *, correction: Optional[Union[Number, _complex]] = None, keepdim: _bool = False, out: Optional[Tensor] = None) -> Tensor:
r"""
std(input, dim=None, *, correction=1, keepdim=False, out=None) -> Tensor
Calculates the standard deviation over the dimensions specified by :attr:`dim`.
:attr:`dim` can be a single dimension, list of dimensions, or ``None`` to
reduce over all dimensions.
The standard deviation (:math:`\sigma`) is calculated as
.. math:: \sigma = \sqrt{\frac{1}{\max(0,~N - \delta N)}\sum_{i=0}^{N-1}(x_i-\bar{x})^2}
where :math:`x` is the sample set of elements, :math:`\bar{x}` is the
sample mean, :math:`N` is the number of samples and :math:`\delta N` is
the :attr:`correction`.
If :attr:`keepdim` is ``True``, the output tensor is of the same size
as :attr:`input` except in the dimension(s) :attr:`dim` where it is of size 1.
Otherwise, :attr:`dim` is squeezed (see :func:`torch.squeeze`), resulting in the
output tensor having 1 (or ``len(dim)``) fewer dimension(s).
Args:
input (Tensor): the input tensor.
dim (int or tuple of ints, optional): the dimension or dimensions to reduce.
Keyword args:
correction (int): difference between the sample size and sample degrees of freedom.
Defaults to `Bessel's correction`_, ``correction=1``.
.. versionchanged:: 2.0
Previously this argument was called ``unbiased`` and was a boolean
with ``True`` corresponding to ``correction=1`` and ``False`` being
``correction=0``.
keepdim (bool): whether the output tensor has :attr:`dim` retained or not.
out (Tensor, optional): the output tensor.
Example::
>>> a = torch.tensor(
... [[ 0.2035, 1.2959, 1.8101, -0.4644],
... [ 1.5027, -0.3270, 0.5905, 0.6538],
... [-1.5745, 1.3330, -0.5596, -0.6548],
... [ 0.1264, -0.5080, 1.6420, 0.1992]])
>>> torch.std(a, dim=1, keepdim=True)
tensor([[1.0311],
[0.7477],
[1.2204],
[0.9087]])
.. _Bessel's correction: https://en.wikipedia.org/wiki/Bessel%27s_correction
"""
...
@overload
def std(input: Tensor, unbiased: _bool = True) -> Tensor:
r"""
std(input, dim=None, *, correction=1, keepdim=False, out=None) -> Tensor
Calculates the standard deviation over the dimensions specified by :attr:`dim`.
:attr:`dim` can be a single dimension, list of dimensions, or ``None`` to
reduce over all dimensions.
The standard deviation (:math:`\sigma`) is calculated as
.. math:: \sigma = \sqrt{\frac{1}{\max(0,~N - \delta N)}\sum_{i=0}^{N-1}(x_i-\bar{x})^2}
where :math:`x` is the sample set of elements, :math:`\bar{x}` is the
sample mean, :math:`N` is the number of samples and :math:`\delta N` is
the :attr:`correction`.
If :attr:`keepdim` is ``True``, the output tensor is of the same size
as :attr:`input` except in the dimension(s) :attr:`dim` where it is of size 1.
Otherwise, :attr:`dim` is squeezed (see :func:`torch.squeeze`), resulting in the
output tensor having 1 (or ``len(dim)``) fewer dimension(s).
Args:
input (Tensor): the input tensor.
dim (int or tuple of ints, optional): the dimension or dimensions to reduce.
Keyword args:
correction (int): difference between the sample size and sample degrees of freedom.
Defaults to `Bessel's correction`_, ``correction=1``.
.. versionchanged:: 2.0
Previously this argument was called ``unbiased`` and was a boolean
with ``True`` corresponding to ``correction=1`` and ``False`` being
``correction=0``.
keepdim (bool): whether the output tensor has :attr:`dim` retained or not.
out (Tensor, optional): the output tensor.
Example::
>>> a = torch.tensor(
... [[ 0.2035, 1.2959, 1.8101, -0.4644],
... [ 1.5027, -0.3270, 0.5905, 0.6538],
... [-1.5745, 1.3330, -0.5596, -0.6548],
... [ 0.1264, -0.5080, 1.6420, 0.1992]])
>>> torch.std(a, dim=1, keepdim=True)
tensor([[1.0311],
[0.7477],
[1.2204],
[0.9087]])
.. _Bessel's correction: https://en.wikipedia.org/wiki/Bessel%27s_correction
"""
...
@overload
def std(input: Tensor, dim: Sequence[Union[str, ellipsis, None]], *, correction: Optional[Union[Number, _complex]] = None, keepdim: _bool = False, out: Optional[Tensor] = None) -> Tensor:
r"""
std(input, dim=None, *, correction=1, keepdim=False, out=None) -> Tensor
Calculates the standard deviation over the dimensions specified by :attr:`dim`.
:attr:`dim` can be a single dimension, list of dimensions, or ``None`` to
reduce over all dimensions.
The standard deviation (:math:`\sigma`) is calculated as
.. math:: \sigma = \sqrt{\frac{1}{\max(0,~N - \delta N)}\sum_{i=0}^{N-1}(x_i-\bar{x})^2}
where :math:`x` is the sample set of elements, :math:`\bar{x}` is the
sample mean, :math:`N` is the number of samples and :math:`\delta N` is
the :attr:`correction`.
If :attr:`keepdim` is ``True``, the output tensor is of the same size
as :attr:`input` except in the dimension(s) :attr:`dim` where it is of size 1.
Otherwise, :attr:`dim` is squeezed (see :func:`torch.squeeze`), resulting in the
output tensor having 1 (or ``len(dim)``) fewer dimension(s).
Args:
input (Tensor): the input tensor.
dim (int or tuple of ints, optional): the dimension or dimensions to reduce.
Keyword args:
correction (int): difference between the sample size and sample degrees of freedom.
Defaults to `Bessel's correction`_, ``correction=1``.
.. versionchanged:: 2.0
Previously this argument was called ``unbiased`` and was a boolean
with ``True`` corresponding to ``correction=1`` and ``False`` being
``correction=0``.
keepdim (bool): whether the output tensor has :attr:`dim` retained or not.
out (Tensor, optional): the output tensor.
Example::
>>> a = torch.tensor(
... [[ 0.2035, 1.2959, 1.8101, -0.4644],
... [ 1.5027, -0.3270, 0.5905, 0.6538],
... [-1.5745, 1.3330, -0.5596, -0.6548],
... [ 0.1264, -0.5080, 1.6420, 0.1992]])
>>> torch.std(a, dim=1, keepdim=True)
tensor([[1.0311],
[0.7477],
[1.2204],
[0.9087]])
.. _Bessel's correction: https://en.wikipedia.org/wiki/Bessel%27s_correction
"""
...
@overload
def std(input: Tensor, dim: Sequence[Union[str, ellipsis, None]], unbiased: _bool = True, keepdim: _bool = False, *, out: Optional[Tensor] = None) -> Tensor:
r"""
std(input, dim=None, *, correction=1, keepdim=False, out=None) -> Tensor
Calculates the standard deviation over the dimensions specified by :attr:`dim`.
:attr:`dim` can be a single dimension, list of dimensions, or ``None`` to
reduce over all dimensions.
The standard deviation (:math:`\sigma`) is calculated as
.. math:: \sigma = \sqrt{\frac{1}{\max(0,~N - \delta N)}\sum_{i=0}^{N-1}(x_i-\bar{x})^2}
where :math:`x` is the sample set of elements, :math:`\bar{x}` is the
sample mean, :math:`N` is the number of samples and :math:`\delta N` is
the :attr:`correction`.
If :attr:`keepdim` is ``True``, the output tensor is of the same size
as :attr:`input` except in the dimension(s) :attr:`dim` where it is of size 1.
Otherwise, :attr:`dim` is squeezed (see :func:`torch.squeeze`), resulting in the
output tensor having 1 (or ``len(dim)``) fewer dimension(s).
Args:
input (Tensor): the input tensor.
dim (int or tuple of ints, optional): the dimension or dimensions to reduce.
Keyword args:
correction (int): difference between the sample size and sample degrees of freedom.
Defaults to `Bessel's correction`_, ``correction=1``.
.. versionchanged:: 2.0
Previously this argument was called ``unbiased`` and was a boolean
with ``True`` corresponding to ``correction=1`` and ``False`` being
``correction=0``.
keepdim (bool): whether the output tensor has :attr:`dim` retained or not.
out (Tensor, optional): the output tensor.
Example::
>>> a = torch.tensor(
... [[ 0.2035, 1.2959, 1.8101, -0.4644],
... [ 1.5027, -0.3270, 0.5905, 0.6538],
... [-1.5745, 1.3330, -0.5596, -0.6548],
... [ 0.1264, -0.5080, 1.6420, 0.1992]])
>>> torch.std(a, dim=1, keepdim=True)
tensor([[1.0311],
[0.7477],
[1.2204],
[0.9087]])
.. _Bessel's correction: https://en.wikipedia.org/wiki/Bessel%27s_correction
"""
...
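# A minimal runnable sketch (data assumed): ``correction`` sets the divisor
# to ``N - correction``, so ``correction=0`` gives the population standard
# deviation rather than the Bessel-corrected sample estimate.
import torch

_t = torch.tensor([1., 2., 3., 4.])
_sample = torch.std(_t)                    # divisor N - 1
_population = torch.std(_t, correction=0)  # divisor N
assert _population < _sample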
@overload
def std_mean(input: Tensor, dim: Optional[Union[_int, _size]], unbiased: _bool = True, keepdim: _bool = False) -> Tuple[Tensor, Tensor]:
r"""
std_mean(input, dim=None, *, correction=1, keepdim=False, out=None) -> (Tensor, Tensor)
Calculates the standard deviation and mean over the dimensions specified by
:attr:`dim`. :attr:`dim` can be a single dimension, list of dimensions, or
``None`` to reduce over all dimensions.
The standard deviation (:math:`\sigma`) is calculated as
.. math:: \sigma = \sqrt{\frac{1}{\max(0,~N - \delta N)}\sum_{i=0}^{N-1}(x_i-\bar{x})^2}
where :math:`x` is the sample set of elements, :math:`\bar{x}` is the
sample mean, :math:`N` is the number of samples and :math:`\delta N` is
the :attr:`correction`.
If :attr:`keepdim` is ``True``, the output tensor is of the same size
as :attr:`input` except in the dimension(s) :attr:`dim` where it is of size 1.
Otherwise, :attr:`dim` is squeezed (see :func:`torch.squeeze`), resulting in the
output tensor having 1 (or ``len(dim)``) fewer dimension(s).
Args:
input (Tensor): the input tensor.
dim (int or tuple of ints, optional): the dimension or dimensions to reduce.
If ``None``, all dimensions are reduced.
Keyword args:
correction (int): difference between the sample size and sample degrees of freedom.
Defaults to `Bessel's correction`_, ``correction=1``.
.. versionchanged:: 2.0
Previously this argument was called ``unbiased`` and was a boolean
with ``True`` corresponding to ``correction=1`` and ``False`` being
``correction=0``.
keepdim (bool): whether the output tensor has :attr:`dim` retained or not.
out (Tensor, optional): the output tensor.
Returns:
A tuple (std, mean) containing the standard deviation and mean.
Example::
>>> a = torch.tensor(
... [[ 0.2035, 1.2959, 1.8101, -0.4644],
... [ 1.5027, -0.3270, 0.5905, 0.6538],
... [-1.5745, 1.3330, -0.5596, -0.6548],
... [ 0.1264, -0.5080, 1.6420, 0.1992]])
>>> torch.std_mean(a, dim=0, keepdim=True)
(tensor([[1.2620, 1.0028, 1.0957, 0.6038]]),
tensor([[ 0.0645, 0.4485, 0.8707, -0.0665]]))
.. _Bessel's correction: https://en.wikipedia.org/wiki/Bessel%27s_correction
"""
...
@overload
def std_mean(input: Tensor, dim: Optional[Union[_int, _size]] = None, *, correction: Optional[Union[Number, _complex]] = None, keepdim: _bool = False) -> Tuple[Tensor, Tensor]:
r"""
std_mean(input, dim=None, *, correction=1, keepdim=False, out=None) -> (Tensor, Tensor)
Calculates the standard deviation and mean over the dimensions specified by
:attr:`dim`. :attr:`dim` can be a single dimension, list of dimensions, or
``None`` to reduce over all dimensions.
The standard deviation (:math:`\sigma`) is calculated as
.. math:: \sigma = \sqrt{\frac{1}{\max(0,~N - \delta N)}\sum_{i=0}^{N-1}(x_i-\bar{x})^2}
where :math:`x` is the sample set of elements, :math:`\bar{x}` is the
sample mean, :math:`N` is the number of samples and :math:`\delta N` is
the :attr:`correction`.
If :attr:`keepdim` is ``True``, the output tensor is of the same size
as :attr:`input` except in the dimension(s) :attr:`dim` where it is of size 1.
Otherwise, :attr:`dim` is squeezed (see :func:`torch.squeeze`), resulting in the
output tensor having 1 (or ``len(dim)``) fewer dimension(s).
Args:
input (Tensor): the input tensor.
dim (int or tuple of ints, optional): the dimension or dimensions to reduce.
If ``None``, all dimensions are reduced.
Keyword args:
correction (int): difference between the sample size and sample degrees of freedom.
Defaults to `Bessel's correction`_, ``correction=1``.
.. versionchanged:: 2.0
Previously this argument was called ``unbiased`` and was a boolean
with ``True`` corresponding to ``correction=1`` and ``False`` being
``correction=0``.
keepdim (bool): whether the output tensor has :attr:`dim` retained or not.
out (Tensor, optional): the output tensor.
Returns:
A tuple (std, mean) containing the standard deviation and mean.
Example::
>>> a = torch.tensor(
... [[ 0.2035, 1.2959, 1.8101, -0.4644],
... [ 1.5027, -0.3270, 0.5905, 0.6538],
... [-1.5745, 1.3330, -0.5596, -0.6548],
... [ 0.1264, -0.5080, 1.6420, 0.1992]])
>>> torch.std_mean(a, dim=0, keepdim=True)
(tensor([[1.2620, 1.0028, 1.0957, 0.6038]]),
tensor([[ 0.0645, 0.4485, 0.8707, -0.0665]]))
.. _Bessel's correction: https://en.wikipedia.org/wiki/Bessel%27s_correction
"""
...
@overload
def std_mean(input: Tensor, unbiased: _bool = True) -> Tuple[Tensor, Tensor]:
r"""
std_mean(input, dim=None, *, correction=1, keepdim=False, out=None) -> (Tensor, Tensor)
Calculates the standard deviation and mean over the dimensions specified by
:attr:`dim`. :attr:`dim` can be a single dimension, list of dimensions, or
``None`` to reduce over all dimensions.
The standard deviation (:math:`\sigma`) is calculated as
.. math:: \sigma = \sqrt{\frac{1}{\max(0,~N - \delta N)}\sum_{i=0}^{N-1}(x_i-\bar{x})^2}
where :math:`x` is the sample set of elements, :math:`\bar{x}` is the
sample mean, :math:`N` is the number of samples and :math:`\delta N` is
the :attr:`correction`.
If :attr:`keepdim` is ``True``, the output tensor is of the same size
as :attr:`input` except in the dimension(s) :attr:`dim` where it is of size 1.
Otherwise, :attr:`dim` is squeezed (see :func:`torch.squeeze`), resulting in the
output tensor having 1 (or ``len(dim)``) fewer dimension(s).
Args:
input (Tensor): the input tensor.
dim (int or tuple of ints, optional): the dimension or dimensions to reduce.
If ``None``, all dimensions are reduced.
Keyword args:
correction (int): difference between the sample size and sample degrees of freedom.
Defaults to `Bessel's correction`_, ``correction=1``.
.. versionchanged:: 2.0
Previously this argument was called ``unbiased`` and was a boolean
with ``True`` corresponding to ``correction=1`` and ``False`` being
``correction=0``.
keepdim (bool): whether the output tensor has :attr:`dim` retained or not.
out (Tensor, optional): the output tensor.
Returns:
A tuple (std, mean) containing the standard deviation and mean.
Example::
>>> a = torch.tensor(
... [[ 0.2035, 1.2959, 1.8101, -0.4644],
... [ 1.5027, -0.3270, 0.5905, 0.6538],
... [-1.5745, 1.3330, -0.5596, -0.6548],
... [ 0.1264, -0.5080, 1.6420, 0.1992]])
>>> torch.std_mean(a, dim=0, keepdim=True)
(tensor([[1.2620, 1.0028, 1.0957, 0.6038]]),
tensor([[ 0.0645, 0.4485, 0.8707, -0.0665]]))
.. _Bessel's correction: https://en.wikipedia.org/wiki/Bessel%27s_correction
"""
...
@overload
def std_mean(input: Tensor, dim: Sequence[Union[str, ellipsis, None]], *, correction: Optional[Union[Number, _complex]] = None, keepdim: _bool = False) -> Tuple[Tensor, Tensor]:
r"""
std_mean(input, dim=None, *, correction=1, keepdim=False, out=None) -> (Tensor, Tensor)
Calculates the standard deviation and mean over the dimensions specified by
:attr:`dim`. :attr:`dim` can be a single dimension, list of dimensions, or
``None`` to reduce over all dimensions.
The standard deviation (:math:`\sigma`) is calculated as
.. math:: \sigma = \sqrt{\frac{1}{\max(0,~N - \delta N)}\sum_{i=0}^{N-1}(x_i-\bar{x})^2}
where :math:`x` is the sample set of elements, :math:`\bar{x}` is the
sample mean, :math:`N` is the number of samples and :math:`\delta N` is
the :attr:`correction`.
If :attr:`keepdim` is ``True``, the output tensor is of the same size
as :attr:`input` except in the dimension(s) :attr:`dim` where it is of size 1.
Otherwise, :attr:`dim` is squeezed (see :func:`torch.squeeze`), resulting in the
output tensor having 1 (or ``len(dim)``) fewer dimension(s).
Args:
input (Tensor): the input tensor.
dim (int or tuple of ints, optional): the dimension or dimensions to reduce.
If ``None``, all dimensions are reduced.
Keyword args:
correction (int): difference between the sample size and sample degrees of freedom.
Defaults to `Bessel's correction`_, ``correction=1``.
.. versionchanged:: 2.0
Previously this argument was called ``unbiased`` and was a boolean
with ``True`` corresponding to ``correction=1`` and ``False`` being
``correction=0``.
keepdim (bool): whether the output tensor has :attr:`dim` retained or not.
out (Tensor, optional): the output tensor.
Returns:
A tuple (std, mean) containing the standard deviation and mean.
Example::
>>> a = torch.tensor(
... [[ 0.2035, 1.2959, 1.8101, -0.4644],
... [ 1.5027, -0.3270, 0.5905, 0.6538],
... [-1.5745, 1.3330, -0.5596, -0.6548],
... [ 0.1264, -0.5080, 1.6420, 0.1992]])
>>> torch.std_mean(a, dim=0, keepdim=True)
(tensor([[1.2620, 1.0028, 1.0957, 0.6038]]),
tensor([[ 0.0645, 0.4485, 0.8707, -0.0665]]))
.. _Bessel's correction: https://en.wikipedia.org/wiki/Bessel%27s_correction
"""
...
@overload
def std_mean(input: Tensor, dim: Sequence[Union[str, ellipsis, None]], unbiased: _bool = True, keepdim: _bool = False) -> Tuple[Tensor, Tensor]:
r"""
std_mean(input, dim=None, *, correction=1, keepdim=False, out=None) -> (Tensor, Tensor)
Calculates the standard deviation and mean over the dimensions specified by
:attr:`dim`. :attr:`dim` can be a single dimension, list of dimensions, or
``None`` to reduce over all dimensions.
The standard deviation (:math:`\sigma`) is calculated as
.. math:: \sigma = \sqrt{\frac{1}{\max(0,~N - \delta N)}\sum_{i=0}^{N-1}(x_i-\bar{x})^2}
where :math:`x` is the sample set of elements, :math:`\bar{x}` is the
sample mean, :math:`N` is the number of samples and :math:`\delta N` is
the :attr:`correction`.
If :attr:`keepdim` is ``True``, the output tensor is of the same size
as :attr:`input` except in the dimension(s) :attr:`dim` where it is of size 1.
Otherwise, :attr:`dim` is squeezed (see :func:`torch.squeeze`), resulting in the
output tensor having 1 (or ``len(dim)``) fewer dimension(s).
Args:
input (Tensor): the input tensor.
dim (int or tuple of ints, optional): the dimension or dimensions to reduce.
If ``None``, all dimensions are reduced.
Keyword args:
correction (int): difference between the sample size and sample degrees of freedom.
Defaults to `Bessel's correction`_, ``correction=1``.
.. versionchanged:: 2.0
Previously this argument was called ``unbiased`` and was a boolean
with ``True`` corresponding to ``correction=1`` and ``False`` being
``correction=0``.
keepdim (bool): whether the output tensor has :attr:`dim` retained or not.
out (Tensor, optional): the output tensor.
Returns:
A tuple (std, mean) containing the standard deviation and mean.
Example::
>>> a = torch.tensor(
... [[ 0.2035, 1.2959, 1.8101, -0.4644],
... [ 1.5027, -0.3270, 0.5905, 0.6538],
... [-1.5745, 1.3330, -0.5596, -0.6548],
... [ 0.1264, -0.5080, 1.6420, 0.1992]])
>>> torch.std_mean(a, dim=0, keepdim=True)
(tensor([[1.2620, 1.0028, 1.0957, 0.6038]]),
tensor([[ 0.0645, 0.4485, 0.8707, -0.0665]]))
.. _Bessel's correction: https://en.wikipedia.org/wiki/Bessel%27s_correction
"""
...
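# A minimal runnable sketch (data assumed): std_mean returns the same pair
# as calling std and mean separately, in one fused reduction.
import torch

_m0 = torch.arange(12, dtype=torch.float32).reshape(3, 4)
_std, _mean = torch.std_mean(_m0, dim=0)
assert torch.allclose(_std, torch.std(_m0, dim=0))
assert torch.allclose(_mean, torch.mean(_m0, dim=0))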
@overload
def sub(input: Union[Tensor, Number, _complex], other: Union[Tensor, Number, _complex], *, alpha: Optional[Union[Number, _complex]] = 1, out: Optional[Tensor] = None) -> Tensor:
r"""
sub(input, other, *, alpha=1, out=None) -> Tensor
Subtracts :attr:`other`, scaled by :attr:`alpha`, from :attr:`input`.
.. math::
\text{{out}}_i = \text{{input}}_i - \text{{alpha}} \times \text{{other}}_i
Supports :ref:`broadcasting to a common shape <broadcasting-semantics>`,
:ref:`type promotion <type-promotion-doc>`, and integer, float, and complex inputs.
Args:
input (Tensor): the input tensor.
other (Tensor or Number): the tensor or number to subtract from :attr:`input`.
Keyword args:
alpha (Number): the multiplier for :attr:`other`.
out (Tensor, optional): the output tensor.
Example::
>>> a = torch.tensor((1, 2))
>>> b = torch.tensor((0, 1))
>>> torch.sub(a, b, alpha=2)
tensor([1, 0])
"""
...
@overload
def sub(self: Tensor, alpha: Union[Number, _complex], other: Tensor) -> Tensor:
r"""
sub(input, other, *, alpha=1, out=None) -> Tensor
Subtracts :attr:`other`, scaled by :attr:`alpha`, from :attr:`input`.
.. math::
\text{{out}}_i = \text{{input}}_i - \text{{alpha}} \times \text{{other}}_i
Supports :ref:`broadcasting to a common shape <broadcasting-semantics>`,
:ref:`type promotion <type-promotion-doc>`, and integer, float, and complex inputs.
Args:
input (Tensor): the input tensor.
other (Tensor or Number): the tensor or number to subtract from :attr:`input`.
Keyword args:
alpha (Number): the multiplier for :attr:`other`.
out (Tensor, optional): the output tensor.
Example::
>>> a = torch.tensor((1, 2))
>>> b = torch.tensor((0, 1))
>>> torch.sub(a, b, alpha=2)
tensor([1, 0])
"""
...
@overload
def sub(self: Tensor, alpha: Union[Number, _complex], other: Tensor, *, out: Tensor) -> Tensor:
r"""
sub(input, other, *, alpha=1, out=None) -> Tensor
Subtracts :attr:`other`, scaled by :attr:`alpha`, from :attr:`input`.
.. math::
\text{{out}}_i = \text{{input}}_i - \text{{alpha}} \times \text{{other}}_i
Supports :ref:`broadcasting to a common shape <broadcasting-semantics>`,
:ref:`type promotion <type-promotion-doc>`, and integer, float, and complex inputs.
Args:
input (Tensor): the input tensor.
other (Tensor or Number): the tensor or number to subtract from :attr:`input`.
Keyword args:
alpha (Number): the multiplier for :attr:`other`.
out (Tensor, optional): the output tensor.
Example::
>>> a = torch.tensor((1, 2))
>>> b = torch.tensor((0, 1))
>>> torch.sub(a, b, alpha=2)
tensor([1, 0])
"""
...
@overload
def subtract(input: Tensor, other: Tensor, *, alpha: Union[Number, _complex] = 1, out: Optional[Tensor] = None) -> Tensor:
r"""
subtract(input, other, *, alpha=1, out=None) -> Tensor
Alias for :func:`torch.sub`.
"""
...
@overload
def subtract(input: Tensor, other: Union[Number, _complex], alpha: Union[Number, _complex] = 1) -> Tensor:
r"""
subtract(input, other, *, alpha=1, out=None) -> Tensor
Alias for :func:`torch.sub`.
"""
...
@overload
def sum(input: Tensor, *, dtype: Optional[_dtype] = None) -> Tensor:
r"""
sum(input, *, dtype=None) -> Tensor
Returns the sum of all elements in the :attr:`input` tensor.
Args:
input (Tensor): the input tensor.
Keyword args:
dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.
If specified, the input tensor is cast to :attr:`dtype` before the operation
is performed. This is useful for preventing data type overflows. Default: None.
Example::
>>> a = torch.randn(1, 3)
>>> a
tensor([[ 0.1133, -0.9567, 0.2958]])
>>> torch.sum(a)
tensor(-0.5475)
.. function:: sum(input, dim, keepdim=False, *, dtype=None) -> Tensor
:noindex:
Returns the sum of each row of the :attr:`input` tensor in the given
dimension :attr:`dim`. If :attr:`dim` is a list of dimensions,
reduce over all of them.
If :attr:`keepdim` is ``True``, the output tensor is of the same size
as :attr:`input` except in the dimension(s) :attr:`dim` where it is of size 1.
Otherwise, :attr:`dim` is squeezed (see :func:`torch.squeeze`), resulting in the
output tensor having 1 (or ``len(dim)``) fewer dimension(s).
Args:
input (Tensor): the input tensor.
dim (int or tuple of ints, optional): the dimension or dimensions to reduce.
If ``None``, all dimensions are reduced.
keepdim (bool): whether the output tensor has :attr:`dim` retained or not.
Keyword args:
dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.
If specified, the input tensor is cast to :attr:`dtype` before the operation
is performed. This is useful for preventing data type overflows. Default: None.
Example::
>>> a = torch.randn(4, 4)
>>> a
tensor([[ 0.0569, -0.2475, 0.0737, -0.3429],
[-0.2993, 0.9138, 0.9337, -1.6864],
[ 0.1132, 0.7892, -0.1003, 0.5688],
[ 0.3637, -0.9906, -0.4752, -1.5197]])
>>> torch.sum(a, 1)
tensor([-0.4598, -0.1381, 1.3708, -2.6217])
>>> b = torch.arange(4 * 5 * 6).view(4, 5, 6)
>>> torch.sum(b, (2, 1))
tensor([ 435., 1335., 2235., 3135.])
"""
...
@overload
def sum(input: Tensor, dim: Optional[Union[_int, _size]], keepdim: _bool = False, *, dtype: Optional[_dtype] = None, out: Optional[Tensor] = None) -> Tensor:
r"""
sum(input, *, dtype=None) -> Tensor
Returns the sum of all elements in the :attr:`input` tensor.
Args:
input (Tensor): the input tensor.
Keyword args:
dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.
If specified, the input tensor is cast to :attr:`dtype` before the operation
is performed. This is useful for preventing data type overflows. Default: None.
Example::
>>> a = torch.randn(1, 3)
>>> a
tensor([[ 0.1133, -0.9567, 0.2958]])
>>> torch.sum(a)
tensor(-0.5475)
.. function:: sum(input, dim, keepdim=False, *, dtype=None) -> Tensor
:noindex:
Returns the sum of each row of the :attr:`input` tensor in the given
dimension :attr:`dim`. If :attr:`dim` is a list of dimensions,
reduce over all of them.
If :attr:`keepdim` is ``True``, the output tensor is of the same size
as :attr:`input` except in the dimension(s) :attr:`dim` where it is of size 1.
Otherwise, :attr:`dim` is squeezed (see :func:`torch.squeeze`), resulting in the
output tensor having 1 (or ``len(dim)``) fewer dimension(s).
Args:
input (Tensor): the input tensor.
dim (int or tuple of ints, optional): the dimension or dimensions to reduce.
If ``None``, all dimensions are reduced.
keepdim (bool): whether the output tensor has :attr:`dim` retained or not.
Keyword args:
dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.
If specified, the input tensor is cast to :attr:`dtype` before the operation
is performed. This is useful for preventing data type overflows. Default: None.
Example::
>>> a = torch.randn(4, 4)
>>> a
tensor([[ 0.0569, -0.2475, 0.0737, -0.3429],
[-0.2993, 0.9138, 0.9337, -1.6864],
[ 0.1132, 0.7892, -0.1003, 0.5688],
[ 0.3637, -0.9906, -0.4752, -1.5197]])
>>> torch.sum(a, 1)
tensor([-0.4598, -0.1381, 1.3708, -2.6217])
>>> b = torch.arange(4 * 5 * 6).view(4, 5, 6)
>>> torch.sum(b, (2, 1))
tensor([ 435., 1335., 2235., 3135.])
"""
...
@overload
def sum(input: Tensor, dim: Sequence[Union[str, ellipsis, None]], keepdim: _bool = False, *, dtype: Optional[_dtype] = None, out: Optional[Tensor] = None) -> Tensor:
r"""
sum(input, *, dtype=None) -> Tensor
Returns the sum of all elements in the :attr:`input` tensor.
Args:
input (Tensor): the input tensor.
Keyword args:
dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.
If specified, the input tensor is cast to :attr:`dtype` before the operation
is performed. This is useful for preventing data type overflows. Default: None.
Example::
>>> a = torch.randn(1, 3)
>>> a
tensor([[ 0.1133, -0.9567, 0.2958]])
>>> torch.sum(a)
tensor(-0.5475)
.. function:: sum(input, dim, keepdim=False, *, dtype=None) -> Tensor
:noindex:
Returns the sum of each row of the :attr:`input` tensor in the given
dimension :attr:`dim`. If :attr:`dim` is a list of dimensions,
reduce over all of them.
If :attr:`keepdim` is ``True``, the output tensor is of the same size
as :attr:`input` except in the dimension(s) :attr:`dim` where it is of size 1.
Otherwise, :attr:`dim` is squeezed (see :func:`torch.squeeze`), resulting in the
output tensor having 1 (or ``len(dim)``) fewer dimension(s).
Args:
input (Tensor): the input tensor.
dim (int or tuple of ints, optional): the dimension or dimensions to reduce.
If ``None``, all dimensions are reduced.
keepdim (bool): whether the output tensor has :attr:`dim` retained or not.
Keyword args:
dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.
If specified, the input tensor is cast to :attr:`dtype` before the operation
is performed. This is useful for preventing data type overflows. Default: None.
Example::
>>> a = torch.randn(4, 4)
>>> a
tensor([[ 0.0569, -0.2475, 0.0737, -0.3429],
[-0.2993, 0.9138, 0.9337, -1.6864],
[ 0.1132, 0.7892, -0.1003, 0.5688],
[ 0.3637, -0.9906, -0.4752, -1.5197]])
>>> torch.sum(a, 1)
tensor([-0.4598, -0.1381, 1.3708, -2.6217])
>>> b = torch.arange(4 * 5 * 6).view(4, 5, 6)
>>> torch.sum(b, (2, 1))
tensor([ 435, 1335, 2235, 3135])
"""
...
def svd(input: Tensor, some: _bool = True, compute_uv: _bool = True, *, out: Union[Tensor, Tuple[Tensor, ...], List[Tensor], None] = None) -> torch.return_types.svd:
r"""
svd(input, some=True, compute_uv=True, *, out=None) -> (Tensor, Tensor, Tensor)
Computes the singular value decomposition of either a matrix or batch of
matrices :attr:`input`. The singular value decomposition is represented as a
namedtuple `(U, S, V)`, such that :attr:`input` :math:`= U \text{diag}(S) V^{\text{H}}`,
where :math:`V^{\text{H}}` is the transpose of `V` for real inputs,
and the conjugate transpose of `V` for complex inputs.
If :attr:`input` is a batch of matrices, then `U`, `S`, and `V` are also
batched with the same batch dimensions as :attr:`input`.
If :attr:`some` is `True` (default), the method returns the reduced singular
value decomposition. In this case, if the last two dimensions of :attr:`input` are
`m` and `n`, then the returned `U` and `V` matrices will contain only
`min(n, m)` orthonormal columns.
If :attr:`compute_uv` is `False`, the returned `U` and `V` will be
zero-filled matrices of shape `(m, m)` and `(n, n)`
respectively, on the same device as :attr:`input`. The argument :attr:`some`
has no effect when :attr:`compute_uv` is `False`.
Supports :attr:`input` of float, double, cfloat and cdouble data types.
The dtypes of `U` and `V` are the same as :attr:`input`'s. `S` will
always be real-valued, even if :attr:`input` is complex.
.. warning::
:func:`torch.svd` is deprecated in favor of :func:`torch.linalg.svd`
and will be removed in a future PyTorch release.
``U, S, V = torch.svd(A, some=some, compute_uv=True)`` (default) should be replaced with
.. code:: python
U, S, Vh = torch.linalg.svd(A, full_matrices=not some)
V = Vh.mH
``_, S, _ = torch.svd(A, some=some, compute_uv=False)`` should be replaced with
.. code:: python
S = torch.linalg.svdvals(A)
.. note:: Differences with :func:`torch.linalg.svd`:
* :attr:`some` is the opposite of
:func:`torch.linalg.svd`'s :attr:`full_matrices`. Note that
the default value for both is `True`, so the default behavior is
effectively the opposite.
* :func:`torch.svd` returns `V`, whereas :func:`torch.linalg.svd` returns
`Vh`, that is, :math:`V^{\text{H}}`.
* If :attr:`compute_uv` is `False`, :func:`torch.svd` returns zero-filled
tensors for `U` and `Vh`, whereas :func:`torch.linalg.svd` returns
empty tensors.
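A short sketch relating the two APIs (for any matrix `A`; singular vectors
agree only up to a per-column sign, or phase in the complex case):
.. code:: python
U1, S1, V1 = torch.svd(A, some=True)
U2, S2, Vh2 = torch.linalg.svd(A, full_matrices=False)
# S1 equals S2; U1 matches U2, and V1 matches Vh2.mH, up to column sign/phase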
.. note:: The singular values are returned in descending order. If :attr:`input` is a batch of matrices,
then the singular values of each matrix in the batch are returned in descending order.
.. note:: The `S` tensor can only be used to compute gradients if :attr:`compute_uv` is `True`.
.. note:: When :attr:`some` is `False`, the gradients on `U[..., :, min(m, n):]`
and `V[..., :, min(m, n):]` will be ignored in the backward pass, as those vectors
can be arbitrary bases of the corresponding subspaces.
.. note:: The implementation of :func:`torch.linalg.svd` on CPU uses LAPACK's routine `?gesdd`
(a divide-and-conquer algorithm) instead of `?gesvd` for speed. Analogously,
on GPU, it uses cuSOLVER's routines `gesvdj` and `gesvdjBatched` on CUDA 10.1.243
and later, and MAGMA's routine `gesdd` on earlier versions of CUDA.
.. note:: The returned `U` will not be contiguous. The matrix (or batch of matrices) will
be represented as a column-major matrix (i.e. Fortran-contiguous).
.. warning:: The gradients with respect to `U` and `V` will only be finite when the input has
neither zero nor repeated singular values.
.. warning:: If the distance between any two singular values is close to zero, the gradients with respect to
`U` and `V` will be numerically unstable, as they depend on
:math:`\frac{1}{\min_{i \neq j} \sigma_i^2 - \sigma_j^2}`. The same happens when the matrix
has small singular values, as these gradients also depend on `S^{-1}`.
.. warning:: For complex-valued :attr:`input` the singular value decomposition is not unique,
as `U` and `V` may be multiplied by an arbitrary phase factor :math:`e^{i \phi}` on every column.
The same happens when :attr:`input` has repeated singular values, where one may multiply
the columns of the spanning subspace in `U` and `V` by a rotation matrix
and `the resulting vectors will span the same subspace`_.
Different platforms, like NumPy, or inputs on different device types,
may produce different `U` and `V` tensors.
Args:
input (Tensor): the input tensor of size `(*, m, n)` where `*` is zero or more
batch dimensions consisting of `(m, n)` matrices.
some (bool, optional): controls whether to compute the reduced or full decomposition, and
consequently, the shape of returned `U` and `V`. Default: `True`.
compute_uv (bool, optional): controls whether to compute `U` and `V`. Default: `True`.
Keyword args:
out (tuple, optional): the output tuple of tensors
Example::
>>> a = torch.randn(5, 3)
>>> a
tensor([[ 0.2364, -0.7752, 0.6372],
[ 1.7201, 0.7394, -0.0504],
[-0.3371, -1.0584, 0.5296],
[ 0.3550, -0.4022, 1.5569],
[ 0.2445, -0.0158, 1.1414]])
>>> u, s, v = torch.svd(a)
>>> u
tensor([[ 0.4027, 0.0287, 0.5434],
[-0.1946, 0.8833, 0.3679],
[ 0.4296, -0.2890, 0.5261],
[ 0.6604, 0.2717, -0.2618],
[ 0.4234, 0.2481, -0.4733]])
>>> s
tensor([2.3289, 2.0315, 0.7806])
>>> v
tensor([[-0.0199, 0.8766, 0.4809],
[-0.5080, 0.4054, -0.7600],
[ 0.8611, 0.2594, -0.4373]])
>>> torch.dist(a, torch.mm(torch.mm(u, torch.diag(s)), v.t()))
tensor(8.6531e-07)
>>> a_big = torch.randn(7, 5, 3)
>>> u, s, v = torch.svd(a_big)
>>> torch.dist(a_big, torch.matmul(torch.matmul(u, torch.diag_embed(s)), v.mT))
tensor(2.6503e-06)
.. _the resulting vectors will span the same subspace:
https://en.wikipedia.org/wiki/Singular_value_decomposition#Singular_values,_singular_vectors,_and_their_relation_to_the_SVD
"""
...
def swapaxes(input: Tensor, axis0: _int, axis1: _int) -> Tensor:
r"""
swapaxes(input, axis0, axis1) -> Tensor
Alias for :func:`torch.transpose`.
This function is equivalent to NumPy's swapaxes function.
Examples::
>>> x = torch.tensor([[[0,1],[2,3]],[[4,5],[6,7]]])
>>> x
tensor([[[0, 1],
[2, 3]],
[[4, 5],
[6, 7]]])
>>> torch.swapaxes(x, 0, 1)
tensor([[[0, 1],
[4, 5]],
[[2, 3],
[6, 7]]])
>>> torch.swapaxes(x, 0, 2)
tensor([[[0, 4],
[2, 6]],
[[1, 5],
[3, 7]]])
"""
...
def swapdims(input: Tensor, dim0: _int, dim1: _int) -> Tensor:
r"""
swapdims(input, dim0, dim1) -> Tensor
Alias for :func:`torch.transpose`.
This function is equivalent to NumPy's swapaxes function.
Examples::
>>> x = torch.tensor([[[0,1],[2,3]],[[4,5],[6,7]]])
>>> x
tensor([[[0, 1],
[2, 3]],
[[4, 5],
[6, 7]]])
>>> torch.swapdims(x, 0, 1)
tensor([[[0, 1],
[4, 5]],
[[2, 3],
[6, 7]]])
>>> torch.swapdims(x, 0, 2)
tensor([[[0, 4],
[2, 6]],
[[1, 5],
[3, 7]]])
"""
...
def sym_constrain_range(size: Union[Number, _complex], *, min: Optional[_int] = None, max: Optional[_int] = None) -> None: ...
def sym_constrain_range_for_size(size: Union[Number, _complex], *, min: Optional[_int] = None, max: Optional[_int] = None) -> None: ...
def t(input: Tensor) -> Tensor:
r"""
t(input) -> Tensor
Expects :attr:`input` to be a tensor with 2 or fewer dimensions and transposes dimensions 0
and 1.
0-D and 1-D tensors are returned as is. When input is a 2-D tensor this
is equivalent to ``transpose(input, 0, 1)``.
Args:
input (Tensor): the input tensor.
Example::
>>> x = torch.randn(())
>>> x
tensor(0.1995)
>>> torch.t(x)
tensor(0.1995)
>>> x = torch.randn(3)
>>> x
tensor([ 2.4320, -0.4608, 0.7702])
>>> torch.t(x)
tensor([ 2.4320, -0.4608, 0.7702])
>>> x = torch.randn(2, 3)
>>> x
tensor([[ 0.4875, 0.9158, -0.5872],
[ 0.3938, -0.6929, 0.6932]])
>>> torch.t(x)
tensor([[ 0.4875, 0.3938],
[ 0.9158, -0.6929],
[-0.5872, 0.6932]])
See also :func:`torch.transpose`.
"""
...
def t_copy(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
r"""
Performs the same operation as :func:`torch.t`, but all output tensors
are freshly created instead of aliasing the input.
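A minimal sketch of the non-aliasing behavior (example values are illustrative):
Example::
>>> x = torch.zeros(2, 3)
>>> y = torch.t_copy(x)  # y is a fresh tensor, not a view of x
>>> y[0, 0] = 1.0        # x is unchanged; torch.t(x) would alias x instead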
"""
...
def take(input: Tensor, index: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
r"""
take(input, index) -> Tensor
Returns a new tensor with the elements of :attr:`input` at the given indices.
The input tensor is treated as if it were viewed as a 1-D tensor. The result
takes the same shape as the indices.
Args:
input (Tensor): the input tensor.
index (LongTensor): the indices into tensor
Example::
>>> src = torch.tensor([[4, 3, 5],
... [6, 7, 8]])
>>> torch.take(src, torch.tensor([0, 2, 5]))
tensor([ 4, 5, 8])
"""
...
def take_along_dim(input: Tensor, indices: Tensor, dim: Optional[_int] = None, *, out: Optional[Tensor] = None) -> Tensor:
r"""
take_along_dim(input, indices, dim=None, *, out=None) -> Tensor
Selects values from :attr:`input` at the 1-dimensional indices from :attr:`indices` along the given :attr:`dim`.
If :attr:`dim` is None, the input array is treated as if it has been flattened to 1d.
Functions that return indices along a dimension, like :func:`torch.argmax` and :func:`torch.argsort`,
are designed to work with this function. See the examples below.
.. note::
This function is similar to NumPy's `take_along_axis`.
See also :func:`torch.gather`.
Args:
input (Tensor): the input tensor.
indices (LongTensor): the indices into :attr:`input`. Must have long dtype.
dim (int, optional): dimension to select along.
Keyword args:
out (Tensor, optional): the output tensor.
Example::
>>> t = torch.tensor([[10, 30, 20], [60, 40, 50]])
>>> max_idx = torch.argmax(t)
>>> torch.take_along_dim(t, max_idx)
tensor([60])
>>> sorted_idx = torch.argsort(t, dim=1)
>>> torch.take_along_dim(t, sorted_idx, dim=1)
tensor([[10, 20, 30],
[40, 50, 60]])
"""
...
def tan(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
r"""
tan(input, *, out=None) -> Tensor
Returns a new tensor with the tangent of the elements of :attr:`input`.
.. math::
\text{out}_{i} = \tan(\text{input}_{i})
Args:
input (Tensor): the input tensor.
Keyword args:
out (Tensor, optional): the output tensor.
Example::
>>> a = torch.randn(4)
>>> a
tensor([-1.2027, -1.7687, 0.4412, -1.3856])
>>> torch.tan(a)
tensor([-2.5930, 4.9859, 0.4722, -5.3366])
"""
...
def tan_(input: Tensor) -> Tensor: ...
def tanh(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
r"""
tanh(input, *, out=None) -> Tensor
Returns a new tensor with the hyperbolic tangent of the elements
of :attr:`input`.
.. math::
\text{out}_{i} = \tanh(\text{input}_{i})
Args:
input (Tensor): the input tensor.
Keyword args:
out (Tensor, optional): the output tensor.
Example::
>>> a = torch.randn(4)
>>> a
tensor([ 0.8986, -0.7279, 1.1745, 0.2611])
>>> torch.tanh(a)
tensor([ 0.7156, -0.6218, 0.8257, 0.2553])
"""
...
def tanh_(input: Tensor) -> Tensor: ...
def tensor(data: Any, dtype: Optional[_dtype] = None, device: Optional[DeviceLikeType] = None, requires_grad: _bool = False, pin_memory: _bool = False) -> Tensor:
r"""
tensor(data, *, dtype=None, device=None, requires_grad=False, pin_memory=False) -> Tensor
Constructs a tensor with no autograd history (also known as a "leaf tensor", see :doc:`/notes/autograd`) by copying :attr:`data`.
.. warning::
When working with tensors prefer using :func:`torch.Tensor.clone`,
:func:`torch.Tensor.detach`, and :func:`torch.Tensor.requires_grad_` for
readability. Letting `t` be a tensor, ``torch.tensor(t)`` is equivalent to
``t.clone().detach()``, and ``torch.tensor(t, requires_grad=True)``
is equivalent to ``t.clone().detach().requires_grad_(True)``.
.. seealso::
:func:`torch.as_tensor` preserves autograd history and avoids copies where possible.
:func:`torch.from_numpy` creates a tensor that shares storage with a NumPy array.
Args:
data (array_like): Initial data for the tensor. Can be a list, tuple,
NumPy ``ndarray``, scalar, and other types.
Keyword args:
dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.
Default: if ``None``, infers data type from :attr:`data`.
device (:class:`torch.device`, optional): the device of the constructed tensor. If None and data is a tensor
then the device of data is used. If None and data is not a tensor then
the result tensor is constructed on the current device.
requires_grad (bool, optional): If autograd should record operations on the
returned tensor. Default: ``False``.
pin_memory (bool, optional): If set, the returned tensor is allocated in
pinned memory. Works only for CPU tensors. Default: ``False``.
Example::
>>> torch.tensor([[0.1, 1.2], [2.2, 3.1], [4.9, 5.2]])
tensor([[ 0.1000, 1.2000],
[ 2.2000, 3.1000],
[ 4.9000, 5.2000]])
>>> torch.tensor([0, 1]) # Type inference on data
tensor([ 0, 1])
>>> torch.tensor([[0.11111, 0.222222, 0.3333333]],
... dtype=torch.float64,
... device=torch.device('cuda:0')) # creates a double tensor on a CUDA device
tensor([[ 0.1111, 0.2222, 0.3333]], dtype=torch.float64, device='cuda:0')
>>> torch.tensor(3.14159) # Create a zero-dimensional (scalar) tensor
tensor(3.1416)
>>> torch.tensor([]) # Create an empty tensor (of size (0,))
tensor([])
"""
...
@overload
def tensor_split(input: Tensor, tensor_indices_or_sections: Tensor, dim: _int = 0) -> Tuple[Tensor, ...]:
r"""
tensor_split(input, indices_or_sections, dim=0) -> List of Tensors
Splits a tensor into multiple sub-tensors, all of which are views of :attr:`input`,
along dimension :attr:`dim` according to the indices or number of sections specified
by :attr:`indices_or_sections`. This function is based on NumPy's
:func:`numpy.array_split`.
Args:
input (Tensor): the tensor to split
indices_or_sections (Tensor, int or list or tuple of ints):
If :attr:`indices_or_sections` is an integer ``n`` or a zero-dimensional long tensor
with value ``n``, :attr:`input` is split into ``n`` sections along dimension :attr:`dim`.
If :attr:`input` is divisible by ``n`` along dimension :attr:`dim`, each
section will be of equal size, :code:`input.size(dim) / n`. If :attr:`input`
is not divisible by ``n``, the sizes of the first :code:`int(input.size(dim) % n)`
sections will have size :code:`int(input.size(dim) / n) + 1`, and the rest will
have size :code:`int(input.size(dim) / n)`.
If :attr:`indices_or_sections` is a list or tuple of ints, or a one-dimensional long
tensor, then :attr:`input` is split along dimension :attr:`dim` at each of the indices
in the list, tuple or tensor. For instance, :code:`indices_or_sections=[2, 3]` and :code:`dim=0`
would result in the tensors :code:`input[:2]`, :code:`input[2:3]`, and :code:`input[3:]`.
If :attr:`indices_or_sections` is a tensor, it must be a zero-dimensional or one-dimensional
long tensor on the CPU.
dim (int, optional): dimension along which to split the tensor. Default: ``0``
Example::
>>> x = torch.arange(8)
>>> torch.tensor_split(x, 3)
(tensor([0, 1, 2]), tensor([3, 4, 5]), tensor([6, 7]))
>>> x = torch.arange(7)
>>> torch.tensor_split(x, 3)
(tensor([0, 1, 2]), tensor([3, 4]), tensor([5, 6]))
>>> torch.tensor_split(x, (1, 6))
(tensor([0]), tensor([1, 2, 3, 4, 5]), tensor([6]))
>>> x = torch.arange(14).reshape(2, 7)
>>> x
tensor([[ 0, 1, 2, 3, 4, 5, 6],
[ 7, 8, 9, 10, 11, 12, 13]])
>>> torch.tensor_split(x, 3, dim=1)
(tensor([[0, 1, 2],
[7, 8, 9]]),
tensor([[ 3, 4],
[10, 11]]),
tensor([[ 5, 6],
[12, 13]]))
>>> torch.tensor_split(x, (1, 6), dim=1)
(tensor([[0],
[7]]),
tensor([[ 1, 2, 3, 4, 5],
[ 8, 9, 10, 11, 12]]),
tensor([[ 6],
[13]]))
"""
...
@overload
def tensor_split(input: Tensor, sections: Union[_int, SymInt], dim: _int = 0) -> Tuple[Tensor, ...]:
r"""
tensor_split(input, indices_or_sections, dim=0) -> List of Tensors
Splits a tensor into multiple sub-tensors, all of which are views of :attr:`input`,
along dimension :attr:`dim` according to the indices or number of sections specified
by :attr:`indices_or_sections`. This function is based on NumPy's
:func:`numpy.array_split`.
Args:
input (Tensor): the tensor to split
indices_or_sections (Tensor, int or list or tuple of ints):
If :attr:`indices_or_sections` is an integer ``n`` or a zero-dimensional long tensor
with value ``n``, :attr:`input` is split into ``n`` sections along dimension :attr:`dim`.
If :attr:`input` is divisible by ``n`` along dimension :attr:`dim`, each
section will be of equal size, :code:`input.size(dim) / n`. If :attr:`input`
is not divisible by ``n``, the sizes of the first :code:`int(input.size(dim) % n)`
sections will have size :code:`int(input.size(dim) / n) + 1`, and the rest will
have size :code:`int(input.size(dim) / n)`.
If :attr:`indices_or_sections` is a list or tuple of ints, or a one-dimensional long
tensor, then :attr:`input` is split along dimension :attr:`dim` at each of the indices
in the list, tuple or tensor. For instance, :code:`indices_or_sections=[2, 3]` and :code:`dim=0`
would result in the tensors :code:`input[:2]`, :code:`input[2:3]`, and :code:`input[3:]`.
If :attr:`indices_or_sections` is a tensor, it must be a zero-dimensional or one-dimensional
long tensor on the CPU.
dim (int, optional): dimension along which to split the tensor. Default: ``0``
Example::
>>> x = torch.arange(8)
>>> torch.tensor_split(x, 3)
(tensor([0, 1, 2]), tensor([3, 4, 5]), tensor([6, 7]))
>>> x = torch.arange(7)
>>> torch.tensor_split(x, 3)
(tensor([0, 1, 2]), tensor([3, 4]), tensor([5, 6]))
>>> torch.tensor_split(x, (1, 6))
(tensor([0]), tensor([1, 2, 3, 4, 5]), tensor([6]))
>>> x = torch.arange(14).reshape(2, 7)
>>> x
tensor([[ 0, 1, 2, 3, 4, 5, 6],
[ 7, 8, 9, 10, 11, 12, 13]])
>>> torch.tensor_split(x, 3, dim=1)
(tensor([[0, 1, 2],
[7, 8, 9]]),
tensor([[ 3, 4],
[10, 11]]),
tensor([[ 5, 6],
[12, 13]]))
>>> torch.tensor_split(x, (1, 6), dim=1)
(tensor([[0],
[7]]),
tensor([[ 1, 2, 3, 4, 5],
[ 8, 9, 10, 11, 12]]),
tensor([[ 6],
[13]]))
"""
...
@overload
def tensor_split(input: Tensor, indices: Sequence[Union[_int, SymInt]], dim: _int = 0) -> Tuple[Tensor, ...]:
r"""
tensor_split(input, indices_or_sections, dim=0) -> List of Tensors
Splits a tensor into multiple sub-tensors, all of which are views of :attr:`input`,
along dimension :attr:`dim` according to the indices or number of sections specified
by :attr:`indices_or_sections`. This function is based on NumPy's
:func:`numpy.array_split`.
Args:
input (Tensor): the tensor to split
indices_or_sections (Tensor, int or list or tuple of ints):
If :attr:`indices_or_sections` is an integer ``n`` or a zero-dimensional long tensor
with value ``n``, :attr:`input` is split into ``n`` sections along dimension :attr:`dim`.
If :attr:`input` is divisible by ``n`` along dimension :attr:`dim`, each
section will be of equal size, :code:`input.size(dim) / n`. If :attr:`input`
is not divisible by ``n``, the sizes of the first :code:`int(input.size(dim) % n)`
sections will have size :code:`int(input.size(dim) / n) + 1`, and the rest will
have size :code:`int(input.size(dim) / n)`.
If :attr:`indices_or_sections` is a list or tuple of ints, or a one-dimensional long
tensor, then :attr:`input` is split along dimension :attr:`dim` at each of the indices
in the list, tuple or tensor. For instance, :code:`indices_or_sections=[2, 3]` and :code:`dim=0`
would result in the tensors :code:`input[:2]`, :code:`input[2:3]`, and :code:`input[3:]`.
If :attr:`indices_or_sections` is a tensor, it must be a zero-dimensional or one-dimensional
long tensor on the CPU.
dim (int, optional): dimension along which to split the tensor. Default: ``0``
Example::
>>> x = torch.arange(8)
>>> torch.tensor_split(x, 3)
(tensor([0, 1, 2]), tensor([3, 4, 5]), tensor([6, 7]))
>>> x = torch.arange(7)
>>> torch.tensor_split(x, 3)
(tensor([0, 1, 2]), tensor([3, 4]), tensor([5, 6]))
>>> torch.tensor_split(x, (1, 6))
(tensor([0]), tensor([1, 2, 3, 4, 5]), tensor([6]))
>>> x = torch.arange(14).reshape(2, 7)
>>> x
tensor([[ 0, 1, 2, 3, 4, 5, 6],
[ 7, 8, 9, 10, 11, 12, 13]])
>>> torch.tensor_split(x, 3, dim=1)
(tensor([[0, 1, 2],
[7, 8, 9]]),
tensor([[ 3, 4],
[10, 11]]),
tensor([[ 5, 6],
[12, 13]]))
>>> torch.tensor_split(x, (1, 6), dim=1)
(tensor([[0],
[7]]),
tensor([[ 1, 2, 3, 4, 5],
[ 8, 9, 10, 11, 12]]),
tensor([[ 6],
[13]]))
"""
...
def threshold(input: Tensor, threshold: Union[Number, _complex], value: Union[Number, _complex], *, out: Optional[Tensor] = None) -> Tensor: ...
def threshold_(input: Tensor, threshold: Union[Number, _complex], value: Union[Number, _complex]) -> Tensor: ...
def tile(input: Tensor, dims: Sequence[Union[_int, SymInt]]) -> Tensor:
r"""
tile(input, dims) -> Tensor
Constructs a tensor by repeating the elements of :attr:`input`.
The :attr:`dims` argument specifies the number of repetitions
in each dimension.
If :attr:`dims` specifies fewer dimensions than :attr:`input` has, then
ones are prepended to :attr:`dims` until all dimensions are specified.
For example, if :attr:`input` has shape (8, 6, 4, 2) and :attr:`dims`
is (2, 2), then :attr:`dims` is treated as (1, 1, 2, 2).
Analogously, if :attr:`input` has fewer dimensions than :attr:`dims`
specifies, then :attr:`input` is treated as if it were unsqueezed at
dimension zero until it has as many dimensions as :attr:`dims` specifies.
For example, if :attr:`input` has shape (4, 2) and :attr:`dims`
is (3, 3, 2, 2), then :attr:`input` is treated as if it had the
shape (1, 1, 4, 2).
.. note::
This function is similar to NumPy's tile function.
Args:
input (Tensor): the tensor whose elements to repeat.
dims (tuple): the number of repetitions per dimension.
Example::
>>> x = torch.tensor([1, 2, 3])
>>> x.tile((2,))
tensor([1, 2, 3, 1, 2, 3])
>>> y = torch.tensor([[1, 2], [3, 4]])
>>> torch.tile(y, (2, 2))
tensor([[1, 2, 1, 2],
[3, 4, 3, 4],
[1, 2, 1, 2],
[3, 4, 3, 4]])
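>>> # dims with more entries than x.dim(): x is treated as if it had shape (1, 3)
>>> torch.tile(x, (2, 2))
tensor([[1, 2, 3, 1, 2, 3],
[1, 2, 3, 1, 2, 3]])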
"""
...
def topk(input: Tensor, k: Union[_int, SymInt], dim: _int = -1, largest: _bool = True, sorted: _bool = True, *, out: Union[Tensor, Tuple[Tensor, ...], List[Tensor], None] = None) -> torch.return_types.topk:
r"""
topk(input, k, dim=None, largest=True, sorted=True, *, out=None) -> (Tensor, LongTensor)
Returns the :attr:`k` largest elements of the given :attr:`input` tensor along
a given dimension.
If :attr:`dim` is not given, the last dimension of the `input` is chosen.
If :attr:`largest` is ``False`` then the `k` smallest elements are returned.
A namedtuple of `(values, indices)` is returned with the `values` and
`indices` of the largest `k` elements of each row of the `input` tensor in the
given dimension `dim`.
If the boolean option :attr:`sorted` is ``True``, the returned
`k` elements are themselves sorted.
Args:
input (Tensor): the input tensor.
k (int): the k in "top-k"
dim (int, optional): the dimension to sort along
largest (bool, optional): controls whether to return largest or
smallest elements
sorted (bool, optional): controls whether to return the elements
in sorted order
Keyword args:
out (tuple, optional): the output tuple of (Tensor, LongTensor) that can be
optionally given to be used as output buffers
Example::
>>> x = torch.arange(1., 6.)
>>> x
tensor([ 1., 2., 3., 4., 5.])
>>> torch.topk(x, 3)
torch.return_types.topk(values=tensor([5., 4., 3.]), indices=tensor([4, 3, 2]))
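>>> # with largest=False the k smallest elements are returned instead
>>> torch.topk(x, 3, largest=False)
torch.return_types.topk(values=tensor([1., 2., 3.]), indices=tensor([0, 1, 2]))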
"""
...
def trace(input: Tensor) -> Tensor:
r"""
trace(input) -> Tensor
Returns the sum of the elements of the diagonal of the input 2-D matrix.
Example::
>>> x = torch.arange(1., 10.).view(3, 3)
>>> x
tensor([[ 1., 2., 3.],
[ 4., 5., 6.],
[ 7., 8., 9.]])
>>> torch.trace(x)
tensor(15.)
"""
...
@overload
def transpose(input: Tensor, dim0: _int, dim1: _int) -> Tensor:
r"""
transpose(input, dim0, dim1) -> Tensor
Returns a tensor that is a transposed version of :attr:`input`.
The given dimensions :attr:`dim0` and :attr:`dim1` are swapped.
If :attr:`input` is a strided tensor then the resulting :attr:`out`
tensor shares its underlying storage with the :attr:`input` tensor, so
changing the content of one would change the content of the other.
If :attr:`input` is a :ref:`sparse tensor <sparse-docs>` then the
resulting :attr:`out` tensor *does not* share the underlying storage
with the :attr:`input` tensor.
If :attr:`input` is a :ref:`sparse tensor <sparse-docs>` with compressed
layout (SparseCSR, SparseBSR, SparseCSC or SparseBSC) the arguments
:attr:`dim0` and :attr:`dim1` must be both batch dimensions, or must
both be sparse dimensions. The batch dimensions of a sparse tensor are the
dimensions preceding the sparse dimensions.
.. note::
Transpositions which interchange the sparse dimensions of a `SparseCSR`
or `SparseCSC` layout tensor will result in the layout changing between
the two options. Transposition of the sparse dimensions of a `SparseBSR`
or `SparseBSC` layout tensor will likewise generate a result with the
opposite layout.
Args:
input (Tensor): the input tensor.
dim0 (int): the first dimension to be transposed
dim1 (int): the second dimension to be transposed
Example::
>>> x = torch.randn(2, 3)
>>> x
tensor([[ 1.0028, -0.9893, 0.5809],
[-0.1669, 0.7299, 0.4942]])
>>> torch.transpose(x, 0, 1)
tensor([[ 1.0028, -0.1669],
[-0.9893, 0.7299],
[ 0.5809, 0.4942]])
See also :func:`torch.t`.
"""
...
@overload
def transpose(input: Tensor, dim0: Union[str, ellipsis, None], dim1: Union[str, ellipsis, None]) -> Tensor:
r"""
transpose(input, dim0, dim1) -> Tensor
Returns a tensor that is a transposed version of :attr:`input`.
The given dimensions :attr:`dim0` and :attr:`dim1` are swapped.
If :attr:`input` is a strided tensor then the resulting :attr:`out`
tensor shares its underlying storage with the :attr:`input` tensor, so
changing the content of one would change the content of the other.
If :attr:`input` is a :ref:`sparse tensor <sparse-docs>` then the
resulting :attr:`out` tensor *does not* share the underlying storage
with the :attr:`input` tensor.
If :attr:`input` is a :ref:`sparse tensor <sparse-docs>` with compressed
layout (SparseCSR, SparseBSR, SparseCSC or SparseBSC) the arguments
:attr:`dim0` and :attr:`dim1` must be both batch dimensions, or must
both be sparse dimensions. The batch dimensions of a sparse tensor are the
dimensions preceding the sparse dimensions.
.. note::
Transpositions which interchange the sparse dimensions of a `SparseCSR`
or `SparseCSC` layout tensor will result in the layout changing between
the two options. Transposition of the sparse dimensions of a `SparseBSR`
or `SparseBSC` layout tensor will likewise generate a result with the
opposite layout.
Args:
input (Tensor): the input tensor.
dim0 (int): the first dimension to be transposed
dim1 (int): the second dimension to be transposed
Example::
>>> x = torch.randn(2, 3)
>>> x
tensor([[ 1.0028, -0.9893, 0.5809],
[-0.1669, 0.7299, 0.4942]])
>>> torch.transpose(x, 0, 1)
tensor([[ 1.0028, -0.1669],
[-0.9893, 0.7299],
[ 0.5809, 0.4942]])
See also :func:`torch.t`.
"""
...
def transpose_copy(input: Tensor, dim0: _int, dim1: _int, *, out: Optional[Tensor] = None) -> Tensor:
r"""
Performs the same operation as :func:`torch.transpose`, but all output tensors
are freshly created instead of aliasing the input.
"""
...
@overload
def trapezoid(y: Tensor, x: Tensor, *, dim: _int = -1) -> Tensor:
r"""
trapezoid(y, x=None, *, dx=None, dim=-1) -> Tensor
Computes the `trapezoidal rule <https://en.wikipedia.org/wiki/Trapezoidal_rule>`_ along
:attr:`dim`. By default the spacing between elements is assumed to be 1, but
:attr:`dx` can be used to specify a different constant spacing, and :attr:`x` can be
used to specify arbitrary spacing along :attr:`dim`.
Assuming :attr:`y` is a one-dimensional tensor with :math:`n` elements :math:`{y_0, y_1, ..., y_{n-1}}`,
the default computation is
.. math::
\begin{aligned}
\sum_{i = 1}^{n-1} \frac{1}{2} (y_i + y_{i-1})
\end{aligned}
When :attr:`dx` is specified the computation becomes
.. math::
\begin{aligned}
\sum_{i = 1}^{n-1} \frac{\Delta x}{2} (y_i + y_{i-1})
\end{aligned}
effectively multiplying the result by :attr:`dx`. When :attr:`x` is specified,
assuming :attr:`x` is also a one-dimensional tensor with
elements :math:`{x_0, x_1, ..., x_{n-1}}`, the computation becomes
.. math::
\begin{aligned}
\sum_{i = 1}^{n-1} \frac{(x_i - x_{i-1})}{2} (y_i + y_{i-1})
\end{aligned}
When :attr:`x` and :attr:`y` have the same size, the computation is as described above and no broadcasting is needed.
The broadcasting behavior of this function is as follows when their sizes are different. For both :attr:`x`
and :attr:`y`, the function computes the difference between consecutive elements along
dimension :attr:`dim`. This effectively creates two tensors, `x_diff` and `y_diff`, that have
the same shape as the original tensors except that their lengths along the dimension :attr:`dim` are reduced by 1.
After that, those two tensors are broadcast together to compute final output as part of the trapezoidal rule.
See the examples below for details.
.. note::
The trapezoidal rule is a technique for approximating the definite integral of a function
by averaging its left and right Riemann sums. The approximation becomes more accurate as
the resolution of the partition increases.
Arguments:
y (Tensor): Values to use when computing the trapezoidal rule.
x (Tensor): If specified, defines spacing between values as specified above.
Keyword arguments:
dx (float): constant spacing between values. If neither :attr:`x` nor :attr:`dx`
is specified then this defaults to 1. Effectively multiplies the result by its value.
dim (int): The dimension along which to compute the trapezoidal rule.
The last (inner-most) dimension by default.
Examples::
>>> # Computes the trapezoidal rule in 1D, spacing is implicitly 1
>>> y = torch.tensor([1, 5, 10])
>>> torch.trapezoid(y)
tensor(10.5)
>>> # Computes the same trapezoidal rule directly to verify
>>> (1 + 10 + 10) / 2
10.5
>>> # Computes the trapezoidal rule in 1D with constant spacing of 2
>>> # NOTE: the result is the same as before, but multiplied by 2
>>> torch.trapezoid(y, dx=2)
tensor(21.0)
>>> # Computes the trapezoidal rule in 1D with arbitrary spacing
>>> x = torch.tensor([1, 3, 6])
>>> torch.trapezoid(y, x)
tensor(28.5)
>>> # Computes the same trapezoidal rule directly to verify
>>> ((3 - 1) * (1 + 5) + (6 - 3) * (5 + 10)) / 2
28.5
>>> # Computes the trapezoidal rule for each row of a 3x3 matrix
>>> y = torch.arange(9).reshape(3, 3)
>>> y
tensor([[0, 1, 2],
[3, 4, 5],
[6, 7, 8]])
>>> torch.trapezoid(y)
tensor([ 2., 8., 14.])
>>> # Computes the trapezoidal rule for each column of the matrix
>>> torch.trapezoid(y, dim=0)
tensor([ 6., 8., 10.])
>>> # Computes the trapezoidal rule for each row of a 3x3 ones matrix
>>> # with the same arbitrary spacing
>>> y = torch.ones(3, 3)
>>> x = torch.tensor([1, 3, 6])
>>> torch.trapezoid(y, x)
tensor([5., 5., 5.])
>>> # Computes the trapezoidal rule for each row of a 3x3 ones matrix
>>> # with different arbitrary spacing per row
>>> y = torch.ones(3, 3)
>>> x = torch.tensor([[1, 2, 3], [1, 3, 5], [1, 4, 7]])
>>> torch.trapezoid(y, x)
tensor([2., 4., 6.])
"""
...
@overload
def trapezoid(y: Tensor, *, dx: Union[Number, _complex] = 1, dim: _int = -1) -> Tensor:
r"""
trapezoid(y, x=None, *, dx=None, dim=-1) -> Tensor
Computes the `trapezoidal rule <https://en.wikipedia.org/wiki/Trapezoidal_rule>`_ along
:attr:`dim`. By default the spacing between elements is assumed to be 1, but
:attr:`dx` can be used to specify a different constant spacing, and :attr:`x` can be
used to specify arbitrary spacing along :attr:`dim`.
Assuming :attr:`y` is a one-dimensional tensor with :math:`n` elements :math:`{y_0, y_1, ..., y_{n-1}}`,
the default computation is
.. math::
\begin{aligned}
\sum_{i = 1}^{n-1} \frac{1}{2} (y_i + y_{i-1})
\end{aligned}
When :attr:`dx` is specified the computation becomes
.. math::
\begin{aligned}
\sum_{i = 1}^{n-1} \frac{\Delta x}{2} (y_i + y_{i-1})
\end{aligned}
effectively multiplying the result by :attr:`dx`. When :attr:`x` is specified,
assuming :attr:`x` is also a one-dimensional tensor with
elements :math:`{x_0, x_1, ..., x_{n-1}}`, the computation becomes
.. math::
\begin{aligned}
\sum_{i = 1}^{n-1} \frac{(x_i - x_{i-1})}{2} (y_i + y_{i-1})
\end{aligned}
When :attr:`x` and :attr:`y` have the same size, the computation is as described above and no broadcasting is needed.
The broadcasting behavior of this function is as follows when their sizes are different. For both :attr:`x`
and :attr:`y`, the function computes the difference between consecutive elements along
dimension :attr:`dim`. This effectively creates two tensors, `x_diff` and `y_diff`, that have
the same shape as the original tensors except that their lengths along the dimension :attr:`dim` are reduced by 1.
After that, those two tensors are broadcast together to compute final output as part of the trapezoidal rule.
See the examples below for details.
.. note::
The trapezoidal rule is a technique for approximating the definite integral of a function
by averaging its left and right Riemann sums. The approximation becomes more accurate as
the resolution of the partition increases.
Arguments:
y (Tensor): Values to use when computing the trapezoidal rule.
x (Tensor): If specified, defines spacing between values as specified above.
Keyword arguments:
dx (float): constant spacing between values. If neither :attr:`x` nor :attr:`dx`
is specified then this defaults to 1. Effectively multiplies the result by its value.
dim (int): The dimension along which to compute the trapezoidal rule.
The last (inner-most) dimension by default.
Examples::
>>> # Computes the trapezoidal rule in 1D, spacing is implicitly 1
>>> y = torch.tensor([1, 5, 10])
>>> torch.trapezoid(y)
tensor(10.5)
>>> # Computes the same trapezoidal rule directly to verify
>>> (1 + 10 + 10) / 2
10.5
>>> # Computes the trapezoidal rule in 1D with constant spacing of 2
>>> # NOTE: the result is the same as before, but multiplied by 2
>>> torch.trapezoid(y, dx=2)
tensor(21.0)
>>> # Computes the trapezoidal rule in 1D with arbitrary spacing
>>> x = torch.tensor([1, 3, 6])
>>> torch.trapezoid(y, x)
tensor(28.5)
>>> # Computes the same trapezoidal rule directly to verify
>>> ((3 - 1) * (1 + 5) + (6 - 3) * (5 + 10)) / 2
28.5
>>> # Computes the trapezoidal rule for each row of a 3x3 matrix
>>> y = torch.arange(9).reshape(3, 3)
>>> y
tensor([[0, 1, 2],
[3, 4, 5],
[6, 7, 8]])
>>> torch.trapezoid(y)
tensor([ 2., 8., 14.])
>>> # Computes the trapezoidal rule for each column of the matrix
>>> torch.trapezoid(y, dim=0)
tensor([ 6., 8., 10.])
>>> # Computes the trapezoidal rule for each row of a 3x3 ones matrix
>>> # with the same arbitrary spacing
>>> y = torch.ones(3, 3)
>>> x = torch.tensor([1, 3, 6])
>>> torch.trapezoid(y, x)
tensor([5., 5., 5.])
>>> # Computes the trapezoidal rule for each row of a 3x3 ones matrix
>>> # with different arbitrary spacing per row
>>> y = torch.ones(3, 3)
>>> x = torch.tensor([[1, 2, 3], [1, 3, 5], [1, 4, 7]])
>>> torch.trapezoid(y, x)
tensor([2., 4., 6.])
"""
...
@overload
def trapz(y: Tensor, *, dx: _float = 1, dim: _int = -1) -> Tensor:
r"""
trapz(y, x, *, dim=-1) -> Tensor
Alias for :func:`torch.trapezoid`.
"""
...
@overload
def trapz(y: Tensor, x: Tensor, *, dim: _int = -1) -> Tensor:
r"""
trapz(y, x, *, dim=-1) -> Tensor
Alias for :func:`torch.trapezoid`.
"""
...
def triangular_solve(input: Tensor, A: Tensor, upper: _bool = True, transpose: _bool = False, unitriangular: _bool = False, *, out: Union[Tensor, Tuple[Tensor, ...], List[Tensor], None] = None) -> torch.return_types.triangular_solve:
r"""
triangular_solve(b, A, upper=True, transpose=False, unitriangular=False, *, out=None) -> (Tensor, Tensor)
Solves a system of equations with a square upper or lower triangular invertible matrix :math:`A`
and multiple right-hand sides :math:`b`.
In symbols, it solves :math:`AX = b` and assumes :math:`A` is square upper-triangular
(or lower-triangular if :attr:`upper`\ `= False`) and does not have zeros on the diagonal.
`torch.triangular_solve(b, A)` can take in 2D inputs `b, A` or inputs that are
batches of 2D matrices. If the inputs are batches, then batched
outputs `X` are returned.
If the diagonal of :attr:`A` contains zeros or elements that are very close to zero and
:attr:`unitriangular`\ `= False` (default) or if the input matrix is badly conditioned,
the result may contain `NaN` values.
Supports input of float, double, cfloat and cdouble data types.
.. warning::
:func:`torch.triangular_solve` is deprecated in favor of :func:`torch.linalg.solve_triangular`
and will be removed in a future PyTorch release.
:func:`torch.linalg.solve_triangular` has its arguments reversed and does not return a
copy of one of the inputs.
``X = torch.triangular_solve(B, A).solution`` should be replaced with
.. code:: python
X = torch.linalg.solve_triangular(A, B)
Args:
b (Tensor): multiple right-hand sides of size :math:`(*, m, k)` where
:math:`*` is zero or more batch dimensions
A (Tensor): the input triangular coefficient matrix of size :math:`(*, m, m)`
where :math:`*` is zero or more batch dimensions
upper (bool, optional): whether :math:`A` is upper or lower triangular. Default: ``True``.
transpose (bool, optional): solves `op(A)X = b` where `op(A) = A^T` if this flag is ``True``,
and `op(A) = A` if it is ``False``. Default: ``False``.
unitriangular (bool, optional): whether :math:`A` is unit triangular.
If True, the diagonal elements of :math:`A` are assumed to be
1 and not referenced from :math:`A`. Default: ``False``.
Keyword args:
out ((Tensor, Tensor), optional): tuple of two tensors to write
the output to. Ignored if `None`. Default: `None`.
Returns:
A namedtuple `(solution, cloned_coefficient)` where `cloned_coefficient`
is a clone of :math:`A` and `solution` is the solution :math:`X` to :math:`AX = b`
(or whatever variant of the system of equations, depending on the keyword arguments.)
Examples::
>>> A = torch.randn(2, 2).triu()
>>> A
tensor([[ 1.1527, -1.0753],
[ 0.0000, 0.7986]])
>>> b = torch.randn(2, 3)
>>> b
tensor([[-0.0210, 2.3513, -1.5492],
[ 1.5429, 0.7403, -1.0243]])
>>> torch.triangular_solve(b, A)
torch.return_types.triangular_solve(
solution=tensor([[ 1.7841, 2.9046, -2.5405],
[ 1.9320, 0.9270, -1.2826]]),
cloned_coefficient=tensor([[ 1.1527, -1.0753],
[ 0.0000, 0.7986]]))
"""
...
def tril(input: Tensor, diagonal: _int = 0, *, out: Optional[Tensor] = None) -> Tensor:
r"""
tril(input, diagonal=0, *, out=None) -> Tensor
Returns the lower triangular part of the matrix (2-D tensor) or batch of matrices
:attr:`input`, the other elements of the result tensor :attr:`out` are set to 0.
The lower triangular part of the matrix is defined as the elements on and
below the diagonal.
The argument :attr:`diagonal` controls which diagonal to consider. If
:attr:`diagonal` = 0, all elements on and below the main diagonal are
retained. A positive value includes just as many diagonals above the main
diagonal, and similarly a negative value excludes just as many diagonals below
the main diagonal. The main diagonal is the set of indices
:math:`\lbrace (i, i) \rbrace` for :math:`i \in [0, \min\{d_{1}, d_{2}\} - 1]` where
:math:`d_{1}, d_{2}` are the dimensions of the matrix.
Args:
input (Tensor): the input tensor.
diagonal (int, optional): the diagonal to consider
Keyword args:
out (Tensor, optional): the output tensor.
Example::
>>> a = torch.randn(3, 3)
>>> a
tensor([[-1.0813, -0.8619, 0.7105],
[ 0.0935, 0.1380, 2.2112],
[-0.3409, -0.9828, 0.0289]])
>>> torch.tril(a)
tensor([[-1.0813, 0.0000, 0.0000],
[ 0.0935, 0.1380, 0.0000],
[-0.3409, -0.9828, 0.0289]])
>>> b = torch.randn(4, 6)
>>> b
tensor([[ 1.2219, 0.5653, -0.2521, -0.2345, 1.2544, 0.3461],
[ 0.4785, -0.4477, 0.6049, 0.6368, 0.8775, 0.7145],
[ 1.1502, 3.2716, -1.1243, -0.5413, 0.3615, 0.6864],
[-0.0614, -0.7344, -1.3164, -0.7648, -1.4024, 0.0978]])
>>> torch.tril(b, diagonal=1)
tensor([[ 1.2219, 0.5653, 0.0000, 0.0000, 0.0000, 0.0000],
[ 0.4785, -0.4477, 0.6049, 0.0000, 0.0000, 0.0000],
[ 1.1502, 3.2716, -1.1243, -0.5413, 0.0000, 0.0000],
[-0.0614, -0.7344, -1.3164, -0.7648, -1.4024, 0.0000]])
>>> torch.tril(b, diagonal=-1)
tensor([[ 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000],
[ 0.4785, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000],
[ 1.1502, 3.2716, 0.0000, 0.0000, 0.0000, 0.0000],
[-0.0614, -0.7344, -1.3164, 0.0000, 0.0000, 0.0000]])
"""
...
def tril_indices(row: _int, col: _int, offset: _int = 0, *, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor:
r"""
tril_indices(row, col, offset=0, *, dtype=torch.long, device='cpu', layout=torch.strided) -> Tensor
Returns the indices of the lower triangular part of a :attr:`row`-by-
:attr:`col` matrix in a 2-by-N Tensor, where the first row contains row
coordinates of all indices and the second row contains column coordinates.
Indices are ordered based on rows and then columns.
The lower triangular part of the matrix is defined as the elements on and
below the diagonal.
The argument :attr:`offset` controls which diagonal to consider. If
:attr:`offset` = 0, all elements on and below the main diagonal are
retained. A positive value includes just as many diagonals above the main
diagonal, and similarly a negative value excludes just as many diagonals below
the main diagonal. The main diagonal is the set of indices
:math:`\lbrace (i, i) \rbrace` for :math:`i \in [0, \min\{d_{1}, d_{2}\} - 1]`
where :math:`d_{1}, d_{2}` are the dimensions of the matrix.
.. note::
When running on CUDA, ``row * col`` must be less than :math:`2^{59}` to
prevent overflow during calculation.
Args:
row (``int``): number of rows in the 2-D matrix.
col (``int``): number of columns in the 2-D matrix.
offset (``int``): diagonal offset from the main diagonal.
Default: if not provided, 0.
Keyword args:
dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.
Default: if ``None``, ``torch.long``.
device (:class:`torch.device`, optional): the desired device of returned tensor.
Default: if ``None``, uses the current device for the default tensor type
(see :func:`torch.set_default_device`). :attr:`device` will be the CPU
for CPU tensor types and the current CUDA device for CUDA tensor types.
layout (:class:`torch.layout`, optional): currently only ``torch.strided`` is supported.
Example::
>>> a = torch.tril_indices(3, 3)
>>> a
tensor([[0, 1, 1, 2, 2, 2],
[0, 0, 1, 0, 1, 2]])
>>> a = torch.tril_indices(4, 3, -1)
>>> a
tensor([[1, 2, 2, 3, 3, 3],
[0, 0, 1, 0, 1, 2]])
>>> a = torch.tril_indices(4, 3, 1)
>>> a
tensor([[0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3],
[0, 1, 0, 1, 2, 0, 1, 2, 0, 1, 2]])
"""
...
def triplet_margin_loss(anchor: Tensor, positive: Tensor, negative: Tensor, margin: _float = 1.0, p: _float = 2, eps: _float = 1e-06, swap: _bool = False, reduction: _int = 1) -> Tensor: ...
def triu(input: Tensor, diagonal: _int = 0, *, out: Optional[Tensor] = None) -> Tensor:
r"""
triu(input, diagonal=0, *, out=None) -> Tensor
Returns the upper triangular part of a matrix (2-D tensor) or batch of matrices
:attr:`input`, the other elements of the result tensor :attr:`out` are set to 0.
The upper triangular part of the matrix is defined as the elements on and
above the diagonal.
The argument :attr:`diagonal` controls which diagonal to consider. If
:attr:`diagonal` = 0, all elements on and above the main diagonal are
retained. A positive value excludes just as many diagonals above the main
diagonal, and similarly a negative value includes just as many diagonals below
the main diagonal. The main diagonal is the set of indices
:math:`\lbrace (i, i) \rbrace` for :math:`i \in [0, \min\{d_{1}, d_{2}\} - 1]` where
:math:`d_{1}, d_{2}` are the dimensions of the matrix.
Args:
input (Tensor): the input tensor.
diagonal (int, optional): the diagonal to consider
Keyword args:
out (Tensor, optional): the output tensor.
Example::
>>> a = torch.randn(3, 3)
>>> a
tensor([[ 0.2309, 0.5207, 2.0049],
[ 0.2072, -1.0680, 0.6602],
[ 0.3480, -0.5211, -0.4573]])
>>> torch.triu(a)
tensor([[ 0.2309, 0.5207, 2.0049],
[ 0.0000, -1.0680, 0.6602],
[ 0.0000, 0.0000, -0.4573]])
>>> torch.triu(a, diagonal=1)
tensor([[ 0.0000, 0.5207, 2.0049],
[ 0.0000, 0.0000, 0.6602],
[ 0.0000, 0.0000, 0.0000]])
>>> torch.triu(a, diagonal=-1)
tensor([[ 0.2309, 0.5207, 2.0049],
[ 0.2072, -1.0680, 0.6602],
[ 0.0000, -0.5211, -0.4573]])
>>> b = torch.randn(4, 6)
>>> b
tensor([[ 0.5876, -0.0794, -1.8373, 0.6654, 0.2604, 1.5235],
[-0.2447, 0.9556, -1.2919, 1.3378, -0.1768, -1.0857],
[ 0.4333, 0.3146, 0.6576, -1.0432, 0.9348, -0.4410],
[-0.9888, 1.0679, -1.3337, -1.6556, 0.4798, 0.2830]])
>>> torch.triu(b, diagonal=1)
tensor([[ 0.0000, -0.0794, -1.8373, 0.6654, 0.2604, 1.5235],
[ 0.0000, 0.0000, -1.2919, 1.3378, -0.1768, -1.0857],
[ 0.0000, 0.0000, 0.0000, -1.0432, 0.9348, -0.4410],
[ 0.0000, 0.0000, 0.0000, 0.0000, 0.4798, 0.2830]])
>>> torch.triu(b, diagonal=-1)
tensor([[ 0.5876, -0.0794, -1.8373, 0.6654, 0.2604, 1.5235],
[-0.2447, 0.9556, -1.2919, 1.3378, -0.1768, -1.0857],
[ 0.0000, 0.3146, 0.6576, -1.0432, 0.9348, -0.4410],
[ 0.0000, 0.0000, -1.3337, -1.6556, 0.4798, 0.2830]])
"""
...
def triu_indices(row: _int, col: _int, offset: _int = 0, *, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor:
r"""
triu_indices(row, col, offset=0, *, dtype=torch.long, device='cpu', layout=torch.strided) -> Tensor
Returns the indices of the upper triangular part of a :attr:`row` by
:attr:`col` matrix in a 2-by-N Tensor, where the first row contains row
coordinates of all indices and the second row contains column coordinates.
Indices are ordered based on rows and then columns.
The upper triangular part of the matrix is defined as the elements on and
above the diagonal.
The argument :attr:`offset` controls which diagonal to consider. If
:attr:`offset` = 0, all elements on and above the main diagonal are
retained. A positive value excludes just as many diagonals above the main
diagonal, and similarly a negative value includes just as many diagonals below
the main diagonal. The main diagonal is the set of indices
:math:`\lbrace (i, i) \rbrace` for :math:`i \in [0, \min\{d_{1}, d_{2}\} - 1]`
where :math:`d_{1}, d_{2}` are the dimensions of the matrix.
.. note::
When running on CUDA, ``row * col`` must be less than :math:`2^{59}` to
prevent overflow during calculation.
Args:
row (``int``): number of rows in the 2-D matrix.
col (``int``): number of columns in the 2-D matrix.
offset (``int``): diagonal offset from the main diagonal.
Default: if not provided, 0.
Keyword args:
dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.
Default: if ``None``, ``torch.long``.
device (:class:`torch.device`, optional): the desired device of returned tensor.
Default: if ``None``, uses the current device for the default tensor type
(see :func:`torch.set_default_device`). :attr:`device` will be the CPU
for CPU tensor types and the current CUDA device for CUDA tensor types.
layout (:class:`torch.layout`, optional): currently only ``torch.strided`` is supported.
Example::
>>> a = torch.triu_indices(3, 3)
>>> a
tensor([[0, 0, 0, 1, 1, 2],
[0, 1, 2, 1, 2, 2]])
>>> a = torch.triu_indices(4, 3, -1)
>>> a
tensor([[0, 0, 0, 1, 1, 1, 2, 2, 3],
[0, 1, 2, 0, 1, 2, 1, 2, 2]])
>>> a = torch.triu_indices(4, 3, 1)
>>> a
tensor([[0, 0, 1],
[1, 2, 2]])
"""
...
def true_divide(input: Union[Tensor, Number], other: Union[Tensor, Number], *, out: Optional[Tensor] = None) -> Tensor:
r"""
true_divide(dividend, divisor, *, out) -> Tensor
Alias for :func:`torch.div` with ``rounding_mode=None``.
"""
...
def trunc(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
r"""
trunc(input, *, out=None) -> Tensor
Returns a new tensor with the truncated integer values of
the elements of :attr:`input`.
For integer inputs, follows the array-api convention of returning a
copy of the input tensor.
Args:
input (Tensor): the input tensor.
Keyword args:
out (Tensor, optional): the output tensor.
Example::
>>> a = torch.randn(4)
>>> a
tensor([ 3.4742, 0.5466, -0.8008, -0.9079])
>>> torch.trunc(a)
tensor([ 3., 0., -0., -0.])
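>>> # integer inputs are returned as an identical copy (array-api convention)
>>> torch.trunc(torch.tensor([1, -2, 3]))
tensor([ 1, -2,  3])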
"""
...
def trunc_(input: Tensor) -> Tensor: ...
@overload
def unbind(input: Tensor, dim: _int = 0) -> Tuple[Tensor, ...]:
r"""
unbind(input, dim=0) -> seq
Removes a tensor dimension.
Returns a tuple of all slices along the given dimension, without that dimension.
Arguments:
input (Tensor): the tensor to unbind
dim (int): dimension to remove
Example::
>>> torch.unbind(torch.tensor([[1, 2, 3],
... [4, 5, 6],
... [7, 8, 9]]))
(tensor([1, 2, 3]), tensor([4, 5, 6]), tensor([7, 8, 9]))
"""
...
@overload
def unbind(input: Tensor, dim: Union[str, ellipsis, None]) -> Tuple[Tensor, ...]:
r"""
unbind(input, dim=0) -> seq
Removes a tensor dimension.
Returns a tuple of all slices along the given dimension, without that dimension.
Arguments:
input (Tensor): the tensor to unbind
dim (int): dimension to remove
Example::
>>> torch.unbind(torch.tensor([[1, 2, 3],
... [4, 5, 6],
... [7, 8, 9]]))
(tensor([1, 2, 3]), tensor([4, 5, 6]), tensor([7, 8, 9]))
"""
...
def unbind_copy(input: Tensor, dim: _int = 0, *, out: Union[Tuple[Tensor, ...], List[Tensor], None] = None) -> None:
r"""
Performs the same operation as :func:`torch.unbind`, but all output tensors
are freshly created instead of aliasing the input.
"""
...
@overload
def unflatten(input: Tensor, dim: Union[str, ellipsis, None], sizes: Sequence[Union[_int, SymInt]], names: Sequence[Union[str, ellipsis, None]]) -> Tensor:
r"""
unflatten(input, dim, sizes) -> Tensor
Expands a dimension of the input tensor over multiple dimensions.
.. seealso::
:func:`torch.flatten` is the inverse of this function. It coalesces several dimensions into one.
Args:
input (Tensor): the input tensor.
dim (int): Dimension to be unflattened, specified as an index into
``input.shape``.
sizes (Tuple[int]): New shape of the unflattened dimension.
One of its elements can be `-1` in which case the corresponding output
dimension is inferred. Otherwise, the product of ``sizes`` *must*
equal ``input.shape[dim]``.
Returns:
A view of :attr:`input` with the specified dimension unflattened.
Examples::
>>> torch.unflatten(torch.randn(3, 4, 1), 1, (2, 2)).shape
torch.Size([3, 2, 2, 1])
>>> torch.unflatten(torch.randn(3, 4, 1), 1, (-1, 2)).shape
torch.Size([3, 2, 2, 1])
>>> torch.unflatten(torch.randn(5, 12, 3), -2, (2, 2, 3, 1, 1)).shape
torch.Size([5, 2, 2, 3, 1, 1, 3])
"""
...
@overload
def unflatten(input: Tensor, dim: _int, sizes: Sequence[Union[_int, SymInt]]) -> Tensor:
r"""
unflatten(input, dim, sizes) -> Tensor
Expands a dimension of the input tensor over multiple dimensions.
.. seealso::
:func:`torch.flatten` is the inverse of this function. It coalesces several dimensions into one.
Args:
input (Tensor): the input tensor.
dim (int): Dimension to be unflattened, specified as an index into
``input.shape``.
sizes (Tuple[int]): New shape of the unflattened dimension.
One of its elements can be `-1` in which case the corresponding output
dimension is inferred. Otherwise, the product of ``sizes`` *must*
equal ``input.shape[dim]``.
Returns:
A view of :attr:`input` with the specified dimension unflattened.
Examples::
>>> torch.unflatten(torch.randn(3, 4, 1), 1, (2, 2)).shape
torch.Size([3, 2, 2, 1])
>>> torch.unflatten(torch.randn(3, 4, 1), 1, (-1, 2)).shape
torch.Size([3, 2, 2, 1])
>>> torch.unflatten(torch.randn(5, 12, 3), -2, (2, 2, 3, 1, 1)).shape
torch.Size([5, 2, 2, 3, 1, 1, 3])
"""
...
def unfold_copy(input: Tensor, dimension: _int, size: _int, step: _int, *, out: Optional[Tensor] = None) -> Tensor:
r"""
Performs the same operation as :func:`torch.unfold`, but all output tensors
are freshly created instead of aliasing the input.
"""
...
def unique_dim(input: Tensor, dim: _int, sorted: _bool = True, return_inverse: _bool = False, return_counts: _bool = False) -> Tuple[Tensor, Tensor, Tensor]: ...
def unsafe_chunk(input: Tensor, chunks: _int, dim: _int = 0) -> Tuple[Tensor, ...]:
r"""
unsafe_chunk(input, chunks, dim=0) -> List of Tensors
Works like :func:`torch.chunk` but without enforcing the autograd restrictions
on inplace modification of the outputs.
.. warning::
This function is safe to use as long as only the input, or only the outputs
are modified inplace after calling this function. It is the user's
responsibility to ensure that this is the case. If both the input and one or more
of the outputs are modified inplace, gradients computed by autograd will be
silently incorrect.
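A minimal sketch of the supported usage pattern (example values are illustrative):
Example::
>>> x = torch.arange(4.)
>>> a, b = torch.unsafe_chunk(x, 2)
>>> a.add_(10)  # OK: only the outputs are modified in place, never x as well
tensor([10., 11.])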
"""
...
def unsafe_split(input: Tensor, split_size: Union[_int, SymInt], dim: _int = 0) -> Tuple[Tensor, ...]:
r"""
unsafe_split(tensor, split_size_or_sections, dim=0) -> List of Tensors
Works like :func:`torch.split` but without enforcing the autograd restrictions
on inplace modification of the outputs.
.. warning::
This function is safe to use as long as only the input, or only the outputs
are modified inplace after calling this function. It is the user's
responsibility to ensure that this is the case. If both the input and one or more
of the outputs are modified inplace, gradients computed by autograd will be
silently incorrect.
"""
...
def unsafe_split_with_sizes(input: Tensor, split_sizes: Sequence[Union[_int, SymInt]], dim: _int = 0) -> Tuple[Tensor, ...]: ...
def unsqueeze(input: Tensor, dim: _int) -> Tensor:
r"""
unsqueeze(input, dim) -> Tensor
Returns a new tensor with a dimension of size one inserted at the
specified position.
The returned tensor shares the same underlying data with this tensor.
A :attr:`dim` value within the range ``[-input.dim() - 1, input.dim() + 1)``
can be used. Negative :attr:`dim` will correspond to :meth:`unsqueeze`
applied at :attr:`dim` = ``dim + input.dim() + 1``.
Args:
input (Tensor): the input tensor.
dim (int): the index at which to insert the singleton dimension
Example::
>>> x = torch.tensor([1, 2, 3, 4])
>>> torch.unsqueeze(x, 0)
tensor([[ 1, 2, 3, 4]])
>>> torch.unsqueeze(x, 1)
tensor([[ 1],
[ 2],
[ 3],
[ 4]])
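>>> # negative dim counts from the end: here -1 is equivalent to dim=1
>>> torch.unsqueeze(x, -1)
tensor([[ 1],
[ 2],
[ 3],
[ 4]])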
"""
...
def unsqueeze_copy(input: Tensor, dim: _int, *, out: Optional[Tensor] = None) -> Tensor:
r"""
Performs the same operation as :func:`torch.unsqueeze`, but all output tensors
are freshly created instead of aliasing the input.
"""
...
def values_copy(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
r"""
Performs the same operation as :func:`torch.values`, but all output tensors
are freshly created instead of aliasing the input.
"""
...
def vander(x: Tensor, N: Optional[_int] = None, increasing: _bool = False) -> Tensor:
r"""
vander(x, N=None, increasing=False) -> Tensor
Generates a Vandermonde matrix.
The columns of the output matrix are elementwise powers of the input vector :math:`x^{(N-1)}, x^{(N-2)}, ..., x^0`.
If increasing is True, the order of the columns is reversed: :math:`x^0, x^1, ..., x^{(N-1)}`. Such a
matrix with a geometric progression in each row is named for Alexandre-Théophile Vandermonde.
Arguments:
x (Tensor): 1-D input tensor.
N (int, optional): Number of columns in the output. If N is not specified,
a square array is returned :math:`(N = len(x))`.
increasing (bool, optional): Order of the powers of the columns. If True,
the powers increase from left to right, if False (the default) they are reversed.
Returns:
Tensor: Vandermonde matrix. If increasing is False, the first column is :math:`x^{(N-1)}`,
the second :math:`x^{(N-2)}` and so forth. If increasing is True, the columns
are :math:`x^0, x^1, ..., x^{(N-1)}`.
Example::
>>> x = torch.tensor([1, 2, 3, 5])
>>> torch.vander(x)
tensor([[ 1, 1, 1, 1],
[ 8, 4, 2, 1],
[ 27, 9, 3, 1],
[125, 25, 5, 1]])
>>> torch.vander(x, N=3)
tensor([[ 1, 1, 1],
[ 4, 2, 1],
[ 9, 3, 1],
[25, 5, 1]])
>>> torch.vander(x, N=3, increasing=True)
tensor([[ 1, 1, 1],
[ 1, 2, 4],
[ 1, 3, 9],
[ 1, 5, 25]])
"""
...
@overload
def var(input: Tensor, dim: Optional[Union[_int, _size]], unbiased: _bool = True, keepdim: _bool = False, *, out: Optional[Tensor] = None) -> Tensor:
r"""
var(input, dim=None, *, correction=1, keepdim=False, out=None) -> Tensor
Calculates the variance over the dimensions specified by :attr:`dim`. :attr:`dim`
can be a single dimension, list of dimensions, or ``None`` to reduce over all
dimensions.
The variance (:math:`\sigma^2`) is calculated as
.. math:: \sigma^2 = \frac{1}{\max(0,~N - \delta N)}\sum_{i=0}^{N-1}(x_i-\bar{x})^2
where :math:`x` is the sample set of elements, :math:`\bar{x}` is the
sample mean, :math:`N` is the number of samples and :math:`\delta N` is
the :attr:`correction`.
If :attr:`keepdim` is ``True``, the output tensor is of the same size
as :attr:`input` except in the dimension(s) :attr:`dim` where it is of size 1.
Otherwise, :attr:`dim` is squeezed (see :func:`torch.squeeze`), resulting in the
output tensor having 1 (or ``len(dim)``) fewer dimension(s).
Args:
input (Tensor): the input tensor.
dim (int or tuple of ints, optional): the dimension or dimensions to reduce.
If ``None``, all dimensions are reduced.
Keyword args:
correction (int): difference between the sample size and sample degrees of freedom.
Defaults to `Bessel's correction`_, ``correction=1``.
.. versionchanged:: 2.0
Previously this argument was called ``unbiased`` and was a boolean
with ``True`` corresponding to ``correction=1`` and ``False`` being
``correction=0``.
keepdim (bool): whether the output tensor has :attr:`dim` retained or not.
out (Tensor, optional): the output tensor.
Example:
>>> a = torch.tensor(
... [[ 0.2035, 1.2959, 1.8101, -0.4644],
... [ 1.5027, -0.3270, 0.5905, 0.6538],
... [-1.5745, 1.3330, -0.5596, -0.6548],
... [ 0.1264, -0.5080, 1.6420, 0.1992]])
>>> torch.var(a, dim=1, keepdim=True)
tensor([[1.0631],
[0.5590],
[1.4893],
[0.8258]])
.. _Bessel's correction: https://en.wikipedia.org/wiki/Bessel%27s_correction
"""
...
@overload
def var(input: Tensor, dim: Optional[Union[_int, _size]] = None, *, correction: Optional[Union[Number, _complex]] = None, keepdim: _bool = False, out: Optional[Tensor] = None) -> Tensor:
r"""
var(input, dim=None, *, correction=1, keepdim=False, out=None) -> Tensor
Calculates the variance over the dimensions specified by :attr:`dim`. :attr:`dim`
can be a single dimension, list of dimensions, or ``None`` to reduce over all
dimensions.
The variance (:math:`\sigma^2`) is calculated as
.. math:: \sigma^2 = \frac{1}{\max(0,~N - \delta N)}\sum_{i=0}^{N-1}(x_i-\bar{x})^2
where :math:`x` is the sample set of elements, :math:`\bar{x}` is the
sample mean, :math:`N` is the number of samples and :math:`\delta N` is
the :attr:`correction`.
If :attr:`keepdim` is ``True``, the output tensor is of the same size
as :attr:`input` except in the dimension(s) :attr:`dim` where it is of size 1.
Otherwise, :attr:`dim` is squeezed (see :func:`torch.squeeze`), resulting in the
output tensor having 1 (or ``len(dim)``) fewer dimension(s).
Args:
input (Tensor): the input tensor.
dim (int or tuple of ints, optional): the dimension or dimensions to reduce.
If ``None``, all dimensions are reduced.
Keyword args:
correction (int): difference between the sample size and sample degrees of freedom.
Defaults to `Bessel's correction`_, ``correction=1``.
.. versionchanged:: 2.0
Previously this argument was called ``unbiased`` and was a boolean
with ``True`` corresponding to ``correction=1`` and ``False`` being
``correction=0``.
keepdim (bool): whether the output tensor has :attr:`dim` retained or not.
out (Tensor, optional): the output tensor.
Example:
>>> a = torch.tensor(
... [[ 0.2035, 1.2959, 1.8101, -0.4644],
... [ 1.5027, -0.3270, 0.5905, 0.6538],
... [-1.5745, 1.3330, -0.5596, -0.6548],
... [ 0.1264, -0.5080, 1.6420, 0.1992]])
>>> torch.var(a, dim=1, keepdim=True)
tensor([[1.0631],
[0.5590],
[1.4893],
[0.8258]])
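The effect of :attr:`correction` can be checked by hand on a small sample:
for ``[1., 2., 3., 4.]`` the squared deviations from the mean sum to 5, so
``correction=1`` divides by 3 while ``correction=0`` divides by 4::
>>> a = torch.tensor([1., 2., 3., 4.])
>>> torch.var(a, correction=1)
tensor(1.6667)
>>> torch.var(a, correction=0)
tensor(1.2500)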
.. _Bessel's correction: https://en.wikipedia.org/wiki/Bessel%27s_correction
"""
...
@overload
def var(input: Tensor, unbiased: _bool = True) -> Tensor:
r"""
var(input, dim=None, *, correction=1, keepdim=False, out=None) -> Tensor
Calculates the variance over the dimensions specified by :attr:`dim`. :attr:`dim`
can be a single dimension, list of dimensions, or ``None`` to reduce over all
dimensions.
The variance (:math:`\sigma^2`) is calculated as
.. math:: \sigma^2 = \frac{1}{\max(0,~N - \delta N)}\sum_{i=0}^{N-1}(x_i-\bar{x})^2
where :math:`x` is the sample set of elements, :math:`\bar{x}` is the
sample mean, :math:`N` is the number of samples and :math:`\delta N` is
the :attr:`correction`.
If :attr:`keepdim` is ``True``, the output tensor is of the same size
as :attr:`input` except in the dimension(s) :attr:`dim` where it is of size 1.
Otherwise, :attr:`dim` is squeezed (see :func:`torch.squeeze`), resulting in the
output tensor having 1 (or ``len(dim)``) fewer dimension(s).
Args:
input (Tensor): the input tensor.
dim (int or tuple of ints, optional): the dimension or dimensions to reduce.
If ``None``, all dimensions are reduced.
Keyword args:
correction (int): difference between the sample size and sample degrees of freedom.
Defaults to `Bessel's correction`_, ``correction=1``.
.. versionchanged:: 2.0
Previously this argument was called ``unbiased`` and was a boolean
with ``True`` corresponding to ``correction=1`` and ``False`` being
``correction=0``.
keepdim (bool): whether the output tensor has :attr:`dim` retained or not.
out (Tensor, optional): the output tensor.
Example:
>>> a = torch.tensor(
... [[ 0.2035, 1.2959, 1.8101, -0.4644],
... [ 1.5027, -0.3270, 0.5905, 0.6538],
... [-1.5745, 1.3330, -0.5596, -0.6548],
... [ 0.1264, -0.5080, 1.6420, 0.1992]])
>>> torch.var(a, dim=1, keepdim=True)
tensor([[1.0631],
[0.5590],
[1.4893],
[0.8258]])
.. _Bessel's correction: https://en.wikipedia.org/wiki/Bessel%27s_correction
"""
...
@overload
def var(input: Tensor, dim: Sequence[Union[str, ellipsis, None]], *, correction: Optional[Union[Number, _complex]] = None, keepdim: _bool = False, out: Optional[Tensor] = None) -> Tensor:
r"""
var(input, dim=None, *, correction=1, keepdim=False, out=None) -> Tensor
Calculates the variance over the dimensions specified by :attr:`dim`. :attr:`dim`
can be a single dimension, list of dimensions, or ``None`` to reduce over all
dimensions.
The variance (:math:`\sigma^2`) is calculated as
.. math:: \sigma^2 = \frac{1}{\max(0,~N - \delta N)}\sum_{i=0}^{N-1}(x_i-\bar{x})^2
where :math:`x` is the sample set of elements, :math:`\bar{x}` is the
sample mean, :math:`N` is the number of samples and :math:`\delta N` is
the :attr:`correction`.
If :attr:`keepdim` is ``True``, the output tensor is of the same size
as :attr:`input` except in the dimension(s) :attr:`dim` where it is of size 1.
Otherwise, :attr:`dim` is squeezed (see :func:`torch.squeeze`), resulting in the
output tensor having 1 (or ``len(dim)``) fewer dimension(s).
Args:
input (Tensor): the input tensor.
dim (int or tuple of ints, optional): the dimension or dimensions to reduce.
If ``None``, all dimensions are reduced.
Keyword args:
correction (int): difference between the sample size and sample degrees of freedom.
Defaults to `Bessel's correction`_, ``correction=1``.
.. versionchanged:: 2.0
Previously this argument was called ``unbiased`` and was a boolean
with ``True`` corresponding to ``correction=1`` and ``False`` being
``correction=0``.
keepdim (bool): whether the output tensor has :attr:`dim` retained or not.
out (Tensor, optional): the output tensor.
Example:
>>> a = torch.tensor(
... [[ 0.2035, 1.2959, 1.8101, -0.4644],
... [ 1.5027, -0.3270, 0.5905, 0.6538],
... [-1.5745, 1.3330, -0.5596, -0.6548],
... [ 0.1264, -0.5080, 1.6420, 0.1992]])
>>> torch.var(a, dim=1, keepdim=True)
tensor([[1.0631],
[0.5590],
[1.4893],
[0.8258]])
.. _Bessel's correction: https://en.wikipedia.org/wiki/Bessel%27s_correction
"""
...
@overload
def var(input: Tensor, dim: Sequence[Union[str, ellipsis, None]], unbiased: _bool = True, keepdim: _bool = False, *, out: Optional[Tensor] = None) -> Tensor:
r"""
var(input, dim=None, *, correction=1, keepdim=False, out=None) -> Tensor
Calculates the variance over the dimensions specified by :attr:`dim`. :attr:`dim`
can be a single dimension, list of dimensions, or ``None`` to reduce over all
dimensions.
The variance (:math:`\sigma^2`) is calculated as
.. math:: \sigma^2 = \frac{1}{\max(0,~N - \delta N)}\sum_{i=0}^{N-1}(x_i-\bar{x})^2
where :math:`x` is the sample set of elements, :math:`\bar{x}` is the
sample mean, :math:`N` is the number of samples and :math:`\delta N` is
the :attr:`correction`.
If :attr:`keepdim` is ``True``, the output tensor is of the same size
as :attr:`input` except in the dimension(s) :attr:`dim` where it is of size 1.
Otherwise, :attr:`dim` is squeezed (see :func:`torch.squeeze`), resulting in the
output tensor having 1 (or ``len(dim)``) fewer dimension(s).
Args:
input (Tensor): the input tensor.
dim (int or tuple of ints, optional): the dimension or dimensions to reduce.
If ``None``, all dimensions are reduced.
Keyword args:
correction (int): difference between the sample size and sample degrees of freedom.
Defaults to `Bessel's correction`_, ``correction=1``.
.. versionchanged:: 2.0
Previously this argument was called ``unbiased`` and was a boolean
with ``True`` corresponding to ``correction=1`` and ``False`` being
``correction=0``.
keepdim (bool): whether the output tensor has :attr:`dim` retained or not.
out (Tensor, optional): the output tensor.
Example:
>>> a = torch.tensor(
... [[ 0.2035, 1.2959, 1.8101, -0.4644],
... [ 1.5027, -0.3270, 0.5905, 0.6538],
... [-1.5745, 1.3330, -0.5596, -0.6548],
... [ 0.1264, -0.5080, 1.6420, 0.1992]])
>>> torch.var(a, dim=1, keepdim=True)
tensor([[1.0631],
[0.5590],
[1.4893],
[0.8258]])
.. _Bessel's correction: https://en.wikipedia.org/wiki/Bessel%27s_correction
"""
...
@overload
def var_mean(input: Tensor, dim: Optional[Union[_int, _size]], unbiased: _bool = True, keepdim: _bool = False) -> Tuple[Tensor, Tensor]:
r"""
var_mean(input, dim=None, *, correction=1, keepdim=False) -> (Tensor, Tensor)
Calculates the variance and mean over the dimensions specified by :attr:`dim`.
:attr:`dim` can be a single dimension, list of dimensions, or ``None`` to
reduce over all dimensions.
The variance (:math:`\sigma^2`) is calculated as
.. math:: \sigma^2 = \frac{1}{\max(0,~N - \delta N)}\sum_{i=0}^{N-1}(x_i-\bar{x})^2
where :math:`x` is the sample set of elements, :math:`\bar{x}` is the
sample mean, :math:`N` is the number of samples and :math:`\delta N` is
the :attr:`correction`.
If :attr:`keepdim` is ``True``, the output tensor is of the same size
as :attr:`input` except in the dimension(s) :attr:`dim` where it is of size 1.
Otherwise, :attr:`dim` is squeezed (see :func:`torch.squeeze`), resulting in the
output tensor having 1 (or ``len(dim)``) fewer dimension(s).
Args:
input (Tensor): the input tensor.
dim (int or tuple of ints, optional): the dimension or dimensions to reduce.
If ``None``, all dimensions are reduced.
Keyword args:
correction (int): difference between the sample size and sample degrees of freedom.
Defaults to `Bessel's correction`_, ``correction=1``.
.. versionchanged:: 2.0
Previously this argument was called ``unbiased`` and was a boolean
with ``True`` corresponding to ``correction=1`` and ``False`` being
``correction=0``.
keepdim (bool): whether the output tensor has :attr:`dim` retained or not.
Returns:
A tuple (var, mean) containing the variance and mean.
Example:
>>> a = torch.tensor(
... [[ 0.2035, 1.2959, 1.8101, -0.4644],
... [ 1.5027, -0.3270, 0.5905, 0.6538],
... [-1.5745, 1.3330, -0.5596, -0.6548],
... [ 0.1264, -0.5080, 1.6420, 0.1992]])
>>> torch.var_mean(a, dim=0, keepdim=True)
(tensor([[1.5926, 1.0056, 1.2005, 0.3646]]),
tensor([[ 0.0645, 0.4485, 0.8707, -0.0665]]))
.. _Bessel's correction: https://en.wikipedia.org/wiki/Bessel%27s_correction
"""
...
@overload
def var_mean(input: Tensor, dim: Optional[Union[_int, _size]] = None, *, correction: Optional[Union[Number, _complex]] = None, keepdim: _bool = False) -> Tuple[Tensor, Tensor]:
r"""
var_mean(input, dim=None, *, correction=1, keepdim=False) -> (Tensor, Tensor)
Calculates the variance and mean over the dimensions specified by :attr:`dim`.
:attr:`dim` can be a single dimension, list of dimensions, or ``None`` to
reduce over all dimensions.
The variance (:math:`\sigma^2`) is calculated as
.. math:: \sigma^2 = \frac{1}{\max(0,~N - \delta N)}\sum_{i=0}^{N-1}(x_i-\bar{x})^2
where :math:`x` is the sample set of elements, :math:`\bar{x}` is the
sample mean, :math:`N` is the number of samples and :math:`\delta N` is
the :attr:`correction`.
If :attr:`keepdim` is ``True``, the output tensor is of the same size
as :attr:`input` except in the dimension(s) :attr:`dim` where it is of size 1.
Otherwise, :attr:`dim` is squeezed (see :func:`torch.squeeze`), resulting in the
output tensor having 1 (or ``len(dim)``) fewer dimension(s).
Args:
input (Tensor): the input tensor.
dim (int or tuple of ints, optional): the dimension or dimensions to reduce.
If ``None``, all dimensions are reduced.
Keyword args:
correction (int): difference between the sample size and sample degrees of freedom.
Defaults to `Bessel's correction`_, ``correction=1``.
.. versionchanged:: 2.0
Previously this argument was called ``unbiased`` and was a boolean
with ``True`` corresponding to ``correction=1`` and ``False`` being
``correction=0``.
keepdim (bool): whether the output tensor has :attr:`dim` retained or not.
Returns:
A tuple (var, mean) containing the variance and mean.
Example:
>>> a = torch.tensor(
... [[ 0.2035, 1.2959, 1.8101, -0.4644],
... [ 1.5027, -0.3270, 0.5905, 0.6538],
... [-1.5745, 1.3330, -0.5596, -0.6548],
... [ 0.1264, -0.5080, 1.6420, 0.1992]])
>>> torch.var_mean(a, dim=0, keepdim=True)
(tensor([[1.5926, 1.0056, 1.2005, 0.3646]]),
tensor([[ 0.0645, 0.4485, 0.8707, -0.0665]]))
.. _Bessel's correction: https://en.wikipedia.org/wiki/Bessel%27s_correction
"""
...
@overload
def var_mean(input: Tensor, unbiased: _bool = True) -> Tuple[Tensor, Tensor]:
r"""
var_mean(input, dim=None, *, correction=1, keepdim=False) -> (Tensor, Tensor)
Calculates the variance and mean over the dimensions specified by :attr:`dim`.
:attr:`dim` can be a single dimension, list of dimensions, or ``None`` to
reduce over all dimensions.
The variance (:math:`\sigma^2`) is calculated as
.. math:: \sigma^2 = \frac{1}{\max(0,~N - \delta N)}\sum_{i=0}^{N-1}(x_i-\bar{x})^2
where :math:`x` is the sample set of elements, :math:`\bar{x}` is the
sample mean, :math:`N` is the number of samples and :math:`\delta N` is
the :attr:`correction`.
If :attr:`keepdim` is ``True``, the output tensor is of the same size
as :attr:`input` except in the dimension(s) :attr:`dim` where it is of size 1.
Otherwise, :attr:`dim` is squeezed (see :func:`torch.squeeze`), resulting in the
output tensor having 1 (or ``len(dim)``) fewer dimension(s).
Args:
input (Tensor): the input tensor.
dim (int or tuple of ints, optional): the dimension or dimensions to reduce.
If ``None``, all dimensions are reduced.
Keyword args:
correction (int): difference between the sample size and sample degrees of freedom.
Defaults to `Bessel's correction`_, ``correction=1``.
.. versionchanged:: 2.0
Previously this argument was called ``unbiased`` and was a boolean
with ``True`` corresponding to ``correction=1`` and ``False`` being
``correction=0``.
keepdim (bool): whether the output tensor has :attr:`dim` retained or not.
Returns:
A tuple (var, mean) containing the variance and mean.
Example:
>>> a = torch.tensor(
... [[ 0.2035, 1.2959, 1.8101, -0.4644],
... [ 1.5027, -0.3270, 0.5905, 0.6538],
... [-1.5745, 1.3330, -0.5596, -0.6548],
... [ 0.1264, -0.5080, 1.6420, 0.1992]])
>>> torch.var_mean(a, dim=0, keepdim=True)
(tensor([[1.5926, 1.0056, 1.2005, 0.3646]]),
tensor([[ 0.0645, 0.4485, 0.8707, -0.0665]]))
.. _Bessel's correction: https://en.wikipedia.org/wiki/Bessel%27s_correction
"""
...
@overload
def var_mean(input: Tensor, dim: Sequence[Union[str, ellipsis, None]], *, correction: Optional[Union[Number, _complex]] = None, keepdim: _bool = False) -> Tuple[Tensor, Tensor]:
r"""
var_mean(input, dim=None, *, correction=1, keepdim=False) -> (Tensor, Tensor)
Calculates the variance and mean over the dimensions specified by :attr:`dim`.
:attr:`dim` can be a single dimension, list of dimensions, or ``None`` to
reduce over all dimensions.
The variance (:math:`\sigma^2`) is calculated as
.. math:: \sigma^2 = \frac{1}{\max(0,~N - \delta N)}\sum_{i=0}^{N-1}(x_i-\bar{x})^2
where :math:`x` is the sample set of elements, :math:`\bar{x}` is the
sample mean, :math:`N` is the number of samples and :math:`\delta N` is
the :attr:`correction`.
If :attr:`keepdim` is ``True``, the output tensor is of the same size
as :attr:`input` except in the dimension(s) :attr:`dim` where it is of size 1.
Otherwise, :attr:`dim` is squeezed (see :func:`torch.squeeze`), resulting in the
output tensor having 1 (or ``len(dim)``) fewer dimension(s).
Args:
input (Tensor): the input tensor.
dim (int or tuple of ints, optional): the dimension or dimensions to reduce.
If ``None``, all dimensions are reduced.
Keyword args:
correction (int): difference between the sample size and sample degrees of freedom.
Defaults to `Bessel's correction`_, ``correction=1``.
.. versionchanged:: 2.0
Previously this argument was called ``unbiased`` and was a boolean
with ``True`` corresponding to ``correction=1`` and ``False`` being
``correction=0``.
keepdim (bool): whether the output tensor has :attr:`dim` retained or not.
Returns:
A tuple (var, mean) containing the variance and mean.
Example:
>>> a = torch.tensor(
... [[ 0.2035, 1.2959, 1.8101, -0.4644],
... [ 1.5027, -0.3270, 0.5905, 0.6538],
... [-1.5745, 1.3330, -0.5596, -0.6548],
... [ 0.1264, -0.5080, 1.6420, 0.1992]])
>>> torch.var_mean(a, dim=0, keepdim=True)
(tensor([[1.5926, 1.0056, 1.2005, 0.3646]]),
tensor([[ 0.0645, 0.4485, 0.8707, -0.0665]]))
.. _Bessel's correction: https://en.wikipedia.org/wiki/Bessel%27s_correction
"""
...
@overload
def var_mean(input: Tensor, dim: Sequence[Union[str, ellipsis, None]], unbiased: _bool = True, keepdim: _bool = False) -> Tuple[Tensor, Tensor]:
r"""
var_mean(input, dim=None, *, correction=1, keepdim=False) -> (Tensor, Tensor)
Calculates the variance and mean over the dimensions specified by :attr:`dim`.
:attr:`dim` can be a single dimension, list of dimensions, or ``None`` to
reduce over all dimensions.
The variance (:math:`\sigma^2`) is calculated as
.. math:: \sigma^2 = \frac{1}{\max(0,~N - \delta N)}\sum_{i=0}^{N-1}(x_i-\bar{x})^2
where :math:`x` is the sample set of elements, :math:`\bar{x}` is the
sample mean, :math:`N` is the number of samples and :math:`\delta N` is
the :attr:`correction`.
If :attr:`keepdim` is ``True``, the output tensor is of the same size
as :attr:`input` except in the dimension(s) :attr:`dim` where it is of size 1.
Otherwise, :attr:`dim` is squeezed (see :func:`torch.squeeze`), resulting in the
output tensor having 1 (or ``len(dim)``) fewer dimension(s).
Args:
input (Tensor): the input tensor.
dim (int or tuple of ints, optional): the dimension or dimensions to reduce.
If ``None``, all dimensions are reduced.
Keyword args:
correction (int): difference between the sample size and sample degrees of freedom.
Defaults to `Bessel's correction`_, ``correction=1``.
.. versionchanged:: 2.0
Previously this argument was called ``unbiased`` and was a boolean
with ``True`` corresponding to ``correction=1`` and ``False`` being
``correction=0``.
keepdim (bool): whether the output tensor has :attr:`dim` retained or not.
Returns:
A tuple (var, mean) containing the variance and mean.
Example:
>>> a = torch.tensor(
... [[ 0.2035, 1.2959, 1.8101, -0.4644],
... [ 1.5027, -0.3270, 0.5905, 0.6538],
... [-1.5745, 1.3330, -0.5596, -0.6548],
... [ 0.1264, -0.5080, 1.6420, 0.1992]])
>>> torch.var_mean(a, dim=0, keepdim=True)
(tensor([[1.5926, 1.0056, 1.2005, 0.3646]]),
tensor([[ 0.0645, 0.4485, 0.8707, -0.0665]]))
.. _Bessel's correction: https://en.wikipedia.org/wiki/Bessel%27s_correction
"""
...
def vdot(input: Tensor, other: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
r"""
vdot(input, other, *, out=None) -> Tensor
Computes the dot product of two 1D tensors.
In symbols, this function computes
.. math::
\sum_{i=1}^n \overline{x_i}y_i,
where :math:`\overline{x_i}` denotes the conjugate for complex
vectors and the identity for real vectors.
.. note::
Unlike NumPy's vdot, torch.vdot intentionally only supports computing the dot product
of two 1D tensors with the same number of elements.
.. seealso::
:func:`torch.linalg.vecdot` computes the dot product of two batches of vectors along a dimension.
Args:
input (Tensor): first tensor in the dot product, must be 1D. Its conjugate is used if it's complex.
other (Tensor): second tensor in the dot product, must be 1D.
Keyword args:
out (Tensor, optional): the output tensor.
Example::
>>> torch.vdot(torch.tensor([2, 3]), torch.tensor([2, 1]))
tensor(7)
>>> a = torch.tensor((1 + 2j, 3 - 1j))
>>> b = torch.tensor((2 + 1j, 4 - 0j))
>>> torch.vdot(a, b)
tensor(16.+1.j)
>>> torch.vdot(b, a)
tensor(16.-1.j)
"""
...
def view_as_complex(input: Tensor) -> Tensor:
r"""
view_as_complex(input) -> Tensor
Returns a view of :attr:`input` as a complex tensor. For an input real
tensor of size :math:`m1, m2, \dots, mi, 2`, this function returns a
new complex tensor of size :math:`m1, m2, \dots, mi`, where the last
dimension of the input tensor is expected to represent the real and imaginary
components of complex numbers.
.. warning::
:func:`view_as_complex` is only supported for tensors with
:class:`torch.dtype` ``torch.float64`` and ``torch.float32``. The input is
expected to have the last dimension of :attr:`size` 2. In addition, the
tensor must have a `stride` of 1 for its last dimension. The strides of all
other dimensions must be even numbers.
Args:
input (Tensor): the input tensor.
Example::
>>> x = torch.randn(4, 2)
>>> x
tensor([[ 1.6116, -0.5772],
[-1.4606, -0.9120],
[ 0.0786, -1.7497],
[-0.6561, -1.6623]])
>>> torch.view_as_complex(x)
tensor([(1.6116-0.5772j), (-1.4606-0.9120j), (0.0786-1.7497j), (-0.6561-1.6623j)])
"""
...
def view_as_complex_copy(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
r"""
Performs the same operation as :func:`torch.view_as_complex`, but all output tensors
are freshly created instead of aliasing the input.
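A minimal sketch; the result owns its memory instead of aliasing
:attr:`input`::
>>> x = torch.tensor([[1., 2.], [3., 4.]])
>>> z = torch.view_as_complex_copy(x)
>>> z
tensor([1.+2.j, 3.+4.j])
>>> x[0, 0] = 0.  # does not affect z, which is a copy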
"""
...
def view_as_real(input: Tensor) -> Tensor:
r"""
view_as_real(input) -> Tensor
Returns a view of :attr:`input` as a real tensor. For an input complex tensor
of size :math:`m1, m2, \dots, mi`, this function returns a new real
tensor of size :math:`m1, m2, \dots, mi, 2`, where the last dimension of size 2
represents the real and imaginary components of complex numbers.
.. warning::
:func:`view_as_real` is only supported for tensors with ``complex dtypes``.
Args:
input (Tensor): the input tensor.
Example::
>>> x = torch.randn(4, dtype=torch.cfloat)
>>> x
tensor([(0.4737-0.3839j), (-0.2098-0.6699j), (0.3470-0.9451j), (-0.5174-1.3136j)])
>>> torch.view_as_real(x)
tensor([[ 0.4737, -0.3839],
[-0.2098, -0.6699],
[ 0.3470, -0.9451],
[-0.5174, -1.3136]])
"""
...
def view_as_real_copy(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
r"""
Performs the same operation as :func:`torch.view_as_real`, but all output tensors
are freshly created instead of aliasing the input.
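A minimal sketch; the result owns its memory instead of aliasing
:attr:`input`::
>>> z = torch.tensor([1. + 2.j, 3. + 4.j])
>>> torch.view_as_real_copy(z)
tensor([[1., 2.],
        [3., 4.]])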
"""
...
@overload
def view_copy(input: Tensor, dtype: _dtype, *, out: Optional[Tensor] = None) -> Tensor:
r"""
Performs the same operation as :func:`torch.view`, but all output tensors
are freshly created instead of aliasing the input.
"""
...
@overload
def view_copy(input: Tensor, size: Sequence[Union[_int, SymInt]], *, out: Optional[Tensor] = None) -> Tensor:
r"""
Performs the same operation as :func:`torch.view`, but all output tensors
are freshly created instead of aliasing the input.
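A minimal sketch with the size overload; same reshape semantics as
:meth:`torch.Tensor.view`, but the result does not alias :attr:`input`::
>>> x = torch.arange(4)
>>> y = torch.view_copy(x, (2, 2))
>>> y[0, 0] = 99
>>> x  # unchanged, because y is a fresh copy rather than a view
tensor([0, 1, 2, 3])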
"""
...
@overload
def vsplit(input: Tensor, sections: _int) -> Tuple[Tensor, ...]:
r"""
vsplit(input, indices_or_sections) -> List of Tensors
Splits :attr:`input`, a tensor with two or more dimensions, into multiple tensors
vertically according to :attr:`indices_or_sections`. Each split is a view of
:attr:`input`.
This is equivalent to calling torch.tensor_split(input, indices_or_sections, dim=0)
(the split dimension is 0), except that if :attr:`indices_or_sections` is an integer
it must evenly divide the split dimension or a runtime error will be thrown.
This function is based on NumPy's :func:`numpy.vsplit`.
Args:
input (Tensor): tensor to split.
indices_or_sections (int or list or tuple of ints): See argument in :func:`torch.tensor_split`.
Example::
>>> t = torch.arange(16.0).reshape(4,4)
>>> t
tensor([[ 0., 1., 2., 3.],
[ 4., 5., 6., 7.],
[ 8., 9., 10., 11.],
[12., 13., 14., 15.]])
>>> torch.vsplit(t, 2)
(tensor([[0., 1., 2., 3.],
[4., 5., 6., 7.]]),
tensor([[ 8., 9., 10., 11.],
[12., 13., 14., 15.]]))
>>> torch.vsplit(t, [3, 6])
(tensor([[ 0., 1., 2., 3.],
[ 4., 5., 6., 7.],
[ 8., 9., 10., 11.]]),
tensor([[12., 13., 14., 15.]]),
tensor([], size=(0, 4)))
"""
...
@overload
def vsplit(input: Tensor, indices: _size) -> Tuple[Tensor, ...]:
r"""
vsplit(input, indices_or_sections) -> List of Tensors
Splits :attr:`input`, a tensor with two or more dimensions, into multiple tensors
vertically according to :attr:`indices_or_sections`. Each split is a view of
:attr:`input`.
This is equivalent to calling torch.tensor_split(input, indices_or_sections, dim=0)
(the split dimension is 0), except that if :attr:`indices_or_sections` is an integer
it must evenly divide the split dimension or a runtime error will be thrown.
This function is based on NumPy's :func:`numpy.vsplit`.
Args:
input (Tensor): tensor to split.
indices_or_sections (int or list or tuple of ints): See argument in :func:`torch.tensor_split`.
Example::
>>> t = torch.arange(16.0).reshape(4,4)
>>> t
tensor([[ 0., 1., 2., 3.],
[ 4., 5., 6., 7.],
[ 8., 9., 10., 11.],
[12., 13., 14., 15.]])
>>> torch.vsplit(t, 2)
(tensor([[0., 1., 2., 3.],
[4., 5., 6., 7.]]),
tensor([[ 8., 9., 10., 11.],
[12., 13., 14., 15.]]))
>>> torch.vsplit(t, [3, 6])
(tensor([[ 0., 1., 2., 3.],
[ 4., 5., 6., 7.],
[ 8., 9., 10., 11.]]),
tensor([[12., 13., 14., 15.]]),
tensor([], size=(0, 4)))
"""
...
def vstack(tensors: Union[Tuple[Tensor, ...], List[Tensor]], *, out: Optional[Tensor] = None) -> Tensor:
r"""
vstack(tensors, *, out=None) -> Tensor
Stack tensors in sequence vertically (row wise).
This is equivalent to concatenation along the first axis after all 1-D tensors have been reshaped by :func:`torch.atleast_2d`.
Args:
tensors (sequence of Tensors): sequence of tensors to concatenate
Keyword args:
out (Tensor, optional): the output tensor.
Example::
>>> a = torch.tensor([1, 2, 3])
>>> b = torch.tensor([4, 5, 6])
>>> torch.vstack((a,b))
tensor([[1, 2, 3],
[4, 5, 6]])
>>> a = torch.tensor([[1],[2],[3]])
>>> b = torch.tensor([[4],[5],[6]])
>>> torch.vstack((a,b))
tensor([[1],
[2],
[3],
[4],
[5],
[6]])
"""
...
@overload
def where(condition: Tensor) -> Tuple[Tensor, ...]:
r"""
where(condition, input, other, *, out=None) -> Tensor
Return a tensor of elements selected from either :attr:`input` or :attr:`other`, depending on :attr:`condition`.
The operation is defined as:
.. math::
\text{out}_i = \begin{cases}
\text{input}_i & \text{if } \text{condition}_i \\
\text{other}_i & \text{otherwise} \\
\end{cases}
.. note::
The tensors :attr:`condition`, :attr:`input`, :attr:`other` must be :ref:`broadcastable <broadcasting-semantics>`.
Arguments:
condition (BoolTensor): When True (nonzero), yield input, otherwise yield other
input (Tensor or Scalar): value (if :attr:`input` is a scalar) or values selected at indices
where :attr:`condition` is ``True``
other (Tensor or Scalar): value (if :attr:`other` is a scalar) or values selected at indices
where :attr:`condition` is ``False``
Keyword args:
out (Tensor, optional): the output tensor.
Returns:
Tensor: A tensor of shape equal to the broadcasted shape of :attr:`condition`, :attr:`input`, :attr:`other`
Example::
>>> x = torch.randn(3, 2)
>>> y = torch.ones(3, 2)
>>> x
tensor([[-0.4620, 0.3139],
[ 0.3898, -0.7197],
[ 0.0478, -0.1657]])
>>> torch.where(x > 0, 1.0, 0.0)
tensor([[0., 1.],
[1., 0.],
[1., 0.]])
>>> torch.where(x > 0, x, y)
tensor([[ 1.0000, 0.3139],
[ 0.3898, 1.0000],
[ 0.0478, 1.0000]])
>>> x = torch.randn(2, 2, dtype=torch.double)
>>> x
tensor([[ 1.0779, 0.0383],
[-0.8785, -1.1089]], dtype=torch.float64)
>>> torch.where(x > 0, x, 0.)
tensor([[1.0779, 0.0383],
[0.0000, 0.0000]], dtype=torch.float64)
.. function:: where(condition) -> tuple of LongTensor
:noindex:
``torch.where(condition)`` is identical to
``torch.nonzero(condition, as_tuple=True)``.
.. note::
See also :func:`torch.nonzero`.
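For the single-argument form, a minimal sketch::
>>> torch.where(torch.tensor([False, True, True]))
(tensor([1, 2]),)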
"""
...
@overload
def where(condition: Tensor, input: Tensor, other: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
r"""
where(condition, input, other, *, out=None) -> Tensor
Return a tensor of elements selected from either :attr:`input` or :attr:`other`, depending on :attr:`condition`.
The operation is defined as:
.. math::
\text{out}_i = \begin{cases}
\text{input}_i & \text{if } \text{condition}_i \\
\text{other}_i & \text{otherwise} \\
\end{cases}
.. note::
The tensors :attr:`condition`, :attr:`input`, :attr:`other` must be :ref:`broadcastable <broadcasting-semantics>`.
Arguments:
condition (BoolTensor): When True (nonzero), yield input, otherwise yield other
input (Tensor or Scalar): value (if :attr:`input` is a scalar) or values selected at indices
where :attr:`condition` is ``True``
other (Tensor or Scalar): value (if :attr:`other` is a scalar) or values selected at indices
where :attr:`condition` is ``False``
Keyword args:
out (Tensor, optional): the output tensor.
Returns:
Tensor: A tensor of shape equal to the broadcasted shape of :attr:`condition`, :attr:`input`, :attr:`other`
Example::
>>> x = torch.randn(3, 2)
>>> y = torch.ones(3, 2)
>>> x
tensor([[-0.4620, 0.3139],
[ 0.3898, -0.7197],
[ 0.0478, -0.1657]])
>>> torch.where(x > 0, 1.0, 0.0)
tensor([[0., 1.],
[1., 0.],
[1., 0.]])
>>> torch.where(x > 0, x, y)
tensor([[ 1.0000, 0.3139],
[ 0.3898, 1.0000],
[ 0.0478, 1.0000]])
>>> x = torch.randn(2, 2, dtype=torch.double)
>>> x
tensor([[ 1.0779, 0.0383],
[-0.8785, -1.1089]], dtype=torch.float64)
>>> torch.where(x > 0, x, 0.)
tensor([[1.0779, 0.0383],
[0.0000, 0.0000]], dtype=torch.float64)
.. function:: where(condition) -> tuple of LongTensor
:noindex:
``torch.where(condition)`` is identical to
``torch.nonzero(condition, as_tuple=True)``.
.. note::
See also :func:`torch.nonzero`.
"""
...
@overload
def where(condition: Tensor, self: Union[Number, _complex], other: Tensor) -> Tensor:
r"""
where(condition, input, other, *, out=None) -> Tensor
Return a tensor of elements selected from either :attr:`input` or :attr:`other`, depending on :attr:`condition`.
The operation is defined as:
.. math::
\text{out}_i = \begin{cases}
\text{input}_i & \text{if } \text{condition}_i \\
\text{other}_i & \text{otherwise} \\
\end{cases}
.. note::
The tensors :attr:`condition`, :attr:`input`, :attr:`other` must be :ref:`broadcastable <broadcasting-semantics>`.
Arguments:
condition (BoolTensor): When True (nonzero), yield input, otherwise yield other
input (Tensor or Scalar): value (if :attr:`input` is a scalar) or values selected at indices
where :attr:`condition` is ``True``
other (Tensor or Scalar): value (if :attr:`other` is a scalar) or values selected at indices
where :attr:`condition` is ``False``
Keyword args:
out (Tensor, optional): the output tensor.
Returns:
Tensor: A tensor of shape equal to the broadcasted shape of :attr:`condition`, :attr:`input`, :attr:`other`
Example::
>>> x = torch.randn(3, 2)
>>> y = torch.ones(3, 2)
>>> x
tensor([[-0.4620, 0.3139],
[ 0.3898, -0.7197],
[ 0.0478, -0.1657]])
>>> torch.where(x > 0, 1.0, 0.0)
tensor([[0., 1.],
[1., 0.],
[1., 0.]])
>>> torch.where(x > 0, x, y)
tensor([[ 1.0000, 0.3139],
[ 0.3898, 1.0000],
[ 0.0478, 1.0000]])
>>> x = torch.randn(2, 2, dtype=torch.double)
>>> x
tensor([[ 1.0779, 0.0383],
[-0.8785, -1.1089]], dtype=torch.float64)
>>> torch.where(x > 0, x, 0.)
tensor([[1.0779, 0.0383],
[0.0000, 0.0000]], dtype=torch.float64)
.. function:: where(condition) -> tuple of LongTensor
:noindex:
``torch.where(condition)`` is identical to
``torch.nonzero(condition, as_tuple=True)``.
.. note::
See also :func:`torch.nonzero`.
"""
...
@overload
def where(condition: Tensor, input: Tensor, other: Union[Number, _complex]) -> Tensor:
r"""
where(condition, input, other, *, out=None) -> Tensor
Return a tensor of elements selected from either :attr:`input` or :attr:`other`, depending on :attr:`condition`.
The operation is defined as:
.. math::
\text{out}_i = \begin{cases}
\text{input}_i & \text{if } \text{condition}_i \\
\text{other}_i & \text{otherwise} \\
\end{cases}
.. note::
The tensors :attr:`condition`, :attr:`input`, :attr:`other` must be :ref:`broadcastable <broadcasting-semantics>`.
Arguments:
condition (BoolTensor): When True (nonzero), yield input, otherwise yield other
input (Tensor or Scalar): value (if :attr:`input` is a scalar) or values selected at indices
where :attr:`condition` is ``True``
other (Tensor or Scalar): value (if :attr:`other` is a scalar) or values selected at indices
where :attr:`condition` is ``False``
Keyword args:
out (Tensor, optional): the output tensor.
Returns:
Tensor: A tensor of shape equal to the broadcasted shape of :attr:`condition`, :attr:`input`, :attr:`other`
Example::
>>> x = torch.randn(3, 2)
>>> y = torch.ones(3, 2)
>>> x
tensor([[-0.4620, 0.3139],
[ 0.3898, -0.7197],
[ 0.0478, -0.1657]])
>>> torch.where(x > 0, 1.0, 0.0)
tensor([[0., 1.],
[1., 0.],
[1., 0.]])
>>> torch.where(x > 0, x, y)
tensor([[ 1.0000, 0.3139],
[ 0.3898, 1.0000],
[ 0.0478, 1.0000]])
>>> x = torch.randn(2, 2, dtype=torch.double)
>>> x
tensor([[ 1.0779, 0.0383],
[-0.8785, -1.1089]], dtype=torch.float64)
>>> torch.where(x > 0, x, 0.)
tensor([[1.0779, 0.0383],
[0.0000, 0.0000]], dtype=torch.float64)
.. function:: where(condition) -> tuple of LongTensor
:noindex:
``torch.where(condition)`` is identical to
``torch.nonzero(condition, as_tuple=True)``.
.. note::
See also :func:`torch.nonzero`.
"""
...
@overload
def where(condition: Tensor, self: Union[Number, _complex], other: Union[Number, _complex]) -> Tensor:
r"""
where(condition, input, other, *, out=None) -> Tensor
Return a tensor of elements selected from either :attr:`input` or :attr:`other`, depending on :attr:`condition`.
The operation is defined as:
.. math::
\text{out}_i = \begin{cases}
\text{input}_i & \text{if } \text{condition}_i \\
\text{other}_i & \text{otherwise} \\
\end{cases}
.. note::
The tensors :attr:`condition`, :attr:`input`, :attr:`other` must be :ref:`broadcastable <broadcasting-semantics>`.
Arguments:
condition (BoolTensor): When True (nonzero), yield input, otherwise yield other
input (Tensor or Scalar): value (if :attr:`input` is a scalar) or values selected at indices
where :attr:`condition` is ``True``
other (Tensor or Scalar): value (if :attr:`other` is a scalar) or values selected at indices
where :attr:`condition` is ``False``
Keyword args:
out (Tensor, optional): the output tensor.
Returns:
Tensor: A tensor of shape equal to the broadcasted shape of :attr:`condition`, :attr:`input`, :attr:`other`
Example::
>>> x = torch.randn(3, 2)
>>> y = torch.ones(3, 2)
>>> x
tensor([[-0.4620, 0.3139],
[ 0.3898, -0.7197],
[ 0.0478, -0.1657]])
>>> torch.where(x > 0, 1.0, 0.0)
tensor([[0., 1.],
[1., 0.],
[1., 0.]])
>>> torch.where(x > 0, x, y)
tensor([[ 1.0000, 0.3139],
[ 0.3898, 1.0000],
[ 0.0478, 1.0000]])
>>> x = torch.randn(2, 2, dtype=torch.double)
>>> x
tensor([[ 1.0779, 0.0383],
[-0.8785, -1.1089]], dtype=torch.float64)
>>> torch.where(x > 0, x, 0.)
tensor([[1.0779, 0.0383],
[0.0000, 0.0000]], dtype=torch.float64)
.. function:: where(condition) -> tuple of LongTensor
:noindex:
``torch.where(condition)`` is identical to
``torch.nonzero(condition, as_tuple=True)``.
.. note::
See also :func:`torch.nonzero`.
"""
...
@overload
def xlogy(input: Tensor, other: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
r"""
xlogy(input, other, *, out=None) -> Tensor
Alias for :func:`torch.special.xlogy`.
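A minimal sketch of the defining property that ``0 * log(y)`` is treated
as ``0``::
>>> x = torch.tensor([0., 1., 2.])
>>> y = torch.tensor([0., 2., 4.])
>>> torch.xlogy(x, y)
tensor([0.0000, 0.6931, 2.7726])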
"""
...
@overload
def xlogy(self: Union[Number, _complex], other: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
r"""
xlogy(input, other, *, out=None) -> Tensor
Alias for :func:`torch.special.xlogy`.
"""
...
@overload
def xlogy(input: Tensor, other: Union[Number, _complex], *, out: Optional[Tensor] = None) -> Tensor:
r"""
xlogy(input, other, *, out=None) -> Tensor
Alias for :func:`torch.special.xlogy`.
"""
...
@overload
def xlogy_(input: Tensor, other: Tensor) -> Tensor: ...
@overload
def xlogy_(input: Tensor, other: Union[Number, _complex]) -> Tensor: ...
def zero_(input: Tensor) -> Tensor:
r"""
zero_(input) -> Tensor
Fills :attr:`input` with zeros inplace and returns it.
"""
...
@overload
def zeros(size: Sequence[Union[_int, SymInt]], *, out: Optional[Tensor] = None, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor:
r"""
zeros(*size, *, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False) -> Tensor
Returns a tensor filled with the scalar value `0`, with the shape defined
by the variable argument :attr:`size`.
Args:
size (int...): a sequence of integers defining the shape of the output tensor.
Can be a variable number of arguments or a collection like a list or tuple.
Keyword args:
out (Tensor, optional): the output tensor.
dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.
Default: if ``None``, uses a global default (see :func:`torch.set_default_dtype`).
layout (:class:`torch.layout`, optional): the desired layout of returned Tensor.
Default: ``torch.strided``.
device (:class:`torch.device`, optional): the desired device of returned tensor.
Default: if ``None``, uses the current device for the default tensor type
(see :func:`torch.set_default_device`). :attr:`device` will be the CPU
for CPU tensor types and the current CUDA device for CUDA tensor types.
requires_grad (bool, optional): If autograd should record operations on the
returned tensor. Default: ``False``.
Example::
>>> torch.zeros(2, 3)
tensor([[ 0., 0., 0.],
[ 0., 0., 0.]])
>>> torch.zeros(5)
tensor([ 0., 0., 0., 0., 0.])
"""
...
@overload
def zeros(*size: _int, out: Optional[Tensor] = None, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor:
r"""
zeros(*size, *, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False) -> Tensor
Returns a tensor filled with the scalar value `0`, with the shape defined
by the variable argument :attr:`size`.
Args:
size (int...): a sequence of integers defining the shape of the output tensor.
Can be a variable number of arguments or a collection like a list or tuple.
Keyword args:
out (Tensor, optional): the output tensor.
dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.
Default: if ``None``, uses a global default (see :func:`torch.set_default_dtype`).
layout (:class:`torch.layout`, optional): the desired layout of returned Tensor.
Default: ``torch.strided``.
device (:class:`torch.device`, optional): the desired device of returned tensor.
Default: if ``None``, uses the current device for the default tensor type
(see :func:`torch.set_default_device`). :attr:`device` will be the CPU
for CPU tensor types and the current CUDA device for CUDA tensor types.
requires_grad (bool, optional): If autograd should record operations on the
returned tensor. Default: ``False``.
Example::
>>> torch.zeros(2, 3)
tensor([[ 0., 0., 0.],
[ 0., 0., 0.]])
>>> torch.zeros(5)
tensor([ 0., 0., 0., 0., 0.])
"""
...
@overload
def zeros(size: _size, *, names: Optional[Sequence[Union[str, ellipsis, None]]], dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor:
r"""
zeros(*size, *, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False) -> Tensor
Returns a tensor filled with the scalar value `0`, with the shape defined
by the variable argument :attr:`size`.
Args:
size (int...): a sequence of integers defining the shape of the output tensor.
Can be a variable number of arguments or a collection like a list or tuple.
Keyword args:
out (Tensor, optional): the output tensor.
dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.
Default: if ``None``, uses a global default (see :func:`torch.set_default_dtype`).
layout (:class:`torch.layout`, optional): the desired layout of returned Tensor.
Default: ``torch.strided``.
device (:class:`torch.device`, optional): the desired device of returned tensor.
Default: if ``None``, uses the current device for the default tensor type
(see :func:`torch.set_default_device`). :attr:`device` will be the CPU
for CPU tensor types and the current CUDA device for CUDA tensor types.
requires_grad (bool, optional): If autograd should record operations on the
returned tensor. Default: ``False``.
Example::
>>> torch.zeros(2, 3)
tensor([[ 0., 0., 0.],
[ 0., 0., 0.]])
>>> torch.zeros(5)
tensor([ 0., 0., 0., 0., 0.])
"""
...
@overload
def zeros(*size: _int, names: Optional[Sequence[Union[str, ellipsis, None]]], dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor:
r"""
zeros(*size, *, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False) -> Tensor
Returns a tensor filled with the scalar value `0`, with the shape defined
by the variable argument :attr:`size`.
Args:
size (int...): a sequence of integers defining the shape of the output tensor.
Can be a variable number of arguments or a collection like a list or tuple.
Keyword args:
out (Tensor, optional): the output tensor.
dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.
Default: if ``None``, uses a global default (see :func:`torch.set_default_dtype`).
layout (:class:`torch.layout`, optional): the desired layout of returned Tensor.
Default: ``torch.strided``.
device (:class:`torch.device`, optional): the desired device of returned tensor.
Default: if ``None``, uses the current device for the default tensor type
(see :func:`torch.set_default_device`). :attr:`device` will be the CPU
for CPU tensor types and the current CUDA device for CUDA tensor types.
requires_grad (bool, optional): If autograd should record operations on the
returned tensor. Default: ``False``.
Example::
>>> torch.zeros(2, 3)
tensor([[ 0., 0., 0.],
[ 0., 0., 0.]])
>>> torch.zeros(5)
tensor([ 0., 0., 0., 0., 0.])
"""
...
def zeros_like(input: Tensor, *, memory_format: Optional[memory_format] = None, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor:
r"""
zeros_like(input, *, dtype=None, layout=None, device=None, requires_grad=False, memory_format=torch.preserve_format) -> Tensor
Returns a tensor filled with the scalar value `0`, with the same size as
:attr:`input`. ``torch.zeros_like(input)`` is equivalent to
``torch.zeros(input.size(), dtype=input.dtype, layout=input.layout, device=input.device)``.
.. warning::
As of 0.4, this function does not support an :attr:`out` keyword. As an alternative,
the old ``torch.zeros_like(input, out=output)`` is equivalent to
``torch.zeros(input.size(), out=output)``.
Args:
input (Tensor): the size of :attr:`input` will determine size of the output tensor.
Keyword args:
dtype (:class:`torch.dtype`, optional): the desired data type of returned Tensor.
Default: if ``None``, defaults to the dtype of :attr:`input`.
layout (:class:`torch.layout`, optional): the desired layout of returned tensor.
Default: if ``None``, defaults to the layout of :attr:`input`.
device (:class:`torch.device`, optional): the desired device of returned tensor.
Default: if ``None``, defaults to the device of :attr:`input`.
requires_grad (bool, optional): If autograd should record operations on the
returned tensor. Default: ``False``.
memory_format (:class:`torch.memory_format`, optional): the desired memory format of
returned Tensor. Default: ``torch.preserve_format``.
Example::
>>> input = torch.empty(2, 3)
>>> torch.zeros_like(input)
tensor([[ 0., 0., 0.],
[ 0., 0., 0.]])
"""
...
__all__ = ['__and__', '__lshift__', '__or__', '__rshift__', '__xor__', '_adaptive_avg_pool2d',
'_adaptive_avg_pool3d', '_add_batch_dim', '_add_relu', '_add_relu_', '_addmm_activation',
'_aminmax', '_amp_foreach_non_finite_check_and_unscale_', '_amp_update_scale_', '_assert_async',
'_assert_scalar', '_assert_tensor_metadata', '_batch_norm_impl_index', '_cast_Byte', '_cast_Char',
'_cast_Double', '_cast_Float', '_cast_Half', '_cast_Int', '_cast_Long', '_cast_Short',
'_choose_qparams_per_tensor', '_chunk_cat', '_coalesce', '_compute_linear_combination', '_conj',
'_conj_copy', '_conj_physical', '_convert_indices_from_coo_to_csr',
'_convert_indices_from_csr_to_coo', '_convert_weight_to_int4pack', '_convolution',
'_convolution_mode', '_copy_from', '_copy_from_and_resize', '_cslt_compress', '_cslt_sparse_mm',
'_cslt_sparse_mm_search', '_ctc_loss', '_cudnn_ctc_loss', '_cudnn_init_dropout_state',
'_cudnn_rnn', '_cudnn_rnn_flatten_weight', '_cufft_clear_plan_cache',
'_cufft_get_plan_cache_max_size', '_cufft_get_plan_cache_size', '_cufft_set_plan_cache_max_size',
'_cummax_helper', '_cummin_helper', '_debug_has_internal_overlap', '_dim_arange',
'_dirichlet_grad', '_disable_functionalization', '_efficientzerotensor', '_embedding_bag',
'_embedding_bag_forward_only', '_empty_affine_quantized', '_empty_per_channel_affine_quantized',
'_enable_functionalization', '_euclidean_dist', '_fake_quantize_learnable_per_channel_affine',
'_fake_quantize_learnable_per_tensor_affine',
'_fake_quantize_per_tensor_affine_cachemask_tensor_qparams',
'_fake_quantize_per_tensor_affine_cachemask_tensor_qparams', '_fft_c2c', '_fft_c2r', '_fft_r2c',
'_fill_mem_eff_dropout_mask_', '_foobar', '_foreach_abs', '_foreach_abs_', '_foreach_acos',
'_foreach_acos_', '_foreach_add', '_foreach_add_', '_foreach_addcdiv', '_foreach_addcdiv_',
'_foreach_addcmul', '_foreach_addcmul_', '_foreach_asin', '_foreach_asin_', '_foreach_atan',
'_foreach_atan_', '_foreach_ceil', '_foreach_ceil_', '_foreach_clamp_max', '_foreach_clamp_max_',
'_foreach_clamp_min', '_foreach_clamp_min_', '_foreach_copy_', '_foreach_cos', '_foreach_cos_',
'_foreach_cosh', '_foreach_cosh_', '_foreach_div', '_foreach_div_', '_foreach_erf',
'_foreach_erf_', '_foreach_erfc', '_foreach_erfc_', '_foreach_exp', '_foreach_exp_',
'_foreach_expm1', '_foreach_expm1_', '_foreach_floor', '_foreach_floor_', '_foreach_frac',
'_foreach_frac_', '_foreach_lerp', '_foreach_lerp_', '_foreach_lgamma', '_foreach_lgamma_',
'_foreach_log', '_foreach_log10', '_foreach_log10_', '_foreach_log1p', '_foreach_log1p_',
'_foreach_log2', '_foreach_log2_', '_foreach_log_', '_foreach_maximum', '_foreach_maximum_',
'_foreach_minimum', '_foreach_minimum_', '_foreach_mul', '_foreach_mul_', '_foreach_neg',
'_foreach_neg_', '_foreach_norm', '_foreach_pow', '_foreach_pow_', '_foreach_reciprocal',
'_foreach_reciprocal_', '_foreach_round', '_foreach_round_', '_foreach_sigmoid',
'_foreach_sigmoid_', '_foreach_sign', '_foreach_sign_', '_foreach_sin', '_foreach_sin_',
'_foreach_sinh', '_foreach_sinh_', '_foreach_sqrt', '_foreach_sqrt_', '_foreach_sub',
'_foreach_sub_', '_foreach_tan', '_foreach_tan_', '_foreach_tanh', '_foreach_tanh_',
'_foreach_trunc', '_foreach_trunc_', '_foreach_zero_', '_from_functional_tensor',
'_functional_assert_async', '_functional_assert_scalar', '_functional_sym_constrain_range',
'_functional_sym_constrain_range_for_size',
'_functionalize_are_all_mutations_hidden_from_autograd',
'_functionalize_are_all_mutations_under_no_grad_or_inference_mode', '_functionalize_commit_update',
'_functionalize_mark_mutation_hidden_from_autograd', '_functionalize_replace',
'_functionalize_sync', '_fused_adam_', '_fused_adamw_', '_fused_dropout',
'_fused_moving_avg_obs_fq_helper', '_fused_moving_avg_obs_fq_helper', '_fused_sdp_choice',
'_fused_sgd_', '_fw_primal_copy', '_grid_sampler_2d_cpu_fallback',
'_has_compatible_shallow_copy_type', '_histogramdd_bin_edges', '_histogramdd_from_bin_cts',
'_histogramdd_from_bin_tensors', '_index_put_impl_', '_indices_copy', '_int_mm', '_is_all_true',
'_is_any_true', '_is_functional_tensor', '_is_zerotensor', '_lazy_clone', '_linalg_check_errors',
'_linalg_det', '_linalg_det', '_linalg_eigh', '_linalg_eigh', '_linalg_slogdet', '_linalg_slogdet',
'_linalg_solve_ex', '_linalg_solve_ex', '_linalg_svd', '_linalg_svd', '_log_softmax',
'_log_softmax_backward_data', '_logcumsumexp', '_lstm_mps', '_lu_with_info', '_lu_with_info',
'_make_dep_token', '_make_dual', '_make_dual_copy', '_make_per_channel_quantized_tensor',
'_make_per_tensor_quantized_tensor', '_masked_scale', '_masked_softmax', '_mixed_dtypes_linear',
'_mkldnn_reshape', '_mkldnn_transpose', '_mkldnn_transpose_', '_mps_convolution',
'_mps_convolution_transpose', '_native_batch_norm_legit', '_native_batch_norm_legit_no_training',
'_native_multi_head_attention', '_neg_view', '_neg_view_copy', '_nested_from_padded',
'_nested_from_padded_and_nested_example', '_nested_get_jagged_dummy', '_nested_get_lengths',
'_nested_get_offsets', '_nested_get_ragged_idx', '_nested_get_values', '_nested_get_values_copy',
'_nested_tensor_from_mask', '_nested_tensor_from_mask_left_aligned',
'_nested_tensor_from_tensor_list', '_nested_tensor_softmax_with_shape', '_nested_view_from_buffer',
'_nested_view_from_buffer_copy', '_nested_view_from_jagged', '_nested_view_from_jagged_copy',
'_nnpack_available', '_nnpack_spatial_convolution', '_pack_padded_sequence',
'_pad_packed_sequence', '_pin_memory', '_prelu_kernel', '_print', '_propagate_xla_data',
'_remove_batch_dim', '_reshape_alias_copy', '_reshape_from_tensor', '_resize_output_',
'_rowwise_prune', '_sample_dirichlet', '_saturate_weight_to_fp16',
'_scaled_dot_product_attention_math', '_scaled_dot_product_cudnn_attention',
'_scaled_dot_product_cudnn_attention', '_scaled_dot_product_efficient_attention',
'_scaled_dot_product_efficient_attention', '_scaled_dot_product_flash_attention',
'_scaled_dot_product_flash_attention', '_scaled_dot_product_flash_attention_for_cpu',
'_scaled_dot_product_flash_attention_for_cpu', '_scaled_mm', '_shape_as_tensor',
'_sobol_engine_draw', '_sobol_engine_ff_', '_sobol_engine_initialize_state_',
'_sobol_engine_scramble_', '_softmax', '_softmax_backward_data', '_sparse_broadcast_to',
'_sparse_broadcast_to_copy', '_sparse_csr_prod', '_sparse_csr_sum',
'_sparse_log_softmax_backward_data', '_sparse_semi_structured_linear',
'_sparse_softmax_backward_data', '_sparse_sparse_matmul', '_sparse_sum', '_stack',
'_standard_gamma', '_standard_gamma_grad', '_sync', '_test_autograd_multiple_dispatch',
'_test_autograd_multiple_dispatch_view', '_test_autograd_multiple_dispatch_view_copy',
'_test_check_tensor', '_test_functorch_fallback', '_test_parallel_materialize',
'_test_serialization_subcmul', '_to_cpu', '_to_functional_tensor', '_to_sparse_semi_structured',
'_transform_bias_rescale_qkv', '_transformer_encoder_layer_fwd', '_trilinear',
'_triton_multi_head_attention', '_triton_scaled_dot_attention', '_unique', '_unique2',
'_unpack_dual', '_unpack_dual', '_unsafe_index', '_unsafe_index_put', '_use_cudnn_ctc_loss',
'_use_cudnn_rnn_flatten_weight', '_validate_compressed_sparse_indices',
'_validate_sparse_bsc_tensor_args', '_validate_sparse_bsr_tensor_args',
'_validate_sparse_compressed_tensor_args', '_validate_sparse_coo_tensor_args',
'_validate_sparse_csc_tensor_args', '_validate_sparse_csr_tensor_args', '_values_copy',
'_weight_int4pack_mm', '_weight_int8pack_mm', '_weight_norm', '_weight_norm_interface', 'abs',
'abs_', 'absolute', 'acos', 'acos_', 'acosh', 'acosh_', 'adaptive_avg_pool1d',
'adaptive_max_pool1d', 'add', 'addbmm', 'addcdiv', 'addcmul', 'addmm', 'addmv', 'addmv_', 'addr',
'adjoint', 'affine_grid_generator', 'alias_copy', 'all', 'allclose', 'alpha_dropout',
'alpha_dropout_', 'amax', 'amin', 'aminmax', 'aminmax', 'angle', 'any', 'arange', 'arccos',
'arccos_', 'arccosh', 'arccosh_', 'arcsin', 'arcsin_', 'arcsinh', 'arcsinh_', 'arctan', 'arctan2',
'arctan_', 'arctanh', 'arctanh_', 'argmax', 'argmin', 'argsort', 'argwhere', 'as_strided',
'as_strided_', 'as_strided_copy', 'as_strided_scatter', 'as_tensor', 'asarray', 'asin', 'asin_',
'asinh', 'asinh_', 'atan', 'atan2', 'atan_', 'atanh', 'atanh_', 'avg_pool1d', 'baddbmm',
'bartlett_window', 'batch_norm', 'batch_norm_backward_elemt', 'batch_norm_backward_reduce',
'batch_norm_elemt', 'batch_norm_gather_stats', 'batch_norm_gather_stats_with_counts',
'batch_norm_stats', 'batch_norm_update_stats', 'bernoulli', 'bilinear',
'binary_cross_entropy_with_logits', 'bincount', 'binomial', 'bitwise_and', 'bitwise_left_shift',
'bitwise_not', 'bitwise_or', 'bitwise_right_shift', 'bitwise_xor', 'blackman_window', 'bmm',
'broadcast_to', 'bucketize', 'can_cast', 'cat', 'ccol_indices_copy', 'ceil', 'ceil_', 'celu',
'celu_', 'channel_shuffle', 'cholesky', 'cholesky_inverse', 'cholesky_solve',
'choose_qparams_optimized', 'chunk', 'clamp', 'clamp_', 'clamp_max', 'clamp_max_', 'clamp_min',
'clamp_min_', 'clip', 'clip_', 'clone', 'col_indices_copy', 'column_stack', 'combinations',
'complex', 'concat', 'concatenate', 'conj', 'conj_physical', 'conj_physical_', 'constant_pad_nd',
'conv1d', 'conv2d', 'conv3d', 'conv_tbc', 'conv_transpose1d', 'conv_transpose2d',
'conv_transpose3d', 'convolution', 'copysign', 'corrcoef', 'cos', 'cos_', 'cosh', 'cosh_',
'cosine_embedding_loss', 'cosine_similarity', 'count_nonzero', 'cov', 'cross', 'crow_indices_copy',
'ctc_loss', 'cudnn_affine_grid_generator', 'cudnn_batch_norm', 'cudnn_convolution',
'cudnn_convolution_add_relu', 'cudnn_convolution_relu', 'cudnn_convolution_transpose',
'cudnn_grid_sampler', 'cudnn_is_acceptable', 'cummax', 'cummax', 'cummin', 'cummin', 'cumprod',
'cumsum', 'cumulative_trapezoid', 'deg2rad', 'deg2rad_', 'dequantize', 'det', 'detach', 'detach_',
'detach_copy', 'diag', 'diag_embed', 'diagflat', 'diagonal', 'diagonal_copy', 'diagonal_scatter',
'diff', 'digamma', 'dist', 'div', 'divide', 'dot', 'dropout', 'dropout_', 'dsmm', 'dsplit',
'dstack', 'embedding', 'embedding_bag', 'embedding_renorm_', 'empty', 'empty_like',
'empty_permuted', 'empty_quantized', 'empty_strided', 'eq', 'equal', 'erf', 'erf_', 'erfc',
'erfc_', 'erfinv', 'exp', 'exp2', 'exp2_', 'exp_', 'expand_copy', 'expm1', 'expm1_', 'eye',
'fake_quantize_per_channel_affine', 'fake_quantize_per_tensor_affine', 'fbgemm_linear_fp16_weight',
'fbgemm_linear_fp16_weight_fp32_activation', 'fbgemm_linear_int8_weight',
'fbgemm_linear_int8_weight_fp32_activation', 'fbgemm_linear_quantize_weight',
'fbgemm_pack_gemm_matrix_fp16', 'fbgemm_pack_quantized_matrix', 'feature_alpha_dropout',
'feature_alpha_dropout_', 'feature_dropout', 'feature_dropout_', 'fill', 'fill_', 'fix', 'fix_',
'flatten', 'flip', 'fliplr', 'flipud', 'float_power', 'floor', 'floor_', 'floor_divide', 'fmax',
'fmin', 'fmod', 'frac', 'frac_', 'frexp', 'frexp', 'frobenius_norm', 'from_file', 'from_numpy',
'frombuffer', 'full', 'full_like', 'fused_moving_avg_obs_fake_quant', 'gather', 'gcd', 'gcd_',
'ge', 'geqrf', 'geqrf', 'ger', 'get_default_dtype', 'get_num_interop_threads', 'get_num_threads',
'gradient', 'greater', 'greater_equal', 'grid_sampler', 'grid_sampler_2d', 'grid_sampler_3d',
'group_norm', 'gru', 'gru_cell', 'gt', 'hamming_window', 'hann_window', 'hardshrink', 'heaviside',
'hinge_embedding_loss', 'histc', 'histogram', 'histogram', 'histogramdd', 'histogramdd', 'hsmm',
'hsplit', 'hspmm', 'hstack', 'hypot', 'i0', 'i0_', 'igamma', 'igammac', 'imag', 'index_add',
'index_copy', 'index_fill', 'index_put', 'index_put_', 'index_reduce', 'index_select',
'indices_copy', 'init_num_threads', 'inner', 'instance_norm', 'int_repr', 'inverse', 'is_complex',
'is_conj', 'is_distributed', 'is_floating_point', 'is_grad_enabled', 'is_inference',
'is_inference_mode_enabled', 'is_neg', 'is_nonzero', 'is_same_size', 'is_signed',
'is_vulkan_available', 'isclose', 'isfinite', 'isin', 'isinf', 'isnan', 'isneginf', 'isposinf',
'isreal', 'istft', 'kaiser_window', 'kl_div', 'kron', 'kthvalue', 'layer_norm', 'lcm',
'lcm_', 'ldexp', 'ldexp_', 'le', 'lerp', 'less', 'less_equal', 'lgamma', 'linspace', 'log',
'log10', 'log10_', 'log1p', 'log1p_', 'log2', 'log2_', 'log_', 'log_softmax', 'logaddexp',
'logaddexp2', 'logcumsumexp', 'logdet', 'logical_and', 'logical_not', 'logical_or', 'logical_xor',
'logit', 'logit_', 'logspace', 'logsumexp', 'lstm', 'lstm_cell', 'lt', 'lu_solve', 'lu_unpack',
'margin_ranking_loss', 'masked_fill', 'masked_scatter', 'masked_select', 'matmul',
'matrix_exp', 'matrix_power', 'max', 'max_pool1d', 'max_pool1d_with_indices', 'max_pool2d',
'max_pool3d', 'maximum', 'mean', 'median', 'min', 'minimum', 'miopen_batch_norm',
'miopen_convolution', 'miopen_convolution_add_relu', 'miopen_convolution_relu',
'miopen_convolution_transpose', 'miopen_depthwise_convolution', 'miopen_rnn',
'mkldnn_adaptive_avg_pool2d', 'mkldnn_convolution', 'mkldnn_linear_backward_weights',
'mkldnn_max_pool2d', 'mkldnn_max_pool3d', 'mkldnn_rnn_layer', 'mm', 'mode', 'moveaxis',
'movedim', 'msort', 'mul', 'multinomial', 'multiply', 'mv', 'mvlgamma', 'nan_to_num',
'nan_to_num_', 'nanmean', 'nanmedian', 'nanquantile', 'nansum', 'narrow',
'narrow_copy', 'native_batch_norm', 'native_channel_shuffle', 'native_dropout',
'native_group_norm', 'native_layer_norm', 'native_norm', 'ne', 'neg', 'neg_', 'negative',
'negative_', 'nextafter', 'nonzero', 'nonzero_static', 'norm_except_dim', 'normal', 'not_equal',
'nuclear_norm', 'numel', 'ones', 'ones_like', 'orgqr', 'ormqr', 'outer', 'pairwise_distance',
'pdist', 'permute', 'permute_copy', 'pinverse', 'pixel_shuffle', 'pixel_unshuffle', 'poisson',
'poisson_nll_loss', 'polar', 'polygamma', 'positive', 'pow', 'prelu', 'prod', 'promote_types',
'put', 'q_per_channel_axis', 'q_per_channel_scales', 'q_per_channel_zero_points', 'q_scale',
'q_zero_point', 'qr', 'quantile', 'quantize_per_channel', 'quantize_per_tensor',
'quantize_per_tensor_dynamic', 'quantized_batch_norm', 'quantized_gru_cell', 'quantized_lstm_cell',
'quantized_max_pool1d', 'quantized_max_pool2d', 'quantized_max_pool3d', 'quantized_rnn_relu_cell',
'quantized_rnn_tanh_cell', 'rad2deg', 'rad2deg_', 'rand', 'rand_like', 'randint', 'randint_like',
'randn', 'randn_like', 'randperm', 'range', 'ravel', 'real', 'reciprocal', 'reciprocal_', 'relu',
'relu_', 'remainder', 'renorm', 'repeat_interleave', 'reshape', 'resize_as_', 'resize_as_sparse_',
'resolve_conj', 'resolve_neg', 'result_type', 'rnn_relu', 'rnn_relu_cell', 'rnn_tanh',
'rnn_tanh_cell', 'roll', 'rot90', 'round', 'round_', 'row_indices_copy', 'row_stack', 'rrelu',
'rrelu_', 'rsqrt', 'rsqrt_', 'rsub', 'saddmm', 'scalar_tensor', 'scatter', 'scatter_add',
'scatter_reduce', 'searchsorted', 'segment_reduce', 'select', 'select_copy', 'select_scatter',
'selu', 'selu_', 'set_flush_denormal', 'set_num_interop_threads', 'set_num_threads', 'sgn',
'sigmoid', 'sigmoid_', 'sign', 'signbit', 'sin', 'sin_', 'sinc', 'sinc_', 'sinh', 'sinh_',
'slice_copy', 'slice_inverse', 'slice_scatter', 'slogdet', 'smm', 'softmax', 'sort',
'sparse_bsc_tensor', 'sparse_bsr_tensor', 'sparse_compressed_tensor', 'sparse_coo_tensor',
'sparse_csc_tensor', 'sparse_csr_tensor', 'split_copy', 'split_with_sizes',
'split_with_sizes_copy', 'spmm', 'sqrt', 'sqrt_', 'square', 'square_', 'squeeze', 'squeeze_copy',
'sspaddmm', 'stack', 'std', 'std_mean', 'sub', 'subtract', 'sum', 'svd', 'swapaxes',
'swapdims', 'sym_constrain_range', 'sym_constrain_range_for_size', 't', 't_copy', 'take',
'take_along_dim', 'tan', 'tan_', 'tanh', 'tanh_', 'tensor', 'tensor_split', 'threshold',
'threshold_', 'tile', 'topk', 'trace', 'transpose', 'transpose_copy', 'trapezoid', 'trapz',
'triangular_solve', 'tril', 'tril_indices', 'triplet_margin_loss', 'triu',
'triu_indices', 'true_divide', 'trunc', 'trunc_', 'unbind', 'unbind_copy', 'unflatten',
'unfold_copy', 'unique_dim', 'unsafe_chunk', 'unsafe_split', 'unsafe_split_with_sizes',
'unsqueeze', 'unsqueeze_copy', 'values_copy', 'vander', 'var', 'var_mean', 'vdot',
'view_as_complex', 'view_as_complex_copy', 'view_as_real', 'view_as_real_copy', 'view_copy',
'vsplit', 'vstack', 'where', 'xlogy', 'xlogy_', 'zero_', 'zeros', 'zeros_like']
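
# A minimal sketch, not part of the generated stub: assuming the list above is
# bound to a name such as `__all__`, the exported names can be cross-checked
# against the runtime `torch` module from a regular script. `_missing` is a
# hypothetical local used only for illustration; kept in comments here so the
# stub itself carries no executable statements.
#
#     import torch
#
#     _missing = [name for name in __all__ if not hasattr(torch, name)]
#     assert not _missing, f"stub exports missing at runtime: {_missing}"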