torch.Tensor.__matmul__(self, other) torch.Tensor.detach(self) torch.Tensor.addcdiv(self, tensor1, tensor2, *, value=1) torch.Tensor.addcdiv_(self, tensor1, tensor2, *, value=1) torch.Tensor.addcmul(self, tensor1, tensor2, *, value=1) torch.Tensor.addcmul_(self, tensor1, tensor2, *, value=1) torch.Tensor.is_cuda torch.Tensor.is_quantized torch.Tensor.is_meta torch.Tensor.device torch.Tensor.ndim torch.Tensor.abs(self) torch.Tensor.abs_(self) torch.Tensor.absolute(self) torch.Tensor.absolute_(self) torch.Tensor.acos(self) torch.Tensor.acos_(self) torch.Tensor.arccos(self) torch.Tensor.arccos_(self) torch.Tensor.add(self, other, *, alpha=1) torch.Tensor.add_(self, other, *, alpha=1) torch.Tensor.addbmm(self, batch1, batch2, *, beta=1, alpha=1) torch.Tensor.addbmm_(self, batch1, batch2, *, beta=1, alpha=1) torch.Tensor.addmm(self, mat1, mat2, *, beta=1, alpha=1) torch.Tensor.addmm_(self, mat1, mat2, *, beta=1, alpha=1) torch.Tensor.addmv(self, mat, vec, *, beta=1, alpha=1) torch.Tensor.addmv_(self, mat, vec, *, beta=1, alpha=1) torch.Tensor.asin(self) torch.Tensor.asin_(self) torch.Tensor.arcsin(self) torch.Tensor.arcsin_(self) torch.Tensor.atan(self) torch.Tensor.atan_(self) torch.Tensor.arctan(self) torch.Tensor.arctan_(self) torch.Tensor.bool(self, memory_format=torch.preserve_format) torch.Tensor.byte(self, memory_format=torch.preserve_format) torch.Tensor.ceil(self) torch.Tensor.ceil_(self) torch.Tensor.char(self, memory_format=torch.preserve_format) torch.Tensor.chunk(self, chunks, dim=0) torch.Tensor.clamp(self, min, max) torch.Tensor.clamp_(self, min, max) torch.Tensor.clip(self, min, max) torch.Tensor.clip_(self, min, max) torch.Tensor.clone(self, *, memory_format=torch.preserve_format) torch.Tensor.contiguous(self, memory_format=torch.contiguous_format) torch.Tensor.cos(self) torch.Tensor.cos_(self) torch.Tensor.cosh(self) torch.Tensor.cosh_(self) torch.Tensor.acosh(self) torch.Tensor.acosh_(self) torch.Tensor.arccosh(self) torch.Tensor.arccosh_(self) torch.Tensor.cpu(self, memory_format=torch.preserve_format) torch.Tensor.cuda(self, device=None, non_blocking=False, memory_format=torch.preserve_format) torch.Tensor.data_ptr(self) torch.Tensor.dim(self) torch.Tensor.div(self, value, *, rounding_mode=None) torch.Tensor.div_(self, value, *, rounding_mode=None) torch.Tensor.divide(self, value, *, rounding_mode=None) torch.Tensor.divide_(self, value, *, rounding_mode=None) torch.Tensor.double(self, memory_format=torch.preserve_format) torch.Tensor.element_size(self) torch.Tensor.eq(self, other) torch.Tensor.eq_(self, other) torch.Tensor.equal(self, other) torch.Tensor.erf(self) torch.Tensor.erf_(self) torch.Tensor.exp(self) torch.Tensor.exp_(self) torch.Tensor.expm1(self) torch.Tensor.expm1_(self) torch.Tensor.expand(self, *sizes) torch.Tensor.expand_as(self, other) torch.Tensor.exponential_(self, lambd=1, *, generator=None) torch.Tensor.flatten(self, start_dim=0, end_dim=-1) torch.Tensor.float(self, memory_format=torch.preserve_format) torch.Tensor.floor(self) torch.Tensor.floor_(self) torch.Tensor.floor_divide(self, value) torch.Tensor.floor_divide_(self, value) torch.Tensor.fmod(self, divisor) torch.Tensor.fmod_(self, divisor) torch.Tensor.ge(self, other) torch.Tensor.ge_(self, other) torch.Tensor.greater_equal(self, other) torch.Tensor.greater_equal_(self, other) torch.Tensor.gt(self, other) torch.Tensor.gt_(self, other) torch.Tensor.greater(self, other) torch.Tensor.greater_(self, other) torch.Tensor.half(self, memory_format=torch.preserve_format) torch.Tensor.hardshrink(self, lambd=0.5)
torch.Tensor.int(self, memory_format=torch.preserve_format) torch.Tensor.is_contiguous(self, memory_format=torch.contiguous_format) torch.Tensor.is_complex(self) torch.Tensor.is_floating_point(self) torch.Tensor.is_pinned(self) torch.Tensor.is_signed(self) torch.Tensor.le(self, other) torch.Tensor.le_(self, other) torch.Tensor.less_equal(self, other) torch.Tensor.less_equal_(self, other) torch.Tensor.log(self) torch.Tensor.log_(self) torch.Tensor.log10(self) torch.Tensor.log10_(self) torch.Tensor.log1p(self) torch.Tensor.log1p_(self) torch.Tensor.log2(self) torch.Tensor.log2_(self) torch.Tensor.logaddexp(self, other) torch.Tensor.logaddexp2(self, other) torch.Tensor.logical_and(self, other) torch.Tensor.logical_and_(self, other) torch.Tensor.logical_not(self) torch.Tensor.logical_not_(self) torch.Tensor.logical_or(self, other) torch.Tensor.logical_or_(self, other) torch.Tensor.logical_xor(self, other) torch.Tensor.logical_xor_(self, other) torch.Tensor.long(self, memory_format=torch.preserve_format) torch.Tensor.lt(self, other) torch.Tensor.lt_(self, other) torch.Tensor.less(self, other) torch.Tensor.less_(self, other) torch.Tensor.matmul(self, tensor2) torch.Tensor.max(self, dim=None, keepdim=False) torch.Tensor.maximum(self, other) torch.Tensor.mean(self, dim=None, keepdim=False) torch.Tensor.min(self, dim=None, keepdim=False) torch.Tensor.minimum(self, other) torch.Tensor.mul(self, value) torch.Tensor.mul_(self, value) torch.Tensor.multiply(self, value) torch.Tensor.multiply_(self, value) torch.Tensor.narrow(self, dimension, start, length) torch.Tensor.narrow_copy(self, dimension, start, length) torch.Tensor.ndimension(self) torch.Tensor.ne(self, other) torch.Tensor.ne_(self, other) torch.Tensor.not_equal(self, other) torch.Tensor.not_equal_(self, other) torch.Tensor.neg(self) torch.Tensor.neg_(self) torch.Tensor.negative(self) torch.Tensor.negative_(self) torch.Tensor.nelement(self) torch.Tensor.numel(self) torch.Tensor.permute(self, *dims) torch.Tensor.pin_memory(self) torch.Tensor.pow(self, exponent) torch.Tensor.pow_(self, exponent) torch.Tensor.prod(self, dim=None, keepdim=False, dtype=None) torch.Tensor.reciprocal(self) torch.Tensor.reciprocal_(self) torch.Tensor.reshape(self, *shape) torch.Tensor.reshape_as(self, other) torch.Tensor.round(self) torch.Tensor.round_(self) torch.Tensor.rsqrt(self) torch.Tensor.rsqrt_(self) torch.Tensor.short(self, memory_format=torch.preserve_format) torch.Tensor.sigmoid(self) torch.Tensor.sigmoid_(self) torch.Tensor.sign(self) torch.Tensor.sign_(self) torch.Tensor.sin(self) torch.Tensor.sin_(self) torch.Tensor.sinh(self) torch.Tensor.sinh_(self) torch.Tensor.asinh(self) torch.Tensor.asinh_(self) torch.Tensor.arcsinh(self) torch.Tensor.arcsinh_(self) torch.Tensor.size(self, dim=None) torch.Tensor.split(self, split_size, dim=0) torch.Tensor.sqrt(self) torch.Tensor.sqrt_(self) torch.Tensor.square(self) torch.Tensor.square_(self) torch.Tensor.squeeze(self, dim=None) torch.Tensor.squeeze_(self, dim=None) torch.Tensor.storage(self) torch.Tensor.storage_offset(self) torch.Tensor.storage_type(self) torch.Tensor.stride(self, dim) torch.Tensor.sub(self, other, *, alpha=1) torch.Tensor.sub_(self, other, *, alpha=1) torch.Tensor.subtract(self, other, *, alpha=1) torch.Tensor.subtract_(self, other, *, alpha=1) torch.Tensor.sum(self, dim=None, keepdim=False, dtype=None) torch.Tensor.t(self) torch.Tensor.t_(self) torch.Tensor.to(self, dtype=None, device=None, non_blocking=False, copy=False, memory_format=torch.preserve_format) torch.Tensor.to_mkldnn(self)
torch.Tensor.tan(self) torch.Tensor.tan_(self) torch.Tensor.tanh(self) torch.Tensor.tanh_(self) torch.Tensor.atanh(self) torch.Tensor.atanh_(self) torch.Tensor.arctanh(self) torch.Tensor.arctanh_(self) torch.Tensor.transpose(self, dim0, dim1) torch.Tensor.transpose_(self, dim0, dim1) torch.Tensor.true_divide(self, value) torch.Tensor.true_divide_(self, value) torch.Tensor.type(self, dtype=None, non_blocking=False, **kwargs) torch.Tensor.type_as(self, tensor) torch.Tensor.unsqueeze(self, dim) torch.Tensor.unsqueeze_(self, dim) torch.Tensor.view(self, *shape) torch.Tensor.view_as(self, other) torch.Tensor.where(self, condition, y) torch.split(tensor, split_size_or_sections, dim=0) torch._VF.rnn_tanh(*args) torch._VF.rnn_relu(*args) torch._VF.lstm(*args) torch._VF.gru(*args) torch._VF.rnn_relu_cell(input, hx, weight_ih, weight_hh, bias_ih, bias_hh) torch._VF.rnn_tanh_cell(input, hx, weight_ih, weight_hh, bias_ih, bias_hh) torch._VF.lstm_cell(input, hx, weight_ih, weight_hh, bias_ih, bias_hh) torch._VF.gru_cell(input, hx, weight_ih, weight_hh, bias_ih, bias_hh) torch._C._nn.gelu(input) torch.abs(input, *, out=None) torch.absolute(input, *, out=None) torch.acos(input, *, out=None) torch.arccos(input, *, out=None) torch.acosh(input, *, out=None) torch.arccosh(input, *, out=None) torch.add(input, other, *, out=None) torch.addbmm(input, batch1, batch2, *, beta=1, alpha=1, out=None) torch.addcdiv(input, tensor1, tensor2, *, value=1, out=None) torch.addcmul(input, tensor1, tensor2, *, value=1, out=None) torch.addmm(input, mat1, mat2, *, beta=1, alpha=1, out=None) torch.addmv(input, mat, vec, *, beta=1, alpha=1, out=None) torch.asin(input, *, out=None) torch.arcsin(input, *, out=None) torch.asinh(input, *, out=None) torch.arcsinh(input, *, out=None) torch.atan(input, *, out=None) torch.arctan(input, *, out=None) torch.atanh(input, *, out=None) torch.arctanh(input, *, out=None) torch.stack(tensors, dim=0, *, out=None) torch.chunk(input, chunks, dim=0) torch.cat(tensors, dim=0, *, out=None) torch.ceil(input, *, out=None) torch.reciprocal(input, *, out=None) torch.clone(input, *, memory_format=torch.preserve_format) torch.clamp(input, min, max, *, out=None) torch.clip(input, min, max, *, out=None) torch.cos(input, *, out=None) torch.cosh(input, *, out=None) torch.div(input, other, *, rounding_mode=None, out=None) torch.divide(input, other, *, rounding_mode=None, out=None) torch.eq(input, other, *, out=None) torch.equal(input, other) torch.erf(input, *, out=None) torch.exp(input, *, out=None) torch.expm1(input, *, out=None) torch.floor(input, *, out=None) torch.floor_divide(input, other, *, out=None) torch.fmod(input, other, *, out=None) torch.flatten(input, start_dim=0, end_dim=-1) torch.ge(input, other, *, out=None) torch.greater_equal(input, other, *, out=None) torch.gt(input, other, *, out=None) torch.greater(input, other, *, out=None) torch.is_floating_point(input) torch.is_complex(input) torch.le(input, other, *, out=None) torch.less_equal(input, other, *, out=None) torch.log(input, *, out=None) torch.log10(input, *, out=None) torch.log1p(input, *, out=None) torch.log2(input, *, out=None) torch.logaddexp(input, other, *, out=None) torch.logaddexp2(input, other, *, out=None) torch.logical_and(input, other, *, out=None) torch.logical_not(input, *, out=None) torch.logical_or(input, other, *, out=None) torch.logical_xor(input, other, *, out=None) torch.lt(input, other, *, out=None) torch.less(input, other, *, out=None) torch.max(input, dim=None, keepdim=False, *, out=None)
torch.maximum(input, other, *, out=None) torch.mean(input, dim=None, keepdim=False, *, out=None) torch.min(input, dim=None, keepdim=False, *, out=None) torch.minimum(input, other, *, out=None) torch.matmul(input, other, *, out=None) torch.mul(input, other, *, out=None) torch.multiply(input, other, *, out=None) torch.narrow(input, dim, start, length) torch.ne(input, other, *, out=None) torch.not_equal(input, other, *, out=None) torch.neg(input, *, out=None) torch.negative(input, *, out=None) torch.numel(input) torch.ones(*size, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False) torch.ones_like(input, *, dtype=None, layout=None, device=None, requires_grad=False, memory_format=torch.preserve_format) torch.pow(input, exponent, *, out=None) torch.prod(input, dim=None, keepdim=False, *, out=None) torch.range(start, end=None, step=1, *, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False) torch.arange(start, end=None, step=1, *, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False) torch.reshape(input, shape) torch.round(input, *, out=None) torch.rsqrt(input, *, out=None) torch.sigmoid(input, *, out=None) torch.sign(input, *, out=None) torch.sin(input, *, out=None) torch.sinh(input, *, out=None) torch.sqrt(input, *, out=None) torch.square(input, *, out=None) torch.squeeze(input, dim=None, *, out=None) torch.sub(input, other, *, alpha=1, out=None) torch.subtract(input, other, *, alpha=1, out=None) torch.sum(input, dim=None, keepdim=False, *, out=None) torch.t(input) torch.tan(input, *, out=None) torch.tanh(input, *, out=None) torch.transpose(input, dim0, dim1) torch.unsqueeze(input, dim) torch.zeros(*size, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False) torch.zeros_like(input, *, dtype=None, layout=None, device=None, requires_grad=False, memory_format=torch.preserve_format) torch.where(condition, x, y) torch.nn.functional.conv1d(input, weight, bias=None, stride=1, padding=0, dilation=1, groups=1) torch.nn.functional.conv2d(input, weight, bias=None, stride=1, padding=0, dilation=1, groups=1) torch.nn.functional.conv3d(input, weight, bias=None, stride=1, padding=0, dilation=1, groups=1) torch.nn.functional.avg_pool1d(input, kernel_size, stride=None, padding=0, ceil_mode=False, count_include_pad=True) torch.nn.functional.avg_pool2d(input, kernel_size, stride=None, padding=0, ceil_mode=False, count_include_pad=True, divisor_override=None) torch.nn.functional.avg_pool3d(input, kernel_size, stride=None, padding=0, ceil_mode=False, count_include_pad=True, divisor_override=None) torch.nn.functional.max_pool1d(input, kernel_size, stride=None, padding=0, dilation=1, ceil_mode=False, return_indices=False) torch.nn.functional.max_pool2d(input, kernel_size, stride=None, padding=0, dilation=1, ceil_mode=False, return_indices=False) torch.nn.functional.max_pool3d(input, kernel_size, stride=None, padding=0, dilation=1, ceil_mode=False, return_indices=False) torch.nn.functional.max_unpool1d(input, indices, kernel_size, stride=None, padding=0, output_size=None) torch.nn.functional.max_unpool2d(input, indices, kernel_size, stride=None, padding=0, output_size=None) torch.nn.functional.max_unpool3d(input, indices, kernel_size, stride=None, padding=0, output_size=None) torch.nn.functional.lp_pool1d(input, norm_type, kernel_size, stride=None, ceil_mode=False) torch.nn.functional.lp_pool2d(input, norm_type, kernel_size, stride=None, ceil_mode=False) torch.nn.functional.adaptive_max_pool1d(input, output_size, 
return_indices=False) torch.nn.functional.adaptive_max_pool2d(input, output_size, return_indices=False) torch.nn.functional.adaptive_max_pool3d(input, output_size, return_indices=False) torch.nn.functional.adaptive_avg_pool1d(input, output_size) torch.nn.functional.adaptive_avg_pool2d(input, output_size) torch.nn.functional.adaptive_avg_pool3d(input, output_size) torch.nn.functional.threshold(input, threshold, value, inplace=False) torch.nn.functional.threshold_(input, threshold, value) torch.nn.functional.relu(input, inplace=False) torch.nn.functional.relu_(input) torch.nn.functional.hardtanh(input, min_val=-1., max_val=1., inplace=False) torch.nn.functional.hardtanh_(input, min_val=-1., max_val=1.) torch.nn.functional.relu6(input, inplace=False) torch.nn.functional.elu(input, alpha=1.0, inplace=False) torch.nn.functional.elu_(input, alpha=1.) torch.nn.functional.selu(input, inplace=False) torch.nn.functional.celu(input, alpha=1., inplace=False) torch.nn.functional.leaky_relu(input, negative_slope=0.01, inplace=False) torch.nn.functional.leaky_relu_(input, negative_slope=0.01) torch.nn.functional.prelu(input, weight) torch.nn.functional.rrelu(input, lower=1./8, upper=1./3, training=False, inplace=False) torch.nn.functional.rrelu_(input, lower=1./8, upper=1./3, training=False) torch.nn.functional.gelu(input) torch.nn.functional.logsigmoid(input) torch.nn.functional.hardshrink(input, lambd=0.5) torch.nn.functional.tanhshrink(input) torch.nn.functional.softsign(input) torch.nn.functional.softplus(input, beta=1, threshold=20) torch.nn.functional.softmin(input, dim=None, _stacklevel=3, dtype=None) torch.nn.functional.softmax(input, dim=None, _stacklevel=3, dtype=None) torch.nn.functional.softshrink(input, lambd=0.5) torch.nn.functional.log_softmax(input, dim=None, _stacklevel=3, dtype=None) torch.nn.functional.tanh(input) torch.nn.functional.sigmoid(input) torch.nn.functional.batch_norm(input, running_mean, running_var, weight=None, bias=None, training=False, momentum=0.1, eps=1e-05) torch.nn.functional.layer_norm(input, normalized_shape, weight=None, bias=None, eps=1e-05) torch.nn.functional.local_response_norm(input, size, alpha=0.0001, beta=0.75, k=1.0) torch.nn.functional.linear(input, weight, bias=None) torch.nn.functional.dropout(input, p=0.5, training=True, inplace=False) torch.nn.functional.alpha_dropout(input, p=0.5, training=False, inplace=False) torch.nn.functional.embedding(input, weight, padding_idx=None, max_norm=None, norm_type=2.0, scale_grad_by_freq=False, sparse=False) torch.nn.functional.l1_loss(input, target, size_average=None, reduce=None, reduction='mean') torch.nn.functional.pad(input, pad, mode='constant', value=0)
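As a rough illustration of how the listed operators are typically invoked, the following sketch (not taken from the source; tensor values and shapes are chosen arbitrarily) exercises the method form, the in-place form (trailing underscore), and the torch.* free-function form of a few operators from the list above.

    import torch

    # Sketch: method form, in-place form (trailing underscore), and torch.* form
    # of a few operators from the listing. Values are arbitrary.
    x = torch.arange(1., 7.).reshape(2, 3)   # torch.arange / Tensor.reshape
    y = torch.ones_like(x)                   # torch.ones_like

    a = x.add(y, alpha=2)                    # Tensor.add(other, *, alpha=1): x + 2 * y
    x.add_(y, alpha=2)                       # Tensor.add_ mutates x in place
    assert torch.equal(x, a)                 # torch.equal
    b = torch.add(a, y)                      # free-function form of the same op

    z = x.clamp(2., 10.).log().mean(dim=1, keepdim=True)  # clamp / log / mean
    w = x @ x.t()                            # Tensor.__matmul__ / Tensor.t
    print(b.shape, z.shape, w.shape)

Similarly, a minimal sketch of how several of the listed torch.nn.functional operators compose into a small forward pass; the weight tensors (conv_w, conv_b, fc_w, fc_b) are ad-hoc placeholders introduced here purely for illustration, not parameters defined anywhere in this document.

    import torch
    import torch.nn.functional as F

    x = torch.randn(8, 3, 32, 32)                         # N, C, H, W
    conv_w, conv_b = torch.randn(16, 3, 3, 3), torch.zeros(16)
    fc_w, fc_b = torch.randn(10, 16 * 4 * 4), torch.zeros(10)

    h = F.conv2d(x, conv_w, conv_b, stride=1, padding=1)  # -> (8, 16, 32, 32)
    h = F.relu(h)                                         # relu / relu_
    h = F.max_pool2d(h, kernel_size=2)                    # -> (8, 16, 16, 16)
    h = F.adaptive_avg_pool2d(h, output_size=(4, 4))      # -> (8, 16, 4, 4)
    h = torch.flatten(h, start_dim=1)                     # -> (8, 256)
    h = F.linear(h, fc_w, fc_b)                           # -> (8, 10)
    h = F.dropout(h, p=0.5, training=True)
    log_probs = F.log_softmax(h, dim=1)
    print(log_probs.shape)                                # torch.Size([8, 10])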