#![allow(clippy::all)]
use crate::{Device, Kind, Layout, Scalar, Tensor};
use std::borrow::Borrow;
use std::convert::Into;
use torch_sys::*;
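
// The methods below follow one uniform pattern: each is a thin wrapper around the
// fallible `f_*` function of the same name, forwarding its arguments unchanged and
// panicking (via `.unwrap()`) if the underlying call returns an error. Methods whose
// names end in a trailing underscore (e.g. `internal_and_`) take `&mut self` and
// correspond to in-place operations. To handle failures yourself, call the `f_*`
// variant directly. A minimal sketch, assuming `t` is an existing `Tensor` and the
// surrounding function returns a compatible `Result`:
//
//     let y = t.internal_log_softmax(-1, false);    // panics if the call fails
//     let y = t.f_internal_log_softmax(-1, false)?; // propagates the error instead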
impl Tensor {
pub fn internal_and_<S: Into<Scalar>>(&mut self, other: S) -> Tensor {
self.f_internal_and_(other).unwrap()
}
pub fn internal_and_tensor_(&mut self, other: &Tensor) -> Tensor {
self.f_internal_and_tensor_(other).unwrap()
}
pub fn internal_iand_<S: Into<Scalar>>(&mut self, other: S) -> Tensor {
self.f_internal_iand_(other).unwrap()
}
pub fn internal_iand_tensor_(&mut self, other: &Tensor) -> Tensor {
self.f_internal_iand_tensor_(other).unwrap()
}
pub fn internal_ilshift_<S: Into<Scalar>>(&mut self, other: S) -> Tensor {
self.f_internal_ilshift_(other).unwrap()
}
pub fn internal_ilshift_tensor_(&mut self, other: &Tensor) -> Tensor {
self.f_internal_ilshift_tensor_(other).unwrap()
}
pub fn internal_ior_<S: Into<Scalar>>(&mut self, other: S) -> Tensor {
self.f_internal_ior_(other).unwrap()
}
pub fn internal_ior_tensor_(&mut self, other: &Tensor) -> Tensor {
self.f_internal_ior_tensor_(other).unwrap()
}
pub fn internal_irshift_<S: Into<Scalar>>(&mut self, other: S) -> Tensor {
self.f_internal_irshift_(other).unwrap()
}
pub fn internal_irshift_tensor_(&mut self, other: &Tensor) -> Tensor {
self.f_internal_irshift_tensor_(other).unwrap()
}
pub fn internal_ixor_<S: Into<Scalar>>(&mut self, other: S) -> Tensor {
self.f_internal_ixor_(other).unwrap()
}
pub fn internal_ixor_tensor_(&mut self, other: &Tensor) -> Tensor {
self.f_internal_ixor_tensor_(other).unwrap()
}
pub fn internal_lshift_<S: Into<Scalar>>(&mut self, other: S) -> Tensor {
self.f_internal_lshift_(other).unwrap()
}
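// Methods whose names end in `_out` follow the usual out-variant convention: the
// result is written into the caller-provided `out` tensor (or `out0`, `out1`, ...)
// rather than into freshly allocated storage, and the returned `Tensor` refers to
// that same output.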
pub fn internal_lshift_scalar_out_<S: Into<Scalar>>(&self, out: &Tensor, other: S) -> Tensor {
self.f_internal_lshift_scalar_out_(out, other).unwrap()
}
pub fn internal_lshift_tensor_(&mut self, other: &Tensor) -> Tensor {
self.f_internal_lshift_tensor_(other).unwrap()
}
pub fn internal_lshift_tensor_out_(&self, out: &Tensor, other: &Tensor) -> Tensor {
self.f_internal_lshift_tensor_out_(out, other).unwrap()
}
pub fn internal_or_<S: Into<Scalar>>(&mut self, other: S) -> Tensor {
self.f_internal_or_(other).unwrap()
}
pub fn internal_or_tensor_(&mut self, other: &Tensor) -> Tensor {
self.f_internal_or_tensor_(other).unwrap()
}
pub fn internal_rshift_<S: Into<Scalar>>(&mut self, other: S) -> Tensor {
self.f_internal_rshift_(other).unwrap()
}
pub fn internal_rshift_scalar_out_<S: Into<Scalar>>(&self, out: &Tensor, other: S) -> Tensor {
self.f_internal_rshift_scalar_out_(out, other).unwrap()
}
pub fn internal_rshift_tensor_(&mut self, other: &Tensor) -> Tensor {
self.f_internal_rshift_tensor_(other).unwrap()
}
pub fn internal_rshift_tensor_out_(&self, out: &Tensor, other: &Tensor) -> Tensor {
self.f_internal_rshift_tensor_out_(out, other).unwrap()
}
pub fn internal_xor_<S: Into<Scalar>>(&mut self, other: S) -> Tensor {
self.f_internal_xor_(other).unwrap()
}
pub fn internal_xor_tensor_(&mut self, other: &Tensor) -> Tensor {
self.f_internal_xor_tensor_(other).unwrap()
}
pub fn internal_adaptive_avg_pool2d(&self, output_size: impl IntList) -> Tensor {
self.f_internal_adaptive_avg_pool2d(output_size).unwrap()
}
pub fn internal_adaptive_avg_pool2d_backward(&self, grad_output: &Tensor) -> Tensor {
self.f_internal_adaptive_avg_pool2d_backward(grad_output).unwrap()
}
pub fn internal_adaptive_avg_pool2d_backward_out(
&self,
out: &Tensor,
grad_output: &Tensor,
) -> Tensor {
self.f_internal_adaptive_avg_pool2d_backward_out(out, grad_output).unwrap()
}
pub fn internal_adaptive_avg_pool2d_out(
&self,
out: &Tensor,
output_size: impl IntList,
) -> Tensor {
self.f_internal_adaptive_avg_pool2d_out(out, output_size).unwrap()
}
pub fn internal_adaptive_avg_pool3d(&self, output_size: impl IntList) -> Tensor {
self.f_internal_adaptive_avg_pool3d(output_size).unwrap()
}
pub fn internal_adaptive_avg_pool3d_backward(&self, grad_output: &Tensor) -> Tensor {
self.f_internal_adaptive_avg_pool3d_backward(grad_output).unwrap()
}
pub fn internal_adaptive_avg_pool3d_backward_out(
&self,
out: &Tensor,
grad_output: &Tensor,
) -> Tensor {
self.f_internal_adaptive_avg_pool3d_backward_out(out, grad_output).unwrap()
}
pub fn internal_adaptive_avg_pool3d_out(
&self,
out: &Tensor,
output_size: impl IntList,
) -> Tensor {
self.f_internal_adaptive_avg_pool3d_out(out, output_size).unwrap()
}
pub fn internal_add_batch_dim(&self, batch_dim: i64, level: i64) -> Tensor {
self.f_internal_add_batch_dim(batch_dim, level).unwrap()
}
pub fn internal_add_relu(&self, other: &Tensor) -> Tensor {
self.f_internal_add_relu(other).unwrap()
}
pub fn internal_add_relu_(&mut self, other: &Tensor) -> Tensor {
self.f_internal_add_relu_(other).unwrap()
}
pub fn internal_add_relu_out(&self, out: &Tensor, other: &Tensor) -> Tensor {
self.f_internal_add_relu_out(out, other).unwrap()
}
pub fn internal_add_relu_scalar<S: Into<Scalar>>(&self, other: S) -> Tensor {
self.f_internal_add_relu_scalar(other).unwrap()
}
pub fn internal_add_relu_scalar_<S: Into<Scalar>>(&mut self, other: S) -> Tensor {
self.f_internal_add_relu_scalar_(other).unwrap()
}
pub fn internal_add_relu_scalar_out<S: Into<Scalar>>(&self, out: &Tensor, other: S) -> Tensor {
self.f_internal_add_relu_scalar_out(out, other).unwrap()
}
pub fn internal_addmm_activation(
&self,
mat1: &Tensor,
mat2: &Tensor,
use_gelu: bool,
) -> Tensor {
self.f_internal_addmm_activation(mat1, mat2, use_gelu).unwrap()
}
pub fn internal_addmm_activation_out(
&self,
out: &Tensor,
mat1: &Tensor,
mat2: &Tensor,
use_gelu: bool,
) -> Tensor {
self.f_internal_addmm_activation_out(out, mat1, mat2, use_gelu).unwrap()
}
pub fn internal_aminmax(&self) -> (Tensor, Tensor) {
self.f_internal_aminmax().unwrap()
}
pub fn internal_aminmax_dim(&self, dim: i64, keepdim: bool) -> (Tensor, Tensor) {
self.f_internal_aminmax_dim(dim, keepdim).unwrap()
}
pub fn internal_aminmax_dim_out(
&self,
out0: &Tensor,
out1: &Tensor,
dim: i64,
keepdim: bool,
) -> (Tensor, Tensor) {
self.f_internal_aminmax_dim_out(out0, out1, dim, keepdim).unwrap()
}
pub fn internal_aminmax_out(&self, out0: &Tensor, out1: &Tensor) -> (Tensor, Tensor) {
self.f_internal_aminmax_out(out0, out1).unwrap()
}
pub fn internal_amp_update_scale(
&self,
growth_tracker: &Tensor,
found_inf: &Tensor,
scale_growth_factor: f64,
scale_backoff_factor: f64,
growth_interval: i64,
) -> (Tensor, Tensor) {
self.f_internal_amp_update_scale(
growth_tracker,
found_inf,
scale_growth_factor,
scale_backoff_factor,
growth_interval,
)
.unwrap()
}
pub fn internal_amp_update_scale_(
&mut self,
growth_tracker: &Tensor,
found_inf: &Tensor,
scale_growth_factor: f64,
scale_backoff_factor: f64,
growth_interval: i64,
) -> Tensor {
self.f_internal_amp_update_scale_(
growth_tracker,
found_inf,
scale_growth_factor,
scale_backoff_factor,
growth_interval,
)
.unwrap()
}
pub fn internal_amp_update_scale_out(
&self,
out: &Tensor,
growth_tracker: &Tensor,
found_inf: &Tensor,
scale_growth_factor: f64,
scale_backoff_factor: f64,
growth_interval: i64,
) -> Tensor {
self.f_internal_amp_update_scale_out(
out,
growth_tracker,
found_inf,
scale_growth_factor,
scale_backoff_factor,
growth_interval,
)
.unwrap()
}
pub fn internal_assert_tensor_metadata(
a: &Tensor,
size: impl IntListOption,
stride: impl IntListOption,
dtype: impl Into<Option<Kind>>,
) {
Tensor::f_internal_assert_tensor_metadata(a, size, stride, dtype).unwrap()
}
pub fn internal_autocast_to_full_precision(
&self,
cuda_enabled: bool,
cpu_enabled: bool,
) -> Tensor {
self.f_internal_autocast_to_full_precision(cuda_enabled, cpu_enabled).unwrap()
}
pub fn internal_autocast_to_reduced_precision(
&self,
cuda_enabled: bool,
cpu_enabled: bool,
cuda_dtype: Kind,
cpu_dtype: Kind,
) -> Tensor {
self.f_internal_autocast_to_reduced_precision(
cuda_enabled,
cpu_enabled,
cuda_dtype,
cpu_dtype,
)
.unwrap()
}
pub fn internal_cast_byte(&self, non_blocking: bool) -> Tensor {
self.f_internal_cast_byte(non_blocking).unwrap()
}
pub fn internal_cast_char(&self, non_blocking: bool) -> Tensor {
self.f_internal_cast_char(non_blocking).unwrap()
}
pub fn internal_cast_double(&self, non_blocking: bool) -> Tensor {
self.f_internal_cast_double(non_blocking).unwrap()
}
pub fn internal_cast_float(&self, non_blocking: bool) -> Tensor {
self.f_internal_cast_float(non_blocking).unwrap()
}
pub fn internal_cast_half(&self, non_blocking: bool) -> Tensor {
self.f_internal_cast_half(non_blocking).unwrap()
}
pub fn internal_cast_int(&self, non_blocking: bool) -> Tensor {
self.f_internal_cast_int(non_blocking).unwrap()
}
pub fn internal_cast_long(&self, non_blocking: bool) -> Tensor {
self.f_internal_cast_long(non_blocking).unwrap()
}
pub fn internal_cast_short(&self, non_blocking: bool) -> Tensor {
self.f_internal_cast_short(non_blocking).unwrap()
}
pub fn internal_cdist_backward(
grad: &Tensor,
x1: &Tensor,
x2: &Tensor,
p: f64,
cdist: &Tensor,
) -> Tensor {
Tensor::f_internal_cdist_backward(grad, x1, x2, p, cdist).unwrap()
}
pub fn internal_cdist_backward_out(
out: &Tensor,
grad: &Tensor,
x1: &Tensor,
x2: &Tensor,
p: f64,
cdist: &Tensor,
) -> Tensor {
Tensor::f_internal_cdist_backward_out(out, grad, x1, x2, p, cdist).unwrap()
}
pub fn internal_cholesky_solve_helper(&self, a: &Tensor, upper: bool) -> Tensor {
self.f_internal_cholesky_solve_helper(a, upper).unwrap()
}
pub fn internal_cholesky_solve_helper_out(
&self,
out: &Tensor,
a: &Tensor,
upper: bool,
) -> Tensor {
self.f_internal_cholesky_solve_helper_out(out, a, upper).unwrap()
}
pub fn internal_coalesce(&self) -> Tensor {
self.f_internal_coalesce().unwrap()
}
pub fn internal_coalesce_out(&self, out: &Tensor) -> Tensor {
self.f_internal_coalesce_out(out).unwrap()
}
pub fn internal_coalesced(&self, coalesced: bool) -> Tensor {
self.f_internal_coalesced(coalesced).unwrap()
}
pub fn internal_coalesced_(&mut self, coalesced: bool) -> Tensor {
self.f_internal_coalesced_(coalesced).unwrap()
}
pub fn internal_coalesced_out(&self, out: &Tensor, coalesced: bool) -> Tensor {
self.f_internal_coalesced_out(out, coalesced).unwrap()
}
pub fn internal_compute_linear_combination(&self, coefficients: &Tensor) -> Tensor {
self.f_internal_compute_linear_combination(coefficients).unwrap()
}
pub fn internal_compute_linear_combination_out(
&self,
out: &Tensor,
coefficients: &Tensor,
) -> Tensor {
self.f_internal_compute_linear_combination_out(out, coefficients).unwrap()
}
pub fn internal_conj(&self) -> Tensor {
self.f_internal_conj().unwrap()
}
pub fn internal_conj_copy(&self) -> Tensor {
self.f_internal_conj_copy().unwrap()
}
pub fn internal_conj_copy_out(&self, out: &Tensor) -> Tensor {
self.f_internal_conj_copy_out(out).unwrap()
}
pub fn internal_conj_physical(&self) -> Tensor {
self.f_internal_conj_physical().unwrap()
}
pub fn internal_conj_physical_out(&self, out: &Tensor) -> Tensor {
self.f_internal_conj_physical_out(out).unwrap()
}
pub fn internal_conv_depthwise2d<T: Borrow<Tensor>>(
&self,
weight: &Tensor,
kernel_size: impl IntList,
bias: Option<T>,
stride: impl IntList,
padding: impl IntList,
dilation: impl IntList,
) -> Tensor {
self.f_internal_conv_depthwise2d(weight, kernel_size, bias, stride, padding, dilation)
.unwrap()
}
pub fn internal_conv_depthwise2d_out<T: Borrow<Tensor>>(
&self,
out: &Tensor,
weight: &Tensor,
kernel_size: impl IntList,
bias: Option<T>,
stride: impl IntList,
padding: impl IntList,
dilation: impl IntList,
) -> Tensor {
self.f_internal_conv_depthwise2d_out(
out,
weight,
kernel_size,
bias,
stride,
padding,
dilation,
)
.unwrap()
}
pub fn internal_convert_indices_from_coo_to_csr(&self, size: i64, out_int32: bool) -> Tensor {
self.f_internal_convert_indices_from_coo_to_csr(size, out_int32).unwrap()
}
pub fn internal_convert_indices_from_coo_to_csr_out(
&self,
out: &Tensor,
size: i64,
out_int32: bool,
) -> Tensor {
self.f_internal_convert_indices_from_coo_to_csr_out(out, size, out_int32).unwrap()
}
pub fn internal_convert_indices_from_csr_to_coo(
crow_indices: &Tensor,
col_indices: &Tensor,
out_int32: bool,
transpose: bool,
) -> Tensor {
Tensor::f_internal_convert_indices_from_csr_to_coo(
crow_indices,
col_indices,
out_int32,
transpose,
)
.unwrap()
}
pub fn internal_convert_indices_from_csr_to_coo_out(
out: &Tensor,
crow_indices: &Tensor,
col_indices: &Tensor,
out_int32: bool,
transpose: bool,
) -> Tensor {
Tensor::f_internal_convert_indices_from_csr_to_coo_out(
out,
crow_indices,
col_indices,
out_int32,
transpose,
)
.unwrap()
}
pub fn internal_convolution<T: Borrow<Tensor>>(
&self,
weight: &Tensor,
bias: Option<T>,
stride: impl IntList,
padding: impl IntList,
dilation: impl IntList,
transposed: bool,
output_padding: impl IntList,
groups: i64,
benchmark: bool,
deterministic: bool,
cudnn_enabled: bool,
allow_tf32: bool,
) -> Tensor {
self.f_internal_convolution(
weight,
bias,
stride,
padding,
dilation,
transposed,
output_padding,
groups,
benchmark,
deterministic,
cudnn_enabled,
allow_tf32,
)
.unwrap()
}
pub fn internal_convolution_deprecated<T: Borrow<Tensor>>(
&self,
weight: &Tensor,
bias: Option<T>,
stride: impl IntList,
padding: impl IntList,
dilation: impl IntList,
transposed: bool,
output_padding: impl IntList,
groups: i64,
benchmark: bool,
deterministic: bool,
cudnn_enabled: bool,
) -> Tensor {
self.f_internal_convolution_deprecated(
weight,
bias,
stride,
padding,
dilation,
transposed,
output_padding,
groups,
benchmark,
deterministic,
cudnn_enabled,
)
.unwrap()
}
pub fn internal_convolution_mode<T: Borrow<Tensor>>(
&self,
weight: &Tensor,
bias: Option<T>,
stride: impl IntList,
padding: &str,
dilation: impl IntList,
groups: i64,
) -> Tensor {
self.f_internal_convolution_mode(weight, bias, stride, padding, dilation, groups).unwrap()
}
pub fn internal_convolution_out<T: Borrow<Tensor>>(
&self,
out: &Tensor,
weight: &Tensor,
bias: Option<T>,
stride: impl IntList,
padding: impl IntList,
dilation: impl IntList,
transposed: bool,
output_padding: impl IntList,
groups: i64,
benchmark: bool,
deterministic: bool,
cudnn_enabled: bool,
allow_tf32: bool,
) -> Tensor {
self.f_internal_convolution_out(
out,
weight,
bias,
stride,
padding,
dilation,
transposed,
output_padding,
groups,
benchmark,
deterministic,
cudnn_enabled,
allow_tf32,
)
.unwrap()
}
pub fn internal_copy_from(&self, dst: &Tensor, non_blocking: bool) -> Tensor {
self.f_internal_copy_from(dst, non_blocking).unwrap()
}
pub fn internal_copy_from_and_resize(&self, dst: &Tensor) -> Tensor {
self.f_internal_copy_from_and_resize(dst).unwrap()
}
pub fn internal_copy_from_and_resize_out(&self, out: &Tensor, dst: &Tensor) -> Tensor {
self.f_internal_copy_from_and_resize_out(out, dst).unwrap()
}
pub fn internal_copy_from_out(&self, out: &Tensor, dst: &Tensor, non_blocking: bool) -> Tensor {
self.f_internal_copy_from_out(out, dst, non_blocking).unwrap()
}
pub fn internal_cslt_compress(&self) -> Tensor {
self.f_internal_cslt_compress().unwrap()
}
pub fn internal_cslt_sparse_mm<T: Borrow<Tensor>>(
compressed_a: &Tensor,
dense_b: &Tensor,
bias: Option<T>,
transpose_result: bool,
) -> Tensor {
Tensor::f_internal_cslt_sparse_mm(compressed_a, dense_b, bias, transpose_result).unwrap()
}
pub fn internal_ctc_loss(
log_probs: &Tensor,
targets: &Tensor,
input_lengths: impl IntList,
target_lengths: impl IntList,
blank: i64,
zero_infinity: bool,
) -> (Tensor, Tensor) {
Tensor::f_internal_ctc_loss(
log_probs,
targets,
input_lengths,
target_lengths,
blank,
zero_infinity,
)
.unwrap()
}
pub fn internal_ctc_loss_backward(
grad: &Tensor,
log_probs: &Tensor,
targets: &Tensor,
input_lengths: impl IntList,
target_lengths: impl IntList,
neg_log_likelihood: &Tensor,
log_alpha: &Tensor,
blank: i64,
zero_infinity: bool,
) -> Tensor {
Tensor::f_internal_ctc_loss_backward(
grad,
log_probs,
targets,
input_lengths,
target_lengths,
neg_log_likelihood,
log_alpha,
blank,
zero_infinity,
)
.unwrap()
}
pub fn internal_ctc_loss_backward_out(
out: &Tensor,
grad: &Tensor,
log_probs: &Tensor,
targets: &Tensor,
input_lengths: impl IntList,
target_lengths: impl IntList,
neg_log_likelihood: &Tensor,
log_alpha: &Tensor,
blank: i64,
zero_infinity: bool,
) -> Tensor {
Tensor::f_internal_ctc_loss_backward_out(
out,
grad,
log_probs,
targets,
input_lengths,
target_lengths,
neg_log_likelihood,
log_alpha,
blank,
zero_infinity,
)
.unwrap()
}
pub fn internal_ctc_loss_backward_tensor(
grad: &Tensor,
log_probs: &Tensor,
targets: &Tensor,
input_lengths: &Tensor,
target_lengths: &Tensor,
neg_log_likelihood: &Tensor,
log_alpha: &Tensor,
blank: i64,
zero_infinity: bool,
) -> Tensor {
Tensor::f_internal_ctc_loss_backward_tensor(
grad,
log_probs,
targets,
input_lengths,
target_lengths,
neg_log_likelihood,
log_alpha,
blank,
zero_infinity,
)
.unwrap()
}
pub fn internal_ctc_loss_out(
out0: &Tensor,
out1: &Tensor,
log_probs: &Tensor,
targets: &Tensor,
input_lengths: impl IntList,
target_lengths: impl IntList,
blank: i64,
zero_infinity: bool,
) -> (Tensor, Tensor) {
Tensor::f_internal_ctc_loss_out(
out0,
out1,
log_probs,
targets,
input_lengths,
target_lengths,
blank,
zero_infinity,
)
.unwrap()
}
pub fn internal_ctc_loss_tensor(
log_probs: &Tensor,
targets: &Tensor,
input_lengths: &Tensor,
target_lengths: &Tensor,
blank: i64,
zero_infinity: bool,
) -> (Tensor, Tensor) {
Tensor::f_internal_ctc_loss_tensor(
log_probs,
targets,
input_lengths,
target_lengths,
blank,
zero_infinity,
)
.unwrap()
}
pub fn internal_ctc_loss_tensor_out(
out0: &Tensor,
out1: &Tensor,
log_probs: &Tensor,
targets: &Tensor,
input_lengths: &Tensor,
target_lengths: &Tensor,
blank: i64,
zero_infinity: bool,
) -> (Tensor, Tensor) {
Tensor::f_internal_ctc_loss_tensor_out(
out0,
out1,
log_probs,
targets,
input_lengths,
target_lengths,
blank,
zero_infinity,
)
.unwrap()
}
pub fn internal_cudnn_ctc_loss(
log_probs: &Tensor,
targets: &Tensor,
input_lengths: impl IntList,
target_lengths: impl IntList,
blank: i64,
deterministic: bool,
zero_infinity: bool,
) -> (Tensor, Tensor) {
Tensor::f_internal_cudnn_ctc_loss(
log_probs,
targets,
input_lengths,
target_lengths,
blank,
deterministic,
zero_infinity,
)
.unwrap()
}
pub fn internal_cudnn_ctc_loss_out(
out0: &Tensor,
out1: &Tensor,
log_probs: &Tensor,
targets: &Tensor,
input_lengths: impl IntList,
target_lengths: impl IntList,
blank: i64,
deterministic: bool,
zero_infinity: bool,
) -> (Tensor, Tensor) {
Tensor::f_internal_cudnn_ctc_loss_out(
out0,
out1,
log_probs,
targets,
input_lengths,
target_lengths,
blank,
deterministic,
zero_infinity,
)
.unwrap()
}
pub fn internal_cudnn_ctc_loss_tensor(
log_probs: &Tensor,
targets: &Tensor,
input_lengths: &Tensor,
target_lengths: &Tensor,
blank: i64,
deterministic: bool,
zero_infinity: bool,
) -> (Tensor, Tensor) {
Tensor::f_internal_cudnn_ctc_loss_tensor(
log_probs,
targets,
input_lengths,
target_lengths,
blank,
deterministic,
zero_infinity,
)
.unwrap()
}
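// Factory-style functions that take `options: (Kind, Device)` allocate a new tensor
// with the given element type on the given device; the pair stands in for the
// `TensorOptions` argument of the corresponding C++ API.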
pub fn internal_cudnn_init_dropout_state(
dropout: f64,
train: bool,
dropout_seed: i64,
options: (Kind, Device),
) -> Tensor {
Tensor::f_internal_cudnn_init_dropout_state(dropout, train, dropout_seed, options).unwrap()
}
pub fn internal_cudnn_init_dropout_state_out(
out: &Tensor,
dropout: f64,
train: bool,
dropout_seed: i64,
) -> Tensor {
Tensor::f_internal_cudnn_init_dropout_state_out(out, dropout, train, dropout_seed).unwrap()
}
pub fn internal_cudnn_rnn<T: Borrow<Tensor>>(
&self,
weight: &[T],
weight_stride0: i64,
weight_buf: Option<T>,
hx: &Tensor,
cx: Option<T>,
mode: i64,
hidden_size: i64,
proj_size: i64,
num_layers: i64,
batch_first: bool,
dropout: f64,
train: bool,
bidirectional: bool,
batch_sizes: impl IntList,
dropout_state: Option<T>,
) -> (Tensor, Tensor, Tensor, Tensor, Tensor) {
self.f_internal_cudnn_rnn(
weight,
weight_stride0,
weight_buf,
hx,
cx,
mode,
hidden_size,
proj_size,
num_layers,
batch_first,
dropout,
train,
bidirectional,
batch_sizes,
dropout_state,
)
.unwrap()
}
pub fn internal_cudnn_rnn_flatten_weight<T: Borrow<Tensor>>(
weight_arr: &[T],
weight_stride0: i64,
input_size: i64,
mode: i64,
hidden_size: i64,
proj_size: i64,
num_layers: i64,
batch_first: bool,
bidirectional: bool,
) -> Tensor {
Tensor::f_internal_cudnn_rnn_flatten_weight(
weight_arr,
weight_stride0,
input_size,
mode,
hidden_size,
proj_size,
num_layers,
batch_first,
bidirectional,
)
.unwrap()
}
pub fn internal_cudnn_rnn_flatten_weight_out<T: Borrow<Tensor>>(
out: &Tensor,
weight_arr: &[T],
weight_stride0: i64,
input_size: i64,
mode: i64,
hidden_size: i64,
proj_size: i64,
num_layers: i64,
batch_first: bool,
bidirectional: bool,
) -> Tensor {
Tensor::f_internal_cudnn_rnn_flatten_weight_out(
out,
weight_arr,
weight_stride0,
input_size,
mode,
hidden_size,
proj_size,
num_layers,
batch_first,
bidirectional,
)
.unwrap()
}
pub fn internal_cudnn_rnn_out<T: Borrow<Tensor>>(
&self,
out0: &Tensor,
out1: &Tensor,
out2: &Tensor,
out3: &Tensor,
out4: &Tensor,
weight: &[T],
weight_stride0: i64,
weight_buf: Option<T>,
hx: &Tensor,
cx: Option<T>,
mode: i64,
hidden_size: i64,
proj_size: i64,
num_layers: i64,
batch_first: bool,
dropout: f64,
train: bool,
bidirectional: bool,
batch_sizes: impl IntList,
dropout_state: Option<T>,
) -> (Tensor, Tensor, Tensor, Tensor, Tensor) {
self.f_internal_cudnn_rnn_out(
out0,
out1,
out2,
out3,
out4,
weight,
weight_stride0,
weight_buf,
hx,
cx,
mode,
hidden_size,
proj_size,
num_layers,
batch_first,
dropout,
train,
bidirectional,
batch_sizes,
dropout_state,
)
.unwrap()
}
pub fn internal_debug_has_internal_overlap(&self) -> i64 {
self.f_internal_debug_has_internal_overlap().unwrap()
}
pub fn internal_dim_arange(like: &Tensor, dim: i64) -> Tensor {
Tensor::f_internal_dim_arange(like, dim).unwrap()
}
pub fn internal_dimi(&self) -> i64 {
self.f_internal_dimi().unwrap()
}
pub fn internal_dimv(&self) -> i64 {
self.f_internal_dimv().unwrap()
}
pub fn internal_dirichlet_grad(x: &Tensor, alpha: &Tensor, total: &Tensor) -> Tensor {
Tensor::f_internal_dirichlet_grad(x, alpha, total).unwrap()
}
pub fn internal_dirichlet_grad_out(
out: &Tensor,
x: &Tensor,
alpha: &Tensor,
total: &Tensor,
) -> Tensor {
Tensor::f_internal_dirichlet_grad_out(out, x, alpha, total).unwrap()
}
pub fn internal_efficient_attention_backward<T: Borrow<Tensor>>(
grad_out_: &Tensor,
query: &Tensor,
key: &Tensor,
value: &Tensor,
bias: Option<T>,
out: &Tensor,
cu_seqlens_q: Option<T>,
cu_seqlens_k: Option<T>,
max_seqlen_k: i64,
max_seqlen_q: i64,
logsumexp: &Tensor,
dropout_p: f64,
philox_seed: &Tensor,
philox_offset: &Tensor,
custom_mask_type: i64,
bias_requires_grad: bool,
scale: impl Into<Option<f64>>,
num_splits_key: impl Into<Option<i64>>,
) -> (Tensor, Tensor, Tensor, Tensor) {
Tensor::f_internal_efficient_attention_backward(
grad_out_,
query,
key,
value,
bias,
out,
cu_seqlens_q,
cu_seqlens_k,
max_seqlen_k,
max_seqlen_q,
logsumexp,
dropout_p,
philox_seed,
philox_offset,
custom_mask_type,
bias_requires_grad,
scale,
num_splits_key,
)
.unwrap()
}
pub fn internal_efficientzerotensor(size: impl IntList, options: (Kind, Device)) -> Tensor {
Tensor::f_internal_efficientzerotensor(size, options).unwrap()
}
pub fn internal_efficientzerotensor_out(out: &Tensor, size: impl IntList) -> Tensor {
Tensor::f_internal_efficientzerotensor_out(out, size).unwrap()
}
pub fn internal_embedding_bag<T: Borrow<Tensor>>(
weight: &Tensor,
indices: &Tensor,
offsets: &Tensor,
scale_grad_by_freq: bool,
mode: i64,
sparse: bool,
per_sample_weights: Option<T>,
include_last_offset: bool,
padding_idx: i64,
) -> (Tensor, Tensor, Tensor, Tensor) {
Tensor::f_internal_embedding_bag(
weight,
indices,
offsets,
scale_grad_by_freq,
mode,
sparse,
per_sample_weights,
include_last_offset,
padding_idx,
)
.unwrap()
}
pub fn internal_embedding_bag_backward<T: Borrow<Tensor>>(
grad: &Tensor,
indices: &Tensor,
offsets: &Tensor,
offset2bag: &Tensor,
bag_size: &Tensor,
maximum_indices: &Tensor,
num_weights: i64,
scale_grad_by_freq: bool,
mode: i64,
sparse: bool,
per_sample_weights: Option<T>,
padding_idx: i64,
) -> Tensor {
Tensor::f_internal_embedding_bag_backward(
grad,
indices,
offsets,
offset2bag,
bag_size,
maximum_indices,
num_weights,
scale_grad_by_freq,
mode,
sparse,
per_sample_weights,
padding_idx,
)
.unwrap()
}
pub fn internal_embedding_bag_dense_backward<T: Borrow<Tensor>>(
grad: &Tensor,
indices: &Tensor,
offset2bag: &Tensor,
bag_size: &Tensor,
maximum_indices: &Tensor,
num_weights: i64,
scale_grad_by_freq: bool,
mode: i64,
per_sample_weights: Option<T>,
padding_idx: i64,
) -> Tensor {
Tensor::f_internal_embedding_bag_dense_backward(
grad,
indices,
offset2bag,
bag_size,
maximum_indices,
num_weights,
scale_grad_by_freq,
mode,
per_sample_weights,
padding_idx,
)
.unwrap()
}
pub fn internal_embedding_bag_dense_backward_out<T: Borrow<Tensor>>(
out: &Tensor,
grad: &Tensor,
indices: &Tensor,
offset2bag: &Tensor,
bag_size: &Tensor,
maximum_indices: &Tensor,
num_weights: i64,
scale_grad_by_freq: bool,
mode: i64,
per_sample_weights: Option<T>,
padding_idx: i64,
) -> Tensor {
Tensor::f_internal_embedding_bag_dense_backward_out(
out,
grad,
indices,
offset2bag,
bag_size,
maximum_indices,
num_weights,
scale_grad_by_freq,
mode,
per_sample_weights,
padding_idx,
)
.unwrap()
}
pub fn internal_embedding_bag_forward_only<T: Borrow<Tensor>>(
weight: &Tensor,
indices: &Tensor,
offsets: &Tensor,
scale_grad_by_freq: bool,
mode: i64,
sparse: bool,
per_sample_weights: Option<T>,
include_last_offset: bool,
padding_idx: i64,
) -> (Tensor, Tensor, Tensor, Tensor) {
Tensor::f_internal_embedding_bag_forward_only(
weight,
indices,
offsets,
scale_grad_by_freq,
mode,
sparse,
per_sample_weights,
include_last_offset,
padding_idx,
)
.unwrap()
}
pub fn internal_embedding_bag_forward_only_out<T: Borrow<Tensor>>(
out0: &Tensor,
out1: &Tensor,
out2: &Tensor,
out3: &Tensor,
weight: &Tensor,
indices: &Tensor,
offsets: &Tensor,
scale_grad_by_freq: bool,
mode: i64,
sparse: bool,
per_sample_weights: Option<T>,
include_last_offset: bool,
padding_idx: i64,
) -> (Tensor, Tensor, Tensor, Tensor) {
Tensor::f_internal_embedding_bag_forward_only_out(
out0,
out1,
out2,
out3,
weight,
indices,
offsets,
scale_grad_by_freq,
mode,
sparse,
per_sample_weights,
include_last_offset,
padding_idx,
)
.unwrap()
}
pub fn internal_embedding_bag_out<T: Borrow<Tensor>>(
out0: &Tensor,
out1: &Tensor,
out2: &Tensor,
out3: &Tensor,
weight: &Tensor,
indices: &Tensor,
offsets: &Tensor,
scale_grad_by_freq: bool,
mode: i64,
sparse: bool,
per_sample_weights: Option<T>,
include_last_offset: bool,
padding_idx: i64,
) -> (Tensor, Tensor, Tensor, Tensor) {
Tensor::f_internal_embedding_bag_out(
out0,
out1,
out2,
out3,
weight,
indices,
offsets,
scale_grad_by_freq,
mode,
sparse,
per_sample_weights,
include_last_offset,
padding_idx,
)
.unwrap()
}
pub fn internal_embedding_bag_per_sample_weights_backward(
grad: &Tensor,
weight: &Tensor,
indices: &Tensor,
offsets: &Tensor,
offset2bag: &Tensor,
mode: i64,
padding_idx: i64,
) -> Tensor {
Tensor::f_internal_embedding_bag_per_sample_weights_backward(
grad,
weight,
indices,
offsets,
offset2bag,
mode,
padding_idx,
)
.unwrap()
}
pub fn internal_embedding_bag_per_sample_weights_backward_out(
out: &Tensor,
grad: &Tensor,
weight: &Tensor,
indices: &Tensor,
offsets: &Tensor,
offset2bag: &Tensor,
mode: i64,
padding_idx: i64,
) -> Tensor {
Tensor::f_internal_embedding_bag_per_sample_weights_backward_out(
out,
grad,
weight,
indices,
offsets,
offset2bag,
mode,
padding_idx,
)
.unwrap()
}
pub fn internal_embedding_bag_sparse_backward<T: Borrow<Tensor>>(
grad: &Tensor,
indices: &Tensor,
offsets: &Tensor,
offset2bag: &Tensor,
bag_size: &Tensor,
num_weights: i64,
scale_grad_by_freq: bool,
mode: i64,
per_sample_weights: Option<T>,
padding_idx: i64,
) -> Tensor {
Tensor::f_internal_embedding_bag_sparse_backward(
grad,
indices,
offsets,
offset2bag,
bag_size,
num_weights,
scale_grad_by_freq,
mode,
per_sample_weights,
padding_idx,
)
.unwrap()
}
pub fn internal_empty_affine_quantized(
size: impl IntList,
options: (Kind, Device),
scale: f64,
zero_point: i64,
) -> Tensor {
Tensor::f_internal_empty_affine_quantized(size, options, scale, zero_point).unwrap()
}
pub fn internal_empty_affine_quantized_out(
out: &Tensor,
size: impl IntList,
scale: f64,
zero_point: i64,
) -> Tensor {
Tensor::f_internal_empty_affine_quantized_out(out, size, scale, zero_point).unwrap()
}
pub fn internal_empty_per_channel_affine_quantized(
size: impl IntList,
scales: &Tensor,
zero_points: &Tensor,
axis: i64,
options: (Kind, Device),
) -> Tensor {
Tensor::f_internal_empty_per_channel_affine_quantized(
size,
scales,
zero_points,
axis,
options,
)
.unwrap()
}
pub fn internal_empty_per_channel_affine_quantized_out(
out: &Tensor,
size: impl IntList,
scales: &Tensor,
zero_points: &Tensor,
axis: i64,
) -> Tensor {
Tensor::f_internal_empty_per_channel_affine_quantized_out(
out,
size,
scales,
zero_points,
axis,
)
.unwrap()
}
pub fn internal_euclidean_dist(x1: &Tensor, x2: &Tensor) -> Tensor {
Tensor::f_internal_euclidean_dist(x1, x2).unwrap()
}
pub fn internal_euclidean_dist_out(out: &Tensor, x1: &Tensor, x2: &Tensor) -> Tensor {
Tensor::f_internal_euclidean_dist_out(out, x1, x2).unwrap()
}
pub fn internal_fake_quantize_learnable_per_channel_affine(
&self,
scale: &Tensor,
zero_point: &Tensor,
axis: i64,
quant_min: i64,
quant_max: i64,
grad_factor: f64,
) -> Tensor {
self.f_internal_fake_quantize_learnable_per_channel_affine(
scale,
zero_point,
axis,
quant_min,
quant_max,
grad_factor,
)
.unwrap()
}
pub fn internal_fake_quantize_learnable_per_channel_affine_backward(
&self,
grad: &Tensor,
scale: &Tensor,
zero_point: &Tensor,
axis: i64,
quant_min: i64,
quant_max: i64,
grad_factor: f64,
) -> (Tensor, Tensor, Tensor) {
self.f_internal_fake_quantize_learnable_per_channel_affine_backward(
grad,
scale,
zero_point,
axis,
quant_min,
quant_max,
grad_factor,
)
.unwrap()
}
pub fn internal_fake_quantize_learnable_per_channel_affine_out(
&self,
out: &Tensor,
scale: &Tensor,
zero_point: &Tensor,
axis: i64,
quant_min: i64,
quant_max: i64,
grad_factor: f64,
) -> Tensor {
self.f_internal_fake_quantize_learnable_per_channel_affine_out(
out,
scale,
zero_point,
axis,
quant_min,
quant_max,
grad_factor,
)
.unwrap()
}
pub fn internal_fake_quantize_learnable_per_tensor_affine(
&self,
scale: &Tensor,
zero_point: &Tensor,
quant_min: i64,
quant_max: i64,
grad_factor: f64,
) -> Tensor {
self.f_internal_fake_quantize_learnable_per_tensor_affine(
scale,
zero_point,
quant_min,
quant_max,
grad_factor,
)
.unwrap()
}
pub fn internal_fake_quantize_learnable_per_tensor_affine_backward(
&self,
grad: &Tensor,
scale: &Tensor,
zero_point: &Tensor,
quant_min: i64,
quant_max: i64,
grad_factor: f64,
) -> (Tensor, Tensor, Tensor) {
self.f_internal_fake_quantize_learnable_per_tensor_affine_backward(
grad,
scale,
zero_point,
quant_min,
quant_max,
grad_factor,
)
.unwrap()
}
pub fn internal_fake_quantize_learnable_per_tensor_affine_out(
&self,
out: &Tensor,
scale: &Tensor,
zero_point: &Tensor,
quant_min: i64,
quant_max: i64,
grad_factor: f64,
) -> Tensor {
self.f_internal_fake_quantize_learnable_per_tensor_affine_out(
out,
scale,
zero_point,
quant_min,
quant_max,
grad_factor,
)
.unwrap()
}
pub fn internal_fake_quantize_per_tensor_affine_cachemask_tensor_qparams(
&self,
scale: &Tensor,
zero_point: &Tensor,
fake_quant_enabled: &Tensor,
quant_min: i64,
quant_max: i64,
) -> (Tensor, Tensor) {
self.f_internal_fake_quantize_per_tensor_affine_cachemask_tensor_qparams(
scale,
zero_point,
fake_quant_enabled,
quant_min,
quant_max,
)
.unwrap()
}
pub fn internal_fake_quantize_per_tensor_affine_cachemask_tensor_qparams_out(
&self,
out0: &Tensor,
out1: &Tensor,
scale: &Tensor,
zero_point: &Tensor,
fake_quant_enabled: &Tensor,
quant_min: i64,
quant_max: i64,
) -> (Tensor, Tensor) {
self.f_internal_fake_quantize_per_tensor_affine_cachemask_tensor_qparams_out(
out0,
out1,
scale,
zero_point,
fake_quant_enabled,
quant_min,
quant_max,
)
.unwrap()
}
pub fn internal_fft_c2c(&self, dim: impl IntList, normalization: i64, forward: bool) -> Tensor {
self.f_internal_fft_c2c(dim, normalization, forward).unwrap()
}
pub fn internal_fft_c2c_out(
&self,
out: &Tensor,
dim: impl IntList,
normalization: i64,
forward: bool,
) -> Tensor {
self.f_internal_fft_c2c_out(out, dim, normalization, forward).unwrap()
}
pub fn internal_fft_c2r(
&self,
dim: impl IntList,
normalization: i64,
last_dim_size: i64,
) -> Tensor {
self.f_internal_fft_c2r(dim, normalization, last_dim_size).unwrap()
}
pub fn internal_fft_c2r_out(
&self,
out: &Tensor,
dim: impl IntList,
normalization: i64,
last_dim_size: i64,
) -> Tensor {
self.f_internal_fft_c2r_out(out, dim, normalization, last_dim_size).unwrap()
}
pub fn internal_fft_r2c(
&self,
dim: impl IntList,
normalization: i64,
onesided: bool,
) -> Tensor {
self.f_internal_fft_r2c(dim, normalization, onesided).unwrap()
}
pub fn internal_fft_r2c_out(
&self,
out: &Tensor,
dim: impl IntList,
normalization: i64,
onesided: bool,
) -> Tensor {
self.f_internal_fft_r2c_out(out, dim, normalization, onesided).unwrap()
}
pub fn internal_fill_mem_eff_dropout_mask_(
&mut self,
dropout_p: f64,
seed: i64,
offset: i64,
) -> Tensor {
self.f_internal_fill_mem_eff_dropout_mask_(dropout_p, seed, offset).unwrap()
}
pub fn internal_flash_attention_backward(
grad_out: &Tensor,
query: &Tensor,
key: &Tensor,
value: &Tensor,
out: &Tensor,
logsumexp: &Tensor,
cum_seq_q: &Tensor,
cum_seq_k: &Tensor,
max_q: i64,
max_k: i64,
dropout_p: f64,
is_causal: bool,
philox_seed: &Tensor,
philox_offset: &Tensor,
scale: impl Into<Option<f64>>,
) -> (Tensor, Tensor, Tensor) {
Tensor::f_internal_flash_attention_backward(
grad_out,
query,
key,
value,
out,
logsumexp,
cum_seq_q,
cum_seq_k,
max_q,
max_k,
dropout_p,
is_causal,
philox_seed,
philox_offset,
scale,
)
.unwrap()
}
pub fn internal_foobar(&self, arg1: bool, arg2: bool, arg3: bool) -> Tensor {
self.f_internal_foobar(arg1, arg2, arg3).unwrap()
}
pub fn internal_foobar_out(&self, out: &Tensor, arg1: bool, arg2: bool, arg3: bool) -> Tensor {
self.f_internal_foobar_out(out, arg1, arg2, arg3).unwrap()
}
pub fn internal_functional_assert_async(&self, assert_msg: &str, dep_token: &Tensor) -> Tensor {
self.f_internal_functional_assert_async(assert_msg, dep_token).unwrap()
}
pub fn internal_functional_sym_constrain_range<S: Into<Scalar>>(
size: S,
min: impl Into<Option<i64>>,
max: impl Into<Option<i64>>,
dep_token: &Tensor,
) -> Tensor {
Tensor::f_internal_functional_sym_constrain_range(size, min, max, dep_token).unwrap()
}
pub fn internal_functional_sym_constrain_range_for_size<S: Into<Scalar>>(
size: S,
min: impl Into<Option<i64>>,
max: impl Into<Option<i64>>,
dep_token: &Tensor,
) -> Tensor {
Tensor::f_internal_functional_sym_constrain_range_for_size(size, min, max, dep_token)
.unwrap()
}
pub fn internal_fused_dropout(&self, p: f64) -> (Tensor, Tensor) {
self.f_internal_fused_dropout(p).unwrap()
}
pub fn internal_fused_dropout_out(
&self,
out0: &Tensor,
out1: &Tensor,
p: f64,
) -> (Tensor, Tensor) {
self.f_internal_fused_dropout_out(out0, out1, p).unwrap()
}
pub fn internal_fused_moving_avg_obs_fq_helper(
&self,
observer_on: &Tensor,
fake_quant_on: &Tensor,
running_min: &Tensor,
running_max: &Tensor,
scale: &Tensor,
zero_point: &Tensor,
averaging_const: f64,
quant_min: i64,
quant_max: i64,
ch_axis: i64,
per_row_fake_quant: bool,
symmetric_quant: bool,
) -> (Tensor, Tensor) {
self.f_internal_fused_moving_avg_obs_fq_helper(
observer_on,
fake_quant_on,
running_min,
running_max,
scale,
zero_point,
averaging_const,
quant_min,
quant_max,
ch_axis,
per_row_fake_quant,
symmetric_quant,
)
.unwrap()
}
pub fn internal_fused_moving_avg_obs_fq_helper_functional(
&self,
observer_on: &Tensor,
fake_quant_on: &Tensor,
running_min: &Tensor,
running_max: &Tensor,
scale: &Tensor,
zero_point: &Tensor,
averaging_const: f64,
quant_min: i64,
quant_max: i64,
ch_axis: i64,
per_row_fake_quant: bool,
symmetric_quant: bool,
) -> (Tensor, Tensor, Tensor, Tensor, Tensor, Tensor) {
self.f_internal_fused_moving_avg_obs_fq_helper_functional(
observer_on,
fake_quant_on,
running_min,
running_max,
scale,
zero_point,
averaging_const,
quant_min,
quant_max,
ch_axis,
per_row_fake_quant,
symmetric_quant,
)
.unwrap()
}
pub fn internal_fused_moving_avg_obs_fq_helper_out(
&self,
out0: &Tensor,
out1: &Tensor,
observer_on: &Tensor,
fake_quant_on: &Tensor,
running_min: &Tensor,
running_max: &Tensor,
scale: &Tensor,
zero_point: &Tensor,
averaging_const: f64,
quant_min: i64,
quant_max: i64,
ch_axis: i64,
per_row_fake_quant: bool,
symmetric_quant: bool,
) -> (Tensor, Tensor) {
self.f_internal_fused_moving_avg_obs_fq_helper_out(
out0,
out1,
observer_on,
fake_quant_on,
running_min,
running_max,
scale,
zero_point,
averaging_const,
quant_min,
quant_max,
ch_axis,
per_row_fake_quant,
symmetric_quant,
)
.unwrap()
}
pub fn internal_fused_sdp_choice<T: Borrow<Tensor>>(
query: &Tensor,
key: &Tensor,
value: &Tensor,
attn_mask: Option<T>,
dropout_p: f64,
is_causal: bool,
scale: impl Into<Option<f64>>,
) -> i64 {
Tensor::f_internal_fused_sdp_choice(
query, key, value, attn_mask, dropout_p, is_causal, scale,
)
.unwrap()
}
pub fn internal_fw_primal(&self, level: i64) -> Tensor {
self.f_internal_fw_primal(level).unwrap()
}
pub fn internal_fw_primal_copy(&self, level: i64) -> Tensor {
self.f_internal_fw_primal_copy(level).unwrap()
}
pub fn internal_fw_primal_copy_out(&self, out: &Tensor, level: i64) -> Tensor {
self.f_internal_fw_primal_copy_out(out, level).unwrap()
}
pub fn internal_gather_sparse_backward(
&self,
dim: i64,
index: &Tensor,
grad: &Tensor,
) -> Tensor {
self.f_internal_gather_sparse_backward(dim, index, grad).unwrap()
}
pub fn internal_grid_sampler_2d_cpu_fallback(
&self,
grid: &Tensor,
interpolation_mode: i64,
padding_mode: i64,
align_corners: bool,
) -> Tensor {
self.f_internal_grid_sampler_2d_cpu_fallback(
grid,
interpolation_mode,
padding_mode,
align_corners,
)
.unwrap()
}
pub fn internal_grid_sampler_2d_cpu_fallback_backward(
&self,
grad_output: &Tensor,
grid: &Tensor,
interpolation_mode: i64,
padding_mode: i64,
align_corners: bool,
) -> (Tensor, Tensor) {
self.f_internal_grid_sampler_2d_cpu_fallback_backward(
grad_output,
grid,
interpolation_mode,
padding_mode,
align_corners,
)
.unwrap()
}
pub fn internal_grid_sampler_2d_cpu_fallback_out(
&self,
out: &Tensor,
grid: &Tensor,
interpolation_mode: i64,
padding_mode: i64,
align_corners: bool,
) -> Tensor {
self.f_internal_grid_sampler_2d_cpu_fallback_out(
out,
grid,
interpolation_mode,
padding_mode,
align_corners,
)
.unwrap()
}
pub fn internal_has_compatible_shallow_copy_type(&self, from: &Tensor) -> bool {
self.f_internal_has_compatible_shallow_copy_type(from).unwrap()
}
pub fn internal_has_same_storage_numel(&self, other: &Tensor) -> bool {
self.f_internal_has_same_storage_numel(other).unwrap()
}
pub fn internal_histogramdd_bin_edges<T: Borrow<Tensor>>(
&self,
bins: impl IntList,
range: impl DoubleList,
weight: Option<T>,
density: bool,
) -> Vec<Tensor> {
self.f_internal_histogramdd_bin_edges(bins, range, weight, density).unwrap()
}
pub fn internal_histogramdd_bin_edges_out<T: Borrow<Tensor>>(
&self,
out: &[T],
bins: impl IntList,
range: impl DoubleList,
weight: Option<T>,
density: bool,
) {
self.f_internal_histogramdd_bin_edges_out(out, bins, range, weight, density).unwrap()
}
pub fn internal_histogramdd_from_bin_cts<T: Borrow<Tensor>>(
&self,
bins: impl IntList,
range: impl DoubleList,
weight: Option<T>,
density: bool,
) -> Tensor {
self.f_internal_histogramdd_from_bin_cts(bins, range, weight, density).unwrap()
}
pub fn internal_histogramdd_from_bin_cts_out<T: Borrow<Tensor>>(
&self,
out: &Tensor,
bins: impl IntList,
range: impl DoubleList,
weight: Option<T>,
density: bool,
) -> Tensor {
self.f_internal_histogramdd_from_bin_cts_out(out, bins, range, weight, density).unwrap()
}
pub fn internal_histogramdd_from_bin_tensors<T: Borrow<Tensor>>(
&self,
bins: &[T],
weight: Option<T>,
density: bool,
) -> Tensor {
self.f_internal_histogramdd_from_bin_tensors(bins, weight, density).unwrap()
}
pub fn internal_histogramdd_from_bin_tensors_out<T: Borrow<Tensor>>(
&self,
out: &Tensor,
bins: &[T],
weight: Option<T>,
density: bool,
) -> Tensor {
self.f_internal_histogramdd_from_bin_tensors_out(out, bins, weight, density).unwrap()
}
pub fn internal_index_put_impl<T: Borrow<Tensor>>(
&self,
indices: &[Option<T>],
values: &Tensor,
accumulate: bool,
unsafe_: bool,
) -> Tensor {
self.f_internal_index_put_impl(indices, values, accumulate, unsafe_).unwrap()
}
pub fn internal_index_put_impl_<T: Borrow<Tensor>>(
&mut self,
indices: &[Option<T>],
values: &Tensor,
accumulate: bool,
unsafe_: bool,
) -> Tensor {
self.f_internal_index_put_impl_(indices, values, accumulate, unsafe_).unwrap()
}
pub fn internal_index_put_impl_out<T: Borrow<Tensor>>(
&self,
out: &Tensor,
indices: &[Option<T>],
values: &Tensor,
accumulate: bool,
unsafe_: bool,
) -> Tensor {
self.f_internal_index_put_impl_out(out, indices, values, accumulate, unsafe_).unwrap()
}
pub fn internal_indices(&self) -> Tensor {
self.f_internal_indices().unwrap()
}
pub fn internal_indices_copy(&self) -> Tensor {
self.f_internal_indices_copy().unwrap()
}
pub fn internal_indices_copy_out(&self, out: &Tensor) -> Tensor {
self.f_internal_indices_copy_out(out).unwrap()
}
pub fn internal_int_mm(&self, mat2: &Tensor) -> Tensor {
self.f_internal_int_mm(mat2).unwrap()
}
pub fn internal_int_mm_out(&self, out: &Tensor, mat2: &Tensor) -> Tensor {
self.f_internal_int_mm_out(out, mat2).unwrap()
}
pub fn internal_is_all_true(&self) -> Tensor {
self.f_internal_is_all_true().unwrap()
}
pub fn internal_is_any_true(&self) -> Tensor {
self.f_internal_is_any_true().unwrap()
}
pub fn internal_is_zerotensor(&self) -> bool {
self.f_internal_is_zerotensor().unwrap()
}
pub fn internal_linalg_check_errors(info: &Tensor, api_name: &str, is_matrix: bool) {
Tensor::f_internal_linalg_check_errors(info, api_name, is_matrix).unwrap()
}
pub fn internal_linalg_det(a: &Tensor) -> (Tensor, Tensor, Tensor) {
Tensor::f_internal_linalg_det(a).unwrap()
}
pub fn internal_linalg_det_result(
result: &Tensor,
lu: &Tensor,
pivots: &Tensor,
a: &Tensor,
) -> (Tensor, Tensor, Tensor) {
Tensor::f_internal_linalg_det_result(result, lu, pivots, a).unwrap()
}
pub fn internal_linalg_eigh(a: &Tensor, uplo: &str, compute_v: bool) -> (Tensor, Tensor) {
Tensor::f_internal_linalg_eigh(a, uplo, compute_v).unwrap()
}
pub fn internal_linalg_eigh_eigenvalues(
eigenvalues: &Tensor,
eigenvectors: &Tensor,
a: &Tensor,
uplo: &str,
compute_v: bool,
) -> (Tensor, Tensor) {
Tensor::f_internal_linalg_eigh_eigenvalues(eigenvalues, eigenvectors, a, uplo, compute_v)
.unwrap()
}
pub fn internal_linalg_slogdet(a: &Tensor) -> (Tensor, Tensor, Tensor, Tensor) {
Tensor::f_internal_linalg_slogdet(a).unwrap()
}
pub fn internal_linalg_slogdet_sign(
sign: &Tensor,
logabsdet: &Tensor,
lu: &Tensor,
pivots: &Tensor,
a: &Tensor,
) -> (Tensor, Tensor, Tensor, Tensor) {
Tensor::f_internal_linalg_slogdet_sign(sign, logabsdet, lu, pivots, a).unwrap()
}
pub fn internal_linalg_solve_ex(
a: &Tensor,
b: &Tensor,
left: bool,
check_errors: bool,
) -> (Tensor, Tensor, Tensor, Tensor) {
Tensor::f_internal_linalg_solve_ex(a, b, left, check_errors).unwrap()
}
pub fn internal_linalg_solve_ex_result(
result: &Tensor,
lu: &Tensor,
pivots: &Tensor,
info: &Tensor,
a: &Tensor,
b: &Tensor,
left: bool,
check_errors: bool,
) -> (Tensor, Tensor, Tensor, Tensor) {
Tensor::f_internal_linalg_solve_ex_result(
result,
lu,
pivots,
info,
a,
b,
left,
check_errors,
)
.unwrap()
}
pub fn internal_linalg_svd(
a: &Tensor,
full_matrices: bool,
compute_uv: bool,
driver: &str,
) -> (Tensor, Tensor, Tensor) {
Tensor::f_internal_linalg_svd(a, full_matrices, compute_uv, driver).unwrap()
}
pub fn internal_linalg_svd_u(
u: &Tensor,
s: &Tensor,
vh: &Tensor,
a: &Tensor,
full_matrices: bool,
compute_uv: bool,
driver: &str,
) -> (Tensor, Tensor, Tensor) {
Tensor::f_internal_linalg_svd_u(u, s, vh, a, full_matrices, compute_uv, driver).unwrap()
}
pub fn internal_log_softmax(&self, dim: i64, half_to_float: bool) -> Tensor {
self.f_internal_log_softmax(dim, half_to_float).unwrap()
}
pub fn internal_log_softmax_backward_data(
grad_output: &Tensor,
output: &Tensor,
dim: i64,
input_dtype: Kind,
) -> Tensor {
Tensor::f_internal_log_softmax_backward_data(grad_output, output, dim, input_dtype).unwrap()
}
pub fn internal_log_softmax_backward_data_out(
out: &Tensor,
grad_output: &Tensor,
output: &Tensor,
dim: i64,
input_dtype: Kind,
) -> Tensor {
Tensor::f_internal_log_softmax_backward_data_out(out, grad_output, output, dim, input_dtype)
.unwrap()
}
pub fn internal_log_softmax_out(&self, out: &Tensor, dim: i64, half_to_float: bool) -> Tensor {
self.f_internal_log_softmax_out(out, dim, half_to_float).unwrap()
}
pub fn internal_logcumsumexp(&self, dim: i64) -> Tensor {
self.f_internal_logcumsumexp(dim).unwrap()
}
pub fn internal_logcumsumexp_out(&self, out: &Tensor, dim: i64) -> Tensor {
self.f_internal_logcumsumexp_out(out, dim).unwrap()
}
pub fn internal_lstm_mps<T: Borrow<Tensor>>(
&self,
hx: &[T],
params: &[T],
has_biases: bool,
num_layers: i64,
dropout: f64,
train: bool,
bidirectional: bool,
batch_first: bool,
) -> (Tensor, Tensor, Tensor, Tensor, Tensor, Tensor) {
self.f_internal_lstm_mps(
hx,
params,
has_biases,
num_layers,
dropout,
train,
bidirectional,
batch_first,
)
.unwrap()
}
pub fn internal_lstm_mps_out<T: Borrow<Tensor>>(
&self,
out0: &Tensor,
out1: &Tensor,
out2: &Tensor,
out3: &Tensor,
out4: &Tensor,
out5: &Tensor,
hx: &[T],
params: &[T],
has_biases: bool,
num_layers: i64,
dropout: f64,
train: bool,
bidirectional: bool,
batch_first: bool,
) -> (Tensor, Tensor, Tensor, Tensor, Tensor, Tensor) {
self.f_internal_lstm_mps_out(
out0,
out1,
out2,
out3,
out4,
out5,
hx,
params,
has_biases,
num_layers,
dropout,
train,
bidirectional,
batch_first,
)
.unwrap()
}
pub fn internal_lu_with_info(
&self,
pivot: bool,
check_errors: bool,
) -> (Tensor, Tensor, Tensor) {
self.f_internal_lu_with_info(pivot, check_errors).unwrap()
}
pub fn internal_make_dep_token(options: (Kind, Device)) -> Tensor {
Tensor::f_internal_make_dep_token(options).unwrap()
}
pub fn internal_make_dual(primal: &Tensor, tangent: &Tensor, level: i64) -> Tensor {
Tensor::f_internal_make_dual(primal, tangent, level).unwrap()
}
pub fn internal_make_dual_copy(primal: &Tensor, tangent: &Tensor, level: i64) -> Tensor {
Tensor::f_internal_make_dual_copy(primal, tangent, level).unwrap()
}
pub fn internal_make_dual_copy_out(
out: &Tensor,
primal: &Tensor,
tangent: &Tensor,
level: i64,
) -> Tensor {
Tensor::f_internal_make_dual_copy_out(out, primal, tangent, level).unwrap()
}
pub fn internal_make_per_channel_quantized_tensor(
&self,
scale: &Tensor,
zero_point: &Tensor,
axis: i64,
) -> Tensor {
self.f_internal_make_per_channel_quantized_tensor(scale, zero_point, axis).unwrap()
}
pub fn internal_make_per_channel_quantized_tensor_out(
&self,
out: &Tensor,
scale: &Tensor,
zero_point: &Tensor,
axis: i64,
) -> Tensor {
self.f_internal_make_per_channel_quantized_tensor_out(out, scale, zero_point, axis).unwrap()
}
pub fn internal_make_per_tensor_quantized_tensor(&self, scale: f64, zero_point: i64) -> Tensor {
self.f_internal_make_per_tensor_quantized_tensor(scale, zero_point).unwrap()
}
pub fn internal_make_per_tensor_quantized_tensor_out(
&self,
out: &Tensor,
scale: f64,
zero_point: i64,
) -> Tensor {
self.f_internal_make_per_tensor_quantized_tensor_out(out, scale, zero_point).unwrap()
}
pub fn internal_masked_scale(&self, mask: &Tensor, scale: f64) -> Tensor {
self.f_internal_masked_scale(mask, scale).unwrap()
}
pub fn internal_masked_scale_out(&self, out: &Tensor, mask: &Tensor, scale: f64) -> Tensor {
self.f_internal_masked_scale_out(out, mask, scale).unwrap()
}
pub fn internal_masked_softmax(
&self,
mask: &Tensor,
dim: impl Into<Option<i64>>,
mask_type: impl Into<Option<i64>>,
) -> Tensor {
self.f_internal_masked_softmax(mask, dim, mask_type).unwrap()
}
pub fn internal_masked_softmax_backward(
grad_output: &Tensor,
output: &Tensor,
mask: &Tensor,
dim: impl Into<Option<i64>>,
) -> Tensor {
Tensor::f_internal_masked_softmax_backward(grad_output, output, mask, dim).unwrap()
}
pub fn internal_masked_softmax_backward_out(
out: &Tensor,
grad_output: &Tensor,
output: &Tensor,
mask: &Tensor,
dim: impl Into<Option<i64>>,
) -> Tensor {
Tensor::f_internal_masked_softmax_backward_out(out, grad_output, output, mask, dim).unwrap()
}
pub fn internal_masked_softmax_out(
&self,
out: &Tensor,
mask: &Tensor,
dim: impl Into<Option<i64>>,
mask_type: impl Into<Option<i64>>,
) -> Tensor {
self.f_internal_masked_softmax_out(out, mask, dim, mask_type).unwrap()
}
pub fn internal_mkldnn_reshape(&self, shape: impl IntList) -> Tensor {
self.f_internal_mkldnn_reshape(shape).unwrap()
}
pub fn internal_mkldnn_reshape_out(&self, out: &Tensor, shape: impl IntList) -> Tensor {
self.f_internal_mkldnn_reshape_out(out, shape).unwrap()
}
pub fn internal_mkldnn_transpose(&self, dim0: i64, dim1: i64) -> Tensor {
self.f_internal_mkldnn_transpose(dim0, dim1).unwrap()
}
pub fn internal_mkldnn_transpose_(&mut self, dim0: i64, dim1: i64) -> Tensor {
self.f_internal_mkldnn_transpose_(dim0, dim1).unwrap()
}
pub fn internal_mkldnn_transpose_out(&self, out: &Tensor, dim0: i64, dim1: i64) -> Tensor {
self.f_internal_mkldnn_transpose_out(out, dim0, dim1).unwrap()
}
pub fn internal_mps_convolution<T: Borrow<Tensor>>(
&self,
weight: &Tensor,
bias: Option<T>,
padding: impl IntList,
stride: impl IntList,
dilation: impl IntList,
groups: i64,
) -> Tensor {
self.f_internal_mps_convolution(weight, bias, padding, stride, dilation, groups).unwrap()
}
pub fn internal_mps_convolution_out<T: Borrow<Tensor>>(
&self,
out: &Tensor,
weight: &Tensor,
bias: Option<T>,
padding: impl IntList,
stride: impl IntList,
dilation: impl IntList,
groups: i64,
) -> Tensor {
self.f_internal_mps_convolution_out(out, weight, bias, padding, stride, dilation, groups)
.unwrap()
}
pub fn internal_mps_convolution_transpose(
&self,
weight: &Tensor,
padding: impl IntList,
output_padding: impl IntList,
stride: impl IntList,
dilation: impl IntList,
groups: i64,
) -> Tensor {
self.f_internal_mps_convolution_transpose(
weight,
padding,
output_padding,
stride,
dilation,
groups,
)
.unwrap()
}
pub fn internal_mps_convolution_transpose_out(
&self,
out: &Tensor,
weight: &Tensor,
padding: impl IntList,
output_padding: impl IntList,
stride: impl IntList,
dilation: impl IntList,
groups: i64,
) -> Tensor {
self.f_internal_mps_convolution_transpose_out(
out,
weight,
padding,
output_padding,
stride,
dilation,
groups,
)
.unwrap()
}
pub fn internal_native_batch_norm_legit<T: Borrow<Tensor>>(
&self,
weight: Option<T>,
bias: Option<T>,
running_mean: &Tensor,
running_var: &Tensor,
training: bool,
momentum: f64,
eps: f64,
) -> (Tensor, Tensor, Tensor) {
self.f_internal_native_batch_norm_legit(
weight,
bias,
running_mean,
running_var,
training,
momentum,
eps,
)
.unwrap()
}
pub fn internal_native_batch_norm_legit_functional<T: Borrow<Tensor>>(
&self,
weight: Option<T>,
bias: Option<T>,
running_mean: &Tensor,
running_var: &Tensor,
training: bool,
momentum: f64,
eps: f64,
) -> (Tensor, Tensor, Tensor, Tensor, Tensor) {
self.f_internal_native_batch_norm_legit_functional(
weight,
bias,
running_mean,
running_var,
training,
momentum,
eps,
)
.unwrap()
}
pub fn internal_native_batch_norm_legit_no_stats<T: Borrow<Tensor>>(
&self,
weight: Option<T>,
bias: Option<T>,
training: bool,
momentum: f64,
eps: f64,
) -> (Tensor, Tensor, Tensor) {
self.f_internal_native_batch_norm_legit_no_stats(weight, bias, training, momentum, eps)
.unwrap()
}
pub fn internal_native_batch_norm_legit_no_stats_out<T: Borrow<Tensor>>(
&self,
out: &Tensor,
save_mean: &Tensor,
save_invstd: &Tensor,
weight: Option<T>,
bias: Option<T>,
training: bool,
momentum: f64,
eps: f64,
) -> (Tensor, Tensor, Tensor) {
self.f_internal_native_batch_norm_legit_no_stats_out(
out,
save_mean,
save_invstd,
weight,
bias,
training,
momentum,
eps,
)
.unwrap()
}
pub fn internal_native_batch_norm_legit_no_training<T: Borrow<Tensor>>(
&self,
weight: Option<T>,
bias: Option<T>,
running_mean: &Tensor,
running_var: &Tensor,
momentum: f64,
eps: f64,
) -> (Tensor, Tensor, Tensor) {
self.f_internal_native_batch_norm_legit_no_training(
weight,
bias,
running_mean,
running_var,
momentum,
eps,
)
.unwrap()
}
pub fn internal_native_batch_norm_legit_no_training_out<T: Borrow<Tensor>>(
&self,
out0: &Tensor,
out1: &Tensor,
out2: &Tensor,
weight: Option<T>,
bias: Option<T>,
running_mean: &Tensor,
running_var: &Tensor,
momentum: f64,
eps: f64,
) -> (Tensor, Tensor, Tensor) {
self.f_internal_native_batch_norm_legit_no_training_out(
out0,
out1,
out2,
weight,
bias,
running_mean,
running_var,
momentum,
eps,
)
.unwrap()
}
pub fn internal_native_batch_norm_legit_out<T: Borrow<Tensor>>(
&self,
out: &Tensor,
save_mean: &Tensor,
save_invstd: &Tensor,
weight: Option<T>,
bias: Option<T>,
running_mean: &Tensor,
running_var: &Tensor,
training: bool,
momentum: f64,
eps: f64,
) -> (Tensor, Tensor, Tensor) {
self.f_internal_native_batch_norm_legit_out(
out,
save_mean,
save_invstd,
weight,
bias,
running_mean,
running_var,
training,
momentum,
eps,
)
.unwrap()
}
pub fn internal_native_multi_head_attention<T: Borrow<Tensor>>(
query: &Tensor,
key: &Tensor,
value: &Tensor,
embed_dim: i64,
num_head: i64,
qkv_weight: &Tensor,
qkv_bias: &Tensor,
proj_weight: &Tensor,
proj_bias: &Tensor,
mask: Option<T>,
need_weights: bool,
average_attn_weights: bool,
mask_type: impl Into<Option<i64>>,
) -> (Tensor, Tensor) {
Tensor::f_internal_native_multi_head_attention(
query,
key,
value,
embed_dim,
num_head,
qkv_weight,
qkv_bias,
proj_weight,
proj_bias,
mask,
need_weights,
average_attn_weights,
mask_type,
)
.unwrap()
}
pub fn internal_native_multi_head_attention_out<T: Borrow<Tensor>>(
out0: &Tensor,
out1: &Tensor,
query: &Tensor,
key: &Tensor,
value: &Tensor,
embed_dim: i64,
num_head: i64,
qkv_weight: &Tensor,
qkv_bias: &Tensor,
proj_weight: &Tensor,
proj_bias: &Tensor,
mask: Option<T>,
need_weights: bool,
average_attn_weights: bool,
mask_type: impl Into<Option<i64>>,
) -> (Tensor, Tensor) {
Tensor::f_internal_native_multi_head_attention_out(
out0,
out1,
query,
key,
value,
embed_dim,
num_head,
qkv_weight,
qkv_bias,
proj_weight,
proj_bias,
mask,
need_weights,
average_attn_weights,
mask_type,
)
.unwrap()
}
pub fn internal_neg_view(&self) -> Tensor {
self.f_internal_neg_view().unwrap()
}
pub fn internal_neg_view_copy(&self) -> Tensor {
self.f_internal_neg_view_copy().unwrap()
}
pub fn internal_neg_view_copy_out(&self, out: &Tensor) -> Tensor {
self.f_internal_neg_view_copy_out(out).unwrap()
}
pub fn internal_nested_from_padded(
padded: &Tensor,
cpu_nested_shape_example: &Tensor,
fuse_transform_0213: bool,
) -> Tensor {
Tensor::f_internal_nested_from_padded(padded, cpu_nested_shape_example, fuse_transform_0213)
.unwrap()
}
pub fn internal_nested_from_padded_and_nested_example(
padded: &Tensor,
nt_example: &Tensor,
) -> Tensor {
Tensor::f_internal_nested_from_padded_and_nested_example(padded, nt_example).unwrap()
}
pub fn internal_nested_from_padded_and_nested_example_out(
out: &Tensor,
padded: &Tensor,
nt_example: &Tensor,
) -> Tensor {
Tensor::f_internal_nested_from_padded_and_nested_example_out(out, padded, nt_example)
.unwrap()
}
pub fn internal_nested_from_padded_out(
out: &Tensor,
padded: &Tensor,
cpu_nested_shape_example: &Tensor,
fuse_transform_0213: bool,
) -> Tensor {
Tensor::f_internal_nested_from_padded_out(
out,
padded,
cpu_nested_shape_example,
fuse_transform_0213,
)
.unwrap()
}
pub fn internal_nested_select_backward(
&self,
grad_output: &Tensor,
dim: i64,
index: i64,
) -> Tensor {
self.f_internal_nested_select_backward(grad_output, dim, index).unwrap()
}
pub fn internal_nested_sum_backward(
&self,
grad: &Tensor,
dim: impl IntListOption,
keepdim: bool,
) -> Tensor {
self.f_internal_nested_sum_backward(grad, dim, keepdim).unwrap()
}
pub fn internal_nested_view_from_buffer(
&self,
nested_size: &Tensor,
nested_strides: &Tensor,
offsets: &Tensor,
) -> Tensor {
self.f_internal_nested_view_from_buffer(nested_size, nested_strides, offsets).unwrap()
}
pub fn internal_nested_view_from_buffer_copy(
&self,
nested_size: &Tensor,
nested_strides: &Tensor,
offsets: &Tensor,
) -> Tensor {
self.f_internal_nested_view_from_buffer_copy(nested_size, nested_strides, offsets).unwrap()
}
pub fn internal_nested_view_from_buffer_copy_out(
&self,
out: &Tensor,
nested_size: &Tensor,
nested_strides: &Tensor,
offsets: &Tensor,
) -> Tensor {
self.f_internal_nested_view_from_buffer_copy_out(out, nested_size, nested_strides, offsets)
.unwrap()
}
pub fn internal_new_zeros_with_same_feature_meta(
&self,
other: &Tensor,
self_num_batch_dims: i64,
) -> Tensor {
self.f_internal_new_zeros_with_same_feature_meta(other, self_num_batch_dims).unwrap()
}
pub fn internal_new_zeros_with_same_feature_meta_out(
&self,
out: &Tensor,
other: &Tensor,
self_num_batch_dims: i64,
) -> Tensor {
self.f_internal_new_zeros_with_same_feature_meta_out(out, other, self_num_batch_dims)
.unwrap()
}
pub fn internal_nnpack_available() -> bool {
Tensor::f_internal_nnpack_available().unwrap()
}
pub fn internal_nnpack_spatial_convolution<T: Borrow<Tensor>>(
&self,
weight: &Tensor,
bias: Option<T>,
padding: impl IntList,
stride: impl IntList,
) -> Tensor {
self.f_internal_nnpack_spatial_convolution(weight, bias, padding, stride).unwrap()
}
pub fn internal_nnpack_spatial_convolution_out<T: Borrow<Tensor>>(
&self,
out: &Tensor,
weight: &Tensor,
bias: Option<T>,
padding: impl IntList,
stride: impl IntList,
) -> Tensor {
self.f_internal_nnpack_spatial_convolution_out(out, weight, bias, padding, stride).unwrap()
}
pub fn internal_nnz(&self) -> i64 {
self.f_internal_nnz().unwrap()
}
pub fn internal_pack_padded_sequence(
&self,
lengths: &Tensor,
batch_first: bool,
) -> (Tensor, Tensor) {
self.f_internal_pack_padded_sequence(lengths, batch_first).unwrap()
}
pub fn internal_pack_padded_sequence_backward(
grad: &Tensor,
input_size: impl IntList,
batch_sizes: &Tensor,
batch_first: bool,
) -> Tensor {
Tensor::f_internal_pack_padded_sequence_backward(grad, input_size, batch_sizes, batch_first)
.unwrap()
}
pub fn internal_pack_padded_sequence_out(
&self,
out0: &Tensor,
out1: &Tensor,
lengths: &Tensor,
batch_first: bool,
) -> (Tensor, Tensor) {
self.f_internal_pack_padded_sequence_out(out0, out1, lengths, batch_first).unwrap()
}
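/// Raw binding for ATen's `_pad_circular`, which wraps border values around instead of
/// zero-padding. A minimal sketch, assuming this generated file is part of the `tch`
/// crate with its usual `Kind`/`Device` re-exports:
///
/// ```no_run
/// use tch::{Device, Kind, Tensor};
/// let x = Tensor::arange(4i64, (Kind::Float, Device::Cpu)).reshape(&[1, 1, 4]);
/// // Pad one element on each side of the last dimension; circular padding wraps,
/// // so the result is [3, 0, 1, 2, 3, 0].
/// let y = x.internal_pad_circular(&[1, 1]);
/// assert_eq!(y.size(), vec![1, 1, 6]);
/// ```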
pub fn internal_pad_circular(&self, pad: impl IntList) -> Tensor {
self.f_internal_pad_circular(pad).unwrap()
}
pub fn internal_pad_enum(
&self,
pad: impl IntList,
mode: i64,
value: impl Into<Option<f64>>,
) -> Tensor {
self.f_internal_pad_enum(pad, mode, value).unwrap()
}
pub fn internal_pad_packed_sequence<S: Into<Scalar>>(
data: &Tensor,
batch_sizes: &Tensor,
batch_first: bool,
padding_value: S,
total_length: i64,
) -> (Tensor, Tensor) {
Tensor::f_internal_pad_packed_sequence(
data,
batch_sizes,
batch_first,
padding_value,
total_length,
)
.unwrap()
}
pub fn internal_pdist_backward(&self, grad: &Tensor, p: f64, pdist: &Tensor) -> Tensor {
self.f_internal_pdist_backward(grad, p, pdist).unwrap()
}
pub fn internal_pdist_backward_out(
&self,
out: &Tensor,
grad: &Tensor,
p: f64,
pdist: &Tensor,
) -> Tensor {
self.f_internal_pdist_backward_out(out, grad, p, pdist).unwrap()
}
pub fn internal_pin_memory(&self, device: Device) -> Tensor {
self.f_internal_pin_memory(device).unwrap()
}
pub fn internal_pin_memory_out(&self, out: &Tensor, device: Device) -> Tensor {
self.f_internal_pin_memory_out(out, device).unwrap()
}
pub fn internal_prelu_kernel(&self, weight: &Tensor) -> Tensor {
self.f_internal_prelu_kernel(weight).unwrap()
}
pub fn internal_prelu_kernel_backward(
&self,
grad_output: &Tensor,
weight: &Tensor,
) -> (Tensor, Tensor) {
self.f_internal_prelu_kernel_backward(grad_output, weight).unwrap()
}
pub fn internal_propagate_xla_data(&self, output: &Tensor) {
self.f_internal_propagate_xla_data(output).unwrap()
}
pub fn internal_remove_batch_dim(&self, level: i64, batch_size: i64, out_dim: i64) -> Tensor {
self.f_internal_remove_batch_dim(level, batch_size, out_dim).unwrap()
}
pub fn internal_reshape_alias(&self, size: impl IntList, stride: impl IntList) -> Tensor {
self.f_internal_reshape_alias(size, stride).unwrap()
}
pub fn internal_reshape_alias_copy(&self, size: impl IntList, stride: impl IntList) -> Tensor {
self.f_internal_reshape_alias_copy(size, stride).unwrap()
}
pub fn internal_reshape_alias_copy_out(
&self,
out: &Tensor,
size: impl IntList,
stride: impl IntList,
) -> Tensor {
self.f_internal_reshape_alias_copy_out(out, size, stride).unwrap()
}
pub fn internal_reshape_copy(&self, size: impl IntList) -> Tensor {
self.f_internal_reshape_copy(size).unwrap()
}
pub fn internal_reshape_from_tensor(&self, shape: &Tensor) -> Tensor {
self.f_internal_reshape_from_tensor(shape).unwrap()
}
pub fn internal_resize_output(&self, size: impl IntList, device: Device) -> Tensor {
self.f_internal_resize_output(size, device).unwrap()
}
pub fn internal_resize_output_(&mut self, size: impl IntList, device: Device) -> Tensor {
self.f_internal_resize_output_(size, device).unwrap()
}
pub fn internal_resize_output_out(
&self,
out: &Tensor,
size: impl IntList,
device: Device,
) -> Tensor {
self.f_internal_resize_output_out(out, size, device).unwrap()
}
pub fn internal_rowwise_prune(
weight: &Tensor,
mask: &Tensor,
compressed_indices_dtype: Kind,
) -> (Tensor, Tensor) {
Tensor::f_internal_rowwise_prune(weight, mask, compressed_indices_dtype).unwrap()
}
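/// Raw binding for ATen's `_sample_dirichlet`: `self` holds the concentration
/// parameters and the result is one Dirichlet sample of the same shape. A minimal
/// sketch against the public `tch` API:
///
/// ```no_run
/// use tch::{Device, Kind, Tensor};
/// let alpha = Tensor::ones(&[4], (Kind::Float, Device::Cpu));
/// // One draw from Dirichlet(1, 1, 1, 1): entries are non-negative and sum to 1.
/// let sample = alpha.internal_sample_dirichlet();
/// assert_eq!(sample.size(), vec![4]);
/// ```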
pub fn internal_sample_dirichlet(&self) -> Tensor {
self.f_internal_sample_dirichlet().unwrap()
}
pub fn internal_sample_dirichlet_out(&self, out: &Tensor) -> Tensor {
self.f_internal_sample_dirichlet_out(out).unwrap()
}
pub fn internal_saturate_weight_to_fp16(weight: &Tensor) -> Tensor {
Tensor::f_internal_saturate_weight_to_fp16(weight).unwrap()
}
pub fn internal_scaled_dot_product_attention_math<T: Borrow<Tensor>>(
query: &Tensor,
key: &Tensor,
value: &Tensor,
attn_mask: Option<T>,
dropout_p: f64,
is_causal: bool,
dropout_mask: Option<T>,
scale: impl Into<Option<f64>>,
) -> (Tensor, Tensor) {
Tensor::f_internal_scaled_dot_product_attention_math(
query,
key,
value,
attn_mask,
dropout_p,
is_causal,
dropout_mask,
scale,
)
.unwrap()
}
pub fn internal_scaled_dot_product_efficient_attention<T: Borrow<Tensor>>(
query: &Tensor,
key: &Tensor,
value: &Tensor,
attn_bias: Option<T>,
compute_log_sumexp: bool,
dropout_p: f64,
is_causal: bool,
scale: impl Into<Option<f64>>,
) -> (Tensor, Tensor, Tensor, Tensor) {
Tensor::f_internal_scaled_dot_product_efficient_attention(
query,
key,
value,
attn_bias,
compute_log_sumexp,
dropout_p,
is_causal,
scale,
)
.unwrap()
}
pub fn internal_scaled_dot_product_flash_attention_backward(
grad_out: &Tensor,
query: &Tensor,
key: &Tensor,
value: &Tensor,
out: &Tensor,
logsumexp: &Tensor,
cum_seq_q: &Tensor,
cum_seq_k: &Tensor,
max_q: i64,
max_k: i64,
dropout_p: f64,
is_causal: bool,
philox_seed: &Tensor,
philox_offset: &Tensor,
scale: impl Into<Option<f64>>,
) -> (Tensor, Tensor, Tensor) {
Tensor::f_internal_scaled_dot_product_flash_attention_backward(
grad_out,
query,
key,
value,
out,
logsumexp,
cum_seq_q,
cum_seq_k,
max_q,
max_k,
dropout_p,
is_causal,
philox_seed,
philox_offset,
scale,
)
.unwrap()
}
pub fn internal_scaled_mm<T: Borrow<Tensor>>(
&self,
mat2: &Tensor,
bias: Option<T>,
out_dtype: impl Into<Option<Kind>>,
scale_a: Option<T>,
scale_b: Option<T>,
scale_result: Option<T>,
) -> (Tensor, Tensor) {
self.f_internal_scaled_mm(mat2, bias, out_dtype, scale_a, scale_b, scale_result).unwrap()
}
pub fn internal_scaled_mm_out<T: Borrow<Tensor>>(
&self,
out: &Tensor,
out_amax: &Tensor,
mat2: &Tensor,
bias: Option<T>,
out_dtype: impl Into<Option<Kind>>,
scale_a: Option<T>,
scale_b: Option<T>,
scale_result: Option<T>,
) -> (Tensor, Tensor) {
self.f_internal_scaled_mm_out(
out,
out_amax,
mat2,
bias,
out_dtype,
scale_a,
scale_b,
scale_result,
)
.unwrap()
}
pub fn internal_scatter_reduce(
&self,
dim: i64,
index: &Tensor,
src: &Tensor,
reduce: &str,
include_self: bool,
) -> Tensor {
self.f_internal_scatter_reduce(dim, index, src, reduce, include_self).unwrap()
}
pub fn internal_scatter_reduce_(
&mut self,
dim: i64,
index: &Tensor,
src: &Tensor,
reduce: &str,
include_self: bool,
) -> Tensor {
self.f_internal_scatter_reduce_(dim, index, src, reduce, include_self).unwrap()
}
pub fn internal_scatter_reduce_two_out(
&self,
out: &Tensor,
dim: i64,
index: &Tensor,
src: &Tensor,
reduce: &str,
include_self: bool,
) -> Tensor {
self.f_internal_scatter_reduce_two_out(out, dim, index, src, reduce, include_self).unwrap()
}
pub fn internal_segment_reduce_backward<T: Borrow<Tensor>, S: Into<Scalar>>(
grad: &Tensor,
output: &Tensor,
data: &Tensor,
reduce: &str,
lengths: Option<T>,
offsets: Option<T>,
axis: i64,
initial: S,
) -> Tensor {
Tensor::f_internal_segment_reduce_backward(
grad, output, data, reduce, lengths, offsets, axis, initial,
)
.unwrap()
}
pub fn internal_segment_reduce_backward_out<T: Borrow<Tensor>, S: Into<Scalar>>(
out: &Tensor,
grad: &Tensor,
output: &Tensor,
data: &Tensor,
reduce: &str,
lengths: Option<T>,
offsets: Option<T>,
axis: i64,
initial: S,
) -> Tensor {
Tensor::f_internal_segment_reduce_backward_out(
out, grad, output, data, reduce, lengths, offsets, axis, initial,
)
.unwrap()
}
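/// Raw binding for ATen's `_shape_as_tensor`, returning the sizes of `self` as a 1-D
/// int64 tensor. A minimal sketch using the crate's usual re-exports:
///
/// ```no_run
/// use tch::{Device, Kind, Tensor};
/// let t = Tensor::zeros(&[2, 3, 4], (Kind::Float, Device::Cpu));
/// let shape = t.internal_shape_as_tensor(); // tensor [2, 3, 4]
/// assert_eq!(shape.int64_value(&[2]), 4);
/// ```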
pub fn internal_shape_as_tensor(&self) -> Tensor {
self.f_internal_shape_as_tensor().unwrap()
}
pub fn internal_slow_conv2d_backward(
&self,
grad_input: &Tensor,
grad_weight: &Tensor,
grad_bias: &Tensor,
grad_output: &Tensor,
weight: &Tensor,
kernel_size: impl IntList,
stride: impl IntList,
padding: impl IntList,
) -> (Tensor, Tensor, Tensor) {
self.f_internal_slow_conv2d_backward(
grad_input,
grad_weight,
grad_bias,
grad_output,
weight,
kernel_size,
stride,
padding,
)
.unwrap()
}
pub fn internal_sobol_engine_draw(
quasi: &Tensor,
n: i64,
sobolstate: &Tensor,
dimension: i64,
num_generated: i64,
dtype: impl Into<Option<Kind>>,
) -> (Tensor, Tensor) {
Tensor::f_internal_sobol_engine_draw(quasi, n, sobolstate, dimension, num_generated, dtype)
.unwrap()
}
pub fn internal_sobol_engine_ff_(
&mut self,
n: i64,
sobolstate: &Tensor,
dimension: i64,
num_generated: i64,
) -> Tensor {
self.f_internal_sobol_engine_ff_(n, sobolstate, dimension, num_generated).unwrap()
}
pub fn internal_sobol_engine_initialize_state_(&mut self, dimension: i64) -> Tensor {
self.f_internal_sobol_engine_initialize_state_(dimension).unwrap()
}
pub fn internal_sobol_engine_scramble_(&mut self, ltm: &Tensor, dimension: i64) -> Tensor {
self.f_internal_sobol_engine_scramble_(ltm, dimension).unwrap()
}
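/// Raw binding for ATen's `_softmax`; the public `softmax` wrapper is usually the more
/// convenient entry point, but the raw op can be called directly. `half_to_float` only
/// matters for half-precision inputs. A minimal sketch, assuming the usual `tch`
/// re-exports:
///
/// ```no_run
/// use tch::{Device, Kind, Tensor};
/// let logits = Tensor::arange(6i64, (Kind::Float, Device::Cpu)).reshape(&[2, 3]);
/// let probs = logits.internal_softmax(1, false); // each row now sums to 1
/// assert_eq!(probs.size(), vec![2, 3]);
/// ```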
pub fn internal_softmax(&self, dim: i64, half_to_float: bool) -> Tensor {
self.f_internal_softmax(dim, half_to_float).unwrap()
}
pub fn internal_softmax_backward_data(
grad_output: &Tensor,
output: &Tensor,
dim: i64,
input_dtype: Kind,
) -> Tensor {
Tensor::f_internal_softmax_backward_data(grad_output, output, dim, input_dtype).unwrap()
}
pub fn internal_softmax_backward_data_out(
grad_input: &Tensor,
grad_output: &Tensor,
output: &Tensor,
dim: i64,
input_dtype: Kind,
) -> Tensor {
Tensor::f_internal_softmax_backward_data_out(
grad_input,
grad_output,
output,
dim,
input_dtype,
)
.unwrap()
}
pub fn internal_softmax_out(&self, out: &Tensor, dim: i64, half_to_float: bool) -> Tensor {
self.f_internal_softmax_out(out, dim, half_to_float).unwrap()
}
pub fn internal_sparse_addmm(&self, mat1: &Tensor, mat2: &Tensor) -> Tensor {
self.f_internal_sparse_addmm(mat1, mat2).unwrap()
}
pub fn internal_sparse_addmm_out(&self, out: &Tensor, mat1: &Tensor, mat2: &Tensor) -> Tensor {
self.f_internal_sparse_addmm_out(out, mat1, mat2).unwrap()
}
pub fn internal_sparse_broadcast_to(&self, size: impl IntList) -> Tensor {
self.f_internal_sparse_broadcast_to(size).unwrap()
}
pub fn internal_sparse_broadcast_to_copy(&self, size: impl IntList) -> Tensor {
self.f_internal_sparse_broadcast_to_copy(size).unwrap()
}
pub fn internal_sparse_broadcast_to_copy_out(
&self,
out: &Tensor,
size: impl IntList,
) -> Tensor {
self.f_internal_sparse_broadcast_to_copy_out(out, size).unwrap()
}
pub fn internal_sparse_bsc_tensor_unsafe(
ccol_indices: &Tensor,
row_indices: &Tensor,
values: &Tensor,
size: impl IntList,
options: (Kind, Device),
) -> Tensor {
Tensor::f_internal_sparse_bsc_tensor_unsafe(
ccol_indices,
row_indices,
values,
size,
options,
)
.unwrap()
}
pub fn internal_sparse_bsr_tensor_unsafe(
crow_indices: &Tensor,
col_indices: &Tensor,
values: &Tensor,
size: impl IntList,
options: (Kind, Device),
) -> Tensor {
Tensor::f_internal_sparse_bsr_tensor_unsafe(
crow_indices,
col_indices,
values,
size,
options,
)
.unwrap()
}
pub fn internal_sparse_compressed_tensor_unsafe(
compressed_indices: &Tensor,
plain_indices: &Tensor,
values: &Tensor,
size: impl IntList,
options: (Kind, Device),
) -> Tensor {
Tensor::f_internal_sparse_compressed_tensor_unsafe(
compressed_indices,
plain_indices,
values,
size,
options,
)
.unwrap()
}
pub fn internal_sparse_coo_tensor_unsafe(
indices: &Tensor,
values: &Tensor,
size: impl IntList,
options: (Kind, Device),
is_coalesced: bool,
) -> Tensor {
Tensor::f_internal_sparse_coo_tensor_unsafe(indices, values, size, options, is_coalesced)
.unwrap()
}
pub fn internal_sparse_coo_tensor_with_dims(
sparse_dim: i64,
dense_dim: i64,
size: impl IntList,
options: (Kind, Device),
) -> Tensor {
Tensor::f_internal_sparse_coo_tensor_with_dims(sparse_dim, dense_dim, size, options)
.unwrap()
}
pub fn internal_sparse_coo_tensor_with_dims_and_tensors(
sparse_dim: i64,
dense_dim: i64,
size: impl IntList,
indices: &Tensor,
values: &Tensor,
options: (Kind, Device),
is_coalesced: bool,
) -> Tensor {
Tensor::f_internal_sparse_coo_tensor_with_dims_and_tensors(
sparse_dim,
dense_dim,
size,
indices,
values,
options,
is_coalesced,
)
.unwrap()
}
pub fn internal_sparse_coo_tensor_with_dims_and_tensors_out(
out: &Tensor,
sparse_dim: i64,
dense_dim: i64,
size: impl IntList,
indices: &Tensor,
values: &Tensor,
is_coalesced: bool,
) -> Tensor {
Tensor::f_internal_sparse_coo_tensor_with_dims_and_tensors_out(
out,
sparse_dim,
dense_dim,
size,
indices,
values,
is_coalesced,
)
.unwrap()
}
pub fn internal_sparse_coo_tensor_with_dims_out(
out: &Tensor,
sparse_dim: i64,
dense_dim: i64,
size: impl IntList,
) -> Tensor {
Tensor::f_internal_sparse_coo_tensor_with_dims_out(out, sparse_dim, dense_dim, size)
.unwrap()
}
pub fn internal_sparse_csc_tensor_unsafe(
ccol_indices: &Tensor,
row_indices: &Tensor,
values: &Tensor,
size: impl IntList,
options: (Kind, Device),
) -> Tensor {
Tensor::f_internal_sparse_csc_tensor_unsafe(
ccol_indices,
row_indices,
values,
size,
options,
)
.unwrap()
}
pub fn internal_sparse_csr_prod(
&self,
dim: impl IntList,
keepdim: bool,
dtype: impl Into<Option<Kind>>,
) -> Tensor {
self.f_internal_sparse_csr_prod(dim, keepdim, dtype).unwrap()
}
pub fn internal_sparse_csr_prod_dim_dtype_out(
&self,
out: &Tensor,
dim: impl IntList,
keepdim: bool,
dtype: impl Into<Option<Kind>>,
) -> Tensor {
self.f_internal_sparse_csr_prod_dim_dtype_out(out, dim, keepdim, dtype).unwrap()
}
pub fn internal_sparse_csr_sum(
&self,
dim: impl IntList,
keepdim: bool,
dtype: impl Into<Option<Kind>>,
) -> Tensor {
self.f_internal_sparse_csr_sum(dim, keepdim, dtype).unwrap()
}
pub fn internal_sparse_csr_sum_dim_dtype_out(
&self,
out: &Tensor,
dim: impl IntList,
keepdim: bool,
dtype: impl Into<Option<Kind>>,
) -> Tensor {
self.f_internal_sparse_csr_sum_dim_dtype_out(out, dim, keepdim, dtype).unwrap()
}
pub fn internal_sparse_csr_tensor_unsafe(
crow_indices: &Tensor,
col_indices: &Tensor,
values: &Tensor,
size: impl IntList,
options: (Kind, Device),
) -> Tensor {
Tensor::f_internal_sparse_csr_tensor_unsafe(
crow_indices,
col_indices,
values,
size,
options,
)
.unwrap()
}
pub fn internal_sparse_log_softmax(&self, dim: i64, half_to_float: bool) -> Tensor {
self.f_internal_sparse_log_softmax(dim, half_to_float).unwrap()
}
pub fn internal_sparse_log_softmax_backward_data(
&self,
grad_output: &Tensor,
output: &Tensor,
dim: i64,
) -> Tensor {
self.f_internal_sparse_log_softmax_backward_data(grad_output, output, dim).unwrap()
}
pub fn internal_sparse_log_softmax_backward_data_out(
&self,
out: &Tensor,
grad_output: &Tensor,
output: &Tensor,
dim: i64,
) -> Tensor {
self.f_internal_sparse_log_softmax_backward_data_out(out, grad_output, output, dim).unwrap()
}
pub fn internal_sparse_log_softmax_int(
&self,
dim: i64,
dtype: impl Into<Option<Kind>>,
) -> Tensor {
self.f_internal_sparse_log_softmax_int(dim, dtype).unwrap()
}
pub fn internal_sparse_log_softmax_out(
&self,
out: &Tensor,
dim: i64,
half_to_float: bool,
) -> Tensor {
self.f_internal_sparse_log_softmax_out(out, dim, half_to_float).unwrap()
}
pub fn internal_sparse_mask_projection(
&self,
mask: &Tensor,
accumulate_matches: bool,
) -> Tensor {
self.f_internal_sparse_mask_projection(mask, accumulate_matches).unwrap()
}
pub fn internal_sparse_mask_projection_out(
&self,
out: &Tensor,
mask: &Tensor,
accumulate_matches: bool,
) -> Tensor {
self.f_internal_sparse_mask_projection_out(out, mask, accumulate_matches).unwrap()
}
pub fn internal_sparse_mm(sparse: &Tensor, dense: &Tensor) -> Tensor {
Tensor::f_internal_sparse_mm(sparse, dense).unwrap()
}
pub fn internal_sparse_mm_reduce(sparse: &Tensor, dense: &Tensor, reduce: &str) -> Tensor {
Tensor::f_internal_sparse_mm_reduce(sparse, dense, reduce).unwrap()
}
pub fn internal_sparse_mm_reduce_impl(&self, other: &Tensor, reduce: &str) -> (Tensor, Tensor) {
self.f_internal_sparse_mm_reduce_impl(other, reduce).unwrap()
}
pub fn internal_sparse_semi_structured_linear<T: Borrow<Tensor>>(
&self,
weight: &Tensor,
meta: &Tensor,
bias: Option<T>,
activation: &str,
) -> Tensor {
self.f_internal_sparse_semi_structured_linear(weight, meta, bias, activation).unwrap()
}
pub fn internal_sparse_softmax(&self, dim: i64, half_to_float: bool) -> Tensor {
self.f_internal_sparse_softmax(dim, half_to_float).unwrap()
}
pub fn internal_sparse_softmax_backward_data(
&self,
grad_output: &Tensor,
output: &Tensor,
dim: i64,
) -> Tensor {
self.f_internal_sparse_softmax_backward_data(grad_output, output, dim).unwrap()
}
pub fn internal_sparse_softmax_backward_data_out(
&self,
out: &Tensor,
grad_output: &Tensor,
output: &Tensor,
dim: i64,
) -> Tensor {
self.f_internal_sparse_softmax_backward_data_out(out, grad_output, output, dim).unwrap()
}
pub fn internal_sparse_softmax_int(&self, dim: i64, dtype: impl Into<Option<Kind>>) -> Tensor {
self.f_internal_sparse_softmax_int(dim, dtype).unwrap()
}
pub fn internal_sparse_softmax_out(
&self,
out: &Tensor,
dim: i64,
half_to_float: bool,
) -> Tensor {
self.f_internal_sparse_softmax_out(out, dim, half_to_float).unwrap()
}
pub fn internal_sparse_sparse_matmul(&self, other: &Tensor) -> Tensor {
self.f_internal_sparse_sparse_matmul(other).unwrap()
}
pub fn internal_sparse_sparse_matmul_out(&self, out: &Tensor, other: &Tensor) -> Tensor {
self.f_internal_sparse_sparse_matmul_out(out, other).unwrap()
}
pub fn internal_sparse_sum(&self) -> Tensor {
self.f_internal_sparse_sum().unwrap()
}
pub fn internal_sparse_sum_backward(&self, grad: &Tensor, dim: impl IntList) -> Tensor {
self.f_internal_sparse_sum_backward(grad, dim).unwrap()
}
pub fn internal_sparse_sum_backward_out(
&self,
out: &Tensor,
grad: &Tensor,
dim: impl IntList,
) -> Tensor {
self.f_internal_sparse_sum_backward_out(out, grad, dim).unwrap()
}
pub fn internal_sparse_sum_dim(&self, dim: impl IntList) -> Tensor {
self.f_internal_sparse_sum_dim(dim).unwrap()
}
pub fn internal_sparse_sum_dim_dtype(&self, dim: impl IntList, dtype: Kind) -> Tensor {
self.f_internal_sparse_sum_dim_dtype(dim, dtype).unwrap()
}
pub fn internal_sparse_sum_dim_out(&self, out: &Tensor, dim: impl IntList) -> Tensor {
self.f_internal_sparse_sum_dim_out(out, dim).unwrap()
}
pub fn internal_sparse_sum_dtype(&self, dtype: Kind) -> Tensor {
self.f_internal_sparse_sum_dtype(dtype).unwrap()
}
pub fn internal_spdiags(
diagonals: &Tensor,
offsets: &Tensor,
shape: impl IntList,
layout: Option<Layout>,
) -> Tensor {
Tensor::f_internal_spdiags(diagonals, offsets, shape, layout).unwrap()
}
pub fn internal_spdiags_out(
out: &Tensor,
diagonals: &Tensor,
offsets: &Tensor,
shape: impl IntList,
layout: Option<Layout>,
) -> Tensor {
Tensor::f_internal_spdiags_out(out, diagonals, offsets, shape, layout).unwrap()
}
pub fn internal_stack<T: Borrow<Tensor>>(tensors: &[T], dim: i64) -> Tensor {
Tensor::f_internal_stack(tensors, dim).unwrap()
}
pub fn internal_stack_out<T: Borrow<Tensor>>(out: &Tensor, tensors: &[T], dim: i64) -> Tensor {
Tensor::f_internal_stack_out(out, tensors, dim).unwrap()
}
pub fn internal_standard_gamma(&self) -> Tensor {
self.f_internal_standard_gamma().unwrap()
}
pub fn internal_standard_gamma_grad(&self, output: &Tensor) -> Tensor {
self.f_internal_standard_gamma_grad(output).unwrap()
}
pub fn internal_standard_gamma_grad_out(&self, out: &Tensor, output: &Tensor) -> Tensor {
self.f_internal_standard_gamma_grad_out(out, output).unwrap()
}
pub fn internal_standard_gamma_out(&self, out: &Tensor) -> Tensor {
self.f_internal_standard_gamma_out(out).unwrap()
}
pub fn internal_test_ambiguous_defaults(dummy: &Tensor, a: i64, b: i64) -> Tensor {
Tensor::f_internal_test_ambiguous_defaults(dummy, a, b).unwrap()
}
pub fn internal_test_ambiguous_defaults_b(dummy: &Tensor, a: i64, b: &str) -> Tensor {
Tensor::f_internal_test_ambiguous_defaults_b(dummy, a, b).unwrap()
}
pub fn internal_test_autograd_multiple_dispatch(&self) -> Tensor {
self.f_internal_test_autograd_multiple_dispatch().unwrap()
}
pub fn internal_test_autograd_multiple_dispatch_fullcoverage_out(
&self,
out: &Tensor,
) -> Tensor {
self.f_internal_test_autograd_multiple_dispatch_fullcoverage_out(out).unwrap()
}
pub fn internal_test_autograd_multiple_dispatch_ntonly(&self, b: bool) -> Tensor {
self.f_internal_test_autograd_multiple_dispatch_ntonly(b).unwrap()
}
pub fn internal_test_autograd_multiple_dispatch_view(&self) -> Tensor {
self.f_internal_test_autograd_multiple_dispatch_view().unwrap()
}
pub fn internal_test_autograd_multiple_dispatch_view_copy(&self) -> Tensor {
self.f_internal_test_autograd_multiple_dispatch_view_copy().unwrap()
}
pub fn internal_test_autograd_multiple_dispatch_view_copy_out(&self, out: &Tensor) -> Tensor {
self.f_internal_test_autograd_multiple_dispatch_view_copy_out(out).unwrap()
}
pub fn internal_test_check_tensor(&self) -> Tensor {
self.f_internal_test_check_tensor().unwrap()
}
pub fn internal_test_functorch_fallback(&self, other: &Tensor) -> Tensor {
self.f_internal_test_functorch_fallback(other).unwrap()
}
pub fn internal_test_functorch_fallback_out(&self, out: &Tensor, other: &Tensor) -> Tensor {
self.f_internal_test_functorch_fallback_out(out, other).unwrap()
}
pub fn internal_test_optional_filled_intlist(
values: &Tensor,
addends: impl IntListOption,
) -> Tensor {
Tensor::f_internal_test_optional_filled_intlist(values, addends).unwrap()
}
pub fn internal_test_optional_filled_intlist_out(
out: &Tensor,
values: &Tensor,
addends: impl IntListOption,
) -> Tensor {
Tensor::f_internal_test_optional_filled_intlist_out(out, values, addends).unwrap()
}
pub fn internal_test_optional_floatlist(values: &Tensor, addends: impl DoubleList) -> Tensor {
Tensor::f_internal_test_optional_floatlist(values, addends).unwrap()
}
pub fn internal_test_optional_floatlist_out(
out: &Tensor,
values: &Tensor,
addends: impl DoubleList,
) -> Tensor {
Tensor::f_internal_test_optional_floatlist_out(out, values, addends).unwrap()
}
pub fn internal_test_optional_intlist(values: &Tensor, addends: impl IntListOption) -> Tensor {
Tensor::f_internal_test_optional_intlist(values, addends).unwrap()
}
pub fn internal_test_optional_intlist_out(
out: &Tensor,
values: &Tensor,
addends: impl IntListOption,
) -> Tensor {
Tensor::f_internal_test_optional_intlist_out(out, values, addends).unwrap()
}
pub fn internal_test_serialization_subcmul(&self, other: &Tensor) -> Tensor {
self.f_internal_test_serialization_subcmul(other).unwrap()
}
pub fn internal_test_string_default(dummy: &Tensor, a: &str, b: &str) -> Tensor {
Tensor::f_internal_test_string_default(dummy, a, b).unwrap()
}
pub fn internal_test_warn_in_autograd(&self) -> Tensor {
self.f_internal_test_warn_in_autograd().unwrap()
}
pub fn internal_test_warn_in_autograd_out(&self, out: &Tensor) -> Tensor {
self.f_internal_test_warn_in_autograd_out(out).unwrap()
}
pub fn internal_to_copy(&self, options: (Kind, Device), non_blocking: bool) -> Tensor {
self.f_internal_to_copy(options, non_blocking).unwrap()
}
pub fn internal_to_copy_out(&self, out: &Tensor, non_blocking: bool) -> Tensor {
self.f_internal_to_copy_out(out, non_blocking).unwrap()
}
pub fn internal_to_cpu<T: Borrow<Tensor>>(tensors: &[T]) -> Vec<Tensor> {
Tensor::f_internal_to_cpu(tensors).unwrap()
}
pub fn internal_to_dense(&self, dtype: impl Into<Option<Kind>>, masked_grad: bool) -> Tensor {
self.f_internal_to_dense(dtype, masked_grad).unwrap()
}
pub fn internal_to_dense_out(
&self,
out: &Tensor,
dtype: impl Into<Option<Kind>>,
masked_grad: bool,
) -> Tensor {
self.f_internal_to_dense_out(out, dtype, masked_grad).unwrap()
}
pub fn internal_to_sparse(
&self,
layout: Option<Layout>,
blocksize: impl IntListOption,
dense_dim: impl Into<Option<i64>>,
) -> Tensor {
self.f_internal_to_sparse(layout, blocksize, dense_dim).unwrap()
}
pub fn internal_to_sparse_bsc(
&self,
blocksize: impl IntList,
dense_dim: impl Into<Option<i64>>,
) -> Tensor {
self.f_internal_to_sparse_bsc(blocksize, dense_dim).unwrap()
}
pub fn internal_to_sparse_bsc_out(
&self,
out: &Tensor,
blocksize: impl IntList,
dense_dim: impl Into<Option<i64>>,
) -> Tensor {
self.f_internal_to_sparse_bsc_out(out, blocksize, dense_dim).unwrap()
}
pub fn internal_to_sparse_bsr(
&self,
blocksize: impl IntList,
dense_dim: impl Into<Option<i64>>,
) -> Tensor {
self.f_internal_to_sparse_bsr(blocksize, dense_dim).unwrap()
}
pub fn internal_to_sparse_bsr_out(
&self,
out: &Tensor,
blocksize: impl IntList,
dense_dim: impl Into<Option<i64>>,
) -> Tensor {
self.f_internal_to_sparse_bsr_out(out, blocksize, dense_dim).unwrap()
}
pub fn internal_to_sparse_csc(&self, dense_dim: impl Into<Option<i64>>) -> Tensor {
self.f_internal_to_sparse_csc(dense_dim).unwrap()
}
pub fn internal_to_sparse_csc_out(
&self,
out: &Tensor,
dense_dim: impl Into<Option<i64>>,
) -> Tensor {
self.f_internal_to_sparse_csc_out(out, dense_dim).unwrap()
}
pub fn internal_to_sparse_csr(&self, dense_dim: impl Into<Option<i64>>) -> Tensor {
self.f_internal_to_sparse_csr(dense_dim).unwrap()
}
pub fn internal_to_sparse_csr_out(
&self,
out: &Tensor,
dense_dim: impl Into<Option<i64>>,
) -> Tensor {
self.f_internal_to_sparse_csr_out(out, dense_dim).unwrap()
}
pub fn internal_to_sparse_out(
&self,
out: &Tensor,
layout: Option<Layout>,
blocksize: impl IntListOption,
dense_dim: impl Into<Option<i64>>,
) -> Tensor {
self.f_internal_to_sparse_out(out, layout, blocksize, dense_dim).unwrap()
}
pub fn internal_to_sparse_semi_structured(dense: &Tensor) -> (Tensor, Tensor) {
Tensor::f_internal_to_sparse_semi_structured(dense).unwrap()
}
pub fn internal_to_sparse_sparse_dim(&self, sparse_dim: i64) -> Tensor {
self.f_internal_to_sparse_sparse_dim(sparse_dim).unwrap()
}
pub fn internal_to_sparse_sparse_dim_out(&self, out: &Tensor, sparse_dim: i64) -> Tensor {
self.f_internal_to_sparse_sparse_dim_out(out, sparse_dim).unwrap()
}
pub fn internal_transform_bias_rescale_qkv(
qkv: &Tensor,
qkv_bias: &Tensor,
num_heads: i64,
) -> (Tensor, Tensor, Tensor) {
Tensor::f_internal_transform_bias_rescale_qkv(qkv, qkv_bias, num_heads).unwrap()
}
pub fn internal_transform_bias_rescale_qkv_out(
out0: &Tensor,
out1: &Tensor,
out2: &Tensor,
qkv: &Tensor,
qkv_bias: &Tensor,
num_heads: i64,
) -> (Tensor, Tensor, Tensor) {
Tensor::f_internal_transform_bias_rescale_qkv_out(
out0, out1, out2, qkv, qkv_bias, num_heads,
)
.unwrap()
}
pub fn internal_transformer_encoder_layer_fwd<T: Borrow<Tensor>>(
src: &Tensor,
embed_dim: i64,
num_heads: i64,
qkv_weight: &Tensor,
qkv_bias: &Tensor,
proj_weight: &Tensor,
proj_bias: &Tensor,
use_gelu: bool,
norm_first: bool,
eps: f64,
norm_weight_1: &Tensor,
norm_bias_1: &Tensor,
norm_weight_2: &Tensor,
norm_bias_2: &Tensor,
ffn_weight_1: &Tensor,
ffn_bias_1: &Tensor,
ffn_weight_2: &Tensor,
ffn_bias_2: &Tensor,
mask: Option<T>,
mask_type: impl Into<Option<i64>>,
) -> Tensor {
Tensor::f_internal_transformer_encoder_layer_fwd(
src,
embed_dim,
num_heads,
qkv_weight,
qkv_bias,
proj_weight,
proj_bias,
use_gelu,
norm_first,
eps,
norm_weight_1,
norm_bias_1,
norm_weight_2,
norm_bias_2,
ffn_weight_1,
ffn_bias_1,
ffn_weight_2,
ffn_bias_2,
mask,
mask_type,
)
.unwrap()
}
pub fn internal_transformer_encoder_layer_fwd_out<T: Borrow<Tensor>>(
out: &Tensor,
src: &Tensor,
embed_dim: i64,
num_heads: i64,
qkv_weight: &Tensor,
qkv_bias: &Tensor,
proj_weight: &Tensor,
proj_bias: &Tensor,
use_gelu: bool,
norm_first: bool,
eps: f64,
norm_weight_1: &Tensor,
norm_bias_1: &Tensor,
norm_weight_2: &Tensor,
norm_bias_2: &Tensor,
ffn_weight_1: &Tensor,
ffn_bias_1: &Tensor,
ffn_weight_2: &Tensor,
ffn_bias_2: &Tensor,
mask: Option<T>,
mask_type: impl Into<Option<i64>>,
) -> Tensor {
Tensor::f_internal_transformer_encoder_layer_fwd_out(
out,
src,
embed_dim,
num_heads,
qkv_weight,
qkv_bias,
proj_weight,
proj_bias,
use_gelu,
norm_first,
eps,
norm_weight_1,
norm_bias_1,
norm_weight_2,
norm_bias_2,
ffn_weight_1,
ffn_bias_1,
ffn_weight_2,
ffn_bias_2,
mask,
mask_type,
)
.unwrap()
}
pub fn internal_trilinear(
i1: &Tensor,
i2: &Tensor,
i3: &Tensor,
expand1: impl IntList,
expand2: impl IntList,
expand3: impl IntList,
sumdim: impl IntList,
unroll_dim: i64,
) -> Tensor {
Tensor::f_internal_trilinear(i1, i2, i3, expand1, expand2, expand3, sumdim, unroll_dim)
.unwrap()
}
pub fn internal_trilinear_out(
out: &Tensor,
i1: &Tensor,
i2: &Tensor,
i3: &Tensor,
expand1: impl IntList,
expand2: impl IntList,
expand3: impl IntList,
sumdim: impl IntList,
unroll_dim: i64,
) -> Tensor {
Tensor::f_internal_trilinear_out(
out, i1, i2, i3, expand1, expand2, expand3, sumdim, unroll_dim,
)
.unwrap()
}
pub fn internal_triton_multi_head_attention<T: Borrow<Tensor>>(
query: &Tensor,
key: &Tensor,
value: &Tensor,
embed_dim: i64,
num_head: i64,
qkv_weight: &Tensor,
qkv_bias: &Tensor,
proj_weight: &Tensor,
proj_bias: &Tensor,
mask: Option<T>,
) -> Tensor {
Tensor::f_internal_triton_multi_head_attention(
query,
key,
value,
embed_dim,
num_head,
qkv_weight,
qkv_bias,
proj_weight,
proj_bias,
mask,
)
.unwrap()
}
pub fn internal_triton_multi_head_attention_out<T: Borrow<Tensor>>(
out: &Tensor,
query: &Tensor,
key: &Tensor,
value: &Tensor,
embed_dim: i64,
num_head: i64,
qkv_weight: &Tensor,
qkv_bias: &Tensor,
proj_weight: &Tensor,
proj_bias: &Tensor,
mask: Option<T>,
) -> Tensor {
Tensor::f_internal_triton_multi_head_attention_out(
out,
query,
key,
value,
embed_dim,
num_head,
qkv_weight,
qkv_bias,
proj_weight,
proj_bias,
mask,
)
.unwrap()
}
pub fn internal_triton_scaled_dot_attention(
q: &Tensor,
k: &Tensor,
v: &Tensor,
dropout_p: f64,
) -> Tensor {
Tensor::f_internal_triton_scaled_dot_attention(q, k, v, dropout_p).unwrap()
}
pub fn internal_triton_scaled_dot_attention_out(
out: &Tensor,
q: &Tensor,
k: &Tensor,
v: &Tensor,
dropout_p: f64,
) -> Tensor {
Tensor::f_internal_triton_scaled_dot_attention_out(out, q, k, v, dropout_p).unwrap()
}
pub fn internal_unique(&self, sorted: bool, return_inverse: bool) -> (Tensor, Tensor) {
self.f_internal_unique(sorted, return_inverse).unwrap()
}
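/// Raw binding for ATen's `_unique2`, returning `(values, inverse_indices, counts)`.
/// A minimal sketch against the public `tch` API:
///
/// ```no_run
/// use tch::{Device, Kind, Tensor};
/// let t = Tensor::ones(&[4], (Kind::Int64, Device::Cpu));
/// let (values, _inverse, counts) = t.internal_unique2(true, true, true);
/// assert_eq!(values.size(), vec![1]);       // the only distinct value is 1
/// assert_eq!(counts.int64_value(&[0]), 4);  // and it occurs four times
/// ```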
pub fn internal_unique2(
&self,
sorted: bool,
return_inverse: bool,
return_counts: bool,
) -> (Tensor, Tensor, Tensor) {
self.f_internal_unique2(sorted, return_inverse, return_counts).unwrap()
}
pub fn internal_unique2_out(
&self,
out0: &Tensor,
out1: &Tensor,
out2: &Tensor,
sorted: bool,
return_inverse: bool,
return_counts: bool,
) -> (Tensor, Tensor, Tensor) {
self.f_internal_unique2_out(out0, out1, out2, sorted, return_inverse, return_counts)
.unwrap()
}
pub fn internal_unique_out(
&self,
out0: &Tensor,
out1: &Tensor,
sorted: bool,
return_inverse: bool,
) -> (Tensor, Tensor) {
self.f_internal_unique_out(out0, out1, sorted, return_inverse).unwrap()
}
pub fn internal_unpack_dual(dual: &Tensor, level: i64) -> (Tensor, Tensor) {
Tensor::f_internal_unpack_dual(dual, level).unwrap()
}
pub fn internal_unsafe_index<T: Borrow<Tensor>>(&self, indices: &[Option<T>]) -> Tensor {
self.f_internal_unsafe_index(indices).unwrap()
}
pub fn internal_unsafe_index_put<T: Borrow<Tensor>>(
&self,
indices: &[Option<T>],
values: &Tensor,
accumulate: bool,
) -> Tensor {
self.f_internal_unsafe_index_put(indices, values, accumulate).unwrap()
}
pub fn internal_unsafe_view(&self, size: impl IntList) -> Tensor {
self.f_internal_unsafe_view(size).unwrap()
}
pub fn internal_unsafe_view_out(&self, out: &Tensor, size: impl IntList) -> Tensor {
self.f_internal_unsafe_view_out(out, size).unwrap()
}
pub fn internal_upsample_bicubic2d_aa(
&self,
output_size: impl IntList,
align_corners: bool,
scales_h: impl Into<Option<f64>>,
scales_w: impl Into<Option<f64>>,
) -> Tensor {
self.f_internal_upsample_bicubic2d_aa(output_size, align_corners, scales_h, scales_w)
.unwrap()
}
pub fn internal_upsample_bicubic2d_aa_backward(
grad_output: &Tensor,
output_size: impl IntList,
input_size: impl IntList,
align_corners: bool,
scales_h: impl Into<Option<f64>>,
scales_w: impl Into<Option<f64>>,
) -> Tensor {
Tensor::f_internal_upsample_bicubic2d_aa_backward(
grad_output,
output_size,
input_size,
align_corners,
scales_h,
scales_w,
)
.unwrap()
}
pub fn internal_upsample_bicubic2d_aa_backward_grad_input(
grad_input: &Tensor,
grad_output: &Tensor,
output_size: impl IntList,
input_size: impl IntList,
align_corners: bool,
scales_h: impl Into<Option<f64>>,
scales_w: impl Into<Option<f64>>,
) -> Tensor {
Tensor::f_internal_upsample_bicubic2d_aa_backward_grad_input(
grad_input,
grad_output,
output_size,
input_size,
align_corners,
scales_h,
scales_w,
)
.unwrap()
}
pub fn internal_upsample_bicubic2d_aa_out(
&self,
out: &Tensor,
output_size: impl IntList,
align_corners: bool,
scales_h: impl Into<Option<f64>>,
scales_w: impl Into<Option<f64>>,
) -> Tensor {
self.f_internal_upsample_bicubic2d_aa_out(
out,
output_size,
align_corners,
scales_h,
scales_w,
)
.unwrap()
}
pub fn internal_upsample_bicubic2d_aa_vec(
&self,
output_size: impl IntListOption,
align_corners: bool,
scale_factors: impl DoubleList,
) -> Tensor {
self.f_internal_upsample_bicubic2d_aa_vec(output_size, align_corners, scale_factors)
.unwrap()
}
pub fn internal_upsample_bilinear2d_aa(
&self,
output_size: impl IntList,
align_corners: bool,
scales_h: impl Into<Option<f64>>,
scales_w: impl Into<Option<f64>>,
) -> Tensor {
self.f_internal_upsample_bilinear2d_aa(output_size, align_corners, scales_h, scales_w)
.unwrap()
}
pub fn internal_upsample_bilinear2d_aa_backward(
grad_output: &Tensor,
output_size: impl IntList,
input_size: impl IntList,
align_corners: bool,
scales_h: impl Into<Option<f64>>,
scales_w: impl Into<Option<f64>>,
) -> Tensor {
Tensor::f_internal_upsample_bilinear2d_aa_backward(
grad_output,
output_size,
input_size,
align_corners,
scales_h,
scales_w,
)
.unwrap()
}
pub fn internal_upsample_bilinear2d_aa_backward_grad_input(
grad_input: &Tensor,
grad_output: &Tensor,
output_size: impl IntList,
input_size: impl IntList,
align_corners: bool,
scales_h: impl Into<Option<f64>>,
scales_w: impl Into<Option<f64>>,
) -> Tensor {
Tensor::f_internal_upsample_bilinear2d_aa_backward_grad_input(
grad_input,
grad_output,
output_size,
input_size,
align_corners,
scales_h,
scales_w,
)
.unwrap()
}
pub fn internal_upsample_bilinear2d_aa_out(
&self,
out: &Tensor,
output_size: impl IntList,
align_corners: bool,
scales_h: impl Into<Option<f64>>,
scales_w: impl Into<Option<f64>>,
) -> Tensor {
self.f_internal_upsample_bilinear2d_aa_out(
out,
output_size,
align_corners,
scales_h,
scales_w,
)
.unwrap()
}
pub fn internal_upsample_bilinear2d_aa_vec(
&self,
output_size: impl IntListOption,
align_corners: bool,
scale_factors: impl DoubleList,
) -> Tensor {
self.f_internal_upsample_bilinear2d_aa_vec(output_size, align_corners, scale_factors)
.unwrap()
}
pub fn internal_upsample_nearest_exact1d(
&self,
output_size: impl IntList,
scales: impl Into<Option<f64>>,
) -> Tensor {
self.f_internal_upsample_nearest_exact1d(output_size, scales).unwrap()
}
pub fn internal_upsample_nearest_exact1d_backward(
grad_output: &Tensor,
output_size: impl IntList,
input_size: impl IntList,
scales: impl Into<Option<f64>>,
) -> Tensor {
Tensor::f_internal_upsample_nearest_exact1d_backward(
grad_output,
output_size,
input_size,
scales,
)
.unwrap()
}
pub fn internal_upsample_nearest_exact1d_backward_grad_input(
grad_input: &Tensor,
grad_output: &Tensor,
output_size: impl IntList,
input_size: impl IntList,
scales: impl Into<Option<f64>>,
) -> Tensor {
Tensor::f_internal_upsample_nearest_exact1d_backward_grad_input(
grad_input,
grad_output,
output_size,
input_size,
scales,
)
.unwrap()
}
pub fn internal_upsample_nearest_exact1d_out(
&self,
out: &Tensor,
output_size: impl IntList,
scales: impl Into<Option<f64>>,
) -> Tensor {
self.f_internal_upsample_nearest_exact1d_out(out, output_size, scales).unwrap()
}
pub fn internal_upsample_nearest_exact1d_vec(
&self,
output_size: impl IntListOption,
scale_factors: impl DoubleList,
) -> Tensor {
self.f_internal_upsample_nearest_exact1d_vec(output_size, scale_factors).unwrap()
}
pub fn internal_upsample_nearest_exact2d(
&self,
output_size: impl IntList,
scales_h: impl Into<Option<f64>>,
scales_w: impl Into<Option<f64>>,
) -> Tensor {
self.f_internal_upsample_nearest_exact2d(output_size, scales_h, scales_w).unwrap()
}
pub fn internal_upsample_nearest_exact2d_backward(
grad_output: &Tensor,
output_size: impl IntList,
input_size: impl IntList,
scales_h: impl Into<Option<f64>>,
scales_w: impl Into<Option<f64>>,
) -> Tensor {
Tensor::f_internal_upsample_nearest_exact2d_backward(
grad_output,
output_size,
input_size,
scales_h,
scales_w,
)
.unwrap()
}
pub fn internal_upsample_nearest_exact2d_backward_grad_input(
grad_input: &Tensor,
grad_output: &Tensor,
output_size: impl IntList,
input_size: impl IntList,
scales_h: impl Into<Option<f64>>,
scales_w: impl Into<Option<f64>>,
) -> Tensor {
Tensor::f_internal_upsample_nearest_exact2d_backward_grad_input(
grad_input,
grad_output,
output_size,
input_size,
scales_h,
scales_w,
)
.unwrap()
}
pub fn internal_upsample_nearest_exact2d_out(
&self,
out: &Tensor,
output_size: impl IntList,
scales_h: impl Into<Option<f64>>,
scales_w: impl Into<Option<f64>>,
) -> Tensor {
self.f_internal_upsample_nearest_exact2d_out(out, output_size, scales_h, scales_w).unwrap()
}
pub fn internal_upsample_nearest_exact2d_vec(
&self,
output_size: impl IntListOption,
scale_factors: impl DoubleList,
) -> Tensor {
self.f_internal_upsample_nearest_exact2d_vec(output_size, scale_factors).unwrap()
}
pub fn internal_upsample_nearest_exact3d(
&self,
output_size: impl IntList,
scales_d: impl Into<Option<f64>>,
scales_h: impl Into<Option<f64>>,
scales_w: impl Into<Option<f64>>,
) -> Tensor {
self.f_internal_upsample_nearest_exact3d(output_size, scales_d, scales_h, scales_w).unwrap()
}
pub fn internal_upsample_nearest_exact3d_backward(
grad_output: &Tensor,
output_size: impl IntList,
input_size: impl IntList,
scales_d: impl Into<Option<f64>>,
scales_h: impl Into<Option<f64>>,
scales_w: impl Into<Option<f64>>,
) -> Tensor {
Tensor::f_internal_upsample_nearest_exact3d_backward(
grad_output,
output_size,
input_size,
scales_d,
scales_h,
scales_w,
)
.unwrap()
}
pub fn internal_upsample_nearest_exact3d_backward_grad_input(
grad_input: &Tensor,
grad_output: &Tensor,
output_size: impl IntList,
input_size: impl IntList,
scales_d: impl Into<Option<f64>>,
scales_h: impl Into<Option<f64>>,
scales_w: impl Into<Option<f64>>,
) -> Tensor {
Tensor::f_internal_upsample_nearest_exact3d_backward_grad_input(
grad_input,
grad_output,
output_size,
input_size,
scales_d,
scales_h,
scales_w,
)
.unwrap()
}
pub fn internal_upsample_nearest_exact3d_out(
&self,
out: &Tensor,
output_size: impl IntList,
scales_d: impl Into<Option<f64>>,
scales_h: impl Into<Option<f64>>,
scales_w: impl Into<Option<f64>>,
) -> Tensor {
self.f_internal_upsample_nearest_exact3d_out(out, output_size, scales_d, scales_h, scales_w)
.unwrap()
}
pub fn internal_upsample_nearest_exact3d_vec(
&self,
output_size: impl IntListOption,
scale_factors: impl DoubleList,
) -> Tensor {
self.f_internal_upsample_nearest_exact3d_vec(output_size, scale_factors).unwrap()
}
pub fn internal_use_cudnn_ctc_loss(
log_probs: &Tensor,
targets: &Tensor,
input_lengths: impl IntList,
target_lengths: impl IntList,
blank: i64,
) -> bool {
Tensor::f_internal_use_cudnn_ctc_loss(
log_probs,
targets,
input_lengths,
target_lengths,
blank,
)
.unwrap()
}
pub fn internal_use_cudnn_ctc_loss_tensor(
log_probs: &Tensor,
targets: &Tensor,
input_lengths: &Tensor,
target_lengths: &Tensor,
blank: i64,
) -> bool {
Tensor::f_internal_use_cudnn_ctc_loss_tensor(
log_probs,
targets,
input_lengths,
target_lengths,
blank,
)
.unwrap()
}
pub fn internal_use_cudnn_rnn_flatten_weight() -> bool {
Tensor::f_internal_use_cudnn_rnn_flatten_weight().unwrap()
}
pub fn internal_validate_compressed_sparse_indices(
is_crow: bool,
compressed_idx: &Tensor,
plain_idx: &Tensor,
cdim: i64,
dim: i64,
nnz: i64,
) {
Tensor::f_internal_validate_compressed_sparse_indices(
is_crow,
compressed_idx,
plain_idx,
cdim,
dim,
nnz,
)
.unwrap()
}
pub fn internal_validate_sparse_bsc_tensor_args(
ccol_indices: &Tensor,
row_indices: &Tensor,
values: &Tensor,
size: impl IntList,
) {
Tensor::f_internal_validate_sparse_bsc_tensor_args(ccol_indices, row_indices, values, size)
.unwrap()
}
pub fn internal_validate_sparse_bsr_tensor_args(
crow_indices: &Tensor,
col_indices: &Tensor,
values: &Tensor,
size: impl IntList,
) {
Tensor::f_internal_validate_sparse_bsr_tensor_args(crow_indices, col_indices, values, size)
.unwrap()
}
pub fn internal_validate_sparse_compressed_tensor_args(
compressed_indices: &Tensor,
plain_indices: &Tensor,
values: &Tensor,
size: impl IntList,
layout: Layout,
) {
Tensor::f_internal_validate_sparse_compressed_tensor_args(
compressed_indices,
plain_indices,
values,
size,
layout,
)
.unwrap()
}
pub fn internal_validate_sparse_csc_tensor_args(
ccol_indices: &Tensor,
row_indices: &Tensor,
values: &Tensor,
size: impl IntList,
) {
Tensor::f_internal_validate_sparse_csc_tensor_args(ccol_indices, row_indices, values, size)
.unwrap()
}
pub fn internal_validate_sparse_csr_tensor_args(
crow_indices: &Tensor,
col_indices: &Tensor,
values: &Tensor,
size: impl IntList,
) {
Tensor::f_internal_validate_sparse_csr_tensor_args(crow_indices, col_indices, values, size)
.unwrap()
}
pub fn internal_values(&self) -> Tensor {
self.f_internal_values().unwrap()
}
pub fn internal_values_copy(&self) -> Tensor {
self.f_internal_values_copy().unwrap()
}
pub fn internal_values_copy_out(&self, out: &Tensor) -> Tensor {
self.f_internal_values_copy_out(out).unwrap()
}
pub fn internal_version(&self) -> i64 {
self.f_internal_version().unwrap()
}
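/// Raw binding for ATen's `_weight_norm`: rescales `v` so that each slice along `dim`
/// gets the magnitude given by `g` (the weight-normalisation reparameterisation). A
/// minimal sketch, assuming the usual `tch` re-exports:
///
/// ```no_run
/// use tch::{Device, Kind, Tensor};
/// let v = Tensor::ones(&[3, 4], (Kind::Float, Device::Cpu));
/// let g = Tensor::ones(&[3, 1], (Kind::Float, Device::Cpu));
/// // Each row of ones has norm 2, so every entry of `w` should become g / 2 = 0.5 here.
/// let w = Tensor::internal_weight_norm(&v, &g, 0);
/// assert_eq!(w.size(), vec![3, 4]);
/// ```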
pub fn internal_weight_norm(v: &Tensor, g: &Tensor, dim: i64) -> Tensor {
Tensor::f_internal_weight_norm(v, g, dim).unwrap()
}
pub fn internal_weight_norm_differentiable_backward(
grad_w: &Tensor,
saved_v: &Tensor,
saved_g: &Tensor,
saved_norms: &Tensor,
dim: i64,
) -> (Tensor, Tensor) {
Tensor::f_internal_weight_norm_differentiable_backward(
grad_w,
saved_v,
saved_g,
saved_norms,
dim,
)
.unwrap()
}
pub fn internal_weight_norm_interface(v: &Tensor, g: &Tensor, dim: i64) -> (Tensor, Tensor) {
Tensor::f_internal_weight_norm_interface(v, g, dim).unwrap()
}
pub fn internal_weight_norm_interface_backward(
grad_w: &Tensor,
saved_v: &Tensor,
saved_g: &Tensor,
saved_norms: &Tensor,
dim: i64,
) -> (Tensor, Tensor) {
Tensor::f_internal_weight_norm_interface_backward(
grad_w,
saved_v,
saved_g,
saved_norms,
dim,
)
.unwrap()
}
pub fn internal_weight_norm_interface_backward_out(
out0: &Tensor,
out1: &Tensor,
grad_w: &Tensor,
saved_v: &Tensor,
saved_g: &Tensor,
saved_norms: &Tensor,
dim: i64,
) -> (Tensor, Tensor) {
Tensor::f_internal_weight_norm_interface_backward_out(
out0,
out1,
grad_w,
saved_v,
saved_g,
saved_norms,
dim,
)
.unwrap()
}
pub fn internal_weight_norm_interface_out(
out0: &Tensor,
out1: &Tensor,
v: &Tensor,
g: &Tensor,
dim: i64,
) -> (Tensor, Tensor) {
Tensor::f_internal_weight_norm_interface_out(out0, out1, v, g, dim).unwrap()
}
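/// Element-wise absolute value; the panicking counterpart of `f_abs`. A minimal usage
/// sketch, assuming the usual `tch` re-exports:
///
/// ```no_run
/// use tch::{Device, Kind, Tensor};
/// let t = Tensor::arange_start(-3i64, 3, (Kind::Int64, Device::Cpu)); // [-3, -2, -1, 0, 1, 2]
/// let a = t.abs();
/// assert_eq!(a.int64_value(&[0]), 3);
/// ```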
pub fn abs(&self) -> Tensor {
self.f_abs().unwrap()
}
pub fn abs_(&mut self) -> Tensor {
self.f_abs_().unwrap()
}
pub fn abs_out(&self, out: &Tensor) -> Tensor {
self.f_abs_out(out).unwrap()
}
pub fn absolute(&self) -> Tensor {
self.f_absolute().unwrap()
}
pub fn absolute_(&mut self) -> Tensor {
self.f_absolute_().unwrap()
}
pub fn absolute_out(&self, out: &Tensor) -> Tensor {
self.f_absolute_out(out).unwrap()
}
pub fn acos(&self) -> Tensor {
self.f_acos().unwrap()
}
pub fn acos_(&mut self) -> Tensor {
self.f_acos_().unwrap()
}
pub fn acos_out(&self, out: &Tensor) -> Tensor {
self.f_acos_out(out).unwrap()
}
pub fn acosh(&self) -> Tensor {
self.f_acosh().unwrap()
}
pub fn acosh_(&mut self) -> Tensor {
self.f_acosh_().unwrap()
}
pub fn acosh_out(&self, out: &Tensor) -> Tensor {
self.f_acosh_out(out).unwrap()
}
pub fn adaptive_avg_pool1d(&self, output_size: impl IntList) -> Tensor {
self.f_adaptive_avg_pool1d(output_size).unwrap()
}
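/// Adaptive average pooling picks the kernel and stride for you so that the spatial
/// output has exactly `output_size`, whatever the input resolution. A minimal sketch
/// against the public `tch` API:
///
/// ```no_run
/// use tch::{Device, Kind, Tensor};
/// // One NCHW image: 1 x 3 x 8 x 8.
/// let x = Tensor::arange(192i64, (Kind::Float, Device::Cpu)).reshape(&[1, 3, 8, 8]);
/// let y = x.adaptive_avg_pool2d(&[2, 2]);
/// assert_eq!(y.size(), vec![1, 3, 2, 2]);
/// ```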
pub fn adaptive_avg_pool2d(&self, output_size: impl IntList) -> Tensor {
self.f_adaptive_avg_pool2d(output_size).unwrap()
}
pub fn adaptive_avg_pool2d_out(&self, out: &Tensor, output_size: impl IntList) -> Tensor {
self.f_adaptive_avg_pool2d_out(out, output_size).unwrap()
}
pub fn adaptive_avg_pool3d(&self, output_size: impl IntList) -> Tensor {
self.f_adaptive_avg_pool3d(output_size).unwrap()
}
pub fn adaptive_avg_pool3d_backward(
&self,
grad_input: &Tensor,
grad_output: &Tensor,
) -> Tensor {
self.f_adaptive_avg_pool3d_backward(grad_input, grad_output).unwrap()
}
pub fn adaptive_avg_pool3d_out(&self, out: &Tensor, output_size: impl IntList) -> Tensor {
self.f_adaptive_avg_pool3d_out(out, output_size).unwrap()
}
pub fn adaptive_max_pool1d(&self, output_size: impl IntList) -> (Tensor, Tensor) {
self.f_adaptive_max_pool1d(output_size).unwrap()
}
pub fn adaptive_max_pool2d(&self, output_size: impl IntList) -> (Tensor, Tensor) {
self.f_adaptive_max_pool2d(output_size).unwrap()
}
pub fn adaptive_max_pool2d_backward(&self, grad_output: &Tensor, indices: &Tensor) -> Tensor {
self.f_adaptive_max_pool2d_backward(grad_output, indices).unwrap()
}
pub fn adaptive_max_pool2d_backward_grad_input(
&self,
grad_input: &Tensor,
grad_output: &Tensor,
indices: &Tensor,
) -> Tensor {
self.f_adaptive_max_pool2d_backward_grad_input(grad_input, grad_output, indices).unwrap()
}
pub fn adaptive_max_pool2d_out(
&self,
out: &Tensor,
indices: &Tensor,
output_size: impl IntList,
) -> (Tensor, Tensor) {
self.f_adaptive_max_pool2d_out(out, indices, output_size).unwrap()
}
pub fn adaptive_max_pool3d(&self, output_size: impl IntList) -> (Tensor, Tensor) {
self.f_adaptive_max_pool3d(output_size).unwrap()
}
pub fn adaptive_max_pool3d_backward(&self, grad_output: &Tensor, indices: &Tensor) -> Tensor {
self.f_adaptive_max_pool3d_backward(grad_output, indices).unwrap()
}
pub fn adaptive_max_pool3d_backward_grad_input(
&self,
grad_input: &Tensor,
grad_output: &Tensor,
indices: &Tensor,
) -> Tensor {
self.f_adaptive_max_pool3d_backward_grad_input(grad_input, grad_output, indices).unwrap()
}
pub fn adaptive_max_pool3d_out(
&self,
out: &Tensor,
indices: &Tensor,
output_size: impl IntList,
) -> (Tensor, Tensor) {
self.f_adaptive_max_pool3d_out(out, indices, output_size).unwrap()
}
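/// Element-wise addition; the `g_` prefix keeps the generated name from clashing with
/// the `std::ops` operator overloads that the crate also provides, so `a.g_add(&b)` and
/// `&a + &b` compute the same thing. A minimal sketch, assuming the usual `tch`
/// re-exports:
///
/// ```no_run
/// use tch::{Device, Kind, Tensor};
/// let a = Tensor::arange(4i64, (Kind::Float, Device::Cpu)); // [0, 1, 2, 3]
/// let b = Tensor::arange(4i64, (Kind::Float, Device::Cpu));
/// let c = a.g_add(&b);          // [0, 2, 4, 6]
/// let d = a.g_add_scalar(1.0);  // [1, 2, 3, 4]
/// assert_eq!(c.double_value(&[3]), 6.0);
/// assert_eq!(d.double_value(&[3]), 4.0);
/// ```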
pub fn g_add(&self, other: &Tensor) -> Tensor {
self.f_add(other).unwrap()
}
pub fn g_add_(&mut self, other: &Tensor) -> Tensor {
self.f_add_(other).unwrap()
}
pub fn add_out(&self, out: &Tensor, other: &Tensor) -> Tensor {
self.f_add_out(out, other).unwrap()
}
pub fn g_add_scalar<S: Into<Scalar>>(&self, other: S) -> Tensor {
self.f_add_scalar(other).unwrap()
}
pub fn g_add_scalar_<S: Into<Scalar>>(&mut self, other: S) -> Tensor {
self.f_add_scalar_(other).unwrap()
}
pub fn add_scalar_out<S: Into<Scalar>>(&self, out: &Tensor, other: S) -> Tensor {
self.f_add_scalar_out(out, other).unwrap()
}
pub fn addbmm(&self, batch1: &Tensor, batch2: &Tensor) -> Tensor {
self.f_addbmm(batch1, batch2).unwrap()
}
pub fn addbmm_(&mut self, batch1: &Tensor, batch2: &Tensor) -> Tensor {
self.f_addbmm_(batch1, batch2).unwrap()
}
pub fn addbmm_out(&self, out: &Tensor, batch1: &Tensor, batch2: &Tensor) -> Tensor {
self.f_addbmm_out(out, batch1, batch2).unwrap()
}
pub fn addcdiv(&self, tensor1: &Tensor, tensor2: &Tensor) -> Tensor {
self.f_addcdiv(tensor1, tensor2).unwrap()
}
pub fn addcdiv_(&mut self, tensor1: &Tensor, tensor2: &Tensor) -> Tensor {
self.f_addcdiv_(tensor1, tensor2).unwrap()
}
pub fn addcdiv_out(&self, out: &Tensor, tensor1: &Tensor, tensor2: &Tensor) -> Tensor {
self.f_addcdiv_out(out, tensor1, tensor2).unwrap()
}
pub fn addcmul(&self, tensor1: &Tensor, tensor2: &Tensor) -> Tensor {
self.f_addcmul(tensor1, tensor2).unwrap()
}
pub fn addcmul_(&mut self, tensor1: &Tensor, tensor2: &Tensor) -> Tensor {
self.f_addcmul_(tensor1, tensor2).unwrap()
}
pub fn addcmul_out(&self, out: &Tensor, tensor1: &Tensor, tensor2: &Tensor) -> Tensor {
self.f_addcmul_out(out, tensor1, tensor2).unwrap()
}
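/// `addmm` computes `self + mat1.matmul(mat2)`, i.e. a fused bias-plus-matmul. A
/// minimal sketch using the crate's usual re-exports:
///
/// ```no_run
/// use tch::{Device, Kind, Tensor};
/// let bias = Tensor::zeros(&[2, 2], (Kind::Float, Device::Cpu));
/// let m1 = Tensor::eye(2, (Kind::Float, Device::Cpu));
/// let m2 = Tensor::arange(4i64, (Kind::Float, Device::Cpu)).reshape(&[2, 2]);
/// let y = bias.addmm(&m1, &m2); // identity times m2 plus a zero bias is just m2
/// assert_eq!(y.double_value(&[1, 1]), 3.0);
/// ```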
pub fn addmm(&self, mat1: &Tensor, mat2: &Tensor) -> Tensor {
self.f_addmm(mat1, mat2).unwrap()
}
pub fn addmm_(&mut self, mat1: &Tensor, mat2: &Tensor) -> Tensor {
self.f_addmm_(mat1, mat2).unwrap()
}
pub fn addmm_out(&self, out: &Tensor, mat1: &Tensor, mat2: &Tensor) -> Tensor {
self.f_addmm_out(out, mat1, mat2).unwrap()
}
pub fn addmv(&self, mat: &Tensor, vec: &Tensor) -> Tensor {
self.f_addmv(mat, vec).unwrap()
}
pub fn addmv_(&mut self, mat: &Tensor, vec: &Tensor) -> Tensor {
self.f_addmv_(mat, vec).unwrap()
}
pub fn addmv_out(&self, out: &Tensor, mat: &Tensor, vec: &Tensor) -> Tensor {
self.f_addmv_out(out, mat, vec).unwrap()
}
pub fn addr(&self, vec1: &Tensor, vec2: &Tensor) -> Tensor {
self.f_addr(vec1, vec2).unwrap()
}
pub fn addr_(&mut self, vec1: &Tensor, vec2: &Tensor) -> Tensor {
self.f_addr_(vec1, vec2).unwrap()
}
pub fn addr_out(&self, out: &Tensor, vec1: &Tensor, vec2: &Tensor) -> Tensor {
self.f_addr_out(out, vec1, vec2).unwrap()
}
pub fn adjoint(&self) -> Tensor {
self.f_adjoint().unwrap()
}
pub fn affine_grid_generator(
theta: &Tensor,
size: impl IntList,
align_corners: bool,
) -> Tensor {
Tensor::f_affine_grid_generator(theta, size, align_corners).unwrap()
}
pub fn affine_grid_generator_backward(
grad: &Tensor,
size: impl IntList,
align_corners: bool,
) -> Tensor {
Tensor::f_affine_grid_generator_backward(grad, size, align_corners).unwrap()
}
pub fn affine_grid_generator_out(
out: &Tensor,
theta: &Tensor,
size: impl IntList,
align_corners: bool,
) -> Tensor {
Tensor::f_affine_grid_generator_out(out, theta, size, align_corners).unwrap()
}
pub fn alias(&self) -> Tensor {
self.f_alias().unwrap()
}
pub fn alias_copy(&self) -> Tensor {
self.f_alias_copy().unwrap()
}
pub fn alias_copy_out(&self, out: &Tensor) -> Tensor {
self.f_alias_copy_out(out).unwrap()
}
pub fn align_as(&self, other: &Tensor) -> Tensor {
self.f_align_as(other).unwrap()
}
pub fn align_tensors<T: Borrow<Tensor>>(tensors: &[T]) -> Vec<Tensor> {
Tensor::f_align_tensors(tensors).unwrap()
}
pub fn all(&self) -> Tensor {
self.f_all().unwrap()
}
pub fn all_all_out(&self, out: &Tensor) -> Tensor {
self.f_all_all_out(out).unwrap()
}
pub fn all_dim(&self, dim: i64, keepdim: bool) -> Tensor {
self.f_all_dim(dim, keepdim).unwrap()
}
pub fn all_out(&self, out: &Tensor, dim: i64, keepdim: bool) -> Tensor {
self.f_all_out(out, dim, keepdim).unwrap()
}
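/// Returns `true` when every element of `self` is within `atol + rtol * |other|` of the
/// corresponding element of `other`. A minimal sketch, assuming the usual `tch`
/// re-exports:
///
/// ```no_run
/// use tch::{Device, Kind, Tensor};
/// let a = Tensor::ones(&[3], (Kind::Float, Device::Cpu));
/// let b = a.g_add_scalar(1e-4); // off by 1e-4 everywhere
/// assert!(a.allclose(&b, 1e-3, 1e-5, false));
/// assert!(!a.allclose(&b, 1e-6, 1e-8, false));
/// ```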
pub fn allclose(&self, other: &Tensor, rtol: f64, atol: f64, equal_nan: bool) -> bool {
self.f_allclose(other, rtol, atol, equal_nan).unwrap()
}
pub fn alpha_dropout(&self, p: f64, train: bool) -> Tensor {
self.f_alpha_dropout(p, train).unwrap()
}
pub fn alpha_dropout_(&mut self, p: f64, train: bool) -> Tensor {
self.f_alpha_dropout_(p, train).unwrap()
}
pub fn amax(&self, dim: impl IntList, keepdim: bool) -> Tensor {
self.f_amax(dim, keepdim).unwrap()
}
pub fn amax_out(&self, out: &Tensor, dim: impl IntList, keepdim: bool) -> Tensor {
self.f_amax_out(out, dim, keepdim).unwrap()
}
pub fn amin(&self, dim: impl IntList, keepdim: bool) -> Tensor {
self.f_amin(dim, keepdim).unwrap()
}
pub fn amin_out(&self, out: &Tensor, dim: impl IntList, keepdim: bool) -> Tensor {
self.f_amin_out(out, dim, keepdim).unwrap()
}
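/// `aminmax` returns the minimum and maximum in one pass; with `dim = None` it reduces
/// over the whole tensor. A minimal sketch against the public `tch` API:
///
/// ```no_run
/// use tch::{Device, Kind, Tensor};
/// let t = Tensor::arange(6i64, (Kind::Float, Device::Cpu)).reshape(&[2, 3]);
/// let (min, max) = t.aminmax(None::<i64>, false);
/// assert_eq!(min.double_value(&[]), 0.0);
/// assert_eq!(max.double_value(&[]), 5.0);
/// ```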
pub fn aminmax(&self, dim: impl Into<Option<i64>>, keepdim: bool) -> (Tensor, Tensor) {
self.f_aminmax(dim, keepdim).unwrap()
}
pub fn aminmax_out(
&self,
min: &Tensor,
max: &Tensor,
dim: impl Into<Option<i64>>,
keepdim: bool,
) -> (Tensor, Tensor) {
self.f_aminmax_out(min, max, dim, keepdim).unwrap()
}
pub fn angle(&self) -> Tensor {
self.f_angle().unwrap()
}
pub fn angle_out(&self, out: &Tensor) -> Tensor {
self.f_angle_out(out).unwrap()
}
pub fn any(&self) -> Tensor {
self.f_any().unwrap()
}
pub fn any_all_out(&self, out: &Tensor) -> Tensor {
self.f_any_all_out(out).unwrap()
}
pub fn any_dim(&self, dim: i64, keepdim: bool) -> Tensor {
self.f_any_dim(dim, keepdim).unwrap()
}
pub fn any_out(&self, out: &Tensor, dim: i64, keepdim: bool) -> Tensor {
self.f_any_out(out, dim, keepdim).unwrap()
}
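/// `arange` and its `_start`/`_start_step` variants build evenly spaced values over the
/// half-open interval `[start, end)` (`start` defaults to 0 and the step to 1). A
/// minimal sketch, assuming the usual `tch` re-exports:
///
/// ```no_run
/// use tch::{Device, Kind, Tensor};
/// let a = Tensor::arange(5i64, (Kind::Int64, Device::Cpu));                    // [0, 1, 2, 3, 4]
/// let b = Tensor::arange_start(2i64, 5, (Kind::Int64, Device::Cpu));           // [2, 3, 4]
/// let c = Tensor::arange_start_step(0i64, 10, 3, (Kind::Int64, Device::Cpu));  // [0, 3, 6, 9]
/// assert_eq!(a.size(), vec![5]);
/// assert_eq!(b.int64_value(&[0]), 2);
/// assert_eq!(c.size(), vec![4]);
/// ```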
pub fn arange<S: Into<Scalar>>(end: S, options: (Kind, Device)) -> Tensor {
Tensor::f_arange(end, options).unwrap()
}
pub fn arange_start<S: Into<Scalar>>(start: S, end: S, options: (Kind, Device)) -> Tensor {
Tensor::f_arange_start(start, end, options).unwrap()
}
pub fn arange_start_step<S: Into<Scalar>>(
start: S,
end: S,
step: S,
options: (Kind, Device),
) -> Tensor {
Tensor::f_arange_start_step(start, end, step, options).unwrap()
}
pub fn arccos(&self) -> Tensor {
self.f_arccos().unwrap()
}
pub fn arccos_(&mut self) -> Tensor {
self.f_arccos_().unwrap()
}
pub fn arccos_out(&self, out: &Tensor) -> Tensor {
self.f_arccos_out(out).unwrap()
}
pub fn arccosh(&self) -> Tensor {
self.f_arccosh().unwrap()
}
pub fn arccosh_(&mut self) -> Tensor {
self.f_arccosh_().unwrap()
}
pub fn arccosh_out(&self, out: &Tensor) -> Tensor {
self.f_arccosh_out(out).unwrap()
}
pub fn arcsin(&self) -> Tensor {
self.f_arcsin().unwrap()
}
pub fn arcsin_(&mut self) -> Tensor {
self.f_arcsin_().unwrap()
}
pub fn arcsin_out(&self, out: &Tensor) -> Tensor {
self.f_arcsin_out(out).unwrap()
}
pub fn arcsinh(&self) -> Tensor {
self.f_arcsinh().unwrap()
}
pub fn arcsinh_(&mut self) -> Tensor {
self.f_arcsinh_().unwrap()
}
pub fn arcsinh_out(&self, out: &Tensor) -> Tensor {
self.f_arcsinh_out(out).unwrap()
}
pub fn arctan(&self) -> Tensor {
self.f_arctan().unwrap()
}
pub fn arctan2(&self, other: &Tensor) -> Tensor {
self.f_arctan2(other).unwrap()
}
pub fn arctan2_(&mut self, other: &Tensor) -> Tensor {
self.f_arctan2_(other).unwrap()
}
pub fn arctan2_out(&self, out: &Tensor, other: &Tensor) -> Tensor {
self.f_arctan2_out(out, other).unwrap()
}
pub fn arctan_(&mut self) -> Tensor {
self.f_arctan_().unwrap()
}
pub fn arctan_out(&self, out: &Tensor) -> Tensor {
self.f_arctan_out(out).unwrap()
}
pub fn arctanh(&self) -> Tensor {
self.f_arctanh().unwrap()
}
pub fn arctanh_(&mut self) -> Tensor {
self.f_arctanh_().unwrap()
}
pub fn arctanh_out(&self, out: &Tensor) -> Tensor {
self.f_arctanh_out(out).unwrap()
}
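/// `argmax` takes `dim: impl Into<Option<i64>>`: pass an integer to reduce along that
/// dimension, or `None` to take the argmax of the flattened tensor. A minimal sketch
/// using the crate's usual re-exports:
///
/// ```no_run
/// use tch::{Device, Kind, Tensor};
/// let t = Tensor::arange(6i64, (Kind::Float, Device::Cpu)).reshape(&[2, 3]);
/// let flat = t.argmax(None::<i64>, false); // index into the flattened tensor
/// let rows = t.argmax(1, false);           // per-row argmax: [2, 2]
/// assert_eq!(flat.int64_value(&[]), 5);
/// assert_eq!(rows.size(), vec![2]);
/// ```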
pub fn argmax(&self, dim: impl Into<Option<i64>>, keepdim: bool) -> Tensor {
self.f_argmax(dim, keepdim).unwrap()
}
pub fn argmax_out(&self, out: &Tensor, dim: impl Into<Option<i64>>, keepdim: bool) -> Tensor {
self.f_argmax_out(out, dim, keepdim).unwrap()
}
pub fn argmin(&self, dim: impl Into<Option<i64>>, keepdim: bool) -> Tensor {
self.f_argmin(dim, keepdim).unwrap()
}
pub fn argmin_out(&self, out: &Tensor, dim: impl Into<Option<i64>>, keepdim: bool) -> Tensor {
self.f_argmin_out(out, dim, keepdim).unwrap()
}
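/// `argsort` returns the indices that would sort the tensor along `dim`. A minimal
/// sketch; `Tensor::from_slice` is assumed to be available in the crate version at hand
/// (it is not part of this generated file):
///
/// ```no_run
/// use tch::Tensor;
/// let t = Tensor::from_slice(&[3i64, 1, 2]); // assumed constructor, see above
/// let asc = t.argsort(0, false);  // [1, 2, 0]
/// let desc = t.argsort(0, true);  // [0, 2, 1]
/// assert_eq!(asc.int64_value(&[0]), 1);
/// assert_eq!(desc.int64_value(&[0]), 0);
/// ```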
pub fn argsort(&self, dim: i64, descending: bool) -> Tensor {
self.f_argsort(dim, descending).unwrap()
}
pub fn argsort_stable(&self, stable: bool, dim: i64, descending: bool) -> Tensor {
self.f_argsort_stable(stable, dim, descending).unwrap()
}
pub fn argsort_stable_out(
&self,
out: &Tensor,
stable: bool,
dim: i64,
descending: bool,
) -> Tensor {
self.f_argsort_stable_out(out, stable, dim, descending).unwrap()
}
pub fn argwhere(&self) -> Tensor {
self.f_argwhere().unwrap()
}
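/// `as_strided` reinterprets the underlying buffer with an explicit size, stride, and
/// optional storage offset, without copying. A minimal sketch, assuming the usual `tch`
/// re-exports:
///
/// ```no_run
/// use tch::{Device, Kind, Tensor};
/// let base = Tensor::arange(9i64, (Kind::Float, Device::Cpu));
/// // Three overlapping windows of length 3: rows start at offsets 0, 2 and 4.
/// let windows = base.as_strided(&[3, 3], &[2, 1], None::<i64>);
/// assert_eq!(windows.size(), vec![3, 3]);
/// assert_eq!(windows.double_value(&[2, 0]), 4.0);
/// ```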
pub fn as_strided(
&self,
size: impl IntList,
stride: impl IntList,
storage_offset: impl Into<Option<i64>>,
) -> Tensor {
self.f_as_strided(size, stride, storage_offset).unwrap()
}
pub fn as_strided_(
&mut self,
size: impl IntList,
stride: impl IntList,
storage_offset: impl Into<Option<i64>>,
) -> Tensor {
self.f_as_strided_(size, stride, storage_offset).unwrap()
}
pub fn as_strided_copy(
&self,
size: impl IntList,
stride: impl IntList,
storage_offset: impl Into<Option<i64>>,
) -> Tensor {
self.f_as_strided_copy(size, stride, storage_offset).unwrap()
}
pub fn as_strided_copy_out(
&self,
out: &Tensor,
size: impl IntList,
stride: impl IntList,
storage_offset: impl Into<Option<i64>>,
) -> Tensor {
self.f_as_strided_copy_out(out, size, stride, storage_offset).unwrap()
}
pub fn as_strided_scatter(
&self,
src: &Tensor,
size: impl IntList,
stride: impl IntList,
storage_offset: impl Into<Option<i64>>,
) -> Tensor {
self.f_as_strided_scatter(src, size, stride, storage_offset).unwrap()
}
pub fn as_strided_scatter_out(
&self,
out: &Tensor,
src: &Tensor,
size: impl IntList,
stride: impl IntList,
storage_offset: impl Into<Option<i64>>,
) -> Tensor {
self.f_as_strided_scatter_out(out, src, size, stride, storage_offset).unwrap()
}
pub fn asin(&self) -> Tensor {
self.f_asin().unwrap()
}
pub fn asin_(&mut self) -> Tensor {
self.f_asin_().unwrap()
}
pub fn asin_out(&self, out: &Tensor) -> Tensor {
self.f_asin_out(out).unwrap()
}
pub fn asinh(&self) -> Tensor {
self.f_asinh().unwrap()
}
pub fn asinh_(&mut self) -> Tensor {
self.f_asinh_().unwrap()
}
pub fn asinh_out(&self, out: &Tensor) -> Tensor {
self.f_asinh_out(out).unwrap()
}
pub fn atan(&self) -> Tensor {
self.f_atan().unwrap()
}
pub fn atan2(&self, other: &Tensor) -> Tensor {
self.f_atan2(other).unwrap()
}
pub fn atan2_(&mut self, other: &Tensor) -> Tensor {
self.f_atan2_(other).unwrap()
}
pub fn atan2_out(&self, out: &Tensor, other: &Tensor) -> Tensor {
self.f_atan2_out(out, other).unwrap()
}
pub fn atan_(&mut self) -> Tensor {
self.f_atan_().unwrap()
}
pub fn atan_out(&self, out: &Tensor) -> Tensor {
self.f_atan_out(out).unwrap()
}
pub fn atanh(&self) -> Tensor {
self.f_atanh().unwrap()
}
pub fn atanh_(&mut self) -> Tensor {
self.f_atanh_().unwrap()
}
pub fn atanh_out(&self, out: &Tensor) -> Tensor {
self.f_atanh_out(out).unwrap()
}
pub fn atleast_1d(&self) -> Tensor {
self.f_atleast_1d().unwrap()
}
pub fn atleast_1d_sequence<T: Borrow<Tensor>>(tensors: &[T]) -> Vec<Tensor> {
Tensor::f_atleast_1d_sequence(tensors).unwrap()
}
pub fn atleast_2d(&self) -> Tensor {
self.f_atleast_2d().unwrap()
}
pub fn atleast_2d_sequence<T: Borrow<Tensor>>(tensors: &[T]) -> Vec<Tensor> {
Tensor::f_atleast_2d_sequence(tensors).unwrap()
}
pub fn atleast_3d(&self) -> Tensor {
self.f_atleast_3d().unwrap()
}
pub fn atleast_3d_sequence<T: Borrow<Tensor>>(tensors: &[T]) -> Vec<Tensor> {
Tensor::f_atleast_3d_sequence(tensors).unwrap()
}
pub fn avg_pool1d(
&self,
kernel_size: impl IntList,
stride: impl IntList,
padding: impl IntList,
ceil_mode: bool,
count_include_pad: bool,
) -> Tensor {
self.f_avg_pool1d(kernel_size, stride, padding, ceil_mode, count_include_pad).unwrap()
}
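/// 2-D average pooling over an NCHW input. Passing `None` for
/// `divisor_override` divides each window sum by the (possibly
/// padding-adjusted) window size. A minimal sketch, assuming `x` is an
/// existing `(N, C, H, W)` `Tensor`:
/// `let y = x.avg_pool2d(&[2, 2], &[2, 2], &[0, 0], false, true, None);`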
pub fn avg_pool2d(
&self,
kernel_size: impl IntList,
stride: impl IntList,
padding: impl IntList,
ceil_mode: bool,
count_include_pad: bool,
divisor_override: impl Into<Option<i64>>,
) -> Tensor {
self.f_avg_pool2d(
kernel_size,
stride,
padding,
ceil_mode,
count_include_pad,
divisor_override,
)
.unwrap()
}
pub fn avg_pool2d_backward(
&self,
grad_output: &Tensor,
kernel_size: impl IntList,
stride: impl IntList,
padding: impl IntList,
ceil_mode: bool,
count_include_pad: bool,
divisor_override: impl Into<Option<i64>>,
) -> Tensor {
self.f_avg_pool2d_backward(
grad_output,
kernel_size,
stride,
padding,
ceil_mode,
count_include_pad,
divisor_override,
)
.unwrap()
}
pub fn avg_pool2d_backward_grad_input(
&self,
grad_input: &Tensor,
grad_output: &Tensor,
kernel_size: impl IntList,
stride: impl IntList,
padding: impl IntList,
ceil_mode: bool,
count_include_pad: bool,
divisor_override: impl Into<Option<i64>>,
) -> Tensor {
self.f_avg_pool2d_backward_grad_input(
grad_input,
grad_output,
kernel_size,
stride,
padding,
ceil_mode,
count_include_pad,
divisor_override,
)
.unwrap()
}
pub fn avg_pool2d_out(
&self,
out: &Tensor,
kernel_size: impl IntList,
stride: impl IntList,
padding: impl IntList,
ceil_mode: bool,
count_include_pad: bool,
divisor_override: impl Into<Option<i64>>,
) -> Tensor {
self.f_avg_pool2d_out(
out,
kernel_size,
stride,
padding,
ceil_mode,
count_include_pad,
divisor_override,
)
.unwrap()
}
pub fn avg_pool3d(
&self,
kernel_size: impl IntList,
stride: impl IntList,
padding: impl IntList,
ceil_mode: bool,
count_include_pad: bool,
divisor_override: impl Into<Option<i64>>,
) -> Tensor {
self.f_avg_pool3d(
kernel_size,
stride,
padding,
ceil_mode,
count_include_pad,
divisor_override,
)
.unwrap()
}
pub fn avg_pool3d_backward(
&self,
grad_output: &Tensor,
kernel_size: impl IntList,
stride: impl IntList,
padding: impl IntList,
ceil_mode: bool,
count_include_pad: bool,
divisor_override: impl Into<Option<i64>>,
) -> Tensor {
self.f_avg_pool3d_backward(
grad_output,
kernel_size,
stride,
padding,
ceil_mode,
count_include_pad,
divisor_override,
)
.unwrap()
}
pub fn avg_pool3d_backward_grad_input(
&self,
grad_input: &Tensor,
grad_output: &Tensor,
kernel_size: impl IntList,
stride: impl IntList,
padding: impl IntList,
ceil_mode: bool,
count_include_pad: bool,
divisor_override: impl Into<Option<i64>>,
) -> Tensor {
self.f_avg_pool3d_backward_grad_input(
grad_input,
grad_output,
kernel_size,
stride,
padding,
ceil_mode,
count_include_pad,
divisor_override,
)
.unwrap()
}
pub fn avg_pool3d_out(
&self,
out: &Tensor,
kernel_size: impl IntList,
stride: impl IntList,
padding: impl IntList,
ceil_mode: bool,
count_include_pad: bool,
divisor_override: impl Into<Option<i64>>,
) -> Tensor {
self.f_avg_pool3d_out(
out,
kernel_size,
stride,
padding,
ceil_mode,
count_include_pad,
divisor_override,
)
.unwrap()
}
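/// Batched matrix multiply with an additive term:
/// `beta * self + alpha * (batch1 @ batch2)`, where all three tensors are
/// batches of matrices. A minimal sketch, assuming `bias`, `a` and `b` are
/// existing tensors of shapes `(B, N, P)`, `(B, N, M)` and `(B, M, P)`:
/// `let y = bias.baddbmm(&a, &b, 1.0, 1.0);`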
pub fn baddbmm<S: Into<Scalar>>(
&self,
batch1: &Tensor,
batch2: &Tensor,
beta: S,
alpha: S,
) -> Tensor {
self.f_baddbmm(batch1, batch2, beta, alpha).unwrap()
}
pub fn baddbmm_(&mut self, batch1: &Tensor, batch2: &Tensor) -> Tensor {
self.f_baddbmm_(batch1, batch2).unwrap()
}
pub fn baddbmm_out(&self, out: &Tensor, batch1: &Tensor, batch2: &Tensor) -> Tensor {
self.f_baddbmm_out(out, batch1, batch2).unwrap()
}
pub fn bartlett_window(window_length: i64, options: (Kind, Device)) -> Tensor {
Tensor::f_bartlett_window(window_length, options).unwrap()
}
pub fn bartlett_window_out(out: &Tensor, window_length: i64) -> Tensor {
Tensor::f_bartlett_window_out(out, window_length).unwrap()
}
pub fn bartlett_window_periodic(
window_length: i64,
periodic: bool,
options: (Kind, Device),
) -> Tensor {
Tensor::f_bartlett_window_periodic(window_length, periodic, options).unwrap()
}
pub fn bartlett_window_periodic_out(
out: &Tensor,
window_length: i64,
periodic: bool,
) -> Tensor {
Tensor::f_bartlett_window_periodic_out(out, window_length, periodic).unwrap()
}
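/// Batch normalization as a single fused call. `weight`, `bias`,
/// `running_mean` and `running_var` are optional 1-D tensors with one entry
/// per channel; with `training` set to `false` the running statistics are
/// used directly. A minimal inference-time sketch, assuming `w`, `b`, `rm`
/// and `rv` are such tensors:
/// `let y = x.batch_norm(Some(&w), Some(&b), Some(&rm), Some(&rv), false, 0.1, 1e-5, true);`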
pub fn batch_norm<T: Borrow<Tensor>>(
&self,
weight: Option<T>,
bias: Option<T>,
running_mean: Option<T>,
running_var: Option<T>,
training: bool,
momentum: f64,
eps: f64,
cudnn_enabled: bool,
) -> Tensor {
self.f_batch_norm(
weight,
bias,
running_mean,
running_var,
training,
momentum,
eps,
cudnn_enabled,
)
.unwrap()
}
pub fn batch_norm_backward_elemt<T: Borrow<Tensor>>(
&self,
grad_out: &Tensor,
mean: &Tensor,
invstd: &Tensor,
weight: Option<T>,
sum_dy: &Tensor,
sum_dy_xmu: &Tensor,
count: &Tensor,
) -> Tensor {
self.f_batch_norm_backward_elemt(grad_out, mean, invstd, weight, sum_dy, sum_dy_xmu, count)
.unwrap()
}
pub fn batch_norm_backward_elemt_out<T: Borrow<Tensor>>(
&self,
out: &Tensor,
grad_out: &Tensor,
mean: &Tensor,
invstd: &Tensor,
weight: Option<T>,
sum_dy: &Tensor,
sum_dy_xmu: &Tensor,
count: &Tensor,
) -> Tensor {
self.f_batch_norm_backward_elemt_out(
out, grad_out, mean, invstd, weight, sum_dy, sum_dy_xmu, count,
)
.unwrap()
}
pub fn batch_norm_backward_reduce<T: Borrow<Tensor>>(
&self,
grad_out: &Tensor,
mean: &Tensor,
invstd: &Tensor,
weight: Option<T>,
input_g: bool,
weight_g: bool,
bias_g: bool,
) -> (Tensor, Tensor, Tensor, Tensor) {
self.f_batch_norm_backward_reduce(grad_out, mean, invstd, weight, input_g, weight_g, bias_g)
.unwrap()
}
pub fn batch_norm_backward_reduce_out<T: Borrow<Tensor>>(
&self,
out0: &Tensor,
out1: &Tensor,
out2: &Tensor,
out3: &Tensor,
grad_out: &Tensor,
mean: &Tensor,
invstd: &Tensor,
weight: Option<T>,
input_g: bool,
weight_g: bool,
bias_g: bool,
) -> (Tensor, Tensor, Tensor, Tensor) {
self.f_batch_norm_backward_reduce_out(
out0, out1, out2, out3, grad_out, mean, invstd, weight, input_g, weight_g, bias_g,
)
.unwrap()
}
pub fn batch_norm_elemt<T: Borrow<Tensor>>(
&self,
weight: Option<T>,
bias: Option<T>,
mean: &Tensor,
invstd: &Tensor,
eps: f64,
) -> Tensor {
self.f_batch_norm_elemt(weight, bias, mean, invstd, eps).unwrap()
}
pub fn batch_norm_elemt_out<T: Borrow<Tensor>>(
&self,
out: &Tensor,
weight: Option<T>,
bias: Option<T>,
mean: &Tensor,
invstd: &Tensor,
eps: f64,
) -> Tensor {
self.f_batch_norm_elemt_out(out, weight, bias, mean, invstd, eps).unwrap()
}
pub fn batch_norm_gather_stats<T: Borrow<Tensor>>(
&self,
mean: &Tensor,
invstd: &Tensor,
running_mean: Option<T>,
running_var: Option<T>,
momentum: f64,
eps: f64,
count: i64,
) -> (Tensor, Tensor) {
self.f_batch_norm_gather_stats(
mean,
invstd,
running_mean,
running_var,
momentum,
eps,
count,
)
.unwrap()
}
pub fn batch_norm_gather_stats_out<T: Borrow<Tensor>>(
&self,
out0: &Tensor,
out1: &Tensor,
mean: &Tensor,
invstd: &Tensor,
running_mean: Option<T>,
running_var: Option<T>,
momentum: f64,
eps: f64,
count: i64,
) -> (Tensor, Tensor) {
self.f_batch_norm_gather_stats_out(
out0,
out1,
mean,
invstd,
running_mean,
running_var,
momentum,
eps,
count,
)
.unwrap()
}
pub fn batch_norm_gather_stats_with_counts<T: Borrow<Tensor>>(
&self,
mean: &Tensor,
invstd: &Tensor,
running_mean: Option<T>,
running_var: Option<T>,
momentum: f64,
eps: f64,
counts: &Tensor,
) -> (Tensor, Tensor) {
self.f_batch_norm_gather_stats_with_counts(
mean,
invstd,
running_mean,
running_var,
momentum,
eps,
counts,
)
.unwrap()
}
pub fn batch_norm_gather_stats_with_counts_out<T: Borrow<Tensor>>(
&self,
out0: &Tensor,
out1: &Tensor,
mean: &Tensor,
invstd: &Tensor,
running_mean: Option<T>,
running_var: Option<T>,
momentum: f64,
eps: f64,
counts: &Tensor,
) -> (Tensor, Tensor) {
self.f_batch_norm_gather_stats_with_counts_out(
out0,
out1,
mean,
invstd,
running_mean,
running_var,
momentum,
eps,
counts,
)
.unwrap()
}
pub fn batch_norm_stats(&self, eps: f64) -> (Tensor, Tensor) {
self.f_batch_norm_stats(eps).unwrap()
}
pub fn batch_norm_stats_out(&self, out0: &Tensor, out1: &Tensor, eps: f64) -> (Tensor, Tensor) {
self.f_batch_norm_stats_out(out0, out1, eps).unwrap()
}
pub fn batch_norm_update_stats<T: Borrow<Tensor>>(
&self,
running_mean: Option<T>,
running_var: Option<T>,
momentum: f64,
) -> (Tensor, Tensor) {
self.f_batch_norm_update_stats(running_mean, running_var, momentum).unwrap()
}
pub fn batch_norm_update_stats_out<T: Borrow<Tensor>>(
&self,
out0: &Tensor,
out1: &Tensor,
running_mean: Option<T>,
running_var: Option<T>,
momentum: f64,
) -> (Tensor, Tensor) {
self.f_batch_norm_update_stats_out(out0, out1, running_mean, running_var, momentum).unwrap()
}
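/// Draws Bernoulli samples. `bernoulli` treats each element of `self` as a
/// probability in `[0, 1]`; the `_p`/`_float_` variants take a single
/// scalar probability instead, and the trailing-underscore variants sample
/// in place. A minimal sketch, assuming `probs` holds values in `[0, 1]`:
/// `let mask = probs.bernoulli();`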
pub fn bernoulli(&self) -> Tensor {
self.f_bernoulli().unwrap()
}
pub fn bernoulli_(&mut self, p: &Tensor) -> Tensor {
self.f_bernoulli_(p).unwrap()
}
pub fn bernoulli_float_(&mut self, p: f64) -> Tensor {
self.f_bernoulli_float_(p).unwrap()
}
pub fn bernoulli_p(&self, p: f64) -> Tensor {
self.f_bernoulli_p(p).unwrap()
}
pub fn bernoulli_tensor(&self, p: &Tensor) -> Tensor {
self.f_bernoulli_tensor(p).unwrap()
}
pub fn bilinear<T: Borrow<Tensor>>(
input1: &Tensor,
input2: &Tensor,
weight: &Tensor,
bias: Option<T>,
) -> Tensor {
Tensor::f_bilinear(input1, input2, weight, bias).unwrap()
}
pub fn binary_cross_entropy<T: Borrow<Tensor>>(
&self,
target: &Tensor,
weight: Option<T>,
reduction: crate::Reduction,
) -> Tensor {
self.f_binary_cross_entropy(target, weight, reduction).unwrap()
}
pub fn binary_cross_entropy_backward<T: Borrow<Tensor>>(
&self,
grad_output: &Tensor,
target: &Tensor,
weight: Option<T>,
reduction: crate::Reduction,
) -> Tensor {
self.f_binary_cross_entropy_backward(grad_output, target, weight, reduction).unwrap()
}
pub fn binary_cross_entropy_backward_grad_input<T: Borrow<Tensor>>(
&self,
grad_input: &Tensor,
grad_output: &Tensor,
target: &Tensor,
weight: Option<T>,
reduction: crate::Reduction,
) -> Tensor {
self.f_binary_cross_entropy_backward_grad_input(
grad_input,
grad_output,
target,
weight,
reduction,
)
.unwrap()
}
pub fn binary_cross_entropy_out<T: Borrow<Tensor>>(
&self,
out: &Tensor,
target: &Tensor,
weight: Option<T>,
reduction: crate::Reduction,
) -> Tensor {
self.f_binary_cross_entropy_out(out, target, weight, reduction).unwrap()
}
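/// Numerically stable binary cross-entropy that takes raw logits rather
/// than probabilities; `pos_weight` optionally rescales the positive class.
/// A minimal sketch, assuming `logits` and `targets` are existing tensors
/// of the same shape (the turbofish pins the optional-weight type when both
/// weights are `None`):
/// `let loss = logits.binary_cross_entropy_with_logits::<Tensor>(&targets, None, None, crate::Reduction::Mean);`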
pub fn binary_cross_entropy_with_logits<T: Borrow<Tensor>>(
&self,
target: &Tensor,
weight: Option<T>,
pos_weight: Option<T>,
reduction: crate::Reduction,
) -> Tensor {
self.f_binary_cross_entropy_with_logits(target, weight, pos_weight, reduction).unwrap()
}
pub fn binary_cross_entropy_with_logits_out<T: Borrow<Tensor>>(
&self,
out: &Tensor,
target: &Tensor,
weight: Option<T>,
pos_weight: Option<T>,
reduction: crate::Reduction,
) -> Tensor {
self.f_binary_cross_entropy_with_logits_out(out, target, weight, pos_weight, reduction)
.unwrap()
}
pub fn bincount<T: Borrow<Tensor>>(&self, weights: Option<T>, minlength: i64) -> Tensor {
self.f_bincount(weights, minlength).unwrap()
}
pub fn bincount_out<T: Borrow<Tensor>>(
&self,
out: &Tensor,
weights: Option<T>,
minlength: i64,
) -> Tensor {
self.f_bincount_out(out, weights, minlength).unwrap()
}
pub fn binomial(count: &Tensor, prob: &Tensor) -> Tensor {
Tensor::f_binomial(count, prob).unwrap()
}
pub fn binomial_out(out: &Tensor, count: &Tensor, prob: &Tensor) -> Tensor {
Tensor::f_binomial_out(out, count, prob).unwrap()
}
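/// Bitwise AND with a scalar. The naming throughout this generated API is
/// systematic: a trailing `_` means in place, `_out` writes into a
/// caller-provided tensor, `_scalar`/`_tensor` suffixes select the
/// right-hand-side type, and every method has a fallible `f_`-prefixed twin
/// returning a `Result`. A minimal sketch, assuming `t` holds integer or
/// boolean values: `let masked = t.bitwise_and(0x0f_i64);`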
pub fn bitwise_and<S: Into<Scalar>>(&self, other: S) -> Tensor {
self.f_bitwise_and(other).unwrap()
}
pub fn bitwise_and_<S: Into<Scalar>>(&mut self, other: S) -> Tensor {
self.f_bitwise_and_(other).unwrap()
}
pub fn bitwise_and_scalar_out<S: Into<Scalar>>(&self, out: &Tensor, other: S) -> Tensor {
self.f_bitwise_and_scalar_out(out, other).unwrap()
}
pub fn bitwise_and_scalar_tensor<S: Into<Scalar>>(self_scalar: S, other: &Tensor) -> Tensor {
Tensor::f_bitwise_and_scalar_tensor(self_scalar, other).unwrap()
}
pub fn bitwise_and_scalar_tensor_out<S: Into<Scalar>>(
out: &Tensor,
self_scalar: S,
other: &Tensor,
) -> Tensor {
Tensor::f_bitwise_and_scalar_tensor_out(out, self_scalar, other).unwrap()
}
pub fn bitwise_and_tensor(&self, other: &Tensor) -> Tensor {
self.f_bitwise_and_tensor(other).unwrap()
}
pub fn bitwise_and_tensor_(&mut self, other: &Tensor) -> Tensor {
self.f_bitwise_and_tensor_(other).unwrap()
}
pub fn bitwise_and_tensor_out(&self, out: &Tensor, other: &Tensor) -> Tensor {
self.f_bitwise_and_tensor_out(out, other).unwrap()
}
pub fn bitwise_left_shift(&self, other: &Tensor) -> Tensor {
self.f_bitwise_left_shift(other).unwrap()
}
pub fn bitwise_left_shift_(&mut self, other: &Tensor) -> Tensor {
self.f_bitwise_left_shift_(other).unwrap()
}
pub fn bitwise_left_shift_scalar_tensor<S: Into<Scalar>>(
self_scalar: S,
other: &Tensor,
) -> Tensor {
Tensor::f_bitwise_left_shift_scalar_tensor(self_scalar, other).unwrap()
}
pub fn bitwise_left_shift_scalar_tensor_out<S: Into<Scalar>>(
out: &Tensor,
self_scalar: S,
other: &Tensor,
) -> Tensor {
Tensor::f_bitwise_left_shift_scalar_tensor_out(out, self_scalar, other).unwrap()
}
pub fn bitwise_left_shift_tensor_out(&self, out: &Tensor, other: &Tensor) -> Tensor {
self.f_bitwise_left_shift_tensor_out(out, other).unwrap()
}
pub fn bitwise_left_shift_tensor_scalar<S: Into<Scalar>>(&self, other: S) -> Tensor {
self.f_bitwise_left_shift_tensor_scalar(other).unwrap()
}
pub fn bitwise_left_shift_tensor_scalar_<S: Into<Scalar>>(&mut self, other: S) -> Tensor {
self.f_bitwise_left_shift_tensor_scalar_(other).unwrap()
}
pub fn bitwise_left_shift_tensor_scalar_out<S: Into<Scalar>>(
&self,
out: &Tensor,
other: S,
) -> Tensor {
self.f_bitwise_left_shift_tensor_scalar_out(out, other).unwrap()
}
pub fn bitwise_not(&self) -> Tensor {
self.f_bitwise_not().unwrap()
}
pub fn bitwise_not_(&mut self) -> Tensor {
self.f_bitwise_not_().unwrap()
}
pub fn bitwise_not_out(&self, out: &Tensor) -> Tensor {
self.f_bitwise_not_out(out).unwrap()
}
pub fn bitwise_or<S: Into<Scalar>>(&self, other: S) -> Tensor {
self.f_bitwise_or(other).unwrap()
}
pub fn bitwise_or_<S: Into<Scalar>>(&mut self, other: S) -> Tensor {
self.f_bitwise_or_(other).unwrap()
}
pub fn bitwise_or_scalar_out<S: Into<Scalar>>(&self, out: &Tensor, other: S) -> Tensor {
self.f_bitwise_or_scalar_out(out, other).unwrap()
}
pub fn bitwise_or_scalar_tensor<S: Into<Scalar>>(self_scalar: S, other: &Tensor) -> Tensor {
Tensor::f_bitwise_or_scalar_tensor(self_scalar, other).unwrap()
}
pub fn bitwise_or_scalar_tensor_out<S: Into<Scalar>>(
out: &Tensor,
self_scalar: S,
other: &Tensor,
) -> Tensor {
Tensor::f_bitwise_or_scalar_tensor_out(out, self_scalar, other).unwrap()
}
pub fn bitwise_or_tensor(&self, other: &Tensor) -> Tensor {
self.f_bitwise_or_tensor(other).unwrap()
}
pub fn bitwise_or_tensor_(&mut self, other: &Tensor) -> Tensor {
self.f_bitwise_or_tensor_(other).unwrap()
}
pub fn bitwise_or_tensor_out(&self, out: &Tensor, other: &Tensor) -> Tensor {
self.f_bitwise_or_tensor_out(out, other).unwrap()
}
pub fn bitwise_right_shift(&self, other: &Tensor) -> Tensor {
self.f_bitwise_right_shift(other).unwrap()
}
pub fn bitwise_right_shift_(&mut self, other: &Tensor) -> Tensor {
self.f_bitwise_right_shift_(other).unwrap()
}
pub fn bitwise_right_shift_scalar_tensor<S: Into<Scalar>>(
self_scalar: S,
other: &Tensor,
) -> Tensor {
Tensor::f_bitwise_right_shift_scalar_tensor(self_scalar, other).unwrap()
}
pub fn bitwise_right_shift_scalar_tensor_out<S: Into<Scalar>>(
out: &Tensor,
self_scalar: S,
other: &Tensor,
) -> Tensor {
Tensor::f_bitwise_right_shift_scalar_tensor_out(out, self_scalar, other).unwrap()
}
pub fn bitwise_right_shift_tensor_out(&self, out: &Tensor, other: &Tensor) -> Tensor {
self.f_bitwise_right_shift_tensor_out(out, other).unwrap()
}
pub fn bitwise_right_shift_tensor_scalar<S: Into<Scalar>>(&self, other: S) -> Tensor {
self.f_bitwise_right_shift_tensor_scalar(other).unwrap()
}
pub fn bitwise_right_shift_tensor_scalar_<S: Into<Scalar>>(&mut self, other: S) -> Tensor {
self.f_bitwise_right_shift_tensor_scalar_(other).unwrap()
}
pub fn bitwise_right_shift_tensor_scalar_out<S: Into<Scalar>>(
&self,
out: &Tensor,
other: S,
) -> Tensor {
self.f_bitwise_right_shift_tensor_scalar_out(out, other).unwrap()
}
pub fn bitwise_xor<S: Into<Scalar>>(&self, other: S) -> Tensor {
self.f_bitwise_xor(other).unwrap()
}
pub fn bitwise_xor_<S: Into<Scalar>>(&mut self, other: S) -> Tensor {
self.f_bitwise_xor_(other).unwrap()
}
pub fn bitwise_xor_scalar_out<S: Into<Scalar>>(&self, out: &Tensor, other: S) -> Tensor {
self.f_bitwise_xor_scalar_out(out, other).unwrap()
}
pub fn bitwise_xor_scalar_tensor<S: Into<Scalar>>(self_scalar: S, other: &Tensor) -> Tensor {
Tensor::f_bitwise_xor_scalar_tensor(self_scalar, other).unwrap()
}
pub fn bitwise_xor_scalar_tensor_out<S: Into<Scalar>>(
out: &Tensor,
self_scalar: S,
other: &Tensor,
) -> Tensor {
Tensor::f_bitwise_xor_scalar_tensor_out(out, self_scalar, other).unwrap()
}
pub fn bitwise_xor_tensor(&self, other: &Tensor) -> Tensor {
self.f_bitwise_xor_tensor(other).unwrap()
}
pub fn bitwise_xor_tensor_(&mut self, other: &Tensor) -> Tensor {
self.f_bitwise_xor_tensor_(other).unwrap()
}
pub fn bitwise_xor_tensor_out(&self, out: &Tensor, other: &Tensor) -> Tensor {
self.f_bitwise_xor_tensor_out(out, other).unwrap()
}
pub fn blackman_window(window_length: i64, options: (Kind, Device)) -> Tensor {
Tensor::f_blackman_window(window_length, options).unwrap()
}
pub fn blackman_window_out(out: &Tensor, window_length: i64) -> Tensor {
Tensor::f_blackman_window_out(out, window_length).unwrap()
}
pub fn blackman_window_periodic(
window_length: i64,
periodic: bool,
options: (Kind, Device),
) -> Tensor {
Tensor::f_blackman_window_periodic(window_length, periodic, options).unwrap()
}
pub fn blackman_window_periodic_out(
out: &Tensor,
window_length: i64,
periodic: bool,
) -> Tensor {
Tensor::f_blackman_window_periodic_out(out, window_length, periodic).unwrap()
}
pub fn block_diag<T: Borrow<Tensor>>(tensors: &[T]) -> Tensor {
Tensor::f_block_diag(tensors).unwrap()
}
pub fn block_diag_out<T: Borrow<Tensor>>(out: &Tensor, tensors: &[T]) -> Tensor {
Tensor::f_block_diag_out(out, tensors).unwrap()
}
pub fn bmm(&self, mat2: &Tensor) -> Tensor {
self.f_bmm(mat2).unwrap()
}
pub fn bmm_out(&self, out: &Tensor, mat2: &Tensor) -> Tensor {
self.f_bmm_out(out, mat2).unwrap()
}
pub fn broadcast_tensors<T: Borrow<Tensor>>(tensors: &[T]) -> Vec<Tensor> {
Tensor::f_broadcast_tensors(tensors).unwrap()
}
pub fn broadcast_to(&self, size: impl IntList) -> Tensor {
self.f_broadcast_to(size).unwrap()
}
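/// Maps each element of `self` to the index of the bucket it falls into,
/// given a 1-D sorted `boundaries` tensor. `out_int32` selects a 32-bit
/// index dtype and `right` chooses which side of a boundary counts as
/// inside. A minimal sketch, assuming `vals` and a sorted `edges` tensor
/// exist: `let bins = vals.bucketize(&edges, false, false);`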
pub fn bucketize(&self, boundaries: &Tensor, out_int32: bool, right: bool) -> Tensor {
self.f_bucketize(boundaries, out_int32, right).unwrap()
}
pub fn bucketize_scalar<S: Into<Scalar>>(
self_scalar: S,
boundaries: &Tensor,
out_int32: bool,
right: bool,
) -> Tensor {
Tensor::f_bucketize_scalar(self_scalar, boundaries, out_int32, right).unwrap()
}
pub fn bucketize_scalar_out<S: Into<Scalar>>(
out: &Tensor,
self_scalar: S,
boundaries: &Tensor,
out_int32: bool,
right: bool,
) -> Tensor {
Tensor::f_bucketize_scalar_out(out, self_scalar, boundaries, out_int32, right).unwrap()
}
pub fn bucketize_tensor_out(
&self,
out: &Tensor,
boundaries: &Tensor,
out_int32: bool,
right: bool,
) -> Tensor {
self.f_bucketize_tensor_out(out, boundaries, out_int32, right).unwrap()
}
pub fn can_cast(from: Kind, to: Kind) -> bool {
Tensor::f_can_cast(from, to).unwrap()
}
pub fn cartesian_prod<T: Borrow<Tensor>>(tensors: &[T]) -> Tensor {
Tensor::f_cartesian_prod(tensors).unwrap()
}
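/// Concatenates a slice of tensors along `dim`; the tensors must agree in
/// every other dimension. A minimal sketch, assuming `a` and `b` are
/// existing tensors with compatible shapes:
/// `let ab = Tensor::cat(&[&a, &b], 0);`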
pub fn cat<T: Borrow<Tensor>>(tensors: &[T], dim: i64) -> Tensor {
Tensor::f_cat(tensors, dim).unwrap()
}
pub fn cat_out<T: Borrow<Tensor>>(out: &Tensor, tensors: &[T], dim: i64) -> Tensor {
Tensor::f_cat_out(out, tensors, dim).unwrap()
}
pub fn cauchy(&self, median: f64, sigma: f64) -> Tensor {
self.f_cauchy(median, sigma).unwrap()
}
pub fn cauchy_(&mut self, median: f64, sigma: f64) -> Tensor {
self.f_cauchy_(median, sigma).unwrap()
}
pub fn cauchy_out(&self, out: &Tensor, median: f64, sigma: f64) -> Tensor {
self.f_cauchy_out(out, median, sigma).unwrap()
}
pub fn ccol_indices(&self) -> Tensor {
self.f_ccol_indices().unwrap()
}
pub fn ccol_indices_copy(&self) -> Tensor {
self.f_ccol_indices_copy().unwrap()
}
pub fn ccol_indices_copy_out(&self, out: &Tensor) -> Tensor {
self.f_ccol_indices_copy_out(out).unwrap()
}
pub fn cdist(x1: &Tensor, x2: &Tensor, p: f64, compute_mode: impl Into<Option<i64>>) -> Tensor {
Tensor::f_cdist(x1, x2, p, compute_mode).unwrap()
}
pub fn ceil(&self) -> Tensor {
self.f_ceil().unwrap()
}
pub fn ceil_(&mut self) -> Tensor {
self.f_ceil_().unwrap()
}
pub fn ceil_out(&self, out: &Tensor) -> Tensor {
self.f_ceil_out(out).unwrap()
}
pub fn celu(&self) -> Tensor {
self.f_celu().unwrap()
}
pub fn celu_(&mut self) -> Tensor {
self.f_celu_().unwrap()
}
pub fn celu_out(&self, out: &Tensor) -> Tensor {
self.f_celu_out(out).unwrap()
}
pub fn chain_matmul<T: Borrow<Tensor>>(matrices: &[T]) -> Tensor {
Tensor::f_chain_matmul(matrices).unwrap()
}
pub fn chain_matmul_out<T: Borrow<Tensor>>(out: &Tensor, matrices: &[T]) -> Tensor {
Tensor::f_chain_matmul_out(out, matrices).unwrap()
}
pub fn chalf(&self) -> Tensor {
self.f_chalf().unwrap()
}
pub fn channel_shuffle(&self, groups: i64) -> Tensor {
self.f_channel_shuffle(groups).unwrap()
}
pub fn channel_shuffle_out(&self, out: &Tensor, groups: i64) -> Tensor {
self.f_channel_shuffle_out(out, groups).unwrap()
}
pub fn cholesky(&self, upper: bool) -> Tensor {
self.f_cholesky(upper).unwrap()
}
pub fn cholesky_inverse(&self, upper: bool) -> Tensor {
self.f_cholesky_inverse(upper).unwrap()
}
pub fn cholesky_inverse_out(&self, out: &Tensor, upper: bool) -> Tensor {
self.f_cholesky_inverse_out(out, upper).unwrap()
}
pub fn cholesky_out(&self, out: &Tensor, upper: bool) -> Tensor {
self.f_cholesky_out(out, upper).unwrap()
}
pub fn cholesky_solve(&self, input2: &Tensor, upper: bool) -> Tensor {
self.f_cholesky_solve(input2, upper).unwrap()
}
pub fn cholesky_solve_out(&self, out: &Tensor, input2: &Tensor, upper: bool) -> Tensor {
self.f_cholesky_solve_out(out, input2, upper).unwrap()
}
pub fn choose_qparams_optimized(
&self,
numel: i64,
n_bins: i64,
ratio: f64,
bit_width: i64,
) -> (Tensor, Tensor) {
self.f_choose_qparams_optimized(numel, n_bins, ratio, bit_width).unwrap()
}
pub fn chunk(&self, chunks: i64, dim: i64) -> Vec<Tensor> {
self.f_chunk(chunks, dim).unwrap()
}
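/// Clamps every element into `[min, max]`. This scalar form requires both
/// bounds; use `clamp_min`/`clamp_max` for a one-sided bound, or
/// `clamp_tensor` for optional, element-wise tensor bounds. A minimal
/// sketch: `let y = t.clamp(0.0, 1.0);`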
pub fn clamp<S: Into<Scalar>>(&self, min: S, max: S) -> Tensor {
self.f_clamp(min, max).unwrap()
}
pub fn clamp_<S: Into<Scalar>>(&mut self, min: S, max: S) -> Tensor {
self.f_clamp_(min, max).unwrap()
}
pub fn clamp_max<S: Into<Scalar>>(&self, max: S) -> Tensor {
self.f_clamp_max(max).unwrap()
}
pub fn clamp_max_<S: Into<Scalar>>(&mut self, max: S) -> Tensor {
self.f_clamp_max_(max).unwrap()
}
pub fn clamp_max_out<S: Into<Scalar>>(&self, out: &Tensor, max: S) -> Tensor {
self.f_clamp_max_out(out, max).unwrap()
}
pub fn clamp_max_tensor(&self, max: &Tensor) -> Tensor {
self.f_clamp_max_tensor(max).unwrap()
}
pub fn clamp_max_tensor_(&mut self, max: &Tensor) -> Tensor {
self.f_clamp_max_tensor_(max).unwrap()
}
pub fn clamp_max_tensor_out(&self, out: &Tensor, max: &Tensor) -> Tensor {
self.f_clamp_max_tensor_out(out, max).unwrap()
}
pub fn clamp_min<S: Into<Scalar>>(&self, min: S) -> Tensor {
self.f_clamp_min(min).unwrap()
}
pub fn clamp_min_<S: Into<Scalar>>(&mut self, min: S) -> Tensor {
self.f_clamp_min_(min).unwrap()
}
pub fn clamp_min_out<S: Into<Scalar>>(&self, out: &Tensor, min: S) -> Tensor {
self.f_clamp_min_out(out, min).unwrap()
}
pub fn clamp_min_tensor(&self, min: &Tensor) -> Tensor {
self.f_clamp_min_tensor(min).unwrap()
}
pub fn clamp_min_tensor_(&mut self, min: &Tensor) -> Tensor {
self.f_clamp_min_tensor_(min).unwrap()
}
pub fn clamp_min_tensor_out(&self, out: &Tensor, min: &Tensor) -> Tensor {
self.f_clamp_min_tensor_out(out, min).unwrap()
}
pub fn clamp_out<S: Into<Scalar>>(&self, out: &Tensor, min: S, max: S) -> Tensor {
self.f_clamp_out(out, min, max).unwrap()
}
pub fn clamp_tensor<T: Borrow<Tensor>>(&self, min: Option<T>, max: Option<T>) -> Tensor {
self.f_clamp_tensor(min, max).unwrap()
}
pub fn clamp_tensor_<T: Borrow<Tensor>>(&mut self, min: Option<T>, max: Option<T>) -> Tensor {
self.f_clamp_tensor_(min, max).unwrap()
}
pub fn clamp_tensor_out<T: Borrow<Tensor>>(
&self,
out: &Tensor,
min: Option<T>,
max: Option<T>,
) -> Tensor {
self.f_clamp_tensor_out(out, min, max).unwrap()
}
pub fn clip<S: Into<Scalar>>(&self, min: S, max: S) -> Tensor {
self.f_clip(min, max).unwrap()
}
pub fn clip_<S: Into<Scalar>>(&mut self, min: S, max: S) -> Tensor {
self.f_clip_(min, max).unwrap()
}
pub fn clip_out<S: Into<Scalar>>(&self, out: &Tensor, min: S, max: S) -> Tensor {
self.f_clip_out(out, min, max).unwrap()
}
pub fn clip_tensor<T: Borrow<Tensor>>(&self, min: Option<T>, max: Option<T>) -> Tensor {
self.f_clip_tensor(min, max).unwrap()
}
pub fn clip_tensor_<T: Borrow<Tensor>>(&mut self, min: Option<T>, max: Option<T>) -> Tensor {
self.f_clip_tensor_(min, max).unwrap()
}
pub fn clip_tensor_out<T: Borrow<Tensor>>(
&self,
out: &Tensor,
min: Option<T>,
max: Option<T>,
) -> Tensor {
self.f_clip_tensor_out(out, min, max).unwrap()
}
pub fn clone(&self, out: &Tensor) -> Tensor {
self.f_clone(out).unwrap()
}
pub fn coalesce(&self) -> Tensor {
self.f_coalesce().unwrap()
}
pub fn col2im(
&self,
output_size: impl IntList,
kernel_size: impl IntList,
dilation: impl IntList,
padding: impl IntList,
stride: impl IntList,
) -> Tensor {
self.f_col2im(output_size, kernel_size, dilation, padding, stride).unwrap()
}
pub fn col2im_out(
&self,
out: &Tensor,
output_size: impl IntList,
kernel_size: impl IntList,
dilation: impl IntList,
padding: impl IntList,
stride: impl IntList,
) -> Tensor {
self.f_col2im_out(out, output_size, kernel_size, dilation, padding, stride).unwrap()
}
pub fn col_indices(&self) -> Tensor {
self.f_col_indices().unwrap()
}
pub fn col_indices_copy(&self) -> Tensor {
self.f_col_indices_copy().unwrap()
}
pub fn col_indices_copy_out(&self, out: &Tensor) -> Tensor {
self.f_col_indices_copy_out(out).unwrap()
}
pub fn column_stack<T: Borrow<Tensor>>(tensors: &[T]) -> Tensor {
Tensor::f_column_stack(tensors).unwrap()
}
pub fn column_stack_out<T: Borrow<Tensor>>(out: &Tensor, tensors: &[T]) -> Tensor {
Tensor::f_column_stack_out(out, tensors).unwrap()
}
pub fn combinations(&self, r: i64, with_replacement: bool) -> Tensor {
self.f_combinations(r, with_replacement).unwrap()
}
pub fn complex(real: &Tensor, imag: &Tensor) -> Tensor {
Tensor::f_complex(real, imag).unwrap()
}
pub fn complex_out(out: &Tensor, real: &Tensor, imag: &Tensor) -> Tensor {
Tensor::f_complex_out(out, real, imag).unwrap()
}
pub fn concat<T: Borrow<Tensor>>(tensors: &[T], dim: i64) -> Tensor {
Tensor::f_concat(tensors, dim).unwrap()
}
pub fn concat_out<T: Borrow<Tensor>>(out: &Tensor, tensors: &[T], dim: i64) -> Tensor {
Tensor::f_concat_out(out, tensors, dim).unwrap()
}
pub fn concatenate<T: Borrow<Tensor>>(tensors: &[T], dim: i64) -> Tensor {
Tensor::f_concatenate(tensors, dim).unwrap()
}
pub fn concatenate_out<T: Borrow<Tensor>>(out: &Tensor, tensors: &[T], dim: i64) -> Tensor {
Tensor::f_concatenate_out(out, tensors, dim).unwrap()
}
pub fn conj(&self) -> Tensor {
self.f_conj().unwrap()
}
pub fn conj_physical(&self) -> Tensor {
self.f_conj_physical().unwrap()
}
pub fn conj_physical_(&mut self) -> Tensor {
self.f_conj_physical_().unwrap()
}
pub fn conj_physical_out(&self, out: &Tensor) -> Tensor {
self.f_conj_physical_out(out).unwrap()
}
pub fn constant_pad_nd(&self, pad: impl IntList) -> Tensor {
self.f_constant_pad_nd(pad).unwrap()
}
pub fn constant_pad_nd_out(&self, out: &Tensor, pad: impl IntList) -> Tensor {
self.f_constant_pad_nd_out(out, pad).unwrap()
}
pub fn contiguous(&self) -> Tensor {
self.f_contiguous().unwrap()
}
pub fn conv1d<T: Borrow<Tensor>>(
&self,
weight: &Tensor,
bias: Option<T>,
stride: impl IntList,
padding: impl IntList,
dilation: impl IntList,
groups: i64,
) -> Tensor {
self.f_conv1d(weight, bias, stride, padding, dilation, groups).unwrap()
}
pub fn conv1d_padding<T: Borrow<Tensor>>(
&self,
weight: &Tensor,
bias: Option<T>,
stride: impl IntList,
padding: &str,
dilation: impl IntList,
groups: i64,
) -> Tensor {
self.f_conv1d_padding(weight, bias, stride, padding, dilation, groups).unwrap()
}
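/// 2-D convolution over an NCHW input with a `weight` of shape
/// `(out_channels, in_channels / groups, kH, kW)`. A minimal sketch,
/// assuming `x`, `weight` and `bias` are existing tensors:
/// `let y = x.conv2d(&weight, Some(&bias), &[1, 1], &[1, 1], &[1, 1], 1);`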
pub fn conv2d<T: Borrow<Tensor>>(
&self,
weight: &Tensor,
bias: Option<T>,
stride: impl IntList,
padding: impl IntList,
dilation: impl IntList,
groups: i64,
) -> Tensor {
self.f_conv2d(weight, bias, stride, padding, dilation, groups).unwrap()
}
pub fn conv2d_padding<T: Borrow<Tensor>>(
&self,
weight: &Tensor,
bias: Option<T>,
stride: impl IntList,
padding: &str,
dilation: impl IntList,
groups: i64,
) -> Tensor {
self.f_conv2d_padding(weight, bias, stride, padding, dilation, groups).unwrap()
}
pub fn conv3d<T: Borrow<Tensor>>(
&self,
weight: &Tensor,
bias: Option<T>,
stride: impl IntList,
padding: impl IntList,
dilation: impl IntList,
groups: i64,
) -> Tensor {
self.f_conv3d(weight, bias, stride, padding, dilation, groups).unwrap()
}
pub fn conv3d_padding<T: Borrow<Tensor>>(
&self,
weight: &Tensor,
bias: Option<T>,
stride: impl IntList,
padding: &str,
dilation: impl IntList,
groups: i64,
) -> Tensor {
self.f_conv3d_padding(weight, bias, stride, padding, dilation, groups).unwrap()
}
pub fn conv_depthwise3d<T: Borrow<Tensor>>(
&self,
weight: &Tensor,
kernel_size: impl IntList,
bias: Option<T>,
stride: impl IntList,
padding: impl IntList,
dilation: impl IntList,
) -> Tensor {
self.f_conv_depthwise3d(weight, kernel_size, bias, stride, padding, dilation).unwrap()
}
pub fn conv_depthwise3d_out<T: Borrow<Tensor>>(
&self,
out: &Tensor,
weight: &Tensor,
kernel_size: impl IntList,
bias: Option<T>,
stride: impl IntList,
padding: impl IntList,
dilation: impl IntList,
) -> Tensor {
self.f_conv_depthwise3d_out(out, weight, kernel_size, bias, stride, padding, dilation)
.unwrap()
}
pub fn conv_tbc(&self, weight: &Tensor, bias: &Tensor, pad: i64) -> Tensor {
self.f_conv_tbc(weight, bias, pad).unwrap()
}
pub fn conv_tbc_backward(
&self,
input: &Tensor,
weight: &Tensor,
bias: &Tensor,
pad: i64,
) -> (Tensor, Tensor, Tensor) {
self.f_conv_tbc_backward(input, weight, bias, pad).unwrap()
}
pub fn conv_tbc_out(&self, out: &Tensor, weight: &Tensor, bias: &Tensor, pad: i64) -> Tensor {
self.f_conv_tbc_out(out, weight, bias, pad).unwrap()
}
pub fn conv_transpose1d<T: Borrow<Tensor>>(
&self,
weight: &Tensor,
bias: Option<T>,
stride: impl IntList,
padding: impl IntList,
output_padding: impl IntList,
groups: i64,
dilation: impl IntList,
) -> Tensor {
self.f_conv_transpose1d(weight, bias, stride, padding, output_padding, groups, dilation)
.unwrap()
}
pub fn conv_transpose2d<T: Borrow<Tensor>>(
&self,
weight: &Tensor,
bias: Option<T>,
stride: impl IntList,
padding: impl IntList,
output_padding: impl IntList,
groups: i64,
dilation: impl IntList,
) -> Tensor {
self.f_conv_transpose2d(weight, bias, stride, padding, output_padding, groups, dilation)
.unwrap()
}
pub fn conv_transpose3d<T: Borrow<Tensor>>(
&self,
weight: &Tensor,
bias: Option<T>,
stride: impl IntList,
padding: impl IntList,
output_padding: impl IntList,
groups: i64,
dilation: impl IntList,
) -> Tensor {
self.f_conv_transpose3d(weight, bias, stride, padding, output_padding, groups, dilation)
.unwrap()
}
pub fn convolution<T: Borrow<Tensor>>(
&self,
weight: &Tensor,
bias: Option<T>,
stride: impl IntList,
padding: impl IntList,
dilation: impl IntList,
transposed: bool,
output_padding: impl IntList,
groups: i64,
) -> Tensor {
self.f_convolution(
weight,
bias,
stride,
padding,
dilation,
transposed,
output_padding,
groups,
)
.unwrap()
}
pub fn convolution_out<T: Borrow<Tensor>>(
&self,
out: &Tensor,
weight: &Tensor,
bias: Option<T>,
stride: impl IntList,
padding: impl IntList,
dilation: impl IntList,
transposed: bool,
output_padding: impl IntList,
groups: i64,
) -> Tensor {
self.f_convolution_out(
out,
weight,
bias,
stride,
padding,
dilation,
transposed,
output_padding,
groups,
)
.unwrap()
}
pub fn convolution_overrideable<T: Borrow<Tensor>>(
&self,
weight: &Tensor,
bias: Option<T>,
stride: impl IntList,
padding: impl IntList,
dilation: impl IntList,
transposed: bool,
output_padding: impl IntList,
groups: i64,
) -> Tensor {
self.f_convolution_overrideable(
weight,
bias,
stride,
padding,
dilation,
transposed,
output_padding,
groups,
)
.unwrap()
}
pub fn convolution_overrideable_out<T: Borrow<Tensor>>(
&self,
out: &Tensor,
weight: &Tensor,
bias: Option<T>,
stride: impl IntList,
padding: impl IntList,
dilation: impl IntList,
transposed: bool,
output_padding: impl IntList,
groups: i64,
) -> Tensor {
self.f_convolution_overrideable_out(
out,
weight,
bias,
stride,
padding,
dilation,
transposed,
output_padding,
groups,
)
.unwrap()
}
pub fn copy_sparse_to_sparse(&self, src: &Tensor, non_blocking: bool) -> Tensor {
self.f_copy_sparse_to_sparse(src, non_blocking).unwrap()
}
pub fn copy_sparse_to_sparse_(&mut self, src: &Tensor, non_blocking: bool) -> Tensor {
self.f_copy_sparse_to_sparse_(src, non_blocking).unwrap()
}
pub fn copy_sparse_to_sparse_out(
&self,
out: &Tensor,
src: &Tensor,
non_blocking: bool,
) -> Tensor {
self.f_copy_sparse_to_sparse_out(out, src, non_blocking).unwrap()
}
pub fn copysign(&self, other: &Tensor) -> Tensor {
self.f_copysign(other).unwrap()
}
pub fn copysign_(&mut self, other: &Tensor) -> Tensor {
self.f_copysign_(other).unwrap()
}
pub fn copysign_out(&self, out: &Tensor, other: &Tensor) -> Tensor {
self.f_copysign_out(out, other).unwrap()
}
pub fn copysign_scalar<S: Into<Scalar>>(&self, other: S) -> Tensor {
self.f_copysign_scalar(other).unwrap()
}
pub fn copysign_scalar_<S: Into<Scalar>>(&mut self, other: S) -> Tensor {
self.f_copysign_scalar_(other).unwrap()
}
pub fn copysign_scalar_out<S: Into<Scalar>>(&self, out: &Tensor, other: S) -> Tensor {
self.f_copysign_scalar_out(out, other).unwrap()
}
pub fn corrcoef(&self) -> Tensor {
self.f_corrcoef().unwrap()
}
pub fn cos(&self) -> Tensor {
self.f_cos().unwrap()
}
pub fn cos_(&mut self) -> Tensor {
self.f_cos_().unwrap()
}
pub fn cos_out(&self, out: &Tensor) -> Tensor {
self.f_cos_out(out).unwrap()
}
pub fn cosh(&self) -> Tensor {
self.f_cosh().unwrap()
}
pub fn cosh_(&mut self) -> Tensor {
self.f_cosh_().unwrap()
}
pub fn cosh_out(&self, out: &Tensor) -> Tensor {
self.f_cosh_out(out).unwrap()
}
pub fn cosine_embedding_loss(
input1: &Tensor,
input2: &Tensor,
target: &Tensor,
margin: f64,
reduction: crate::Reduction,
) -> Tensor {
Tensor::f_cosine_embedding_loss(input1, input2, target, margin, reduction).unwrap()
}
pub fn cosine_similarity(x1: &Tensor, x2: &Tensor, dim: i64, eps: f64) -> Tensor {
Tensor::f_cosine_similarity(x1, x2, dim, eps).unwrap()
}
pub fn count_nonzero(&self, dim: impl Into<Option<i64>>) -> Tensor {
self.f_count_nonzero(dim).unwrap()
}
pub fn count_nonzero_dim_intlist(&self, dim: impl IntList) -> Tensor {
self.f_count_nonzero_dim_intlist(dim).unwrap()
}
pub fn count_nonzero_dim_intlist_out(&self, out: &Tensor, dim: impl IntList) -> Tensor {
self.f_count_nonzero_dim_intlist_out(out, dim).unwrap()
}
pub fn count_nonzero_out(&self, out: &Tensor, dim: impl Into<Option<i64>>) -> Tensor {
self.f_count_nonzero_out(out, dim).unwrap()
}
pub fn cov<T: Borrow<Tensor>>(
&self,
correction: i64,
fweights: Option<T>,
aweights: Option<T>,
) -> Tensor {
self.f_cov(correction, fweights, aweights).unwrap()
}
pub fn cross(&self, other: &Tensor, dim: impl Into<Option<i64>>) -> Tensor {
self.f_cross(other, dim).unwrap()
}
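/// Cross-entropy between the unnormalized logits in `self` and class-index
/// (or probability) `target`s. `ignore_index` skips entries with that label
/// (PyTorch's conventional sentinel is `-100`) and `label_smoothing` mixes
/// in a uniform distribution. A minimal sketch, assuming `logits` of shape
/// `(N, C)` and integer `targets` of shape `(N,)`:
/// `let loss = logits.cross_entropy_loss::<Tensor>(&targets, None, crate::Reduction::Mean, -100, 0.0);`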
pub fn cross_entropy_loss<T: Borrow<Tensor>>(
&self,
target: &Tensor,
weight: Option<T>,
reduction: crate::Reduction,
ignore_index: i64,
label_smoothing: f64,
) -> Tensor {
self.f_cross_entropy_loss(target, weight, reduction, ignore_index, label_smoothing).unwrap()
}
pub fn cross_out(&self, out: &Tensor, other: &Tensor, dim: impl Into<Option<i64>>) -> Tensor {
self.f_cross_out(out, other, dim).unwrap()
}
pub fn crow_indices(&self) -> Tensor {
self.f_crow_indices().unwrap()
}
pub fn crow_indices_copy(&self) -> Tensor {
self.f_crow_indices_copy().unwrap()
}
pub fn crow_indices_copy_out(&self, out: &Tensor) -> Tensor {
self.f_crow_indices_copy_out(out).unwrap()
}
pub fn ctc_loss(
log_probs: &Tensor,
targets: &Tensor,
input_lengths: impl IntList,
target_lengths: impl IntList,
blank: i64,
reduction: crate::Reduction,
zero_infinity: bool,
) -> Tensor {
Tensor::f_ctc_loss(
log_probs,
targets,
input_lengths,
target_lengths,
blank,
reduction,
zero_infinity,
)
.unwrap()
}
pub fn ctc_loss_tensor(
log_probs: &Tensor,
targets: &Tensor,
input_lengths: &Tensor,
target_lengths: &Tensor,
blank: i64,
reduction: crate::Reduction,
zero_infinity: bool,
) -> Tensor {
Tensor::f_ctc_loss_tensor(
log_probs,
targets,
input_lengths,
target_lengths,
blank,
reduction,
zero_infinity,
)
.unwrap()
}
pub fn cudnn_affine_grid_generator(theta: &Tensor, n: i64, c: i64, h: i64, w: i64) -> Tensor {
Tensor::f_cudnn_affine_grid_generator(theta, n, c, h, w).unwrap()
}
pub fn cudnn_affine_grid_generator_backward(
grad: &Tensor,
n: i64,
c: i64,
h: i64,
w: i64,
) -> Tensor {
Tensor::f_cudnn_affine_grid_generator_backward(grad, n, c, h, w).unwrap()
}
pub fn cudnn_affine_grid_generator_backward_out(
out: &Tensor,
grad: &Tensor,
n: i64,
c: i64,
h: i64,
w: i64,
) -> Tensor {
Tensor::f_cudnn_affine_grid_generator_backward_out(out, grad, n, c, h, w).unwrap()
}
pub fn cudnn_affine_grid_generator_out(
out: &Tensor,
theta: &Tensor,
n: i64,
c: i64,
h: i64,
w: i64,
) -> Tensor {
Tensor::f_cudnn_affine_grid_generator_out(out, theta, n, c, h, w).unwrap()
}
pub fn cudnn_batch_norm<T: Borrow<Tensor>>(
&self,
weight: &Tensor,
bias: Option<T>,
running_mean: Option<T>,
running_var: Option<T>,
training: bool,
exponential_average_factor: f64,
epsilon: f64,
) -> (Tensor, Tensor, Tensor, Tensor) {
self.f_cudnn_batch_norm(
weight,
bias,
running_mean,
running_var,
training,
exponential_average_factor,
epsilon,
)
.unwrap()
}
pub fn cudnn_batch_norm_backward<T: Borrow<Tensor>>(
&self,
grad_output: &Tensor,
weight: &Tensor,
running_mean: Option<T>,
running_var: Option<T>,
save_mean: Option<T>,
save_var: Option<T>,
epsilon: f64,
reservespace: &Tensor,
) -> (Tensor, Tensor, Tensor) {
self.f_cudnn_batch_norm_backward(
grad_output,
weight,
running_mean,
running_var,
save_mean,
save_var,
epsilon,
reservespace,
)
.unwrap()
}
pub fn cudnn_batch_norm_backward_out<T: Borrow<Tensor>>(
&self,
out0: &Tensor,
out1: &Tensor,
out2: &Tensor,
grad_output: &Tensor,
weight: &Tensor,
running_mean: Option<T>,
running_var: Option<T>,
save_mean: Option<T>,
save_var: Option<T>,
epsilon: f64,
reservespace: &Tensor,
) -> (Tensor, Tensor, Tensor) {
self.f_cudnn_batch_norm_backward_out(
out0,
out1,
out2,
grad_output,
weight,
running_mean,
running_var,
save_mean,
save_var,
epsilon,
reservespace,
)
.unwrap()
}
pub fn cudnn_batch_norm_out<T: Borrow<Tensor>>(
&self,
out0: &Tensor,
out1: &Tensor,
out2: &Tensor,
out3: &Tensor,
weight: &Tensor,
bias: Option<T>,
running_mean: Option<T>,
running_var: Option<T>,
training: bool,
exponential_average_factor: f64,
epsilon: f64,
) -> (Tensor, Tensor, Tensor, Tensor) {
self.f_cudnn_batch_norm_out(
out0,
out1,
out2,
out3,
weight,
bias,
running_mean,
running_var,
training,
exponential_average_factor,
epsilon,
)
.unwrap()
}
pub fn cudnn_convolution(
&self,
weight: &Tensor,
padding: impl IntList,
stride: impl IntList,
dilation: impl IntList,
groups: i64,
benchmark: bool,
deterministic: bool,
allow_tf32: bool,
) -> Tensor {
self.f_cudnn_convolution(
weight,
padding,
stride,
dilation,
groups,
benchmark,
deterministic,
allow_tf32,
)
.unwrap()
}
pub fn cudnn_convolution_add_relu<T: Borrow<Tensor>, S: Into<Scalar>>(
&self,
weight: &Tensor,
z: &Tensor,
alpha: S,
bias: Option<T>,
stride: impl IntList,
padding: impl IntList,
dilation: impl IntList,
groups: i64,
) -> Tensor {
self.f_cudnn_convolution_add_relu(weight, z, alpha, bias, stride, padding, dilation, groups)
.unwrap()
}
pub fn cudnn_convolution_add_relu_out<T: Borrow<Tensor>, S: Into<Scalar>>(
&self,
out: &Tensor,
weight: &Tensor,
z: &Tensor,
alpha: S,
bias: Option<T>,
stride: impl IntList,
padding: impl IntList,
dilation: impl IntList,
groups: i64,
) -> Tensor {
self.f_cudnn_convolution_add_relu_out(
out, weight, z, alpha, bias, stride, padding, dilation, groups,
)
.unwrap()
}
pub fn cudnn_convolution_out(
&self,
out: &Tensor,
weight: &Tensor,
padding: impl IntList,
stride: impl IntList,
dilation: impl IntList,
groups: i64,
benchmark: bool,
deterministic: bool,
allow_tf32: bool,
) -> Tensor {
self.f_cudnn_convolution_out(
out,
weight,
padding,
stride,
dilation,
groups,
benchmark,
deterministic,
allow_tf32,
)
.unwrap()
}
pub fn cudnn_convolution_relu<T: Borrow<Tensor>>(
&self,
weight: &Tensor,
bias: Option<T>,
stride: impl IntList,
padding: impl IntList,
dilation: impl IntList,
groups: i64,
) -> Tensor {
self.f_cudnn_convolution_relu(weight, bias, stride, padding, dilation, groups).unwrap()
}
pub fn cudnn_convolution_relu_out<T: Borrow<Tensor>>(
&self,
out: &Tensor,
weight: &Tensor,
bias: Option<T>,
stride: impl IntList,
padding: impl IntList,
dilation: impl IntList,
groups: i64,
) -> Tensor {
self.f_cudnn_convolution_relu_out(out, weight, bias, stride, padding, dilation, groups)
.unwrap()
}
pub fn cudnn_convolution_transpose(
&self,
weight: &Tensor,
padding: impl IntList,
output_padding: impl IntList,
stride: impl IntList,
dilation: impl IntList,
groups: i64,
benchmark: bool,
deterministic: bool,
allow_tf32: bool,
) -> Tensor {
self.f_cudnn_convolution_transpose(
weight,
padding,
output_padding,
stride,
dilation,
groups,
benchmark,
deterministic,
allow_tf32,
)
.unwrap()
}
pub fn cudnn_convolution_transpose_out(
&self,
out: &Tensor,
weight: &Tensor,
padding: impl IntList,
output_padding: impl IntList,
stride: impl IntList,
dilation: impl IntList,
groups: i64,
benchmark: bool,
deterministic: bool,
allow_tf32: bool,
) -> Tensor {
self.f_cudnn_convolution_transpose_out(
out,
weight,
padding,
output_padding,
stride,
dilation,
groups,
benchmark,
deterministic,
allow_tf32,
)
.unwrap()
}
pub fn cudnn_grid_sampler(&self, grid: &Tensor) -> Tensor {
self.f_cudnn_grid_sampler(grid).unwrap()
}
pub fn cudnn_grid_sampler_backward(
&self,
grid: &Tensor,
grad_output: &Tensor,
) -> (Tensor, Tensor) {
self.f_cudnn_grid_sampler_backward(grid, grad_output).unwrap()
}
pub fn cudnn_grid_sampler_backward_out(
&self,
out0: &Tensor,
out1: &Tensor,
grid: &Tensor,
grad_output: &Tensor,
) -> (Tensor, Tensor) {
self.f_cudnn_grid_sampler_backward_out(out0, out1, grid, grad_output).unwrap()
}
pub fn cudnn_grid_sampler_out(&self, out: &Tensor, grid: &Tensor) -> Tensor {
self.f_cudnn_grid_sampler_out(out, grid).unwrap()
}
pub fn cudnn_is_acceptable(&self) -> bool {
self.f_cudnn_is_acceptable().unwrap()
}
pub fn cummax(&self, dim: i64) -> (Tensor, Tensor) {
self.f_cummax(dim).unwrap()
}
pub fn cummax_out(&self, values: &Tensor, indices: &Tensor, dim: i64) -> (Tensor, Tensor) {
self.f_cummax_out(values, indices, dim).unwrap()
}
pub fn cummaxmin_backward(&self, grad: &Tensor, indices: &Tensor, dim: i64) -> Tensor {
self.f_cummaxmin_backward(grad, indices, dim).unwrap()
}
pub fn cummin(&self, dim: i64) -> (Tensor, Tensor) {
self.f_cummin(dim).unwrap()
}
pub fn cummin_out(&self, values: &Tensor, indices: &Tensor, dim: i64) -> (Tensor, Tensor) {
self.f_cummin_out(values, indices, dim).unwrap()
}
pub fn cumprod(&self, dim: i64, dtype: impl Into<Option<Kind>>) -> Tensor {
self.f_cumprod(dim, dtype).unwrap()
}
pub fn cumprod_(&mut self, dim: i64, dtype: impl Into<Option<Kind>>) -> Tensor {
self.f_cumprod_(dim, dtype).unwrap()
}
pub fn cumprod_backward(&self, grad: &Tensor, dim: i64, output: &Tensor) -> Tensor {
self.f_cumprod_backward(grad, dim, output).unwrap()
}
pub fn cumprod_out(&self, out: &Tensor, dim: i64, dtype: impl Into<Option<Kind>>) -> Tensor {
self.f_cumprod_out(out, dim, dtype).unwrap()
}
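/// Cumulative sum along `dim`, optionally accumulating in a different
/// `dtype` (pass a `Kind` to cast, or `None` to keep the input dtype).
/// A minimal sketch: `let running = t.cumsum(0, Kind::Float);`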
pub fn cumsum(&self, dim: i64, dtype: impl Into<Option<Kind>>) -> Tensor {
self.f_cumsum(dim, dtype).unwrap()
}
pub fn cumsum_(&mut self, dim: i64, dtype: impl Into<Option<Kind>>) -> Tensor {
self.f_cumsum_(dim, dtype).unwrap()
}
pub fn cumsum_out(&self, out: &Tensor, dim: i64, dtype: impl Into<Option<Kind>>) -> Tensor {
self.f_cumsum_out(out, dim, dtype).unwrap()
}
pub fn cumulative_trapezoid(y: &Tensor, dim: i64) -> Tensor {
Tensor::f_cumulative_trapezoid(y, dim).unwrap()
}
pub fn cumulative_trapezoid_x(y: &Tensor, x: &Tensor, dim: i64) -> Tensor {
Tensor::f_cumulative_trapezoid_x(y, x, dim).unwrap()
}
pub fn data(&self) -> Tensor {
self.f_data().unwrap()
}
pub fn deg2rad(&self) -> Tensor {
self.f_deg2rad().unwrap()
}
pub fn deg2rad_(&mut self) -> Tensor {
self.f_deg2rad_().unwrap()
}
pub fn deg2rad_out(&self, out: &Tensor) -> Tensor {
self.f_deg2rad_out(out).unwrap()
}
pub fn dense_dim(&self) -> i64 {
self.f_dense_dim().unwrap()
}
pub fn dequantize(&self) -> Tensor {
self.f_dequantize().unwrap()
}
pub fn dequantize_self_out(&self, out: &Tensor) -> Tensor {
self.f_dequantize_self_out(out).unwrap()
}
pub fn dequantize_tensors<T: Borrow<Tensor>>(tensors: &[T]) -> Vec<Tensor> {
Tensor::f_dequantize_tensors(tensors).unwrap()
}
pub fn dequantize_tensors_out<T: Borrow<Tensor>>(out: &[T], tensors: &[T]) {
Tensor::f_dequantize_tensors_out(out, tensors).unwrap()
}
pub fn det(&self) -> Tensor {
self.f_det().unwrap()
}
pub fn detach(&self) -> Tensor {
self.f_detach().unwrap()
}
pub fn detach_(&mut self) -> Tensor {
self.f_detach_().unwrap()
}
pub fn detach_copy(&self) -> Tensor {
self.f_detach_copy().unwrap()
}
pub fn detach_copy_out(&self, out: &Tensor) -> Tensor {
self.f_detach_copy_out(out).unwrap()
}
pub fn diag(&self, diagonal: i64) -> Tensor {
self.f_diag(diagonal).unwrap()
}
pub fn diag_embed(&self, offset: i64, dim1: i64, dim2: i64) -> Tensor {
self.f_diag_embed(offset, dim1, dim2).unwrap()
}
pub fn diag_embed_out(&self, out: &Tensor, offset: i64, dim1: i64, dim2: i64) -> Tensor {
self.f_diag_embed_out(out, offset, dim1, dim2).unwrap()
}
pub fn diag_out(&self, out: &Tensor, diagonal: i64) -> Tensor {
self.f_diag_out(out, diagonal).unwrap()
}
pub fn diagflat(&self, offset: i64) -> Tensor {
self.f_diagflat(offset).unwrap()
}
pub fn diagonal(&self, offset: i64, dim1: i64, dim2: i64) -> Tensor {
self.f_diagonal(offset, dim1, dim2).unwrap()
}
pub fn diagonal_backward(
grad_output: &Tensor,
input_sizes: impl IntList,
offset: i64,
dim1: i64,
dim2: i64,
) -> Tensor {
Tensor::f_diagonal_backward(grad_output, input_sizes, offset, dim1, dim2).unwrap()
}
pub fn diagonal_backward_out(
out: &Tensor,
grad_output: &Tensor,
input_sizes: impl IntList,
offset: i64,
dim1: i64,
dim2: i64,
) -> Tensor {
Tensor::f_diagonal_backward_out(out, grad_output, input_sizes, offset, dim1, dim2).unwrap()
}
pub fn diagonal_copy(&self, offset: i64, dim1: i64, dim2: i64) -> Tensor {
self.f_diagonal_copy(offset, dim1, dim2).unwrap()
}
pub fn diagonal_copy_out(&self, out: &Tensor, offset: i64, dim1: i64, dim2: i64) -> Tensor {
self.f_diagonal_copy_out(out, offset, dim1, dim2).unwrap()
}
pub fn diagonal_scatter(&self, src: &Tensor, offset: i64, dim1: i64, dim2: i64) -> Tensor {
self.f_diagonal_scatter(src, offset, dim1, dim2).unwrap()
}
pub fn diagonal_scatter_out(
&self,
out: &Tensor,
src: &Tensor,
offset: i64,
dim1: i64,
dim2: i64,
) -> Tensor {
self.f_diagonal_scatter_out(out, src, offset, dim1, dim2).unwrap()
}
pub fn diff<T: Borrow<Tensor>>(
&self,
n: i64,
dim: i64,
prepend: Option<T>,
append: Option<T>,
) -> Tensor {
self.f_diff(n, dim, prepend, append).unwrap()
}
pub fn diff_out<T: Borrow<Tensor>>(
&self,
out: &Tensor,
n: i64,
dim: i64,
prepend: Option<T>,
append: Option<T>,
) -> Tensor {
self.f_diff_out(out, n, dim, prepend, append).unwrap()
}
pub fn digamma(&self) -> Tensor {
self.f_digamma().unwrap()
}
pub fn digamma_(&mut self) -> Tensor {
self.f_digamma_().unwrap()
}
pub fn digamma_out(&self, out: &Tensor) -> Tensor {
self.f_digamma_out(out).unwrap()
}
pub fn dist(&self, other: &Tensor) -> Tensor {
self.f_dist(other).unwrap()
}
pub fn dist_out(&self, out: &Tensor, other: &Tensor) -> Tensor {
self.f_dist_out(out, other).unwrap()
}
pub fn g_div(&self, other: &Tensor) -> Tensor {
self.f_div(other).unwrap()
}
pub fn g_div_(&mut self, other: &Tensor) -> Tensor {
self.f_div_(other).unwrap()
}
pub fn div_out(&self, out: &Tensor, other: &Tensor) -> Tensor {
self.f_div_out(out, other).unwrap()
}
pub fn div_out_mode(&self, out: &Tensor, other: &Tensor, rounding_mode: &str) -> Tensor {
self.f_div_out_mode(out, other, rounding_mode).unwrap()
}
pub fn g_div_scalar<S: Into<Scalar>>(&self, other: S) -> Tensor {
self.f_div_scalar(other).unwrap()
}
pub fn g_div_scalar_<S: Into<Scalar>>(&mut self, other: S) -> Tensor {
self.f_div_scalar_(other).unwrap()
}
pub fn g_div_scalar_mode<S: Into<Scalar>>(&self, other: S, rounding_mode: &str) -> Tensor {
self.f_div_scalar_mode(other, rounding_mode).unwrap()
}
pub fn g_div_scalar_mode_<S: Into<Scalar>>(&mut self, other: S, rounding_mode: &str) -> Tensor {
self.f_div_scalar_mode_(other, rounding_mode).unwrap()
}
pub fn div_scalar_mode_out<S: Into<Scalar>>(
&self,
out: &Tensor,
other: S,
rounding_mode: &str,
) -> Tensor {
self.f_div_scalar_mode_out(out, other, rounding_mode).unwrap()
}
pub fn div_scalar_out<S: Into<Scalar>>(&self, out: &Tensor, other: S) -> Tensor {
self.f_div_scalar_out(out, other).unwrap()
}
pub fn g_div_tensor_mode(&self, other: &Tensor, rounding_mode: &str) -> Tensor {
self.f_div_tensor_mode(other, rounding_mode).unwrap()
}
pub fn g_div_tensor_mode_(&mut self, other: &Tensor, rounding_mode: &str) -> Tensor {
self.f_div_tensor_mode_(other, rounding_mode).unwrap()
}
pub fn divide(&self, other: &Tensor) -> Tensor {
self.f_divide(other).unwrap()
}
pub fn divide_(&mut self, other: &Tensor) -> Tensor {
self.f_divide_(other).unwrap()
}
pub fn divide_out(&self, out: &Tensor, other: &Tensor) -> Tensor {
self.f_divide_out(out, other).unwrap()
}
pub fn divide_out_mode(&self, out: &Tensor, other: &Tensor, rounding_mode: &str) -> Tensor {
self.f_divide_out_mode(out, other, rounding_mode).unwrap()
}
pub fn divide_scalar<S: Into<Scalar>>(&self, other: S) -> Tensor {
self.f_divide_scalar(other).unwrap()
}
pub fn divide_scalar_<S: Into<Scalar>>(&mut self, other: S) -> Tensor {
self.f_divide_scalar_(other).unwrap()
}
pub fn divide_scalar_mode<S: Into<Scalar>>(&self, other: S, rounding_mode: &str) -> Tensor {
self.f_divide_scalar_mode(other, rounding_mode).unwrap()
}
pub fn divide_scalar_mode_<S: Into<Scalar>>(
&mut self,
other: S,
rounding_mode: &str,
) -> Tensor {
self.f_divide_scalar_mode_(other, rounding_mode).unwrap()
}
pub fn divide_tensor_mode(&self, other: &Tensor, rounding_mode: &str) -> Tensor {
self.f_divide_tensor_mode(other, rounding_mode).unwrap()
}
pub fn divide_tensor_mode_(&mut self, other: &Tensor, rounding_mode: &str) -> Tensor {
self.f_divide_tensor_mode_(other, rounding_mode).unwrap()
}
pub fn dot(&self, tensor: &Tensor) -> Tensor {
self.f_dot(tensor).unwrap()
}
pub fn dot_out(&self, out: &Tensor, tensor: &Tensor) -> Tensor {
self.f_dot_out(out, tensor).unwrap()
}
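/// Randomly zeroes elements with probability `p` and rescales the survivors
/// by `1 / (1 - p)` while `train` is `true`; with `train` set to `false` it
/// is the identity. A minimal sketch: `let y = x.dropout(0.5, true);`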
pub fn dropout(&self, p: f64, train: bool) -> Tensor {
self.f_dropout(p, train).unwrap()
}
pub fn dropout_(&mut self, p: f64, train: bool) -> Tensor {
self.f_dropout_(p, train).unwrap()
}
pub fn dsplit(&self, sections: i64) -> Vec<Tensor> {
self.f_dsplit(sections).unwrap()
}
pub fn dsplit_array(&self, indices: impl IntList) -> Vec<Tensor> {
self.f_dsplit_array(indices).unwrap()
}
pub fn dstack<T: Borrow<Tensor>>(tensors: &[T]) -> Tensor {
Tensor::f_dstack(tensors).unwrap()
}
pub fn dstack_out<T: Borrow<Tensor>>(out: &Tensor, tensors: &[T]) -> Tensor {
Tensor::f_dstack_out(out, tensors).unwrap()
}
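/// Einstein-summation contraction of `tensors` driven by `equation`; the
/// extra `path` argument can pin an explicit contraction order and accepts
/// any `IntListOption` value. A minimal sketch, assuming 2-D `a` and `b`
/// with matching inner dimensions and some `path` value in scope (a
/// placeholder name for whatever option you pass):
/// `let c = Tensor::einsum("ij,jk->ik", &[&a, &b], path);`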
pub fn einsum<T: Borrow<Tensor>>(
equation: &str,
tensors: &[T],
path: impl IntListOption,
) -> Tensor {
Tensor::f_einsum(equation, tensors, path).unwrap()
}
pub fn elu(&self) -> Tensor {
self.f_elu().unwrap()
}
pub fn elu_(&mut self) -> Tensor {
self.f_elu_().unwrap()
}
pub fn elu_backward<S: Into<Scalar>>(
grad_output: &Tensor,
alpha: S,
scale: S,
input_scale: S,
is_result: bool,
self_or_result: &Tensor,
) -> Tensor {
Tensor::f_elu_backward(grad_output, alpha, scale, input_scale, is_result, self_or_result)
.unwrap()
}
pub fn elu_backward_grad_input<S: Into<Scalar>>(
grad_input: &Tensor,
grad_output: &Tensor,
alpha: S,
scale: S,
input_scale: S,
is_result: bool,
self_or_result: &Tensor,
) -> Tensor {
Tensor::f_elu_backward_grad_input(
grad_input,
grad_output,
alpha,
scale,
input_scale,
is_result,
self_or_result,
)
.unwrap()
}
pub fn elu_out(&self, out: &Tensor) -> Tensor {
self.f_elu_out(out).unwrap()
}
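/// Embedding lookup: rows of `weight` selected by integer `indices`.
/// `padding_idx` marks a row whose gradient stays zero (libtorch uses `-1`
/// for "no padding row"), and `sparse` requests sparse gradients. A minimal
/// sketch, assuming a `(num_embeddings, dim)` `weight` and an integer
/// `indices` tensor:
/// `let emb = Tensor::embedding(&weight, &indices, -1, false, false);`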
pub fn embedding(
weight: &Tensor,
indices: &Tensor,
padding_idx: i64,
scale_grad_by_freq: bool,
sparse: bool,
) -> Tensor {
Tensor::f_embedding(weight, indices, padding_idx, scale_grad_by_freq, sparse).unwrap()
}
pub fn embedding_backward(
grad: &Tensor,
indices: &Tensor,
num_weights: i64,
padding_idx: i64,
scale_grad_by_freq: bool,
sparse: bool,
) -> Tensor {
Tensor::f_embedding_backward(
grad,
indices,
num_weights,
padding_idx,
scale_grad_by_freq,
sparse,
)
.unwrap()
}
pub fn embedding_bag<T: Borrow<Tensor>>(
weight: &Tensor,
indices: &Tensor,
offsets: &Tensor,
scale_grad_by_freq: bool,
mode: i64,
sparse: bool,
per_sample_weights: Option<T>,
include_last_offset: bool,
) -> (Tensor, Tensor, Tensor, Tensor) {
Tensor::f_embedding_bag(
weight,
indices,
offsets,
scale_grad_by_freq,
mode,
sparse,
per_sample_weights,
include_last_offset,
)
.unwrap()
}
pub fn embedding_bag_padding_idx<T: Borrow<Tensor>>(
weight: &Tensor,
indices: &Tensor,
offsets: &Tensor,
scale_grad_by_freq: bool,
mode: i64,
sparse: bool,
per_sample_weights: Option<T>,
include_last_offset: bool,
padding_idx: impl Into<Option<i64>>,
) -> (Tensor, Tensor, Tensor, Tensor) {
Tensor::f_embedding_bag_padding_idx(
weight,
indices,
offsets,
scale_grad_by_freq,
mode,
sparse,
per_sample_weights,
include_last_offset,
padding_idx,
)
.unwrap()
}
pub fn embedding_dense_backward(
grad_output: &Tensor,
indices: &Tensor,
num_weights: i64,
padding_idx: i64,
scale_grad_by_freq: bool,
) -> Tensor {
Tensor::f_embedding_dense_backward(
grad_output,
indices,
num_weights,
padding_idx,
scale_grad_by_freq,
)
.unwrap()
}
pub fn embedding_dense_backward_out(
out: &Tensor,
grad_output: &Tensor,
indices: &Tensor,
num_weights: i64,
padding_idx: i64,
scale_grad_by_freq: bool,
) -> Tensor {
Tensor::f_embedding_dense_backward_out(
out,
grad_output,
indices,
num_weights,
padding_idx,
scale_grad_by_freq,
)
.unwrap()
}
pub fn embedding_out(
out: &Tensor,
weight: &Tensor,
indices: &Tensor,
padding_idx: i64,
scale_grad_by_freq: bool,
sparse: bool,
) -> Tensor {
Tensor::f_embedding_out(out, weight, indices, padding_idx, scale_grad_by_freq, sparse)
.unwrap()
}
pub fn embedding_renorm(&self, indices: &Tensor, max_norm: f64, norm_type: f64) -> Tensor {
self.f_embedding_renorm(indices, max_norm, norm_type).unwrap()
}
pub fn embedding_renorm_(&mut self, indices: &Tensor, max_norm: f64, norm_type: f64) -> Tensor {
self.f_embedding_renorm_(indices, max_norm, norm_type).unwrap()
}
pub fn embedding_renorm_out(
&self,
out: &Tensor,
indices: &Tensor,
max_norm: f64,
norm_type: f64,
) -> Tensor {
self.f_embedding_renorm_out(out, indices, max_norm, norm_type).unwrap()
}
pub fn embedding_sparse_backward(
grad: &Tensor,
indices: &Tensor,
num_weights: i64,
padding_idx: i64,
scale_grad_by_freq: bool,
) -> Tensor {
Tensor::f_embedding_sparse_backward(
grad,
indices,
num_weights,
padding_idx,
scale_grad_by_freq,
)
.unwrap()
}
pub fn empty(size: impl IntList, options: (Kind, Device)) -> Tensor {
Tensor::f_empty(size, options).unwrap()
}
pub fn empty_like(&self) -> Tensor {
self.f_empty_like().unwrap()
}
pub fn empty_like_out(&self, out: &Tensor) -> Tensor {
self.f_empty_like_out(out).unwrap()
}
pub fn empty_out(out: &Tensor, size: impl IntList) -> Tensor {
Tensor::f_empty_out(out, size).unwrap()
}
pub fn empty_permuted(
size: impl IntList,
physical_layout: impl IntList,
options: (Kind, Device),
) -> Tensor {
Tensor::f_empty_permuted(size, physical_layout, options).unwrap()
}
pub fn empty_permuted_out(
out: &Tensor,
size: impl IntList,
physical_layout: impl IntList,
) -> Tensor {
Tensor::f_empty_permuted_out(out, size, physical_layout).unwrap()
}
pub fn empty_quantized(
size: impl IntList,
qtensor: &Tensor,
options: (Kind, Device),
) -> Tensor {
Tensor::f_empty_quantized(size, qtensor, options).unwrap()
}
pub fn empty_quantized_out(out: &Tensor, size: impl IntList, qtensor: &Tensor) -> Tensor {
Tensor::f_empty_quantized_out(out, size, qtensor).unwrap()
}
pub fn empty_strided(
size: impl IntList,
stride: impl IntList,
options: (Kind, Device),
) -> Tensor {
Tensor::f_empty_strided(size, stride, options).unwrap()
}
pub fn empty_strided_out(out: &Tensor, size: impl IntList, stride: impl IntList) -> Tensor {
Tensor::f_empty_strided_out(out, size, stride).unwrap()
}
pub fn eq<S: Into<Scalar>>(&self, other: S) -> Tensor {
self.f_eq(other).unwrap()
}
pub fn eq_<S: Into<Scalar>>(&mut self, other: S) -> Tensor {
self.f_eq_(other).unwrap()
}
pub fn eq_scalar_out<S: Into<Scalar>>(&self, out: &Tensor, other: S) -> Tensor {
self.f_eq_scalar_out(out, other).unwrap()
}
pub fn eq_tensor(&self, other: &Tensor) -> Tensor {
self.f_eq_tensor(other).unwrap()
}
pub fn eq_tensor_(&mut self, other: &Tensor) -> Tensor {
self.f_eq_tensor_(other).unwrap()
}
pub fn eq_tensor_out(&self, out: &Tensor, other: &Tensor) -> Tensor {
self.f_eq_tensor_out(out, other).unwrap()
}
pub fn equal(&self, other: &Tensor) -> bool {
self.f_equal(other).unwrap()
}
pub fn erf(&self) -> Tensor {
self.f_erf().unwrap()
}
pub fn erf_(&mut self) -> Tensor {
self.f_erf_().unwrap()
}
pub fn erf_out(&self, out: &Tensor) -> Tensor {
self.f_erf_out(out).unwrap()
}
pub fn erfc(&self) -> Tensor {
self.f_erfc().unwrap()
}
pub fn erfc_(&mut self) -> Tensor {
self.f_erfc_().unwrap()
}
pub fn erfc_out(&self, out: &Tensor) -> Tensor {
self.f_erfc_out(out).unwrap()
}
pub fn erfinv(&self) -> Tensor {
self.f_erfinv().unwrap()
}
pub fn erfinv_(&mut self) -> Tensor {
self.f_erfinv_().unwrap()
}
pub fn erfinv_out(&self, out: &Tensor) -> Tensor {
self.f_erfinv_out(out).unwrap()
}
pub fn exp(&self) -> Tensor {
self.f_exp().unwrap()
}
pub fn exp2(&self) -> Tensor {
self.f_exp2().unwrap()
}
pub fn exp2_(&mut self) -> Tensor {
self.f_exp2_().unwrap()
}
pub fn exp2_out(&self, out: &Tensor) -> Tensor {
self.f_exp2_out(out).unwrap()
}
pub fn exp_(&mut self) -> Tensor {
self.f_exp_().unwrap()
}
pub fn exp_out(&self, out: &Tensor) -> Tensor {
self.f_exp_out(out).unwrap()
}
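/// Usage sketch for `expand` (illustrative only): singleton dimensions are
/// broadcast to the requested size without copying; `implicit` is normally
/// `false` and is only set by internal broadcasting code.
///
/// ```ignore
/// use tch::{Device, Kind, Tensor};
/// let col = Tensor::ones([3, 1], (Kind::Float, Device::Cpu));
/// // Broadcast the size-1 dimension to width 4, yielding a 3x4 view.
/// let wide = col.expand([3, 4], false);
/// assert_eq!(wide.size(), [3, 4]);
/// ```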
pub fn expand(&self, size: impl IntList, implicit: bool) -> Tensor {
self.f_expand(size, implicit).unwrap()
}
pub fn expand_as(&self, other: &Tensor) -> Tensor {
self.f_expand_as(other).unwrap()
}
pub fn expand_copy(&self, size: impl IntList, implicit: bool) -> Tensor {
self.f_expand_copy(size, implicit).unwrap()
}
pub fn expand_copy_out(&self, out: &Tensor, size: impl IntList, implicit: bool) -> Tensor {
self.f_expand_copy_out(out, size, implicit).unwrap()
}
pub fn expm1(&self) -> Tensor {
self.f_expm1().unwrap()
}
pub fn expm1_(&mut self) -> Tensor {
self.f_expm1_().unwrap()
}
pub fn expm1_out(&self, out: &Tensor) -> Tensor {
self.f_expm1_out(out).unwrap()
}
pub fn exponential(&self, lambd: f64) -> Tensor {
self.f_exponential(lambd).unwrap()
}
pub fn exponential_(&mut self, lambd: f64) -> Tensor {
self.f_exponential_(lambd).unwrap()
}
pub fn exponential_out(&self, out: &Tensor, lambd: f64) -> Tensor {
self.f_exponential_out(out, lambd).unwrap()
}
pub fn eye(n: i64, options: (Kind, Device)) -> Tensor {
Tensor::f_eye(n, options).unwrap()
}
pub fn eye_m(n: i64, m: i64, options: (Kind, Device)) -> Tensor {
Tensor::f_eye_m(n, m, options).unwrap()
}
pub fn eye_m_out(out: &Tensor, n: i64, m: i64) -> Tensor {
Tensor::f_eye_m_out(out, n, m).unwrap()
}
pub fn eye_out(out: &Tensor, n: i64) -> Tensor {
Tensor::f_eye_out(out, n).unwrap()
}
pub fn fake_quantize_per_channel_affine(
&self,
scale: &Tensor,
zero_point: &Tensor,
axis: i64,
quant_min: i64,
quant_max: i64,
) -> Tensor {
self.f_fake_quantize_per_channel_affine(scale, zero_point, axis, quant_min, quant_max)
.unwrap()
}
pub fn fake_quantize_per_channel_affine_cachemask(
&self,
scale: &Tensor,
zero_point: &Tensor,
axis: i64,
quant_min: i64,
quant_max: i64,
) -> (Tensor, Tensor) {
self.f_fake_quantize_per_channel_affine_cachemask(
scale, zero_point, axis, quant_min, quant_max,
)
.unwrap()
}
pub fn fake_quantize_per_channel_affine_cachemask_backward(
grad: &Tensor,
mask: &Tensor,
) -> Tensor {
Tensor::f_fake_quantize_per_channel_affine_cachemask_backward(grad, mask).unwrap()
}
pub fn fake_quantize_per_channel_affine_cachemask_out(
&self,
out0: &Tensor,
out1: &Tensor,
scale: &Tensor,
zero_point: &Tensor,
axis: i64,
quant_min: i64,
quant_max: i64,
) -> (Tensor, Tensor) {
self.f_fake_quantize_per_channel_affine_cachemask_out(
out0, out1, scale, zero_point, axis, quant_min, quant_max,
)
.unwrap()
}
pub fn fake_quantize_per_tensor_affine(
&self,
scale: f64,
zero_point: i64,
quant_min: i64,
quant_max: i64,
) -> Tensor {
self.f_fake_quantize_per_tensor_affine(scale, zero_point, quant_min, quant_max).unwrap()
}
pub fn fake_quantize_per_tensor_affine_cachemask(
&self,
scale: f64,
zero_point: i64,
quant_min: i64,
quant_max: i64,
) -> (Tensor, Tensor) {
self.f_fake_quantize_per_tensor_affine_cachemask(scale, zero_point, quant_min, quant_max)
.unwrap()
}
pub fn fake_quantize_per_tensor_affine_cachemask_backward(
grad: &Tensor,
mask: &Tensor,
) -> Tensor {
Tensor::f_fake_quantize_per_tensor_affine_cachemask_backward(grad, mask).unwrap()
}
pub fn fake_quantize_per_tensor_affine_cachemask_out(
&self,
out0: &Tensor,
out1: &Tensor,
scale: f64,
zero_point: i64,
quant_min: i64,
quant_max: i64,
) -> (Tensor, Tensor) {
self.f_fake_quantize_per_tensor_affine_cachemask_out(
out0, out1, scale, zero_point, quant_min, quant_max,
)
.unwrap()
}
pub fn fake_quantize_per_tensor_affine_tensor_qparams(
&self,
scale: &Tensor,
zero_point: &Tensor,
quant_min: i64,
quant_max: i64,
) -> Tensor {
self.f_fake_quantize_per_tensor_affine_tensor_qparams(
scale, zero_point, quant_min, quant_max,
)
.unwrap()
}
pub fn fbgemm_linear_fp16_weight(&self, packed_weight: &Tensor, bias: &Tensor) -> Tensor {
self.f_fbgemm_linear_fp16_weight(packed_weight, bias).unwrap()
}
pub fn fbgemm_linear_fp16_weight_fp32_activation(
&self,
packed_weight: &Tensor,
bias: &Tensor,
) -> Tensor {
self.f_fbgemm_linear_fp16_weight_fp32_activation(packed_weight, bias).unwrap()
}
pub fn fbgemm_linear_int8_weight<S: Into<Scalar>>(
&self,
weight: &Tensor,
packed: &Tensor,
col_offsets: &Tensor,
weight_scale: S,
weight_zero_point: S,
bias: &Tensor,
) -> Tensor {
self.f_fbgemm_linear_int8_weight(
weight,
packed,
col_offsets,
weight_scale,
weight_zero_point,
bias,
)
.unwrap()
}
pub fn fbgemm_linear_int8_weight_fp32_activation<S: Into<Scalar>>(
&self,
weight: &Tensor,
packed: &Tensor,
col_offsets: &Tensor,
weight_scale: S,
weight_zero_point: S,
bias: &Tensor,
) -> Tensor {
self.f_fbgemm_linear_int8_weight_fp32_activation(
weight,
packed,
col_offsets,
weight_scale,
weight_zero_point,
bias,
)
.unwrap()
}
pub fn fbgemm_pack_gemm_matrix_fp16(&self) -> Tensor {
self.f_fbgemm_pack_gemm_matrix_fp16().unwrap()
}
pub fn fbgemm_pack_quantized_matrix(&self) -> Tensor {
self.f_fbgemm_pack_quantized_matrix().unwrap()
}
pub fn fbgemm_pack_quantized_matrix_kn(&self, k: i64, n: i64) -> Tensor {
self.f_fbgemm_pack_quantized_matrix_kn(k, n).unwrap()
}
pub fn feature_alpha_dropout(&self, p: f64, train: bool) -> Tensor {
self.f_feature_alpha_dropout(p, train).unwrap()
}
pub fn feature_alpha_dropout_(&mut self, p: f64, train: bool) -> Tensor {
self.f_feature_alpha_dropout_(p, train).unwrap()
}
pub fn feature_dropout(&self, p: f64, train: bool) -> Tensor {
self.f_feature_dropout(p, train).unwrap()
}
pub fn feature_dropout_(&mut self, p: f64, train: bool) -> Tensor {
self.f_feature_dropout_(p, train).unwrap()
}
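/// Usage sketch for `fft_fft` (illustrative only): `n` pads or trims the
/// signal (pass `None` to keep its length), `dim` selects the axis, and
/// `norm` is one of "backward", "forward" or "ortho".
///
/// ```ignore
/// use tch::{Device, Kind, Tensor};
/// let signal = Tensor::rand([16], (Kind::Float, Device::Cpu));
/// // Full-length FFT over the last dimension; the result is complex-valued.
/// let spectrum = signal.fft_fft(None::<i64>, -1, "backward");
/// ```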
pub fn fft_fft(&self, n: impl Into<Option<i64>>, dim: i64, norm: &str) -> Tensor {
self.f_fft_fft(n, dim, norm).unwrap()
}
pub fn fft_fft2(&self, s: impl IntListOption, dim: impl IntList, norm: &str) -> Tensor {
self.f_fft_fft2(s, dim, norm).unwrap()
}
pub fn fft_fft2_out(
&self,
out: &Tensor,
s: impl IntListOption,
dim: impl IntList,
norm: &str,
) -> Tensor {
self.f_fft_fft2_out(out, s, dim, norm).unwrap()
}
pub fn fft_fft_out(
&self,
out: &Tensor,
n: impl Into<Option<i64>>,
dim: i64,
norm: &str,
) -> Tensor {
self.f_fft_fft_out(out, n, dim, norm).unwrap()
}
pub fn fft_fftfreq(n: i64, d: f64, options: (Kind, Device)) -> Tensor {
Tensor::f_fft_fftfreq(n, d, options).unwrap()
}
pub fn fft_fftfreq_out(out: &Tensor, n: i64, d: f64) -> Tensor {
Tensor::f_fft_fftfreq_out(out, n, d).unwrap()
}
pub fn fft_fftn(&self, s: impl IntListOption, dim: impl IntListOption, norm: &str) -> Tensor {
self.f_fft_fftn(s, dim, norm).unwrap()
}
pub fn fft_fftn_out(
&self,
out: &Tensor,
s: impl IntListOption,
dim: impl IntListOption,
norm: &str,
) -> Tensor {
self.f_fft_fftn_out(out, s, dim, norm).unwrap()
}
pub fn fft_fftshift(&self, dim: impl IntListOption) -> Tensor {
self.f_fft_fftshift(dim).unwrap()
}
pub fn fft_hfft(&self, n: impl Into<Option<i64>>, dim: i64, norm: &str) -> Tensor {
self.f_fft_hfft(n, dim, norm).unwrap()
}
pub fn fft_hfft2(&self, s: impl IntListOption, dim: impl IntList, norm: &str) -> Tensor {
self.f_fft_hfft2(s, dim, norm).unwrap()
}
pub fn fft_hfft2_out(
&self,
out: &Tensor,
s: impl IntListOption,
dim: impl IntList,
norm: &str,
) -> Tensor {
self.f_fft_hfft2_out(out, s, dim, norm).unwrap()
}
pub fn fft_hfft_out(
&self,
out: &Tensor,
n: impl Into<Option<i64>>,
dim: i64,
norm: &str,
) -> Tensor {
self.f_fft_hfft_out(out, n, dim, norm).unwrap()
}
pub fn fft_hfftn(&self, s: impl IntListOption, dim: impl IntListOption, norm: &str) -> Tensor {
self.f_fft_hfftn(s, dim, norm).unwrap()
}
pub fn fft_hfftn_out(
&self,
out: &Tensor,
s: impl IntListOption,
dim: impl IntListOption,
norm: &str,
) -> Tensor {
self.f_fft_hfftn_out(out, s, dim, norm).unwrap()
}
pub fn fft_ifft(&self, n: impl Into<Option<i64>>, dim: i64, norm: &str) -> Tensor {
self.f_fft_ifft(n, dim, norm).unwrap()
}
pub fn fft_ifft2(&self, s: impl IntListOption, dim: impl IntList, norm: &str) -> Tensor {
self.f_fft_ifft2(s, dim, norm).unwrap()
}
pub fn fft_ifft2_out(
&self,
out: &Tensor,
s: impl IntListOption,
dim: impl IntList,
norm: &str,
) -> Tensor {
self.f_fft_ifft2_out(out, s, dim, norm).unwrap()
}
pub fn fft_ifft_out(
&self,
out: &Tensor,
n: impl Into<Option<i64>>,
dim: i64,
norm: &str,
) -> Tensor {
self.f_fft_ifft_out(out, n, dim, norm).unwrap()
}
pub fn fft_ifftn(&self, s: impl IntListOption, dim: impl IntListOption, norm: &str) -> Tensor {
self.f_fft_ifftn(s, dim, norm).unwrap()
}
pub fn fft_ifftn_out(
&self,
out: &Tensor,
s: impl IntListOption,
dim: impl IntListOption,
norm: &str,
) -> Tensor {
self.f_fft_ifftn_out(out, s, dim, norm).unwrap()
}
pub fn fft_ifftshift(&self, dim: impl IntListOption) -> Tensor {
self.f_fft_ifftshift(dim).unwrap()
}
pub fn fft_ihfft(&self, n: impl Into<Option<i64>>, dim: i64, norm: &str) -> Tensor {
self.f_fft_ihfft(n, dim, norm).unwrap()
}
pub fn fft_ihfft2(&self, s: impl IntListOption, dim: impl IntList, norm: &str) -> Tensor {
self.f_fft_ihfft2(s, dim, norm).unwrap()
}
pub fn fft_ihfft2_out(
&self,
out: &Tensor,
s: impl IntListOption,
dim: impl IntList,
norm: &str,
) -> Tensor {
self.f_fft_ihfft2_out(out, s, dim, norm).unwrap()
}
pub fn fft_ihfft_out(
&self,
out: &Tensor,
n: impl Into<Option<i64>>,
dim: i64,
norm: &str,
) -> Tensor {
self.f_fft_ihfft_out(out, n, dim, norm).unwrap()
}
pub fn fft_ihfftn(&self, s: impl IntListOption, dim: impl IntListOption, norm: &str) -> Tensor {
self.f_fft_ihfftn(s, dim, norm).unwrap()
}
pub fn fft_ihfftn_out(
&self,
out: &Tensor,
s: impl IntListOption,
dim: impl IntListOption,
norm: &str,
) -> Tensor {
self.f_fft_ihfftn_out(out, s, dim, norm).unwrap()
}
pub fn fft_irfft(&self, n: impl Into<Option<i64>>, dim: i64, norm: &str) -> Tensor {
self.f_fft_irfft(n, dim, norm).unwrap()
}
pub fn fft_irfft2(&self, s: impl IntListOption, dim: impl IntList, norm: &str) -> Tensor {
self.f_fft_irfft2(s, dim, norm).unwrap()
}
pub fn fft_irfft2_out(
&self,
out: &Tensor,
s: impl IntListOption,
dim: impl IntList,
norm: &str,
) -> Tensor {
self.f_fft_irfft2_out(out, s, dim, norm).unwrap()
}
pub fn fft_irfft_out(
&self,
out: &Tensor,
n: impl Into<Option<i64>>,
dim: i64,
norm: &str,
) -> Tensor {
self.f_fft_irfft_out(out, n, dim, norm).unwrap()
}
pub fn fft_irfftn(&self, s: impl IntListOption, dim: impl IntListOption, norm: &str) -> Tensor {
self.f_fft_irfftn(s, dim, norm).unwrap()
}
pub fn fft_irfftn_out(
&self,
out: &Tensor,
s: impl IntListOption,
dim: impl IntListOption,
norm: &str,
) -> Tensor {
self.f_fft_irfftn_out(out, s, dim, norm).unwrap()
}
pub fn fft_rfft(&self, n: impl Into<Option<i64>>, dim: i64, norm: &str) -> Tensor {
self.f_fft_rfft(n, dim, norm).unwrap()
}
pub fn fft_rfft2(&self, s: impl IntListOption, dim: impl IntList, norm: &str) -> Tensor {
self.f_fft_rfft2(s, dim, norm).unwrap()
}
pub fn fft_rfft2_out(
&self,
out: &Tensor,
s: impl IntListOption,
dim: impl IntList,
norm: &str,
) -> Tensor {
self.f_fft_rfft2_out(out, s, dim, norm).unwrap()
}
pub fn fft_rfft_out(
&self,
out: &Tensor,
n: impl Into<Option<i64>>,
dim: i64,
norm: &str,
) -> Tensor {
self.f_fft_rfft_out(out, n, dim, norm).unwrap()
}
pub fn fft_rfftfreq(n: i64, d: f64, options: (Kind, Device)) -> Tensor {
Tensor::f_fft_rfftfreq(n, d, options).unwrap()
}
pub fn fft_rfftfreq_out(out: &Tensor, n: i64, d: f64) -> Tensor {
Tensor::f_fft_rfftfreq_out(out, n, d).unwrap()
}
pub fn fft_rfftn(&self, s: impl IntListOption, dim: impl IntListOption, norm: &str) -> Tensor {
self.f_fft_rfftn(s, dim, norm).unwrap()
}
pub fn fft_rfftn_out(
&self,
out: &Tensor,
s: impl IntListOption,
dim: impl IntListOption,
norm: &str,
) -> Tensor {
self.f_fft_rfftn_out(out, s, dim, norm).unwrap()
}
pub fn fill<S: Into<Scalar>>(&self, value: S) -> Tensor {
self.f_fill(value).unwrap()
}
pub fn fill_<S: Into<Scalar>>(&mut self, value: S) -> Tensor {
self.f_fill_(value).unwrap()
}
pub fn fill_diagonal_<S: Into<Scalar>>(&mut self, fill_value: S, wrap: bool) -> Tensor {
self.f_fill_diagonal_(fill_value, wrap).unwrap()
}
pub fn fill_scalar_out<S: Into<Scalar>>(&self, out: &Tensor, value: S) -> Tensor {
self.f_fill_scalar_out(out, value).unwrap()
}
pub fn fill_tensor(&self, value: &Tensor) -> Tensor {
self.f_fill_tensor(value).unwrap()
}
pub fn fill_tensor_(&mut self, value: &Tensor) -> Tensor {
self.f_fill_tensor_(value).unwrap()
}
pub fn fill_tensor_out(&self, out: &Tensor, value: &Tensor) -> Tensor {
self.f_fill_tensor_out(out, value).unwrap()
}
pub fn fix(&self) -> Tensor {
self.f_fix().unwrap()
}
pub fn fix_(&mut self) -> Tensor {
self.f_fix_().unwrap()
}
pub fn fix_out(&self, out: &Tensor) -> Tensor {
self.f_fix_out(out).unwrap()
}
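/// Usage sketch for `flatten` (illustrative only): dimensions `start_dim`
/// through `end_dim` (inclusive, negative indices allowed) are merged into one.
///
/// ```ignore
/// use tch::{Device, Kind, Tensor};
/// let t = Tensor::zeros([2, 3, 4], (Kind::Float, Device::Cpu));
/// // Collapse everything after the leading dimension: [2, 3, 4] -> [2, 12].
/// let flat = t.flatten(1, -1);
/// assert_eq!(flat.size(), [2, 12]);
/// ```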
pub fn flatten(&self, start_dim: i64, end_dim: i64) -> Tensor {
self.f_flatten(start_dim, end_dim).unwrap()
}
pub fn flatten_dense_tensors<T: Borrow<Tensor>>(tensors: &[T]) -> Tensor {
Tensor::f_flatten_dense_tensors(tensors).unwrap()
}
pub fn flip(&self, dims: impl IntList) -> Tensor {
self.f_flip(dims).unwrap()
}
pub fn flip_out(&self, out: &Tensor, dims: impl IntList) -> Tensor {
self.f_flip_out(out, dims).unwrap()
}
pub fn fliplr(&self) -> Tensor {
self.f_fliplr().unwrap()
}
pub fn flipud(&self) -> Tensor {
self.f_flipud().unwrap()
}
pub fn float_power(&self, exponent: &Tensor) -> Tensor {
self.f_float_power(exponent).unwrap()
}
pub fn float_power_<S: Into<Scalar>>(&mut self, exponent: S) -> Tensor {
self.f_float_power_(exponent).unwrap()
}
pub fn float_power_scalar<S: Into<Scalar>>(self_scalar: S, exponent: &Tensor) -> Tensor {
Tensor::f_float_power_scalar(self_scalar, exponent).unwrap()
}
pub fn float_power_scalar_out<S: Into<Scalar>>(
out: &Tensor,
self_scalar: S,
exponent: &Tensor,
) -> Tensor {
Tensor::f_float_power_scalar_out(out, self_scalar, exponent).unwrap()
}
pub fn float_power_tensor_(&mut self, exponent: &Tensor) -> Tensor {
self.f_float_power_tensor_(exponent).unwrap()
}
pub fn float_power_tensor_scalar<S: Into<Scalar>>(&self, exponent: S) -> Tensor {
self.f_float_power_tensor_scalar(exponent).unwrap()
}
pub fn float_power_tensor_scalar_out<S: Into<Scalar>>(
&self,
out: &Tensor,
exponent: S,
) -> Tensor {
self.f_float_power_tensor_scalar_out(out, exponent).unwrap()
}
pub fn float_power_tensor_tensor_out(&self, out: &Tensor, exponent: &Tensor) -> Tensor {
self.f_float_power_tensor_tensor_out(out, exponent).unwrap()
}
pub fn floor(&self) -> Tensor {
self.f_floor().unwrap()
}
pub fn floor_(&mut self) -> Tensor {
self.f_floor_().unwrap()
}
pub fn floor_divide(&self, other: &Tensor) -> Tensor {
self.f_floor_divide(other).unwrap()
}
pub fn floor_divide_(&mut self, other: &Tensor) -> Tensor {
self.f_floor_divide_(other).unwrap()
}
pub fn floor_divide_out(&self, out: &Tensor, other: &Tensor) -> Tensor {
self.f_floor_divide_out(out, other).unwrap()
}
pub fn floor_divide_scalar<S: Into<Scalar>>(&self, other: S) -> Tensor {
self.f_floor_divide_scalar(other).unwrap()
}
pub fn floor_divide_scalar_<S: Into<Scalar>>(&mut self, other: S) -> Tensor {
self.f_floor_divide_scalar_(other).unwrap()
}
pub fn floor_out(&self, out: &Tensor) -> Tensor {
self.f_floor_out(out).unwrap()
}
pub fn fmax(&self, other: &Tensor) -> Tensor {
self.f_fmax(other).unwrap()
}
pub fn fmax_out(&self, out: &Tensor, other: &Tensor) -> Tensor {
self.f_fmax_out(out, other).unwrap()
}
pub fn fmin(&self, other: &Tensor) -> Tensor {
self.f_fmin(other).unwrap()
}
pub fn fmin_out(&self, out: &Tensor, other: &Tensor) -> Tensor {
self.f_fmin_out(out, other).unwrap()
}
pub fn fmod<S: Into<Scalar>>(&self, other: S) -> Tensor {
self.f_fmod(other).unwrap()
}
pub fn fmod_<S: Into<Scalar>>(&mut self, other: S) -> Tensor {
self.f_fmod_(other).unwrap()
}
pub fn fmod_scalar_out<S: Into<Scalar>>(&self, out: &Tensor, other: S) -> Tensor {
self.f_fmod_scalar_out(out, other).unwrap()
}
pub fn fmod_tensor(&self, other: &Tensor) -> Tensor {
self.f_fmod_tensor(other).unwrap()
}
pub fn fmod_tensor_(&mut self, other: &Tensor) -> Tensor {
self.f_fmod_tensor_(other).unwrap()
}
pub fn fmod_tensor_out(&self, out: &Tensor, other: &Tensor) -> Tensor {
self.f_fmod_tensor_out(out, other).unwrap()
}
pub fn frac(&self) -> Tensor {
self.f_frac().unwrap()
}
pub fn frac_(&mut self) -> Tensor {
self.f_frac_().unwrap()
}
pub fn frac_out(&self, out: &Tensor) -> Tensor {
self.f_frac_out(out).unwrap()
}
pub fn fractional_max_pool2d(
&self,
kernel_size: impl IntList,
output_size: impl IntList,
random_samples: &Tensor,
) -> (Tensor, Tensor) {
self.f_fractional_max_pool2d(kernel_size, output_size, random_samples).unwrap()
}
pub fn fractional_max_pool2d_backward(
&self,
grad_output: &Tensor,
kernel_size: impl IntList,
output_size: impl IntList,
indices: &Tensor,
) -> Tensor {
self.f_fractional_max_pool2d_backward(grad_output, kernel_size, output_size, indices)
.unwrap()
}
pub fn fractional_max_pool2d_backward_grad_input(
&self,
grad_input: &Tensor,
grad_output: &Tensor,
kernel_size: impl IntList,
output_size: impl IntList,
indices: &Tensor,
) -> Tensor {
self.f_fractional_max_pool2d_backward_grad_input(
grad_input,
grad_output,
kernel_size,
output_size,
indices,
)
.unwrap()
}
pub fn fractional_max_pool2d_output(
&self,
output: &Tensor,
indices: &Tensor,
kernel_size: impl IntList,
output_size: impl IntList,
random_samples: &Tensor,
) -> (Tensor, Tensor) {
self.f_fractional_max_pool2d_output(
output,
indices,
kernel_size,
output_size,
random_samples,
)
.unwrap()
}
pub fn fractional_max_pool3d(
&self,
kernel_size: impl IntList,
output_size: impl IntList,
random_samples: &Tensor,
) -> (Tensor, Tensor) {
self.f_fractional_max_pool3d(kernel_size, output_size, random_samples).unwrap()
}
pub fn fractional_max_pool3d_backward(
&self,
grad_output: &Tensor,
kernel_size: impl IntList,
output_size: impl IntList,
indices: &Tensor,
) -> Tensor {
self.f_fractional_max_pool3d_backward(grad_output, kernel_size, output_size, indices)
.unwrap()
}
pub fn fractional_max_pool3d_backward_grad_input(
&self,
grad_input: &Tensor,
grad_output: &Tensor,
kernel_size: impl IntList,
output_size: impl IntList,
indices: &Tensor,
) -> Tensor {
self.f_fractional_max_pool3d_backward_grad_input(
grad_input,
grad_output,
kernel_size,
output_size,
indices,
)
.unwrap()
}
pub fn fractional_max_pool3d_output(
&self,
output: &Tensor,
indices: &Tensor,
kernel_size: impl IntList,
output_size: impl IntList,
random_samples: &Tensor,
) -> (Tensor, Tensor) {
self.f_fractional_max_pool3d_output(
output,
indices,
kernel_size,
output_size,
random_samples,
)
.unwrap()
}
pub fn frexp(&self) -> (Tensor, Tensor) {
self.f_frexp().unwrap()
}
pub fn frexp_tensor_out(&self, mantissa: &Tensor, exponent: &Tensor) -> (Tensor, Tensor) {
self.f_frexp_tensor_out(mantissa, exponent).unwrap()
}
pub fn frobenius_norm(&self, dim: impl IntList, keepdim: bool) -> Tensor {
self.f_frobenius_norm(dim, keepdim).unwrap()
}
pub fn frobenius_norm_out(&self, out: &Tensor, dim: impl IntList, keepdim: bool) -> Tensor {
self.f_frobenius_norm_out(out, dim, keepdim).unwrap()
}
pub fn from_file(
filename: &str,
shared: bool,
size: impl Into<Option<i64>>,
options: (Kind, Device),
) -> Tensor {
Tensor::f_from_file(filename, shared, size, options).unwrap()
}
pub fn from_file_out(
out: &Tensor,
filename: &str,
shared: bool,
size: impl Into<Option<i64>>,
) -> Tensor {
Tensor::f_from_file_out(out, filename, shared, size).unwrap()
}
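/// Usage sketch for `full` (illustrative only): creates a tensor of the given
/// shape with every element set to `fill_value`.
///
/// ```ignore
/// use tch::{Device, Kind, Tensor};
/// // A 2x2 tensor filled with 3.14.
/// let t = Tensor::full([2, 2], 3.14, (Kind::Float, Device::Cpu));
/// ```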
pub fn full<S: Into<Scalar>>(
size: impl IntList,
fill_value: S,
options: (Kind, Device),
) -> Tensor {
Tensor::f_full(size, fill_value, options).unwrap()
}
pub fn full_like<S: Into<Scalar>>(&self, fill_value: S) -> Tensor {
self.f_full_like(fill_value).unwrap()
}
pub fn full_like_out<S: Into<Scalar>>(&self, out: &Tensor, fill_value: S) -> Tensor {
self.f_full_like_out(out, fill_value).unwrap()
}
pub fn full_out<S: Into<Scalar>>(out: &Tensor, size: impl IntList, fill_value: S) -> Tensor {
Tensor::f_full_out(out, size, fill_value).unwrap()
}
pub fn fused_moving_avg_obs_fake_quant(
&self,
observer_on: &Tensor,
fake_quant_on: &Tensor,
running_min: &Tensor,
running_max: &Tensor,
scale: &Tensor,
zero_point: &Tensor,
averaging_const: f64,
quant_min: i64,
quant_max: i64,
ch_axis: i64,
per_row_fake_quant: bool,
symmetric_quant: bool,
) -> Tensor {
self.f_fused_moving_avg_obs_fake_quant(
observer_on,
fake_quant_on,
running_min,
running_max,
scale,
zero_point,
averaging_const,
quant_min,
quant_max,
ch_axis,
per_row_fake_quant,
symmetric_quant,
)
.unwrap()
}
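/// Usage sketch for `gather` (illustrative only): for a 2-D input and
/// `dim == 1`, `out[i][j] = self[i][index[i][j]]`; `index` must have the same
/// number of dimensions as `self` and an integer dtype.
///
/// ```ignore
/// use tch::Tensor;
/// let t = Tensor::from_slice(&[1i64, 2, 3, 4]).reshape([2, 2]);
/// let idx = Tensor::from_slice(&[0i64, 0, 1, 0]).reshape([2, 2]);
/// // Rows stay fixed, columns are re-indexed: [[1, 1], [4, 3]].
/// let picked = t.gather(1, &idx, false);
/// ```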
pub fn gather(&self, dim: i64, index: &Tensor, sparse_grad: bool) -> Tensor {
self.f_gather(dim, index, sparse_grad).unwrap()
}
pub fn gather_backward(
&self,
grad: &Tensor,
dim: i64,
index: &Tensor,
sparse_grad: bool,
) -> Tensor {
self.f_gather_backward(grad, dim, index, sparse_grad).unwrap()
}
pub fn gather_out(&self, out: &Tensor, dim: i64, index: &Tensor, sparse_grad: bool) -> Tensor {
self.f_gather_out(out, dim, index, sparse_grad).unwrap()
}
pub fn gcd(&self, other: &Tensor) -> Tensor {
self.f_gcd(other).unwrap()
}
pub fn gcd_(&mut self, other: &Tensor) -> Tensor {
self.f_gcd_(other).unwrap()
}
pub fn gcd_out(&self, out: &Tensor, other: &Tensor) -> Tensor {
self.f_gcd_out(out, other).unwrap()
}
pub fn ge<S: Into<Scalar>>(&self, other: S) -> Tensor {
self.f_ge(other).unwrap()
}
pub fn ge_<S: Into<Scalar>>(&mut self, other: S) -> Tensor {
self.f_ge_(other).unwrap()
}
pub fn ge_scalar_out<S: Into<Scalar>>(&self, out: &Tensor, other: S) -> Tensor {
self.f_ge_scalar_out(out, other).unwrap()
}
pub fn ge_tensor(&self, other: &Tensor) -> Tensor {
self.f_ge_tensor(other).unwrap()
}
pub fn ge_tensor_(&mut self, other: &Tensor) -> Tensor {
self.f_ge_tensor_(other).unwrap()
}
pub fn ge_tensor_out(&self, out: &Tensor, other: &Tensor) -> Tensor {
self.f_ge_tensor_out(out, other).unwrap()
}
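/// Usage sketch for `gelu` (illustrative only): `approximate` is "none" for
/// the exact erf-based GELU or "tanh" for the tanh approximation.
///
/// ```ignore
/// use tch::{Device, Kind, Tensor};
/// let x = Tensor::randn([4], (Kind::Float, Device::Cpu));
/// let y = x.gelu("none");
/// ```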
pub fn gelu(&self, approximate: &str) -> Tensor {
self.f_gelu(approximate).unwrap()
}
pub fn gelu_(&mut self, approximate: &str) -> Tensor {
self.f_gelu_(approximate).unwrap()
}
pub fn gelu_backward(&self, grad_output: &Tensor, approximate: &str) -> Tensor {
self.f_gelu_backward(grad_output, approximate).unwrap()
}
pub fn gelu_backward_grad_input(
&self,
grad_input: &Tensor,
grad_output: &Tensor,
approximate: &str,
) -> Tensor {
self.f_gelu_backward_grad_input(grad_input, grad_output, approximate).unwrap()
}
pub fn gelu_out(&self, out: &Tensor, approximate: &str) -> Tensor {
self.f_gelu_out(out, approximate).unwrap()
}
pub fn geometric(&self, p: f64) -> Tensor {
self.f_geometric(p).unwrap()
}
pub fn geometric_(&mut self, p: f64) -> Tensor {
self.f_geometric_(p).unwrap()
}
pub fn geometric_out(&self, out: &Tensor, p: f64) -> Tensor {
self.f_geometric_out(out, p).unwrap()
}
pub fn geqrf(&self) -> (Tensor, Tensor) {
self.f_geqrf().unwrap()
}
pub fn geqrf_a(&self, a: &Tensor, tau: &Tensor) -> (Tensor, Tensor) {
self.f_geqrf_a(a, tau).unwrap()
}
pub fn ger(&self, vec2: &Tensor) -> Tensor {
self.f_ger(vec2).unwrap()
}
pub fn ger_out(&self, out: &Tensor, vec2: &Tensor) -> Tensor {
self.f_ger_out(out, vec2).unwrap()
}
pub fn glu(&self, dim: i64) -> Tensor {
self.f_glu(dim).unwrap()
}
pub fn glu_backward(&self, grad_output: &Tensor, dim: i64) -> Tensor {
self.f_glu_backward(grad_output, dim).unwrap()
}
pub fn glu_backward_grad_input(
&self,
grad_input: &Tensor,
grad_output: &Tensor,
dim: i64,
) -> Tensor {
self.f_glu_backward_grad_input(grad_input, grad_output, dim).unwrap()
}
pub fn glu_backward_jvp(
grad_x: &Tensor,
grad_glu: &Tensor,
x: &Tensor,
dgrad_glu: &Tensor,
dx: &Tensor,
dim: i64,
) -> Tensor {
Tensor::f_glu_backward_jvp(grad_x, grad_glu, x, dgrad_glu, dx, dim).unwrap()
}
pub fn glu_backward_jvp_out(
out: &Tensor,
grad_x: &Tensor,
grad_glu: &Tensor,
x: &Tensor,
dgrad_glu: &Tensor,
dx: &Tensor,
dim: i64,
) -> Tensor {
Tensor::f_glu_backward_jvp_out(out, grad_x, grad_glu, x, dgrad_glu, dx, dim).unwrap()
}
pub fn glu_jvp(glu: &Tensor, x: &Tensor, dx: &Tensor, dim: i64) -> Tensor {
Tensor::f_glu_jvp(glu, x, dx, dim).unwrap()
}
pub fn glu_jvp_out(out: &Tensor, glu: &Tensor, x: &Tensor, dx: &Tensor, dim: i64) -> Tensor {
Tensor::f_glu_jvp_out(out, glu, x, dx, dim).unwrap()
}
pub fn glu_out(&self, out: &Tensor, dim: i64) -> Tensor {
self.f_glu_out(out, dim).unwrap()
}
pub fn grad(&self) -> Tensor {
self.f_grad().unwrap()
}
pub fn greater<S: Into<Scalar>>(&self, other: S) -> Tensor {
self.f_greater(other).unwrap()
}
pub fn greater_<S: Into<Scalar>>(&mut self, other: S) -> Tensor {
self.f_greater_(other).unwrap()
}
pub fn greater_equal<S: Into<Scalar>>(&self, other: S) -> Tensor {
self.f_greater_equal(other).unwrap()
}
pub fn greater_equal_<S: Into<Scalar>>(&mut self, other: S) -> Tensor {
self.f_greater_equal_(other).unwrap()
}
pub fn greater_equal_scalar_out<S: Into<Scalar>>(&self, out: &Tensor, other: S) -> Tensor {
self.f_greater_equal_scalar_out(out, other).unwrap()
}
pub fn greater_equal_tensor(&self, other: &Tensor) -> Tensor {
self.f_greater_equal_tensor(other).unwrap()
}
pub fn greater_equal_tensor_(&mut self, other: &Tensor) -> Tensor {
self.f_greater_equal_tensor_(other).unwrap()
}
pub fn greater_equal_tensor_out(&self, out: &Tensor, other: &Tensor) -> Tensor {
self.f_greater_equal_tensor_out(out, other).unwrap()
}
pub fn greater_scalar_out<S: Into<Scalar>>(&self, out: &Tensor, other: S) -> Tensor {
self.f_greater_scalar_out(out, other).unwrap()
}
pub fn greater_tensor(&self, other: &Tensor) -> Tensor {
self.f_greater_tensor(other).unwrap()
}
pub fn greater_tensor_(&mut self, other: &Tensor) -> Tensor {
self.f_greater_tensor_(other).unwrap()
}
pub fn greater_tensor_out(&self, out: &Tensor, other: &Tensor) -> Tensor {
self.f_greater_tensor_out(out, other).unwrap()
}
pub fn grid_sampler(
&self,
grid: &Tensor,
interpolation_mode: i64,
padding_mode: i64,
align_corners: bool,
) -> Tensor {
self.f_grid_sampler(grid, interpolation_mode, padding_mode, align_corners).unwrap()
}
pub fn grid_sampler_2d(
&self,
grid: &Tensor,
interpolation_mode: i64,
padding_mode: i64,
align_corners: bool,
) -> Tensor {
self.f_grid_sampler_2d(grid, interpolation_mode, padding_mode, align_corners).unwrap()
}
pub fn grid_sampler_2d_out(
&self,
out: &Tensor,
grid: &Tensor,
interpolation_mode: i64,
padding_mode: i64,
align_corners: bool,
) -> Tensor {
self.f_grid_sampler_2d_out(out, grid, interpolation_mode, padding_mode, align_corners)
.unwrap()
}
pub fn grid_sampler_3d(
&self,
grid: &Tensor,
interpolation_mode: i64,
padding_mode: i64,
align_corners: bool,
) -> Tensor {
self.f_grid_sampler_3d(grid, interpolation_mode, padding_mode, align_corners).unwrap()
}
pub fn grid_sampler_3d_out(
&self,
out: &Tensor,
grid: &Tensor,
interpolation_mode: i64,
padding_mode: i64,
align_corners: bool,
) -> Tensor {
self.f_grid_sampler_3d_out(out, grid, interpolation_mode, padding_mode, align_corners)
.unwrap()
}
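/// Usage sketch for `group_norm` (illustrative only): the channel dimension
/// (dim 1) must be divisible by `num_groups`; `weight` and `bias` are optional
/// per-channel affine parameters.
///
/// ```ignore
/// use tch::{Device, Kind, Tensor};
/// let x = Tensor::randn([8, 6, 16, 16], (Kind::Float, Device::Cpu));
/// // 3 groups of 2 channels each, no affine parameters.
/// let y = x.group_norm(3, None::<Tensor>, None::<Tensor>, 1e-5, false);
/// ```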
pub fn group_norm<T: Borrow<Tensor>>(
&self,
num_groups: i64,
weight: Option<T>,
bias: Option<T>,
eps: f64,
cudnn_enabled: bool,
) -> Tensor {
self.f_group_norm(num_groups, weight, bias, eps, cudnn_enabled).unwrap()
}
pub fn gru<T: Borrow<Tensor>>(
&self,
hx: &Tensor,
params: &[T],
has_biases: bool,
num_layers: i64,
dropout: f64,
train: bool,
bidirectional: bool,
batch_first: bool,
) -> (Tensor, Tensor) {
self.f_gru(hx, params, has_biases, num_layers, dropout, train, bidirectional, batch_first)
.unwrap()
}
pub fn gru_cell<T: Borrow<Tensor>>(
&self,
hx: &Tensor,
w_ih: &Tensor,
w_hh: &Tensor,
b_ih: Option<T>,
b_hh: Option<T>,
) -> Tensor {
self.f_gru_cell(hx, w_ih, w_hh, b_ih, b_hh).unwrap()
}
pub fn gru_data<T: Borrow<Tensor>>(
data: &Tensor,
batch_sizes: &Tensor,
hx: &Tensor,
params: &[T],
has_biases: bool,
num_layers: i64,
dropout: f64,
train: bool,
bidirectional: bool,
) -> (Tensor, Tensor) {
Tensor::f_gru_data(
data,
batch_sizes,
hx,
params,
has_biases,
num_layers,
dropout,
train,
bidirectional,
)
.unwrap()
}
pub fn gt<S: Into<Scalar>>(&self, other: S) -> Tensor {
self.f_gt(other).unwrap()
}
pub fn gt_<S: Into<Scalar>>(&mut self, other: S) -> Tensor {
self.f_gt_(other).unwrap()
}
pub fn gt_scalar_out<S: Into<Scalar>>(&self, out: &Tensor, other: S) -> Tensor {
self.f_gt_scalar_out(out, other).unwrap()
}
pub fn gt_tensor(&self, other: &Tensor) -> Tensor {
self.f_gt_tensor(other).unwrap()
}
pub fn gt_tensor_(&mut self, other: &Tensor) -> Tensor {
self.f_gt_tensor_(other).unwrap()
}
pub fn gt_tensor_out(&self, out: &Tensor, other: &Tensor) -> Tensor {
self.f_gt_tensor_out(out, other).unwrap()
}
pub fn hamming_window(window_length: i64, options: (Kind, Device)) -> Tensor {
Tensor::f_hamming_window(window_length, options).unwrap()
}
pub fn hamming_window_out(out: &Tensor, window_length: i64) -> Tensor {
Tensor::f_hamming_window_out(out, window_length).unwrap()
}
pub fn hamming_window_periodic(
window_length: i64,
periodic: bool,
options: (Kind, Device),
) -> Tensor {
Tensor::f_hamming_window_periodic(window_length, periodic, options).unwrap()
}
pub fn hamming_window_periodic_alpha(
window_length: i64,
periodic: bool,
alpha: f64,
options: (Kind, Device),
) -> Tensor {
Tensor::f_hamming_window_periodic_alpha(window_length, periodic, alpha, options).unwrap()
}
pub fn hamming_window_periodic_alpha_beta(
window_length: i64,
periodic: bool,
alpha: f64,
beta: f64,
options: (Kind, Device),
) -> Tensor {
Tensor::f_hamming_window_periodic_alpha_beta(window_length, periodic, alpha, beta, options)
.unwrap()
}
pub fn hamming_window_periodic_alpha_beta_out(
out: &Tensor,
window_length: i64,
periodic: bool,
alpha: f64,
beta: f64,
) -> Tensor {
Tensor::f_hamming_window_periodic_alpha_beta_out(out, window_length, periodic, alpha, beta)
.unwrap()
}
pub fn hamming_window_periodic_alpha_out(
out: &Tensor,
window_length: i64,
periodic: bool,
alpha: f64,
) -> Tensor {
Tensor::f_hamming_window_periodic_alpha_out(out, window_length, periodic, alpha).unwrap()
}
pub fn hamming_window_periodic_out(out: &Tensor, window_length: i64, periodic: bool) -> Tensor {
Tensor::f_hamming_window_periodic_out(out, window_length, periodic).unwrap()
}
pub fn hann_window(window_length: i64, options: (Kind, Device)) -> Tensor {
Tensor::f_hann_window(window_length, options).unwrap()
}
pub fn hann_window_out(out: &Tensor, window_length: i64) -> Tensor {
Tensor::f_hann_window_out(out, window_length).unwrap()
}
pub fn hann_window_periodic(
window_length: i64,
periodic: bool,
options: (Kind, Device),
) -> Tensor {
Tensor::f_hann_window_periodic(window_length, periodic, options).unwrap()
}
pub fn hann_window_periodic_out(out: &Tensor, window_length: i64, periodic: bool) -> Tensor {
Tensor::f_hann_window_periodic_out(out, window_length, periodic).unwrap()
}
pub fn hardshrink(&self) -> Tensor {
self.f_hardshrink().unwrap()
}
pub fn hardshrink_backward<S: Into<Scalar>>(&self, grad_out: &Tensor, lambd: S) -> Tensor {
self.f_hardshrink_backward(grad_out, lambd).unwrap()
}
pub fn hardshrink_backward_grad_input<S: Into<Scalar>>(
&self,
grad_input: &Tensor,
grad_out: &Tensor,
lambd: S,
) -> Tensor {
self.f_hardshrink_backward_grad_input(grad_input, grad_out, lambd).unwrap()
}
pub fn hardshrink_out(&self, out: &Tensor) -> Tensor {
self.f_hardshrink_out(out).unwrap()
}
pub fn hardsigmoid(&self) -> Tensor {
self.f_hardsigmoid().unwrap()
}
pub fn hardsigmoid_(&mut self) -> Tensor {
self.f_hardsigmoid_().unwrap()
}
pub fn hardsigmoid_backward(&self, grad_output: &Tensor) -> Tensor {
self.f_hardsigmoid_backward(grad_output).unwrap()
}
pub fn hardsigmoid_backward_grad_input(
&self,
grad_input: &Tensor,
grad_output: &Tensor,
) -> Tensor {
self.f_hardsigmoid_backward_grad_input(grad_input, grad_output).unwrap()
}
pub fn hardsigmoid_out(&self, out: &Tensor) -> Tensor {
self.f_hardsigmoid_out(out).unwrap()
}
pub fn hardswish(&self) -> Tensor {
self.f_hardswish().unwrap()
}
pub fn hardswish_(&mut self) -> Tensor {
self.f_hardswish_().unwrap()
}
pub fn hardswish_backward(&self, grad_output: &Tensor) -> Tensor {
self.f_hardswish_backward(grad_output).unwrap()
}
pub fn hardswish_backward_out(&self, out: &Tensor, grad_output: &Tensor) -> Tensor {
self.f_hardswish_backward_out(out, grad_output).unwrap()
}
pub fn hardswish_out(&self, out: &Tensor) -> Tensor {
self.f_hardswish_out(out).unwrap()
}
pub fn hardtanh(&self) -> Tensor {
self.f_hardtanh().unwrap()
}
pub fn hardtanh_(&mut self) -> Tensor {
self.f_hardtanh_().unwrap()
}
pub fn hardtanh_backward<S: Into<Scalar>>(
&self,
grad_output: &Tensor,
min_val: S,
max_val: S,
) -> Tensor {
self.f_hardtanh_backward(grad_output, min_val, max_val).unwrap()
}
pub fn hardtanh_backward_grad_input<S: Into<Scalar>>(
&self,
grad_input: &Tensor,
grad_output: &Tensor,
min_val: S,
max_val: S,
) -> Tensor {
self.f_hardtanh_backward_grad_input(grad_input, grad_output, min_val, max_val).unwrap()
}
pub fn hardtanh_out(&self, out: &Tensor) -> Tensor {
self.f_hardtanh_out(out).unwrap()
}
pub fn heaviside(&self, values: &Tensor) -> Tensor {
self.f_heaviside(values).unwrap()
}
pub fn heaviside_(&mut self, values: &Tensor) -> Tensor {
self.f_heaviside_(values).unwrap()
}
pub fn heaviside_out(&self, out: &Tensor, values: &Tensor) -> Tensor {
self.f_heaviside_out(out, values).unwrap()
}
pub fn hinge_embedding_loss(
&self,
target: &Tensor,
margin: f64,
reduction: crate::Reduction,
) -> Tensor {
self.f_hinge_embedding_loss(target, margin, reduction).unwrap()
}
pub fn histc(&self, bins: i64) -> Tensor {
self.f_histc(bins).unwrap()
}
pub fn histc_out(&self, out: &Tensor, bins: i64) -> Tensor {
self.f_histc_out(out, bins).unwrap()
}
pub fn histogram<T: Borrow<Tensor>>(
&self,
bins: &Tensor,
weight: Option<T>,
density: bool,
) -> (Tensor, Tensor) {
self.f_histogram(bins, weight, density).unwrap()
}
pub fn histogram_bin_ct<T: Borrow<Tensor>>(
&self,
bins: i64,
range: impl DoubleList,
weight: Option<T>,
density: bool,
) -> (Tensor, Tensor) {
self.f_histogram_bin_ct(bins, range, weight, density).unwrap()
}
pub fn histogram_bin_ct_out<T: Borrow<Tensor>>(
&self,
hist: &Tensor,
bin_edges: &Tensor,
bins: i64,
range: impl DoubleList,
weight: Option<T>,
density: bool,
) -> (Tensor, Tensor) {
self.f_histogram_bin_ct_out(hist, bin_edges, bins, range, weight, density).unwrap()
}
pub fn histogram_bins_tensor_out<T: Borrow<Tensor>>(
&self,
hist: &Tensor,
bin_edges: &Tensor,
bins: &Tensor,
weight: Option<T>,
density: bool,
) -> (Tensor, Tensor) {
self.f_histogram_bins_tensor_out(hist, bin_edges, bins, weight, density).unwrap()
}
pub fn hsplit(&self, sections: i64) -> Vec<Tensor> {
self.f_hsplit(sections).unwrap()
}
pub fn hsplit_array(&self, indices: impl IntList) -> Vec<Tensor> {
self.f_hsplit_array(indices).unwrap()
}
pub fn hspmm(mat1: &Tensor, mat2: &Tensor) -> Tensor {
Tensor::f_hspmm(mat1, mat2).unwrap()
}
pub fn hspmm_out(out: &Tensor, mat1: &Tensor, mat2: &Tensor) -> Tensor {
Tensor::f_hspmm_out(out, mat1, mat2).unwrap()
}
pub fn hstack<T: Borrow<Tensor>>(tensors: &[T]) -> Tensor {
Tensor::f_hstack(tensors).unwrap()
}
pub fn hstack_out<T: Borrow<Tensor>>(out: &Tensor, tensors: &[T]) -> Tensor {
Tensor::f_hstack_out(out, tensors).unwrap()
}
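/// Usage sketch for `huber_loss` (illustrative only): `reduction` comes from
/// `tch::Reduction` and `delta` is the threshold where the loss switches from
/// quadratic to linear.
///
/// ```ignore
/// use tch::{Device, Kind, Reduction, Tensor};
/// let pred = Tensor::randn([10], (Kind::Float, Device::Cpu));
/// let target = Tensor::zeros([10], (Kind::Float, Device::Cpu));
/// let loss = pred.huber_loss(&target, Reduction::Mean, 1.0);
/// ```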
pub fn huber_loss(&self, target: &Tensor, reduction: crate::Reduction, delta: f64) -> Tensor {
self.f_huber_loss(target, reduction, delta).unwrap()
}
pub fn huber_loss_backward(
&self,
grad_output: &Tensor,
target: &Tensor,
reduction: crate::Reduction,
delta: f64,
) -> Tensor {
self.f_huber_loss_backward(grad_output, target, reduction, delta).unwrap()
}
pub fn huber_loss_backward_out(
&self,
grad_input: &Tensor,
grad_output: &Tensor,
target: &Tensor,
reduction: crate::Reduction,
delta: f64,
) -> Tensor {
self.f_huber_loss_backward_out(grad_input, grad_output, target, reduction, delta).unwrap()
}
pub fn huber_loss_out(
&self,
out: &Tensor,
target: &Tensor,
reduction: crate::Reduction,
delta: f64,
) -> Tensor {
self.f_huber_loss_out(out, target, reduction, delta).unwrap()
}
pub fn hypot(&self, other: &Tensor) -> Tensor {
self.f_hypot(other).unwrap()
}
pub fn hypot_(&mut self, other: &Tensor) -> Tensor {
self.f_hypot_(other).unwrap()
}
pub fn hypot_out(&self, out: &Tensor, other: &Tensor) -> Tensor {
self.f_hypot_out(out, other).unwrap()
}
pub fn i0(&self) -> Tensor {
self.f_i0().unwrap()
}
pub fn i0_(&mut self) -> Tensor {
self.f_i0_().unwrap()
}
pub fn i0_out(&self, out: &Tensor) -> Tensor {
self.f_i0_out(out).unwrap()
}
pub fn igamma(&self, other: &Tensor) -> Tensor {
self.f_igamma(other).unwrap()
}
pub fn igamma_(&mut self, other: &Tensor) -> Tensor {
self.f_igamma_(other).unwrap()
}
pub fn igamma_out(&self, out: &Tensor, other: &Tensor) -> Tensor {
self.f_igamma_out(out, other).unwrap()
}
pub fn igammac(&self, other: &Tensor) -> Tensor {
self.f_igammac(other).unwrap()
}
pub fn igammac_(&mut self, other: &Tensor) -> Tensor {
self.f_igammac_(other).unwrap()
}
pub fn igammac_out(&self, out: &Tensor, other: &Tensor) -> Tensor {
self.f_igammac_out(out, other).unwrap()
}
pub fn im2col(
&self,
kernel_size: impl IntList,
dilation: impl IntList,
padding: impl IntList,
stride: impl IntList,
) -> Tensor {
self.f_im2col(kernel_size, dilation, padding, stride).unwrap()
}
pub fn im2col_out(
&self,
out: &Tensor,
kernel_size: impl IntList,
dilation: impl IntList,
padding: impl IntList,
stride: impl IntList,
) -> Tensor {
self.f_im2col_out(out, kernel_size, dilation, padding, stride).unwrap()
}
pub fn imag(&self) -> Tensor {
self.f_imag().unwrap()
}
pub fn index<T: Borrow<Tensor>>(&self, indices: &[Option<T>]) -> Tensor {
self.f_index(indices).unwrap()
}
pub fn index_add(&self, dim: i64, index: &Tensor, source: &Tensor) -> Tensor {
self.f_index_add(dim, index, source).unwrap()
}
pub fn index_add_(&mut self, dim: i64, index: &Tensor, source: &Tensor) -> Tensor {
self.f_index_add_(dim, index, source).unwrap()
}
pub fn index_add_out(&self, out: &Tensor, dim: i64, index: &Tensor, source: &Tensor) -> Tensor {
self.f_index_add_out(out, dim, index, source).unwrap()
}
pub fn index_copy(&self, dim: i64, index: &Tensor, source: &Tensor) -> Tensor {
self.f_index_copy(dim, index, source).unwrap()
}
pub fn index_copy_(&mut self, dim: i64, index: &Tensor, source: &Tensor) -> Tensor {
self.f_index_copy_(dim, index, source).unwrap()
}
pub fn index_copy_out(
&self,
out: &Tensor,
dim: i64,
index: &Tensor,
source: &Tensor,
) -> Tensor {
self.f_index_copy_out(out, dim, index, source).unwrap()
}
pub fn index_fill<S: Into<Scalar>>(&self, dim: i64, index: &Tensor, value: S) -> Tensor {
self.f_index_fill(dim, index, value).unwrap()
}
pub fn index_fill_<S: Into<Scalar>>(&mut self, dim: i64, index: &Tensor, value: S) -> Tensor {
self.f_index_fill_(dim, index, value).unwrap()
}
pub fn index_fill_int_scalar_out<S: Into<Scalar>>(
&self,
out: &Tensor,
dim: i64,
index: &Tensor,
value: S,
) -> Tensor {
self.f_index_fill_int_scalar_out(out, dim, index, value).unwrap()
}
pub fn index_fill_int_tensor(&self, dim: i64, index: &Tensor, value: &Tensor) -> Tensor {
self.f_index_fill_int_tensor(dim, index, value).unwrap()
}
pub fn index_fill_int_tensor_(&mut self, dim: i64, index: &Tensor, value: &Tensor) -> Tensor {
self.f_index_fill_int_tensor_(dim, index, value).unwrap()
}
pub fn index_fill_int_tensor_out(
&self,
out: &Tensor,
dim: i64,
index: &Tensor,
value: &Tensor,
) -> Tensor {
self.f_index_fill_int_tensor_out(out, dim, index, value).unwrap()
}
pub fn index_put<T: Borrow<Tensor>>(
&self,
indices: &[Option<T>],
values: &Tensor,
accumulate: bool,
) -> Tensor {
self.f_index_put(indices, values, accumulate).unwrap()
}
pub fn index_put_<T: Borrow<Tensor>>(
&mut self,
indices: &[Option<T>],
values: &Tensor,
accumulate: bool,
) -> Tensor {
self.f_index_put_(indices, values, accumulate).unwrap()
}
pub fn index_put_out<T: Borrow<Tensor>>(
&self,
out: &Tensor,
indices: &[Option<T>],
values: &Tensor,
accumulate: bool,
) -> Tensor {
self.f_index_put_out(out, indices, values, accumulate).unwrap()
}
pub fn index_reduce(
&self,
dim: i64,
index: &Tensor,
source: &Tensor,
reduce: &str,
include_self: bool,
) -> Tensor {
self.f_index_reduce(dim, index, source, reduce, include_self).unwrap()
}
pub fn index_reduce_(
&mut self,
dim: i64,
index: &Tensor,
source: &Tensor,
reduce: &str,
include_self: bool,
) -> Tensor {
self.f_index_reduce_(dim, index, source, reduce, include_self).unwrap()
}
pub fn index_reduce_out(
&self,
out: &Tensor,
dim: i64,
index: &Tensor,
source: &Tensor,
reduce: &str,
include_self: bool,
) -> Tensor {
self.f_index_reduce_out(out, dim, index, source, reduce, include_self).unwrap()
}
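/// Usage sketch for `index_select` (illustrative only): `index` is a 1-D
/// integer tensor; dimension `dim` of the result is re-ordered or subset
/// according to it while every other dimension is kept.
///
/// ```ignore
/// use tch::{Device, Kind, Tensor};
/// let t = Tensor::arange(6, (Kind::Float, Device::Cpu)).reshape([3, 2]);
/// let idx = Tensor::from_slice(&[2i64, 0]);
/// // Pick rows 2 and 0: [[4, 5], [0, 1]].
/// let rows = t.index_select(0, &idx);
/// ```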
pub fn index_select(&self, dim: i64, index: &Tensor) -> Tensor {
self.f_index_select(dim, index).unwrap()
}
pub fn index_select_backward(
grad: &Tensor,
self_sizes: impl IntList,
dim: i64,
index: &Tensor,
) -> Tensor {
Tensor::f_index_select_backward(grad, self_sizes, dim, index).unwrap()
}
pub fn index_select_out(&self, out: &Tensor, dim: i64, index: &Tensor) -> Tensor {
self.f_index_select_out(out, dim, index).unwrap()
}
pub fn index_tensor_out<T: Borrow<Tensor>>(
&self,
out: &Tensor,
indices: &[Option<T>],
) -> Tensor {
self.f_index_tensor_out(out, indices).unwrap()
}
pub fn indices(&self) -> Tensor {
self.f_indices().unwrap()
}
pub fn indices_copy(&self) -> Tensor {
self.f_indices_copy().unwrap()
}
pub fn indices_copy_out(&self, out: &Tensor) -> Tensor {
self.f_indices_copy_out(out).unwrap()
}
pub fn infinitely_differentiable_gelu_backward(&self, grad: &Tensor) -> Tensor {
self.f_infinitely_differentiable_gelu_backward(grad).unwrap()
}
pub fn inner(&self, other: &Tensor) -> Tensor {
self.f_inner(other).unwrap()
}
pub fn inner_out(&self, out: &Tensor, other: &Tensor) -> Tensor {
self.f_inner_out(out, other).unwrap()
}
pub fn instance_norm<T: Borrow<Tensor>>(
&self,
weight: Option<T>,
bias: Option<T>,
running_mean: Option<T>,
running_var: Option<T>,
use_input_stats: bool,
momentum: f64,
eps: f64,
cudnn_enabled: bool,
) -> Tensor {
self.f_instance_norm(
weight,
bias,
running_mean,
running_var,
use_input_stats,
momentum,
eps,
cudnn_enabled,
)
.unwrap()
}
pub fn int_repr(&self) -> Tensor {
self.f_int_repr().unwrap()
}
pub fn int_repr_out(&self, out: &Tensor) -> Tensor {
self.f_int_repr_out(out).unwrap()
}
pub fn inverse(&self) -> Tensor {
self.f_inverse().unwrap()
}
pub fn inverse_out(&self, out: &Tensor) -> Tensor {
self.f_inverse_out(out).unwrap()
}
pub fn is_coalesced(&self) -> bool {
self.f_is_coalesced().unwrap()
}
pub fn is_complex(&self) -> bool {
self.f_is_complex().unwrap()
}
pub fn is_conj(&self) -> bool {
self.f_is_conj().unwrap()
}
pub fn is_distributed(&self) -> bool {
self.f_is_distributed().unwrap()
}
pub fn is_floating_point(&self) -> bool {
self.f_is_floating_point().unwrap()
}
pub fn is_inference(&self) -> bool {
self.f_is_inference().unwrap()
}
pub fn is_leaf(&self) -> bool {
self.f_is_leaf().unwrap()
}
pub fn is_neg(&self) -> bool {
self.f_is_neg().unwrap()
}
pub fn is_nonzero(&self) -> bool {
self.f_is_nonzero().unwrap()
}
pub fn is_pinned(&self, device: Device) -> bool {
self.f_is_pinned(device).unwrap()
}
pub fn is_same_size(&self, other: &Tensor) -> bool {
self.f_is_same_size(other).unwrap()
}
pub fn is_set_to(&self, tensor: &Tensor) -> bool {
self.f_is_set_to(tensor).unwrap()
}
pub fn is_signed(&self) -> bool {
self.f_is_signed().unwrap()
}
pub fn is_vulkan_available() -> bool {
Tensor::f_is_vulkan_available().unwrap()
}
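/// Usage sketch for `isclose` (illustrative only): returns a boolean tensor
/// that is true where `|self - other| <= atol + rtol * |other|` (and where
/// both sides are NaN if `equal_nan` is set).
///
/// ```ignore
/// use tch::Tensor;
/// let a = Tensor::from_slice(&[1.0f64, 2.0]);
/// let b = Tensor::from_slice(&[1.0f64, 2.0 + 1e-9]);
/// // rtol = 1e-5, atol = 1e-8, NaNs are not considered equal.
/// let close = a.isclose(&b, 1e-5, 1e-8, false);
/// ```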
pub fn isclose(&self, other: &Tensor, rtol: f64, atol: f64, equal_nan: bool) -> Tensor {
self.f_isclose(other, rtol, atol, equal_nan).unwrap()
}
pub fn isfinite(&self) -> Tensor {
self.f_isfinite().unwrap()
}
pub fn isin(
elements: &Tensor,
test_elements: &Tensor,
assume_unique: bool,
invert: bool,
) -> Tensor {
Tensor::f_isin(elements, test_elements, assume_unique, invert).unwrap()
}
pub fn isin_scalar_tensor<S: Into<Scalar>>(
element: S,
test_elements: &Tensor,
assume_unique: bool,
invert: bool,
) -> Tensor {
Tensor::f_isin_scalar_tensor(element, test_elements, assume_unique, invert).unwrap()
}
pub fn isin_scalar_tensor_out<S: Into<Scalar>>(
out: &Tensor,
element: S,
test_elements: &Tensor,
assume_unique: bool,
invert: bool,
) -> Tensor {
Tensor::f_isin_scalar_tensor_out(out, element, test_elements, assume_unique, invert)
.unwrap()
}
pub fn isin_tensor_scalar<S: Into<Scalar>>(
elements: &Tensor,
test_element: S,
assume_unique: bool,
invert: bool,
) -> Tensor {
Tensor::f_isin_tensor_scalar(elements, test_element, assume_unique, invert).unwrap()
}
pub fn isin_tensor_scalar_out<S: Into<Scalar>>(
out: &Tensor,
elements: &Tensor,
test_element: S,
assume_unique: bool,
invert: bool,
) -> Tensor {
Tensor::f_isin_tensor_scalar_out(out, elements, test_element, assume_unique, invert)
.unwrap()
}
pub fn isin_tensor_tensor_out(
out: &Tensor,
elements: &Tensor,
test_elements: &Tensor,
assume_unique: bool,
invert: bool,
) -> Tensor {
Tensor::f_isin_tensor_tensor_out(out, elements, test_elements, assume_unique, invert)
.unwrap()
}
pub fn isinf(&self) -> Tensor {
self.f_isinf().unwrap()
}
pub fn isinf_out(&self, out: &Tensor) -> Tensor {
self.f_isinf_out(out).unwrap()
}
pub fn isnan(&self) -> Tensor {
self.f_isnan().unwrap()
}
pub fn isnan_out(&self, out: &Tensor) -> Tensor {
self.f_isnan_out(out).unwrap()
}
pub fn isneginf(&self) -> Tensor {
self.f_isneginf().unwrap()
}
pub fn isneginf_out(&self, out: &Tensor) -> Tensor {
self.f_isneginf_out(out).unwrap()
}
pub fn isposinf(&self) -> Tensor {
self.f_isposinf().unwrap()
}
pub fn isposinf_out(&self, out: &Tensor) -> Tensor {
self.f_isposinf_out(out).unwrap()
}
pub fn isreal(&self) -> Tensor {
self.f_isreal().unwrap()
}
pub fn istft<T: Borrow<Tensor>>(
&self,
n_fft: i64,
hop_length: impl Into<Option<i64>>,
win_length: impl Into<Option<i64>>,
window: Option<T>,
center: bool,
normalized: bool,
onesided: bool,
length: impl Into<Option<i64>>,
return_complex: bool,
) -> Tensor {
self.f_istft(
n_fft,
hop_length,
win_length,
window,
center,
normalized,
onesided,
length,
return_complex,
)
.unwrap()
}
pub fn kaiser_window(window_length: i64, options: (Kind, Device)) -> Tensor {
Tensor::f_kaiser_window(window_length, options).unwrap()
}
pub fn kaiser_window_beta(
window_length: i64,
periodic: bool,
beta: f64,
options: (Kind, Device),
) -> Tensor {
Tensor::f_kaiser_window_beta(window_length, periodic, beta, options).unwrap()
}
pub fn kaiser_window_beta_out(
out: &Tensor,
window_length: i64,
periodic: bool,
beta: f64,
) -> Tensor {
Tensor::f_kaiser_window_beta_out(out, window_length, periodic, beta).unwrap()
}
pub fn kaiser_window_out(out: &Tensor, window_length: i64) -> Tensor {
Tensor::f_kaiser_window_out(out, window_length).unwrap()
}
pub fn kaiser_window_periodic(
window_length: i64,
periodic: bool,
options: (Kind, Device),
) -> Tensor {
Tensor::f_kaiser_window_periodic(window_length, periodic, options).unwrap()
}
pub fn kaiser_window_periodic_out(out: &Tensor, window_length: i64, periodic: bool) -> Tensor {
Tensor::f_kaiser_window_periodic_out(out, window_length, periodic).unwrap()
}
pub fn kl_div(&self, target: &Tensor, reduction: crate::Reduction, log_target: bool) -> Tensor {
self.f_kl_div(target, reduction, log_target).unwrap()
}
pub fn kron(&self, other: &Tensor) -> Tensor {
self.f_kron(other).unwrap()
}
pub fn kron_out(&self, out: &Tensor, other: &Tensor) -> Tensor {
self.f_kron_out(out, other).unwrap()
}
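/// Usage sketch for `kthvalue` (illustrative only): `k` is 1-based, so `k = 1`
/// is the smallest element along `dim`; the second tensor holds the indices.
///
/// ```ignore
/// use tch::Tensor;
/// let t = Tensor::from_slice(&[3.0f64, 1.0, 2.0]);
/// // Second-smallest value along dimension 0: (2.0, index 2).
/// let (value, index) = t.kthvalue(2, 0, false);
/// ```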
pub fn kthvalue(&self, k: i64, dim: i64, keepdim: bool) -> (Tensor, Tensor) {
self.f_kthvalue(k, dim, keepdim).unwrap()
}
pub fn kthvalue_values(
&self,
values: &Tensor,
indices: &Tensor,
k: i64,
dim: i64,
keepdim: bool,
) -> (Tensor, Tensor) {
self.f_kthvalue_values(values, indices, k, dim, keepdim).unwrap()
}
pub fn l1_loss(&self, target: &Tensor, reduction: crate::Reduction) -> Tensor {
self.f_l1_loss(target, reduction).unwrap()
}
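/// Usage sketch for `layer_norm` (illustrative only): `normalized_shape` must
/// match the trailing dimensions of the input; `weight`/`bias` are optional
/// affine parameters of that same shape.
///
/// ```ignore
/// use tch::{Device, Kind, Tensor};
/// let x = Tensor::randn([4, 10, 32], (Kind::Float, Device::Cpu));
/// // Normalize over the last dimension only, without affine parameters.
/// let y = x.layer_norm([32], None::<Tensor>, None::<Tensor>, 1e-5, true);
/// ```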
pub fn layer_norm<T: Borrow<Tensor>>(
&self,
normalized_shape: impl IntList,
weight: Option<T>,
bias: Option<T>,
eps: f64,
cudnn_enable: bool,
) -> Tensor {
self.f_layer_norm(normalized_shape, weight, bias, eps, cudnn_enable).unwrap()
}
pub fn lcm(&self, other: &Tensor) -> Tensor {
self.f_lcm(other).unwrap()
}
pub fn lcm_(&mut self, other: &Tensor) -> Tensor {
self.f_lcm_(other).unwrap()
}
pub fn lcm_out(&self, out: &Tensor, other: &Tensor) -> Tensor {
self.f_lcm_out(out, other).unwrap()
}
pub fn ldexp(&self, other: &Tensor) -> Tensor {
self.f_ldexp(other).unwrap()
}
pub fn ldexp_(&mut self, other: &Tensor) -> Tensor {
self.f_ldexp_(other).unwrap()
}
pub fn ldexp_out(&self, out: &Tensor, other: &Tensor) -> Tensor {
self.f_ldexp_out(out, other).unwrap()
}
pub fn le<S: Into<Scalar>>(&self, other: S) -> Tensor {
self.f_le(other).unwrap()
}
pub fn le_<S: Into<Scalar>>(&mut self, other: S) -> Tensor {
self.f_le_(other).unwrap()
}
pub fn le_scalar_out<S: Into<Scalar>>(&self, out: &Tensor, other: S) -> Tensor {
self.f_le_scalar_out(out, other).unwrap()
}
pub fn le_tensor(&self, other: &Tensor) -> Tensor {
self.f_le_tensor(other).unwrap()
}
pub fn le_tensor_(&mut self, other: &Tensor) -> Tensor {
self.f_le_tensor_(other).unwrap()
}
pub fn le_tensor_out(&self, out: &Tensor, other: &Tensor) -> Tensor {
self.f_le_tensor_out(out, other).unwrap()
}
pub fn leaky_relu(&self) -> Tensor {
self.f_leaky_relu().unwrap()
}
pub fn leaky_relu_(&mut self) -> Tensor {
self.f_leaky_relu_().unwrap()
}
pub fn leaky_relu_backward<S: Into<Scalar>>(
&self,
grad_output: &Tensor,
negative_slope: S,
self_is_result: bool,
) -> Tensor {
self.f_leaky_relu_backward(grad_output, negative_slope, self_is_result).unwrap()
}
pub fn leaky_relu_backward_grad_input<S: Into<Scalar>>(
&self,
grad_input: &Tensor,
grad_output: &Tensor,
negative_slope: S,
self_is_result: bool,
) -> Tensor {
self.f_leaky_relu_backward_grad_input(
grad_input,
grad_output,
negative_slope,
self_is_result,
)
.unwrap()
}
pub fn leaky_relu_out(&self, out: &Tensor) -> Tensor {
self.f_leaky_relu_out(out).unwrap()
}
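/// Usage sketch for `lerp` (illustrative only): computes the linear
/// interpolation `self + weight * (end - self)`.
///
/// ```ignore
/// use tch::Tensor;
/// let start = Tensor::from_slice(&[0.0f64, 0.0]);
/// let end = Tensor::from_slice(&[10.0f64, 20.0]);
/// // Halfway between start and end: [5.0, 10.0].
/// let mid = start.lerp(&end, 0.5);
/// ```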
pub fn lerp<S: Into<Scalar>>(&self, end: &Tensor, weight: S) -> Tensor {
self.f_lerp(end, weight).unwrap()
}
pub fn lerp_<S: Into<Scalar>>(&mut self, end: &Tensor, weight: S) -> Tensor {
self.f_lerp_(end, weight).unwrap()
}
pub fn lerp_scalar_out<S: Into<Scalar>>(
&self,
out: &Tensor,
end: &Tensor,
weight: S,
) -> Tensor {
self.f_lerp_scalar_out(out, end, weight).unwrap()
}
pub fn lerp_tensor(&self, end: &Tensor, weight: &Tensor) -> Tensor {
self.f_lerp_tensor(end, weight).unwrap()
}
pub fn lerp_tensor_(&mut self, end: &Tensor, weight: &Tensor) -> Tensor {
self.f_lerp_tensor_(end, weight).unwrap()
}
pub fn lerp_tensor_out(&self, out: &Tensor, end: &Tensor, weight: &Tensor) -> Tensor {
self.f_lerp_tensor_out(out, end, weight).unwrap()
}
pub fn less<S: Into<Scalar>>(&self, other: S) -> Tensor {
self.f_less(other).unwrap()
}
pub fn less_<S: Into<Scalar>>(&mut self, other: S) -> Tensor {
self.f_less_(other).unwrap()
}
pub fn less_equal<S: Into<Scalar>>(&self, other: S) -> Tensor {
self.f_less_equal(other).unwrap()
}
pub fn less_equal_<S: Into<Scalar>>(&mut self, other: S) -> Tensor {
self.f_less_equal_(other).unwrap()
}
pub fn less_equal_scalar_out<S: Into<Scalar>>(&self, out: &Tensor, other: S) -> Tensor {
self.f_less_equal_scalar_out(out, other).unwrap()
}
pub fn less_equal_tensor(&self, other: &Tensor) -> Tensor {
self.f_less_equal_tensor(other).unwrap()
}
pub fn less_equal_tensor_(&mut self, other: &Tensor) -> Tensor {
self.f_less_equal_tensor_(other).unwrap()
}
pub fn less_equal_tensor_out(&self, out: &Tensor, other: &Tensor) -> Tensor {
self.f_less_equal_tensor_out(out, other).unwrap()
}
pub fn less_scalar_out<S: Into<Scalar>>(&self, out: &Tensor, other: S) -> Tensor {
self.f_less_scalar_out(out, other).unwrap()
}
pub fn less_tensor(&self, other: &Tensor) -> Tensor {
self.f_less_tensor(other).unwrap()
}
pub fn less_tensor_(&mut self, other: &Tensor) -> Tensor {
self.f_less_tensor_(other).unwrap()
}
pub fn less_tensor_out(&self, out: &Tensor, other: &Tensor) -> Tensor {
self.f_less_tensor_out(out, other).unwrap()
}
pub fn lgamma(&self) -> Tensor {
self.f_lgamma().unwrap()
}
pub fn lgamma_(&mut self) -> Tensor {
self.f_lgamma_().unwrap()
}
pub fn lgamma_out(&self, out: &Tensor) -> Tensor {
self.f_lgamma_out(out).unwrap()
}
pub fn lift(&self) -> Tensor {
self.f_lift().unwrap()
}
pub fn lift_fresh(&self) -> Tensor {
self.f_lift_fresh().unwrap()
}
pub fn lift_fresh_copy(&self) -> Tensor {
self.f_lift_fresh_copy().unwrap()
}
pub fn lift_fresh_copy_out(&self, out: &Tensor) -> Tensor {
self.f_lift_fresh_copy_out(out).unwrap()
}
pub fn lift_out(&self, out: &Tensor) -> Tensor {
self.f_lift_out(out).unwrap()
}
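/// Usage sketch for `linalg_cholesky` (illustrative only): the input must be a
/// symmetric (or Hermitian) positive-definite matrix; `upper = false` returns
/// the lower-triangular factor L with `A = L * L^T`.
///
/// ```ignore
/// use tch::Tensor;
/// let a = Tensor::from_slice(&[4.0f64, 2.0, 2.0, 3.0]).reshape([2, 2]);
/// let l = a.linalg_cholesky(false);
/// ```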
pub fn linalg_cholesky(&self, upper: bool) -> Tensor {
self.f_linalg_cholesky(upper).unwrap()
}
pub fn linalg_cholesky_ex(&self, upper: bool, check_errors: bool) -> (Tensor, Tensor) {
self.f_linalg_cholesky_ex(upper, check_errors).unwrap()
}
pub fn linalg_cholesky_ex_l(
&self,
l: &Tensor,
info: &Tensor,
upper: bool,
check_errors: bool,
) -> (Tensor, Tensor) {
self.f_linalg_cholesky_ex_l(l, info, upper, check_errors).unwrap()
}
pub fn linalg_cholesky_out(&self, out: &Tensor, upper: bool) -> Tensor {
self.f_linalg_cholesky_out(out, upper).unwrap()
}
pub fn linalg_cond<S: Into<Scalar>>(&self, p: S) -> Tensor {
self.f_linalg_cond(p).unwrap()
}
pub fn linalg_cond_out<S: Into<Scalar>>(&self, out: &Tensor, p: S) -> Tensor {
self.f_linalg_cond_out(out, p).unwrap()
}
pub fn linalg_cond_p_str(&self, p: &str) -> Tensor {
self.f_linalg_cond_p_str(p).unwrap()
}
pub fn linalg_cond_p_str_out(&self, out: &Tensor, p: &str) -> Tensor {
self.f_linalg_cond_p_str_out(out, p).unwrap()
}
pub fn linalg_cross(&self, other: &Tensor, dim: i64) -> Tensor {
self.f_linalg_cross(other, dim).unwrap()
}
pub fn linalg_cross_out(&self, out: &Tensor, other: &Tensor, dim: i64) -> Tensor {
self.f_linalg_cross_out(out, other, dim).unwrap()
}
pub fn linalg_det(a: &Tensor) -> Tensor {
Tensor::f_linalg_det(a).unwrap()
}
pub fn linalg_det_out(out: &Tensor, a: &Tensor) -> Tensor {
Tensor::f_linalg_det_out(out, a).unwrap()
}
pub fn linalg_diagonal(a: &Tensor, offset: i64, dim1: i64, dim2: i64) -> Tensor {
Tensor::f_linalg_diagonal(a, offset, dim1, dim2).unwrap()
}
pub fn linalg_eig(&self) -> (Tensor, Tensor) {
self.f_linalg_eig().unwrap()
}
pub fn linalg_eig_out(&self, eigenvalues: &Tensor, eigenvectors: &Tensor) -> (Tensor, Tensor) {
self.f_linalg_eig_out(eigenvalues, eigenvectors).unwrap()
}
pub fn linalg_eigh(&self, uplo: &str) -> (Tensor, Tensor) {
self.f_linalg_eigh(uplo).unwrap()
}
pub fn linalg_eigh_eigvals(
&self,
eigvals: &Tensor,
eigvecs: &Tensor,
uplo: &str,
) -> (Tensor, Tensor) {
self.f_linalg_eigh_eigvals(eigvals, eigvecs, uplo).unwrap()
}
pub fn linalg_eigvals(&self) -> Tensor {
self.f_linalg_eigvals().unwrap()
}
pub fn linalg_eigvals_out(&self, out: &Tensor) -> Tensor {
self.f_linalg_eigvals_out(out).unwrap()
}
pub fn linalg_eigvalsh(&self, uplo: &str) -> Tensor {
self.f_linalg_eigvalsh(uplo).unwrap()
}
pub fn linalg_eigvalsh_out(&self, out: &Tensor, uplo: &str) -> Tensor {
self.f_linalg_eigvalsh_out(out, uplo).unwrap()
}
pub fn linalg_householder_product(&self, tau: &Tensor) -> Tensor {
self.f_linalg_householder_product(tau).unwrap()
}
pub fn linalg_householder_product_out(&self, out: &Tensor, tau: &Tensor) -> Tensor {
self.f_linalg_householder_product_out(out, tau).unwrap()
}
pub fn linalg_inv(a: &Tensor) -> Tensor {
Tensor::f_linalg_inv(a).unwrap()
}
pub fn linalg_inv_ex(a: &Tensor, check_errors: bool) -> (Tensor, Tensor) {
Tensor::f_linalg_inv_ex(a, check_errors).unwrap()
}
pub fn linalg_inv_ex_inverse(
inverse: &Tensor,
info: &Tensor,
a: &Tensor,
check_errors: bool,
) -> (Tensor, Tensor) {
Tensor::f_linalg_inv_ex_inverse(inverse, info, a, check_errors).unwrap()
}
pub fn linalg_inv_out(out: &Tensor, a: &Tensor) -> Tensor {
Tensor::f_linalg_inv_out(out, a).unwrap()
}
pub fn linalg_ldl_factor(&self, hermitian: bool) -> (Tensor, Tensor) {
self.f_linalg_ldl_factor(hermitian).unwrap()
}
pub fn linalg_ldl_factor_ex(
&self,
hermitian: bool,
check_errors: bool,
) -> (Tensor, Tensor, Tensor) {
self.f_linalg_ldl_factor_ex(hermitian, check_errors).unwrap()
}
pub fn linalg_ldl_factor_ex_out(
&self,
ld: &Tensor,
pivots: &Tensor,
info: &Tensor,
hermitian: bool,
check_errors: bool,
) -> (Tensor, Tensor, Tensor) {
self.f_linalg_ldl_factor_ex_out(ld, pivots, info, hermitian, check_errors).unwrap()
}
pub fn linalg_ldl_factor_out(
&self,
ld: &Tensor,
pivots: &Tensor,
hermitian: bool,
) -> (Tensor, Tensor) {
self.f_linalg_ldl_factor_out(ld, pivots, hermitian).unwrap()
}
pub fn linalg_ldl_solve(ld: &Tensor, pivots: &Tensor, b: &Tensor, hermitian: bool) -> Tensor {
Tensor::f_linalg_ldl_solve(ld, pivots, b, hermitian).unwrap()
}
pub fn linalg_ldl_solve_out(
out: &Tensor,
ld: &Tensor,
pivots: &Tensor,
b: &Tensor,
hermitian: bool,
) -> Tensor {
Tensor::f_linalg_ldl_solve_out(out, ld, pivots, b, hermitian).unwrap()
}
pub fn linalg_lstsq(
&self,
b: &Tensor,
rcond: impl Into<Option<f64>>,
driver: &str,
) -> (Tensor, Tensor, Tensor, Tensor) {
self.f_linalg_lstsq(b, rcond, driver).unwrap()
}
pub fn linalg_lstsq_out(
&self,
solution: &Tensor,
residuals: &Tensor,
rank: &Tensor,
singular_values: &Tensor,
b: &Tensor,
rcond: impl Into<Option<f64>>,
driver: &str,
) -> (Tensor, Tensor, Tensor, Tensor) {
self.f_linalg_lstsq_out(solution, residuals, rank, singular_values, b, rcond, driver)
.unwrap()
}
pub fn linalg_lu(a: &Tensor, pivot: bool) -> (Tensor, Tensor, Tensor) {
Tensor::f_linalg_lu(a, pivot).unwrap()
}
pub fn linalg_lu_factor(a: &Tensor, pivot: bool) -> (Tensor, Tensor) {
Tensor::f_linalg_lu_factor(a, pivot).unwrap()
}
pub fn linalg_lu_factor_ex(
a: &Tensor,
pivot: bool,
check_errors: bool,
) -> (Tensor, Tensor, Tensor) {
Tensor::f_linalg_lu_factor_ex(a, pivot, check_errors).unwrap()
}
pub fn linalg_lu_factor_ex_out(
lu: &Tensor,
pivots: &Tensor,
info: &Tensor,
a: &Tensor,
pivot: bool,
check_errors: bool,
) -> (Tensor, Tensor, Tensor) {
Tensor::f_linalg_lu_factor_ex_out(lu, pivots, info, a, pivot, check_errors).unwrap()
}
pub fn linalg_lu_factor_out(
lu: &Tensor,
pivots: &Tensor,
a: &Tensor,
pivot: bool,
) -> (Tensor, Tensor) {
Tensor::f_linalg_lu_factor_out(lu, pivots, a, pivot).unwrap()
}
pub fn linalg_lu_out(
p: &Tensor,
l: &Tensor,
u: &Tensor,
a: &Tensor,
pivot: bool,
) -> (Tensor, Tensor, Tensor) {
Tensor::f_linalg_lu_out(p, l, u, a, pivot).unwrap()
}
pub fn linalg_lu_solve(
lu: &Tensor,
pivots: &Tensor,
b: &Tensor,
left: bool,
adjoint: bool,
) -> Tensor {
Tensor::f_linalg_lu_solve(lu, pivots, b, left, adjoint).unwrap()
}
pub fn linalg_lu_solve_out(
out: &Tensor,
lu: &Tensor,
pivots: &Tensor,
b: &Tensor,
left: bool,
adjoint: bool,
) -> Tensor {
Tensor::f_linalg_lu_solve_out(out, lu, pivots, b, left, adjoint).unwrap()
}
pub fn linalg_matmul(&self, other: &Tensor) -> Tensor {
self.f_linalg_matmul(other).unwrap()
}
pub fn linalg_matmul_out(&self, out: &Tensor, other: &Tensor) -> Tensor {
self.f_linalg_matmul_out(out, other).unwrap()
}
pub fn linalg_matrix_exp(&self) -> Tensor {
self.f_linalg_matrix_exp().unwrap()
}
pub fn linalg_matrix_exp_out(&self, out: &Tensor) -> Tensor {
self.f_linalg_matrix_exp_out(out).unwrap()
}
pub fn linalg_matrix_power(&self, n: i64) -> Tensor {
self.f_linalg_matrix_power(n).unwrap()
}
pub fn linalg_matrix_power_out(&self, out: &Tensor, n: i64) -> Tensor {
self.f_linalg_matrix_power_out(out, n).unwrap()
}
pub fn linalg_matrix_rank(&self, tol: f64, hermitian: bool) -> Tensor {
self.f_linalg_matrix_rank(tol, hermitian).unwrap()
}
pub fn linalg_matrix_rank_atol_rtol_float(
&self,
atol: impl Into<Option<f64>>,
rtol: impl Into<Option<f64>>,
hermitian: bool,
) -> Tensor {
self.f_linalg_matrix_rank_atol_rtol_float(atol, rtol, hermitian).unwrap()
}
pub fn linalg_matrix_rank_atol_rtol_float_out(
&self,
out: &Tensor,
atol: impl Into<Option<f64>>,
rtol: impl Into<Option<f64>>,
hermitian: bool,
) -> Tensor {
self.f_linalg_matrix_rank_atol_rtol_float_out(out, atol, rtol, hermitian).unwrap()
}
pub fn linalg_matrix_rank_atol_rtol_tensor<T: Borrow<Tensor>>(
&self,
atol: Option<T>,
rtol: Option<T>,
hermitian: bool,
) -> Tensor {
self.f_linalg_matrix_rank_atol_rtol_tensor(atol, rtol, hermitian).unwrap()
}
pub fn linalg_matrix_rank_atol_rtol_tensor_out<T: Borrow<Tensor>>(
&self,
out: &Tensor,
atol: Option<T>,
rtol: Option<T>,
hermitian: bool,
) -> Tensor {
self.f_linalg_matrix_rank_atol_rtol_tensor_out(out, atol, rtol, hermitian).unwrap()
}
pub fn linalg_matrix_rank_out(&self, out: &Tensor, tol: f64, hermitian: bool) -> Tensor {
self.f_linalg_matrix_rank_out(out, tol, hermitian).unwrap()
}
pub fn linalg_matrix_rank_out_tol_tensor(
&self,
out: &Tensor,
tol: &Tensor,
hermitian: bool,
) -> Tensor {
self.f_linalg_matrix_rank_out_tol_tensor(out, tol, hermitian).unwrap()
}
pub fn linalg_matrix_rank_tol_tensor(&self, tol: &Tensor, hermitian: bool) -> Tensor {
self.f_linalg_matrix_rank_tol_tensor(tol, hermitian).unwrap()
}
pub fn linalg_multi_dot<T: Borrow<Tensor>>(tensors: &[T]) -> Tensor {
Tensor::f_linalg_multi_dot(tensors).unwrap()
}
pub fn linalg_multi_dot_out<T: Borrow<Tensor>>(out: &Tensor, tensors: &[T]) -> Tensor {
Tensor::f_linalg_multi_dot_out(out, tensors).unwrap()
}
pub fn linalg_norm<S: Into<Scalar>>(
&self,
ord: S,
dim: impl IntListOption,
keepdim: bool,
dtype: impl Into<Option<Kind>>,
) -> Tensor {
self.f_linalg_norm(ord, dim, keepdim, dtype).unwrap()
}
pub fn linalg_norm_ord_str(
&self,
ord: &str,
dim: impl IntListOption,
keepdim: bool,
dtype: impl Into<Option<Kind>>,
) -> Tensor {
self.f_linalg_norm_ord_str(ord, dim, keepdim, dtype).unwrap()
}
pub fn linalg_norm_ord_str_out(
&self,
out: &Tensor,
ord: &str,
dim: impl IntListOption,
keepdim: bool,
dtype: impl Into<Option<Kind>>,
) -> Tensor {
self.f_linalg_norm_ord_str_out(out, ord, dim, keepdim, dtype).unwrap()
}
pub fn linalg_norm_out<S: Into<Scalar>>(
&self,
out: &Tensor,
ord: S,
dim: impl IntListOption,
keepdim: bool,
dtype: impl Into<Option<Kind>>,
) -> Tensor {
self.f_linalg_norm_out(out, ord, dim, keepdim, dtype).unwrap()
}
pub fn linalg_pinv(&self, rcond: f64, hermitian: bool) -> Tensor {
self.f_linalg_pinv(rcond, hermitian).unwrap()
}
pub fn linalg_pinv_atol_rtol_float(
&self,
atol: impl Into<Option<f64>>,
rtol: impl Into<Option<f64>>,
hermitian: bool,
) -> Tensor {
self.f_linalg_pinv_atol_rtol_float(atol, rtol, hermitian).unwrap()
}
pub fn linalg_pinv_atol_rtol_float_out(
&self,
out: &Tensor,
atol: impl Into<Option<f64>>,
rtol: impl Into<Option<f64>>,
hermitian: bool,
) -> Tensor {
self.f_linalg_pinv_atol_rtol_float_out(out, atol, rtol, hermitian).unwrap()
}
pub fn linalg_pinv_atol_rtol_tensor<T: Borrow<Tensor>>(
&self,
atol: Option<T>,
rtol: Option<T>,
hermitian: bool,
) -> Tensor {
self.f_linalg_pinv_atol_rtol_tensor(atol, rtol, hermitian).unwrap()
}
pub fn linalg_pinv_atol_rtol_tensor_out<T: Borrow<Tensor>>(
&self,
out: &Tensor,
atol: Option<T>,
rtol: Option<T>,
hermitian: bool,
) -> Tensor {
self.f_linalg_pinv_atol_rtol_tensor_out(out, atol, rtol, hermitian).unwrap()
}
pub fn linalg_pinv_out(&self, out: &Tensor, rcond: f64, hermitian: bool) -> Tensor {
self.f_linalg_pinv_out(out, rcond, hermitian).unwrap()
}
pub fn linalg_pinv_out_rcond_tensor(
&self,
out: &Tensor,
rcond: &Tensor,
hermitian: bool,
) -> Tensor {
self.f_linalg_pinv_out_rcond_tensor(out, rcond, hermitian).unwrap()
}
pub fn linalg_pinv_rcond_tensor(&self, rcond: &Tensor, hermitian: bool) -> Tensor {
self.f_linalg_pinv_rcond_tensor(rcond, hermitian).unwrap()
}
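/// QR decomposition of `a`: returns `(q, r)` with `a = q.matmul(&r)`; `mode`
/// is typically `"reduced"` or `"complete"`. Panics on error; see `f_linalg_qr`.
///
/// Sketch (assumes the crate is used as `tch` with libtorch available; not a doctest):
///
/// ```ignore
/// use tch::{Device, Kind, Tensor};
/// let a = Tensor::randn(&[4, 3], (Kind::Float, Device::Cpu));
/// let (q, r) = Tensor::linalg_qr(&a, "reduced");
/// // q: [4, 3] with orthonormal columns, r: [3, 3] upper triangular
/// ```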
pub fn linalg_qr(a: &Tensor, mode: &str) -> (Tensor, Tensor) {
Tensor::f_linalg_qr(a, mode).unwrap()
}
pub fn linalg_qr_out(q: &Tensor, r: &Tensor, a: &Tensor, mode: &str) -> (Tensor, Tensor) {
Tensor::f_linalg_qr_out(q, r, a, mode).unwrap()
}
pub fn linalg_slogdet(a: &Tensor) -> (Tensor, Tensor) {
Tensor::f_linalg_slogdet(a).unwrap()
}
pub fn linalg_slogdet_out(sign: &Tensor, logabsdet: &Tensor, a: &Tensor) -> (Tensor, Tensor) {
Tensor::f_linalg_slogdet_out(sign, logabsdet, a).unwrap()
}
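/// Solves `a x = b` (or `x a = b` when `left` is false). Panics on error; see
/// `f_linalg_solve`.
///
/// Sketch (assumes the crate is used as `tch` with libtorch available; not a doctest):
///
/// ```ignore
/// use tch::{Device, Kind, Tensor};
/// let a = Tensor::eye(3, (Kind::Float, Device::Cpu));
/// let b = Tensor::randn(&[3], (Kind::Float, Device::Cpu));
/// let x = Tensor::linalg_solve(&a, &b, true); // with a = I, x equals b
/// ```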
pub fn linalg_solve(a: &Tensor, b: &Tensor, left: bool) -> Tensor {
Tensor::f_linalg_solve(a, b, left).unwrap()
}
pub fn linalg_solve_ex(
a: &Tensor,
b: &Tensor,
left: bool,
check_errors: bool,
) -> (Tensor, Tensor) {
Tensor::f_linalg_solve_ex(a, b, left, check_errors).unwrap()
}
pub fn linalg_solve_ex_out(
result: &Tensor,
info: &Tensor,
a: &Tensor,
b: &Tensor,
left: bool,
check_errors: bool,
) -> (Tensor, Tensor) {
Tensor::f_linalg_solve_ex_out(result, info, a, b, left, check_errors).unwrap()
}
pub fn linalg_solve_out(out: &Tensor, a: &Tensor, b: &Tensor, left: bool) -> Tensor {
Tensor::f_linalg_solve_out(out, a, b, left).unwrap()
}
pub fn linalg_solve_triangular(
&self,
b: &Tensor,
upper: bool,
left: bool,
unitriangular: bool,
) -> Tensor {
self.f_linalg_solve_triangular(b, upper, left, unitriangular).unwrap()
}
pub fn linalg_solve_triangular_out(
&self,
out: &Tensor,
b: &Tensor,
upper: bool,
left: bool,
unitriangular: bool,
) -> Tensor {
self.f_linalg_solve_triangular_out(out, b, upper, left, unitriangular).unwrap()
}
pub fn linalg_svd(a: &Tensor, full_matrices: bool, driver: &str) -> (Tensor, Tensor, Tensor) {
Tensor::f_linalg_svd(a, full_matrices, driver).unwrap()
}
pub fn linalg_svd_u(
u: &Tensor,
s: &Tensor,
vh: &Tensor,
a: &Tensor,
full_matrices: bool,
driver: &str,
) -> (Tensor, Tensor, Tensor) {
Tensor::f_linalg_svd_u(u, s, vh, a, full_matrices, driver).unwrap()
}
pub fn linalg_svdvals(a: &Tensor, driver: &str) -> Tensor {
Tensor::f_linalg_svdvals(a, driver).unwrap()
}
pub fn linalg_svdvals_out(out: &Tensor, a: &Tensor, driver: &str) -> Tensor {
Tensor::f_linalg_svdvals_out(out, a, driver).unwrap()
}
pub fn linalg_tensorinv(&self, ind: i64) -> Tensor {
self.f_linalg_tensorinv(ind).unwrap()
}
pub fn linalg_tensorinv_out(&self, out: &Tensor, ind: i64) -> Tensor {
self.f_linalg_tensorinv_out(out, ind).unwrap()
}
pub fn linalg_tensorsolve(&self, other: &Tensor, dims: impl IntListOption) -> Tensor {
self.f_linalg_tensorsolve(other, dims).unwrap()
}
pub fn linalg_tensorsolve_out(
&self,
out: &Tensor,
other: &Tensor,
dims: impl IntListOption,
) -> Tensor {
self.f_linalg_tensorsolve_out(out, other, dims).unwrap()
}
pub fn linalg_vander(x: &Tensor, n: impl Into<Option<i64>>) -> Tensor {
Tensor::f_linalg_vander(x, n).unwrap()
}
pub fn linalg_vecdot(x: &Tensor, y: &Tensor, dim: i64) -> Tensor {
Tensor::f_linalg_vecdot(x, y, dim).unwrap()
}
pub fn linalg_vecdot_out(out: &Tensor, x: &Tensor, y: &Tensor, dim: i64) -> Tensor {
Tensor::f_linalg_vecdot_out(out, x, y, dim).unwrap()
}
pub fn linear<T: Borrow<Tensor>>(&self, weight: &Tensor, bias: Option<T>) -> Tensor {
self.f_linear(weight, bias).unwrap()
}
pub fn linear_out<T: Borrow<Tensor>>(
&self,
out: &Tensor,
weight: &Tensor,
bias: Option<T>,
) -> Tensor {
self.f_linear_out(out, weight, bias).unwrap()
}
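/// Returns a 1-D tensor of `steps` values evenly spaced from `start` to `end`
/// (inclusive), with the given kind and device. Panics on error; see `f_linspace`.
///
/// Sketch (assumes the crate is used as `tch` with libtorch available; not a doctest):
///
/// ```ignore
/// use tch::{Device, Kind, Tensor};
/// // 5 values: 0.00, 0.25, 0.50, 0.75, 1.00
/// let t = Tensor::linspace(0.0, 1.0, 5, (Kind::Float, Device::Cpu));
/// ```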
pub fn linspace<S: Into<Scalar>>(
start: S,
end: S,
steps: i64,
options: (Kind, Device),
) -> Tensor {
Tensor::f_linspace(start, end, steps, options).unwrap()
}
pub fn linspace_out<S: Into<Scalar>>(out: &Tensor, start: S, end: S, steps: i64) -> Tensor {
Tensor::f_linspace_out(out, start, end, steps).unwrap()
}
pub fn log(&self) -> Tensor {
self.f_log().unwrap()
}
pub fn log10(&self) -> Tensor {
self.f_log10().unwrap()
}
pub fn log10_(&mut self) -> Tensor {
self.f_log10_().unwrap()
}
pub fn log10_out(&self, out: &Tensor) -> Tensor {
self.f_log10_out(out).unwrap()
}
pub fn log1p(&self) -> Tensor {
self.f_log1p().unwrap()
}
pub fn log1p_(&mut self) -> Tensor {
self.f_log1p_().unwrap()
}
pub fn log1p_out(&self, out: &Tensor) -> Tensor {
self.f_log1p_out(out).unwrap()
}
pub fn log2(&self) -> Tensor {
self.f_log2().unwrap()
}
pub fn log2_(&mut self) -> Tensor {
self.f_log2_().unwrap()
}
pub fn log2_out(&self, out: &Tensor) -> Tensor {
self.f_log2_out(out).unwrap()
}
pub fn log_(&mut self) -> Tensor {
self.f_log_().unwrap()
}
pub fn log_normal(&self, mean: f64, std: f64) -> Tensor {
self.f_log_normal(mean, std).unwrap()
}
pub fn log_normal_(&mut self, mean: f64, std: f64) -> Tensor {
self.f_log_normal_(mean, std).unwrap()
}
pub fn log_normal_out(&self, out: &Tensor, mean: f64, std: f64) -> Tensor {
self.f_log_normal_out(out, mean, std).unwrap()
}
pub fn log_out(&self, out: &Tensor) -> Tensor {
self.f_log_out(out).unwrap()
}
pub fn log_sigmoid(&self) -> Tensor {
self.f_log_sigmoid().unwrap()
}
pub fn log_sigmoid_backward(&self, grad_output: &Tensor, buffer: &Tensor) -> Tensor {
self.f_log_sigmoid_backward(grad_output, buffer).unwrap()
}
pub fn log_sigmoid_backward_grad_input(
&self,
grad_input: &Tensor,
grad_output: &Tensor,
buffer: &Tensor,
) -> Tensor {
self.f_log_sigmoid_backward_grad_input(grad_input, grad_output, buffer).unwrap()
}
pub fn log_sigmoid_out(&self, out: &Tensor) -> Tensor {
self.f_log_sigmoid_out(out).unwrap()
}
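/// Applies `log(softmax(..))` along `dim`; `dtype` optionally casts before the
/// computation (pass `None::<Kind>` to keep the input kind). Panics on error;
/// see `f_log_softmax`.
///
/// Sketch (assumes the crate is used as `tch` with libtorch available; not a doctest):
///
/// ```ignore
/// use tch::{Device, Kind, Tensor};
/// let logits = Tensor::randn(&[2, 5], (Kind::Float, Device::Cpu));
/// let log_probs = logits.log_softmax(-1, None::<Kind>); // each row's exp() sums to 1
/// ```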
pub fn log_softmax(&self, dim: i64, dtype: impl Into<Option<Kind>>) -> Tensor {
self.f_log_softmax(dim, dtype).unwrap()
}
pub fn log_softmax_int_out(
&self,
out: &Tensor,
dim: i64,
dtype: impl Into<Option<Kind>>,
) -> Tensor {
self.f_log_softmax_int_out(out, dim, dtype).unwrap()
}
pub fn logaddexp(&self, other: &Tensor) -> Tensor {
self.f_logaddexp(other).unwrap()
}
pub fn logaddexp2(&self, other: &Tensor) -> Tensor {
self.f_logaddexp2(other).unwrap()
}
pub fn logaddexp2_out(&self, out: &Tensor, other: &Tensor) -> Tensor {
self.f_logaddexp2_out(out, other).unwrap()
}
pub fn logaddexp_out(&self, out: &Tensor, other: &Tensor) -> Tensor {
self.f_logaddexp_out(out, other).unwrap()
}
pub fn logcumsumexp(&self, dim: i64) -> Tensor {
self.f_logcumsumexp(dim).unwrap()
}
pub fn logcumsumexp_out(&self, out: &Tensor, dim: i64) -> Tensor {
self.f_logcumsumexp_out(out, dim).unwrap()
}
pub fn logdet(&self) -> Tensor {
self.f_logdet().unwrap()
}
pub fn logical_and(&self, other: &Tensor) -> Tensor {
self.f_logical_and(other).unwrap()
}
pub fn logical_and_(&mut self, other: &Tensor) -> Tensor {
self.f_logical_and_(other).unwrap()
}
pub fn logical_and_out(&self, out: &Tensor, other: &Tensor) -> Tensor {
self.f_logical_and_out(out, other).unwrap()
}
pub fn logical_not(&self) -> Tensor {
self.f_logical_not().unwrap()
}
pub fn logical_not_(&mut self) -> Tensor {
self.f_logical_not_().unwrap()
}
pub fn logical_not_out(&self, out: &Tensor) -> Tensor {
self.f_logical_not_out(out).unwrap()
}
pub fn logical_or(&self, other: &Tensor) -> Tensor {
self.f_logical_or(other).unwrap()
}
pub fn logical_or_(&mut self, other: &Tensor) -> Tensor {
self.f_logical_or_(other).unwrap()
}
pub fn logical_or_out(&self, out: &Tensor, other: &Tensor) -> Tensor {
self.f_logical_or_out(out, other).unwrap()
}
pub fn logical_xor(&self, other: &Tensor) -> Tensor {
self.f_logical_xor(other).unwrap()
}
pub fn logical_xor_(&mut self, other: &Tensor) -> Tensor {
self.f_logical_xor_(other).unwrap()
}
pub fn logical_xor_out(&self, out: &Tensor, other: &Tensor) -> Tensor {
self.f_logical_xor_out(out, other).unwrap()
}
pub fn logit(&self, eps: impl Into<Option<f64>>) -> Tensor {
self.f_logit(eps).unwrap()
}
pub fn logit_(&mut self, eps: impl Into<Option<f64>>) -> Tensor {
self.f_logit_(eps).unwrap()
}
pub fn logit_backward(&self, grad_output: &Tensor, eps: impl Into<Option<f64>>) -> Tensor {
self.f_logit_backward(grad_output, eps).unwrap()
}
pub fn logit_backward_grad_input(
&self,
grad_input: &Tensor,
grad_output: &Tensor,
eps: impl Into<Option<f64>>,
) -> Tensor {
self.f_logit_backward_grad_input(grad_input, grad_output, eps).unwrap()
}
pub fn logit_out(&self, out: &Tensor, eps: impl Into<Option<f64>>) -> Tensor {
self.f_logit_out(out, eps).unwrap()
}
pub fn logspace<S: Into<Scalar>>(
start: S,
end: S,
steps: i64,
base: f64,
options: (Kind, Device),
) -> Tensor {
Tensor::f_logspace(start, end, steps, base, options).unwrap()
}
pub fn logspace_out<S: Into<Scalar>>(
out: &Tensor,
start: S,
end: S,
steps: i64,
base: f64,
) -> Tensor {
Tensor::f_logspace_out(out, start, end, steps, base).unwrap()
}
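/// Computes `log(sum(exp(x)))` over the given dimensions in a numerically
/// stable way. Panics on error; see `f_logsumexp`.
///
/// Sketch (assumes the crate is used as `tch` with libtorch available; not a doctest):
///
/// ```ignore
/// use tch::{Device, Kind, Tensor};
/// let x = Tensor::randn(&[3, 4], (Kind::Float, Device::Cpu));
/// let lse = x.logsumexp(&[-1i64], false); // reduce the last dim -> shape [3]
/// ```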
pub fn logsumexp(&self, dim: impl IntList, keepdim: bool) -> Tensor {
self.f_logsumexp(dim, keepdim).unwrap()
}
pub fn logsumexp_out(&self, out: &Tensor, dim: impl IntList, keepdim: bool) -> Tensor {
self.f_logsumexp_out(out, dim, keepdim).unwrap()
}
pub fn lstm<T: Borrow<Tensor>>(
&self,
hx: &[T],
params: &[T],
has_biases: bool,
num_layers: i64,
dropout: f64,
train: bool,
bidirectional: bool,
batch_first: bool,
) -> (Tensor, Tensor, Tensor) {
self.f_lstm(hx, params, has_biases, num_layers, dropout, train, bidirectional, batch_first)
.unwrap()
}
pub fn lstm_cell<T: Borrow<Tensor>>(
&self,
hx: &[T],
w_ih: &Tensor,
w_hh: &Tensor,
b_ih: Option<T>,
b_hh: Option<T>,
) -> (Tensor, Tensor) {
self.f_lstm_cell(hx, w_ih, w_hh, b_ih, b_hh).unwrap()
}
pub fn lstm_data<T: Borrow<Tensor>>(
data: &Tensor,
batch_sizes: &Tensor,
hx: &[T],
params: &[T],
has_biases: bool,
num_layers: i64,
dropout: f64,
train: bool,
bidirectional: bool,
) -> (Tensor, Tensor, Tensor) {
Tensor::f_lstm_data(
data,
batch_sizes,
hx,
params,
has_biases,
num_layers,
dropout,
train,
bidirectional,
)
.unwrap()
}
pub fn lstm_mps_backward<T: Borrow<Tensor>>(
&self,
out0: &Tensor,
out1: &[T],
out2: &[T],
grad_y: Option<T>,
grad_hy: Option<T>,
grad_cy: Option<T>,
z_state: &Tensor,
cell_state_fwd: &Tensor,
layersoutputs: &Tensor,
hx: &[T],
params: &[T],
has_biases: bool,
num_layers: i64,
dropout: f64,
train: bool,
bidirectional: bool,
batch_first: bool,
) {
self.f_lstm_mps_backward(
out0,
out1,
out2,
grad_y,
grad_hy,
grad_cy,
z_state,
cell_state_fwd,
layersoutputs,
hx,
params,
has_biases,
num_layers,
dropout,
train,
bidirectional,
batch_first,
)
.unwrap()
}
pub fn lt<S: Into<Scalar>>(&self, other: S) -> Tensor {
self.f_lt(other).unwrap()
}
pub fn lt_<S: Into<Scalar>>(&mut self, other: S) -> Tensor {
self.f_lt_(other).unwrap()
}
pub fn lt_scalar_out<S: Into<Scalar>>(&self, out: &Tensor, other: S) -> Tensor {
self.f_lt_scalar_out(out, other).unwrap()
}
pub fn lt_tensor(&self, other: &Tensor) -> Tensor {
self.f_lt_tensor(other).unwrap()
}
pub fn lt_tensor_(&mut self, other: &Tensor) -> Tensor {
self.f_lt_tensor_(other).unwrap()
}
pub fn lt_tensor_out(&self, out: &Tensor, other: &Tensor) -> Tensor {
self.f_lt_tensor_out(out, other).unwrap()
}
pub fn lu_solve(&self, lu_data: &Tensor, lu_pivots: &Tensor) -> Tensor {
self.f_lu_solve(lu_data, lu_pivots).unwrap()
}
pub fn lu_solve_out(&self, out: &Tensor, lu_data: &Tensor, lu_pivots: &Tensor) -> Tensor {
self.f_lu_solve_out(out, lu_data, lu_pivots).unwrap()
}
pub fn lu_unpack(
lu_data: &Tensor,
lu_pivots: &Tensor,
unpack_data: bool,
unpack_pivots: bool,
) -> (Tensor, Tensor, Tensor) {
Tensor::f_lu_unpack(lu_data, lu_pivots, unpack_data, unpack_pivots).unwrap()
}
pub fn lu_unpack_out(
p: &Tensor,
l: &Tensor,
u: &Tensor,
lu_data: &Tensor,
lu_pivots: &Tensor,
unpack_data: bool,
unpack_pivots: bool,
) -> (Tensor, Tensor, Tensor) {
Tensor::f_lu_unpack_out(p, l, u, lu_data, lu_pivots, unpack_data, unpack_pivots).unwrap()
}
pub fn margin_ranking_loss(
input1: &Tensor,
input2: &Tensor,
target: &Tensor,
margin: f64,
reduction: crate::Reduction,
) -> Tensor {
Tensor::f_margin_ranking_loss(input1, input2, target, margin, reduction).unwrap()
}
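/// Returns a copy of the tensor where positions with a `true` mask entry are
/// replaced by `value`. Panics on error; see `f_masked_fill`.
///
/// Sketch (assumes the crate is used as `tch` with libtorch available; not a doctest):
///
/// ```ignore
/// use tch::{Device, Kind, Tensor};
/// let x = Tensor::randn(&[2, 3], (Kind::Float, Device::Cpu));
/// let mask = x.lt(0.0); // boolean mask of negative entries
/// let clamped = x.masked_fill(&mask, 0.0); // negatives replaced by zero
/// ```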
pub fn masked_fill<S: Into<Scalar>>(&self, mask: &Tensor, value: S) -> Tensor {
self.f_masked_fill(mask, value).unwrap()
}
pub fn masked_fill_<S: Into<Scalar>>(&mut self, mask: &Tensor, value: S) -> Tensor {
self.f_masked_fill_(mask, value).unwrap()
}
pub fn masked_fill_scalar_out<S: Into<Scalar>>(
&self,
out: &Tensor,
mask: &Tensor,
value: S,
) -> Tensor {
self.f_masked_fill_scalar_out(out, mask, value).unwrap()
}
pub fn masked_fill_tensor(&self, mask: &Tensor, value: &Tensor) -> Tensor {
self.f_masked_fill_tensor(mask, value).unwrap()
}
pub fn masked_fill_tensor_(&mut self, mask: &Tensor, value: &Tensor) -> Tensor {
self.f_masked_fill_tensor_(mask, value).unwrap()
}
pub fn masked_fill_tensor_out(&self, out: &Tensor, mask: &Tensor, value: &Tensor) -> Tensor {
self.f_masked_fill_tensor_out(out, mask, value).unwrap()
}
pub fn masked_scatter(&self, mask: &Tensor, source: &Tensor) -> Tensor {
self.f_masked_scatter(mask, source).unwrap()
}
pub fn masked_scatter_(&mut self, mask: &Tensor, source: &Tensor) -> Tensor {
self.f_masked_scatter_(mask, source).unwrap()
}
pub fn masked_scatter_out(&self, out: &Tensor, mask: &Tensor, source: &Tensor) -> Tensor {
self.f_masked_scatter_out(out, mask, source).unwrap()
}
pub fn masked_select(&self, mask: &Tensor) -> Tensor {
self.f_masked_select(mask).unwrap()
}
pub fn masked_select_backward(&self, grad: &Tensor, mask: &Tensor) -> Tensor {
self.f_masked_select_backward(grad, mask).unwrap()
}
pub fn masked_select_out(&self, out: &Tensor, mask: &Tensor) -> Tensor {
self.f_masked_select_out(out, mask).unwrap()
}
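/// Matrix product of two tensors, broadcasting over batch dimensions following
/// the usual `torch.matmul` rules. Panics on error; see `f_matmul`.
///
/// Sketch (assumes the crate is used as `tch` with libtorch available; not a doctest):
///
/// ```ignore
/// use tch::{Device, Kind, Tensor};
/// let a = Tensor::randn(&[2, 3], (Kind::Float, Device::Cpu));
/// let b = Tensor::randn(&[3, 4], (Kind::Float, Device::Cpu));
/// let c = a.matmul(&b); // shape [2, 4]
/// ```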
pub fn matmul(&self, other: &Tensor) -> Tensor {
self.f_matmul(other).unwrap()
}
pub fn matmul_out(&self, out: &Tensor, other: &Tensor) -> Tensor {
self.f_matmul_out(out, other).unwrap()
}
pub fn matrix_exp(&self) -> Tensor {
self.f_matrix_exp().unwrap()
}
pub fn matrix_exp_backward(&self, grad: &Tensor) -> Tensor {
self.f_matrix_exp_backward(grad).unwrap()
}
pub fn matrix_h(&self) -> Tensor {
self.f_matrix_h().unwrap()
}
pub fn matrix_power(&self, n: i64) -> Tensor {
self.f_matrix_power(n).unwrap()
}
pub fn matrix_power_out(&self, out: &Tensor, n: i64) -> Tensor {
self.f_matrix_power_out(out, n).unwrap()
}
pub fn max(&self) -> Tensor {
self.f_max().unwrap()
}
pub fn max_dim(&self, dim: i64, keepdim: bool) -> (Tensor, Tensor) {
self.f_max_dim(dim, keepdim).unwrap()
}
pub fn max_dim_max(
&self,
max: &Tensor,
max_values: &Tensor,
dim: i64,
keepdim: bool,
) -> (Tensor, Tensor) {
self.f_max_dim_max(max, max_values, dim, keepdim).unwrap()
}
pub fn max_other(&self, other: &Tensor) -> Tensor {
self.f_max_other(other).unwrap()
}
pub fn max_out(&self, out: &Tensor, other: &Tensor) -> Tensor {
self.f_max_out(out, other).unwrap()
}
pub fn max_pool1d(
&self,
kernel_size: impl IntList,
stride: impl IntList,
padding: impl IntList,
dilation: impl IntList,
ceil_mode: bool,
) -> Tensor {
self.f_max_pool1d(kernel_size, stride, padding, dilation, ceil_mode).unwrap()
}
pub fn max_pool1d_with_indices(
&self,
kernel_size: impl IntList,
stride: impl IntList,
padding: impl IntList,
dilation: impl IntList,
ceil_mode: bool,
) -> (Tensor, Tensor) {
self.f_max_pool1d_with_indices(kernel_size, stride, padding, dilation, ceil_mode).unwrap()
}
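/// 2-D max pooling over an `[N, C, H, W]` (or `[C, H, W]`) input. Panics on
/// error; see `f_max_pool2d`.
///
/// Sketch (assumes the crate is used as `tch` with libtorch available; not a doctest):
///
/// ```ignore
/// use tch::{Device, Kind, Tensor};
/// let x = Tensor::randn(&[1, 3, 8, 8], (Kind::Float, Device::Cpu));
/// // 2x2 window, stride 2, no padding, dilation 1, floor rounding
/// let y = x.max_pool2d(&[2, 2], &[2, 2], &[0, 0], &[1, 1], false);
/// // y has shape [1, 3, 4, 4]
/// ```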
pub fn max_pool2d(
&self,
kernel_size: impl IntList,
stride: impl IntList,
padding: impl IntList,
dilation: impl IntList,
ceil_mode: bool,
) -> Tensor {
self.f_max_pool2d(kernel_size, stride, padding, dilation, ceil_mode).unwrap()
}
pub fn max_pool2d_backward(
&self,
grad_output: &Tensor,
kernel_size: impl IntList,
stride: impl IntList,
padding: impl IntList,
dilation: impl IntList,
ceil_mode: bool,
) -> Tensor {
self.f_max_pool2d_backward(grad_output, kernel_size, stride, padding, dilation, ceil_mode)
.unwrap()
}
pub fn max_pool2d_backward_out(
&self,
out: &Tensor,
grad_output: &Tensor,
kernel_size: impl IntList,
stride: impl IntList,
padding: impl IntList,
dilation: impl IntList,
ceil_mode: bool,
) -> Tensor {
self.f_max_pool2d_backward_out(
out,
grad_output,
kernel_size,
stride,
padding,
dilation,
ceil_mode,
)
.unwrap()
}
pub fn max_pool2d_with_indices(
&self,
kernel_size: impl IntList,
stride: impl IntList,
padding: impl IntList,
dilation: impl IntList,
ceil_mode: bool,
) -> (Tensor, Tensor) {
self.f_max_pool2d_with_indices(kernel_size, stride, padding, dilation, ceil_mode).unwrap()
}
pub fn max_pool2d_with_indices_backward(
&self,
grad_output: &Tensor,
kernel_size: impl IntList,
stride: impl IntList,
padding: impl IntList,
dilation: impl IntList,
ceil_mode: bool,
indices: &Tensor,
) -> Tensor {
self.f_max_pool2d_with_indices_backward(
grad_output,
kernel_size,
stride,
padding,
dilation,
ceil_mode,
indices,
)
.unwrap()
}
pub fn max_pool2d_with_indices_backward_grad_input(
&self,
grad_input: &Tensor,
grad_output: &Tensor,
kernel_size: impl IntList,
stride: impl IntList,
padding: impl IntList,
dilation: impl IntList,
ceil_mode: bool,
indices: &Tensor,
) -> Tensor {
self.f_max_pool2d_with_indices_backward_grad_input(
grad_input,
grad_output,
kernel_size,
stride,
padding,
dilation,
ceil_mode,
indices,
)
.unwrap()
}
pub fn max_pool2d_with_indices_out(
&self,
out: &Tensor,
indices: &Tensor,
kernel_size: impl IntList,
stride: impl IntList,
padding: impl IntList,
dilation: impl IntList,
ceil_mode: bool,
) -> (Tensor, Tensor) {
self.f_max_pool2d_with_indices_out(
out,
indices,
kernel_size,
stride,
padding,
dilation,
ceil_mode,
)
.unwrap()
}
pub fn max_pool3d(
&self,
kernel_size: impl IntList,
stride: impl IntList,
padding: impl IntList,
dilation: impl IntList,
ceil_mode: bool,
) -> Tensor {
self.f_max_pool3d(kernel_size, stride, padding, dilation, ceil_mode).unwrap()
}
pub fn max_pool3d_with_indices(
&self,
kernel_size: impl IntList,
stride: impl IntList,
padding: impl IntList,
dilation: impl IntList,
ceil_mode: bool,
) -> (Tensor, Tensor) {
self.f_max_pool3d_with_indices(kernel_size, stride, padding, dilation, ceil_mode).unwrap()
}
pub fn max_pool3d_with_indices_backward(
&self,
grad_output: &Tensor,
kernel_size: impl IntList,
stride: impl IntList,
padding: impl IntList,
dilation: impl IntList,
ceil_mode: bool,
indices: &Tensor,
) -> Tensor {
self.f_max_pool3d_with_indices_backward(
grad_output,
kernel_size,
stride,
padding,
dilation,
ceil_mode,
indices,
)
.unwrap()
}
pub fn max_pool3d_with_indices_backward_grad_input(
&self,
grad_input: &Tensor,
grad_output: &Tensor,
kernel_size: impl IntList,
stride: impl IntList,
padding: impl IntList,
dilation: impl IntList,
ceil_mode: bool,
indices: &Tensor,
) -> Tensor {
self.f_max_pool3d_with_indices_backward_grad_input(
grad_input,
grad_output,
kernel_size,
stride,
padding,
dilation,
ceil_mode,
indices,
)
.unwrap()
}
pub fn max_pool3d_with_indices_out(
&self,
out: &Tensor,
indices: &Tensor,
kernel_size: impl IntList,
stride: impl IntList,
padding: impl IntList,
dilation: impl IntList,
ceil_mode: bool,
) -> (Tensor, Tensor) {
self.f_max_pool3d_with_indices_out(
out,
indices,
kernel_size,
stride,
padding,
dilation,
ceil_mode,
)
.unwrap()
}
pub fn max_unary_out(&self, out: &Tensor) -> Tensor {
self.f_max_unary_out(out).unwrap()
}
pub fn max_unpool2d(&self, indices: &Tensor, output_size: impl IntList) -> Tensor {
self.f_max_unpool2d(indices, output_size).unwrap()
}
pub fn max_unpool2d_out(
&self,
out: &Tensor,
indices: &Tensor,
output_size: impl IntList,
) -> Tensor {
self.f_max_unpool2d_out(out, indices, output_size).unwrap()
}
pub fn max_unpool3d(
&self,
indices: &Tensor,
output_size: impl IntList,
stride: impl IntList,
padding: impl IntList,
) -> Tensor {
self.f_max_unpool3d(indices, output_size, stride, padding).unwrap()
}
pub fn max_unpool3d_out(
&self,
out: &Tensor,
indices: &Tensor,
output_size: impl IntList,
stride: impl IntList,
padding: impl IntList,
) -> Tensor {
self.f_max_unpool3d_out(out, indices, output_size, stride, padding).unwrap()
}
pub fn maximum(&self, other: &Tensor) -> Tensor {
self.f_maximum(other).unwrap()
}
pub fn maximum_out(&self, out: &Tensor, other: &Tensor) -> Tensor {
self.f_maximum_out(out, other).unwrap()
}
pub fn mean(&self, dtype: impl Into<Option<Kind>>) -> Tensor {
self.f_mean(dtype).unwrap()
}
pub fn mean_dim(
&self,
dim: impl IntListOption,
keepdim: bool,
dtype: impl Into<Option<Kind>>,
) -> Tensor {
self.f_mean_dim(dim, keepdim, dtype).unwrap()
}
pub fn mean_out(
&self,
out: &Tensor,
dim: impl IntListOption,
keepdim: bool,
dtype: impl Into<Option<Kind>>,
) -> Tensor {
self.f_mean_out(out, dim, keepdim, dtype).unwrap()
}
pub fn median(&self) -> Tensor {
self.f_median().unwrap()
}
pub fn median_dim(&self, dim: i64, keepdim: bool) -> (Tensor, Tensor) {
self.f_median_dim(dim, keepdim).unwrap()
}
pub fn median_dim_values(
&self,
values: &Tensor,
indices: &Tensor,
dim: i64,
keepdim: bool,
) -> (Tensor, Tensor) {
self.f_median_dim_values(values, indices, dim, keepdim).unwrap()
}
pub fn median_out(&self, out: &Tensor) -> Tensor {
self.f_median_out(out).unwrap()
}
pub fn meshgrid<T: Borrow<Tensor>>(tensors: &[T]) -> Vec<Tensor> {
Tensor::f_meshgrid(tensors).unwrap()
}
pub fn meshgrid_indexing<T: Borrow<Tensor>>(tensors: &[T], indexing: &str) -> Vec<Tensor> {
Tensor::f_meshgrid_indexing(tensors, indexing).unwrap()
}
pub fn mh(&self) -> Tensor {
self.f_mh().unwrap()
}
pub fn min(&self) -> Tensor {
self.f_min().unwrap()
}
pub fn min_dim(&self, dim: i64, keepdim: bool) -> (Tensor, Tensor) {
self.f_min_dim(dim, keepdim).unwrap()
}
pub fn min_dim_min(
&self,
min: &Tensor,
min_indices: &Tensor,
dim: i64,
keepdim: bool,
) -> (Tensor, Tensor) {
self.f_min_dim_min(min, min_indices, dim, keepdim).unwrap()
}
pub fn min_other(&self, other: &Tensor) -> Tensor {
self.f_min_other(other).unwrap()
}
pub fn min_out(&self, out: &Tensor, other: &Tensor) -> Tensor {
self.f_min_out(out, other).unwrap()
}
pub fn min_unary_out(&self, out: &Tensor) -> Tensor {
self.f_min_unary_out(out).unwrap()
}
pub fn minimum(&self, other: &Tensor) -> Tensor {
self.f_minimum(other).unwrap()
}
pub fn minimum_out(&self, out: &Tensor, other: &Tensor) -> Tensor {
self.f_minimum_out(out, other).unwrap()
}
pub fn miopen_batch_norm<T: Borrow<Tensor>>(
&self,
weight: &Tensor,
bias: Option<T>,
running_mean: Option<T>,
running_var: Option<T>,
training: bool,
exponential_average_factor: f64,
epsilon: f64,
) -> (Tensor, Tensor, Tensor) {
self.f_miopen_batch_norm(
weight,
bias,
running_mean,
running_var,
training,
exponential_average_factor,
epsilon,
)
.unwrap()
}
pub fn miopen_batch_norm_backward<T: Borrow<Tensor>>(
&self,
grad_output: &Tensor,
weight: &Tensor,
running_mean: Option<T>,
running_var: Option<T>,
save_mean: Option<T>,
save_var: Option<T>,
epsilon: f64,
) -> (Tensor, Tensor, Tensor) {
self.f_miopen_batch_norm_backward(
grad_output,
weight,
running_mean,
running_var,
save_mean,
save_var,
epsilon,
)
.unwrap()
}
pub fn miopen_batch_norm_backward_out<T: Borrow<Tensor>>(
&self,
out0: &Tensor,
out1: &Tensor,
out2: &Tensor,
grad_output: &Tensor,
weight: &Tensor,
running_mean: Option<T>,
running_var: Option<T>,
save_mean: Option<T>,
save_var: Option<T>,
epsilon: f64,
) -> (Tensor, Tensor, Tensor) {
self.f_miopen_batch_norm_backward_out(
out0,
out1,
out2,
grad_output,
weight,
running_mean,
running_var,
save_mean,
save_var,
epsilon,
)
.unwrap()
}
pub fn miopen_batch_norm_out<T: Borrow<Tensor>>(
&self,
out0: &Tensor,
out1: &Tensor,
out2: &Tensor,
weight: &Tensor,
bias: Option<T>,
running_mean: Option<T>,
running_var: Option<T>,
training: bool,
exponential_average_factor: f64,
epsilon: f64,
) -> (Tensor, Tensor, Tensor) {
self.f_miopen_batch_norm_out(
out0,
out1,
out2,
weight,
bias,
running_mean,
running_var,
training,
exponential_average_factor,
epsilon,
)
.unwrap()
}
pub fn miopen_convolution<T: Borrow<Tensor>>(
&self,
weight: &Tensor,
bias: Option<T>,
padding: impl IntList,
stride: impl IntList,
dilation: impl IntList,
groups: i64,
benchmark: bool,
deterministic: bool,
) -> Tensor {
self.f_miopen_convolution(
weight,
bias,
padding,
stride,
dilation,
groups,
benchmark,
deterministic,
)
.unwrap()
}
pub fn miopen_convolution_add_relu<T: Borrow<Tensor>, S: Into<Scalar>>(
&self,
weight: &Tensor,
z: &Tensor,
alpha: S,
bias: Option<T>,
stride: impl IntList,
padding: impl IntList,
dilation: impl IntList,
groups: i64,
) -> Tensor {
self.f_miopen_convolution_add_relu(
weight, z, alpha, bias, stride, padding, dilation, groups,
)
.unwrap()
}
pub fn miopen_convolution_out<T: Borrow<Tensor>>(
&self,
out: &Tensor,
weight: &Tensor,
bias: Option<T>,
padding: impl IntList,
stride: impl IntList,
dilation: impl IntList,
groups: i64,
benchmark: bool,
deterministic: bool,
) -> Tensor {
self.f_miopen_convolution_out(
out,
weight,
bias,
padding,
stride,
dilation,
groups,
benchmark,
deterministic,
)
.unwrap()
}
pub fn miopen_convolution_relu<T: Borrow<Tensor>>(
&self,
weight: &Tensor,
bias: Option<T>,
stride: impl IntList,
padding: impl IntList,
dilation: impl IntList,
groups: i64,
) -> Tensor {
self.f_miopen_convolution_relu(weight, bias, stride, padding, dilation, groups).unwrap()
}
pub fn miopen_convolution_transpose<T: Borrow<Tensor>>(
&self,
weight: &Tensor,
bias: Option<T>,
padding: impl IntList,
output_padding: impl IntList,
stride: impl IntList,
dilation: impl IntList,
groups: i64,
benchmark: bool,
deterministic: bool,
) -> Tensor {
self.f_miopen_convolution_transpose(
weight,
bias,
padding,
output_padding,
stride,
dilation,
groups,
benchmark,
deterministic,
)
.unwrap()
}
pub fn miopen_convolution_transpose_out<T: Borrow<Tensor>>(
&self,
out: &Tensor,
weight: &Tensor,
bias: Option<T>,
padding: impl IntList,
output_padding: impl IntList,
stride: impl IntList,
dilation: impl IntList,
groups: i64,
benchmark: bool,
deterministic: bool,
) -> Tensor {
self.f_miopen_convolution_transpose_out(
out,
weight,
bias,
padding,
output_padding,
stride,
dilation,
groups,
benchmark,
deterministic,
)
.unwrap()
}
pub fn miopen_depthwise_convolution<T: Borrow<Tensor>>(
&self,
weight: &Tensor,
bias: Option<T>,
padding: impl IntList,
stride: impl IntList,
dilation: impl IntList,
groups: i64,
benchmark: bool,
deterministic: bool,
) -> Tensor {
self.f_miopen_depthwise_convolution(
weight,
bias,
padding,
stride,
dilation,
groups,
benchmark,
deterministic,
)
.unwrap()
}
pub fn miopen_depthwise_convolution_out<T: Borrow<Tensor>>(
&self,
out: &Tensor,
weight: &Tensor,
bias: Option<T>,
padding: impl IntList,
stride: impl IntList,
dilation: impl IntList,
groups: i64,
benchmark: bool,
deterministic: bool,
) -> Tensor {
self.f_miopen_depthwise_convolution_out(
out,
weight,
bias,
padding,
stride,
dilation,
groups,
benchmark,
deterministic,
)
.unwrap()
}
pub fn miopen_rnn<T: Borrow<Tensor>>(
&self,
weight: &[T],
weight_stride0: i64,
hx: &Tensor,
cx: Option<T>,
mode: i64,
hidden_size: i64,
num_layers: i64,
batch_first: bool,
dropout: f64,
train: bool,
bidirectional: bool,
batch_sizes: impl IntList,
dropout_state: Option<T>,
) -> (Tensor, Tensor, Tensor, Tensor, Tensor) {
self.f_miopen_rnn(
weight,
weight_stride0,
hx,
cx,
mode,
hidden_size,
num_layers,
batch_first,
dropout,
train,
bidirectional,
batch_sizes,
dropout_state,
)
.unwrap()
}
pub fn miopen_rnn_out<T: Borrow<Tensor>>(
&self,
out0: &Tensor,
out1: &Tensor,
out2: &Tensor,
out3: &Tensor,
out4: &Tensor,
weight: &[T],
weight_stride0: i64,
hx: &Tensor,
cx: Option<T>,
mode: i64,
hidden_size: i64,
num_layers: i64,
batch_first: bool,
dropout: f64,
train: bool,
bidirectional: bool,
batch_sizes: impl IntList,
dropout_state: Option<T>,
) -> (Tensor, Tensor, Tensor, Tensor, Tensor) {
self.f_miopen_rnn_out(
out0,
out1,
out2,
out3,
out4,
weight,
weight_stride0,
hx,
cx,
mode,
hidden_size,
num_layers,
batch_first,
dropout,
train,
bidirectional,
batch_sizes,
dropout_state,
)
.unwrap()
}
pub fn mish(&self) -> Tensor {
self.f_mish().unwrap()
}
pub fn mish_(&mut self) -> Tensor {
self.f_mish_().unwrap()
}
pub fn mish_backward(&self, grad_output: &Tensor) -> Tensor {
self.f_mish_backward(grad_output).unwrap()
}
pub fn mish_out(&self, out: &Tensor) -> Tensor {
self.f_mish_out(out).unwrap()
}
pub fn mkldnn_adaptive_avg_pool2d(&self, output_size: impl IntList) -> Tensor {
self.f_mkldnn_adaptive_avg_pool2d(output_size).unwrap()
}
pub fn mkldnn_adaptive_avg_pool2d_backward(&self, grad_output: &Tensor) -> Tensor {
self.f_mkldnn_adaptive_avg_pool2d_backward(grad_output).unwrap()
}
pub fn mkldnn_adaptive_avg_pool2d_backward_out(
&self,
out: &Tensor,
grad_output: &Tensor,
) -> Tensor {
self.f_mkldnn_adaptive_avg_pool2d_backward_out(out, grad_output).unwrap()
}
pub fn mkldnn_adaptive_avg_pool2d_out(
&self,
out: &Tensor,
output_size: impl IntList,
) -> Tensor {
self.f_mkldnn_adaptive_avg_pool2d_out(out, output_size).unwrap()
}
pub fn mkldnn_convolution<T: Borrow<Tensor>>(
&self,
weight: &Tensor,
bias: Option<T>,
padding: impl IntList,
stride: impl IntList,
dilation: impl IntList,
groups: i64,
) -> Tensor {
self.f_mkldnn_convolution(weight, bias, padding, stride, dilation, groups).unwrap()
}
pub fn mkldnn_convolution_out<T: Borrow<Tensor>>(
&self,
out: &Tensor,
weight: &Tensor,
bias: Option<T>,
padding: impl IntList,
stride: impl IntList,
dilation: impl IntList,
groups: i64,
) -> Tensor {
self.f_mkldnn_convolution_out(out, weight, bias, padding, stride, dilation, groups).unwrap()
}
pub fn mkldnn_linear<T: Borrow<Tensor>>(&self, weight: &Tensor, bias: Option<T>) -> Tensor {
self.f_mkldnn_linear(weight, bias).unwrap()
}
pub fn mkldnn_linear_backward_input(
input_size: impl IntList,
grad_output: &Tensor,
weight: &Tensor,
) -> Tensor {
Tensor::f_mkldnn_linear_backward_input(input_size, grad_output, weight).unwrap()
}
pub fn mkldnn_linear_backward_input_out(
out: &Tensor,
input_size: impl IntList,
grad_output: &Tensor,
weight: &Tensor,
) -> Tensor {
Tensor::f_mkldnn_linear_backward_input_out(out, input_size, grad_output, weight).unwrap()
}
pub fn mkldnn_linear_backward_weights(
&self,
grad_output: &Tensor,
weight: &Tensor,
bias_defined: bool,
) -> (Tensor, Tensor) {
self.f_mkldnn_linear_backward_weights(grad_output, weight, bias_defined).unwrap()
}
pub fn mkldnn_linear_backward_weights_out(
&self,
out0: &Tensor,
out1: &Tensor,
grad_output: &Tensor,
weight: &Tensor,
bias_defined: bool,
) -> (Tensor, Tensor) {
self.f_mkldnn_linear_backward_weights_out(out0, out1, grad_output, weight, bias_defined)
.unwrap()
}
pub fn mkldnn_linear_out<T: Borrow<Tensor>>(
&self,
out: &Tensor,
weight: &Tensor,
bias: Option<T>,
) -> Tensor {
self.f_mkldnn_linear_out(out, weight, bias).unwrap()
}
pub fn mkldnn_max_pool2d(
&self,
kernel_size: impl IntList,
stride: impl IntList,
padding: impl IntList,
dilation: impl IntList,
ceil_mode: bool,
) -> Tensor {
self.f_mkldnn_max_pool2d(kernel_size, stride, padding, dilation, ceil_mode).unwrap()
}
pub fn mkldnn_max_pool2d_backward(
&self,
grad_output: &Tensor,
output: &Tensor,
kernel_size: impl IntList,
stride: impl IntList,
padding: impl IntList,
dilation: impl IntList,
ceil_mode: bool,
) -> Tensor {
self.f_mkldnn_max_pool2d_backward(
grad_output,
output,
kernel_size,
stride,
padding,
dilation,
ceil_mode,
)
.unwrap()
}
pub fn mkldnn_max_pool2d_backward_out(
&self,
out: &Tensor,
grad_output: &Tensor,
output: &Tensor,
kernel_size: impl IntList,
stride: impl IntList,
padding: impl IntList,
dilation: impl IntList,
ceil_mode: bool,
) -> Tensor {
self.f_mkldnn_max_pool2d_backward_out(
out,
grad_output,
output,
kernel_size,
stride,
padding,
dilation,
ceil_mode,
)
.unwrap()
}
pub fn mkldnn_max_pool2d_out(
&self,
out: &Tensor,
kernel_size: impl IntList,
stride: impl IntList,
padding: impl IntList,
dilation: impl IntList,
ceil_mode: bool,
) -> Tensor {
self.f_mkldnn_max_pool2d_out(out, kernel_size, stride, padding, dilation, ceil_mode)
.unwrap()
}
pub fn mkldnn_max_pool3d(
&self,
kernel_size: impl IntList,
stride: impl IntList,
padding: impl IntList,
dilation: impl IntList,
ceil_mode: bool,
) -> Tensor {
self.f_mkldnn_max_pool3d(kernel_size, stride, padding, dilation, ceil_mode).unwrap()
}
pub fn mkldnn_max_pool3d_backward(
&self,
grad_output: &Tensor,
output: &Tensor,
kernel_size: impl IntList,
stride: impl IntList,
padding: impl IntList,
dilation: impl IntList,
ceil_mode: bool,
) -> Tensor {
self.f_mkldnn_max_pool3d_backward(
grad_output,
output,
kernel_size,
stride,
padding,
dilation,
ceil_mode,
)
.unwrap()
}
pub fn mkldnn_max_pool3d_backward_out(
&self,
out: &Tensor,
grad_output: &Tensor,
output: &Tensor,
kernel_size: impl IntList,
stride: impl IntList,
padding: impl IntList,
dilation: impl IntList,
ceil_mode: bool,
) -> Tensor {
self.f_mkldnn_max_pool3d_backward_out(
out,
grad_output,
output,
kernel_size,
stride,
padding,
dilation,
ceil_mode,
)
.unwrap()
}
pub fn mkldnn_max_pool3d_out(
&self,
out: &Tensor,
kernel_size: impl IntList,
stride: impl IntList,
padding: impl IntList,
dilation: impl IntList,
ceil_mode: bool,
) -> Tensor {
self.f_mkldnn_max_pool3d_out(out, kernel_size, stride, padding, dilation, ceil_mode)
.unwrap()
}
pub fn mkldnn_reorder_conv2d_weight(
&self,
padding: impl IntList,
stride: impl IntList,
dilation: impl IntList,
groups: i64,
input_size: impl IntListOption,
) -> Tensor {
self.f_mkldnn_reorder_conv2d_weight(padding, stride, dilation, groups, input_size).unwrap()
}
pub fn mkldnn_reorder_conv2d_weight_out(
&self,
out: &Tensor,
padding: impl IntList,
stride: impl IntList,
dilation: impl IntList,
groups: i64,
input_size: impl IntListOption,
) -> Tensor {
self.f_mkldnn_reorder_conv2d_weight_out(out, padding, stride, dilation, groups, input_size)
.unwrap()
}
pub fn mkldnn_reorder_conv3d_weight(
&self,
padding: impl IntList,
stride: impl IntList,
dilation: impl IntList,
groups: i64,
) -> Tensor {
self.f_mkldnn_reorder_conv3d_weight(padding, stride, dilation, groups).unwrap()
}
pub fn mkldnn_reorder_conv3d_weight_out(
&self,
out: &Tensor,
padding: impl IntList,
stride: impl IntList,
dilation: impl IntList,
groups: i64,
) -> Tensor {
self.f_mkldnn_reorder_conv3d_weight_out(out, padding, stride, dilation, groups).unwrap()
}
pub fn mkldnn_rnn_layer(
&self,
weight0: &Tensor,
weight1: &Tensor,
weight2: &Tensor,
weight3: &Tensor,
hx_: &Tensor,
cx_: &Tensor,
reverse: bool,
batch_sizes: impl IntList,
mode: i64,
hidden_size: i64,
num_layers: i64,
has_biases: bool,
bidirectional: bool,
batch_first: bool,
train: bool,
) -> (Tensor, Tensor, Tensor, Tensor) {
self.f_mkldnn_rnn_layer(
weight0,
weight1,
weight2,
weight3,
hx_,
cx_,
reverse,
batch_sizes,
mode,
hidden_size,
num_layers,
has_biases,
bidirectional,
batch_first,
train,
)
.unwrap()
}
pub fn mkldnn_rnn_layer_backward<T: Borrow<Tensor>>(
&self,
weight1: &Tensor,
weight2: &Tensor,
weight3: &Tensor,
weight4: &Tensor,
hx_: &Tensor,
cx_tmp: &Tensor,
output: &Tensor,
hy_: &Tensor,
cy_: &Tensor,
grad_output: Option<T>,
grad_hy: Option<T>,
grad_cy: Option<T>,
reverse: bool,
mode: i64,
hidden_size: i64,
num_layers: i64,
has_biases: bool,
train: bool,
bidirectional: bool,
batch_sizes: impl IntList,
batch_first: bool,
workspace: &Tensor,
) -> (Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, Tensor) {
self.f_mkldnn_rnn_layer_backward(
weight1,
weight2,
weight3,
weight4,
hx_,
cx_tmp,
output,
hy_,
cy_,
grad_output,
grad_hy,
grad_cy,
reverse,
mode,
hidden_size,
num_layers,
has_biases,
train,
bidirectional,
batch_sizes,
batch_first,
workspace,
)
.unwrap()
}
pub fn mkldnn_rnn_layer_backward_out<T: Borrow<Tensor>>(
&self,
out0: &Tensor,
out1: &Tensor,
out2: &Tensor,
out3: &Tensor,
out4: &Tensor,
out5: &Tensor,
out6: &Tensor,
weight1: &Tensor,
weight2: &Tensor,
weight3: &Tensor,
weight4: &Tensor,
hx_: &Tensor,
cx_tmp: &Tensor,
output: &Tensor,
hy_: &Tensor,
cy_: &Tensor,
grad_output: Option<T>,
grad_hy: Option<T>,
grad_cy: Option<T>,
reverse: bool,
mode: i64,
hidden_size: i64,
num_layers: i64,
has_biases: bool,
train: bool,
bidirectional: bool,
batch_sizes: impl IntList,
batch_first: bool,
workspace: &Tensor,
) -> (Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, Tensor) {
self.f_mkldnn_rnn_layer_backward_out(
out0,
out1,
out2,
out3,
out4,
out5,
out6,
weight1,
weight2,
weight3,
weight4,
hx_,
cx_tmp,
output,
hy_,
cy_,
grad_output,
grad_hy,
grad_cy,
reverse,
mode,
hidden_size,
num_layers,
has_biases,
train,
bidirectional,
batch_sizes,
batch_first,
workspace,
)
.unwrap()
}
pub fn mkldnn_rnn_layer_out(
&self,
out0: &Tensor,
out1: &Tensor,
out2: &Tensor,
out3: &Tensor,
weight0: &Tensor,
weight1: &Tensor,
weight2: &Tensor,
weight3: &Tensor,
hx_: &Tensor,
cx_: &Tensor,
reverse: bool,
batch_sizes: impl IntList,
mode: i64,
hidden_size: i64,
num_layers: i64,
has_biases: bool,
bidirectional: bool,
batch_first: bool,
train: bool,
) -> (Tensor, Tensor, Tensor, Tensor) {
self.f_mkldnn_rnn_layer_out(
out0,
out1,
out2,
out3,
weight0,
weight1,
weight2,
weight3,
hx_,
cx_,
reverse,
batch_sizes,
mode,
hidden_size,
num_layers,
has_biases,
bidirectional,
batch_first,
train,
)
.unwrap()
}
pub fn mm(&self, mat2: &Tensor) -> Tensor {
self.f_mm(mat2).unwrap()
}
pub fn mm_out(&self, out: &Tensor, mat2: &Tensor) -> Tensor {
self.f_mm_out(out, mat2).unwrap()
}
pub fn mode(&self, dim: i64, keepdim: bool) -> (Tensor, Tensor) {
self.f_mode(dim, keepdim).unwrap()
}
pub fn mode_values(
&self,
values: &Tensor,
indices: &Tensor,
dim: i64,
keepdim: bool,
) -> (Tensor, Tensor) {
self.f_mode_values(values, indices, dim, keepdim).unwrap()
}
pub fn moveaxis(&self, source: impl IntList, destination: impl IntList) -> Tensor {
self.f_moveaxis(source, destination).unwrap()
}
pub fn moveaxis_int(&self, source: i64, destination: i64) -> Tensor {
self.f_moveaxis_int(source, destination).unwrap()
}
pub fn movedim(&self, source: impl IntList, destination: impl IntList) -> Tensor {
self.f_movedim(source, destination).unwrap()
}
pub fn movedim_int(&self, source: i64, destination: i64) -> Tensor {
self.f_movedim_int(source, destination).unwrap()
}
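/// Mean squared error between the tensor and `target`; `reduction` selects
/// `Mean`, `Sum`, or element-wise `None`. Panics on error; see `f_mse_loss`.
///
/// Sketch (assumes the crate is used as `tch` with libtorch available; not a doctest):
///
/// ```ignore
/// use tch::{Device, Kind, Reduction, Tensor};
/// let pred = Tensor::randn(&[4], (Kind::Float, Device::Cpu));
/// let target = Tensor::zeros(&[4], (Kind::Float, Device::Cpu));
/// let loss = pred.mse_loss(&target, Reduction::Mean); // scalar tensor
/// ```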
pub fn mse_loss(&self, target: &Tensor, reduction: crate::Reduction) -> Tensor {
self.f_mse_loss(target, reduction).unwrap()
}
pub fn mse_loss_backward(
&self,
grad_output: &Tensor,
target: &Tensor,
reduction: crate::Reduction,
) -> Tensor {
self.f_mse_loss_backward(grad_output, target, reduction).unwrap()
}
pub fn mse_loss_backward_grad_input(
&self,
grad_input: &Tensor,
grad_output: &Tensor,
target: &Tensor,
reduction: crate::Reduction,
) -> Tensor {
self.f_mse_loss_backward_grad_input(grad_input, grad_output, target, reduction).unwrap()
}
pub fn mse_loss_out(
&self,
out: &Tensor,
target: &Tensor,
reduction: crate::Reduction,
) -> Tensor {
self.f_mse_loss_out(out, target, reduction).unwrap()
}
pub fn msort(&self) -> Tensor {
self.f_msort().unwrap()
}
pub fn msort_out(&self, out: &Tensor) -> Tensor {
self.f_msort_out(out).unwrap()
}
pub fn mt(&self) -> Tensor {
self.f_mt().unwrap()
}
pub fn g_mul(&self, other: &Tensor) -> Tensor {
self.f_mul(other).unwrap()
}
pub fn g_mul_(&mut self, other: &Tensor) -> Tensor {
self.f_mul_(other).unwrap()
}
pub fn mul_out(&self, out: &Tensor, other: &Tensor) -> Tensor {
self.f_mul_out(out, other).unwrap()
}
pub fn g_mul_scalar<S: Into<Scalar>>(&self, other: S) -> Tensor {
self.f_mul_scalar(other).unwrap()
}
pub fn g_mul_scalar_<S: Into<Scalar>>(&mut self, other: S) -> Tensor {
self.f_mul_scalar_(other).unwrap()
}
pub fn mul_scalar_out<S: Into<Scalar>>(&self, out: &Tensor, other: S) -> Tensor {
self.f_mul_scalar_out(out, other).unwrap()
}
pub fn multi_margin_loss_backward<T: Borrow<Tensor>, S: Into<Scalar>>(
&self,
grad_output: &Tensor,
target: &Tensor,
p: S,
margin: S,
weight: Option<T>,
reduction: crate::Reduction,
) -> Tensor {
self.f_multi_margin_loss_backward(grad_output, target, p, margin, weight, reduction)
.unwrap()
}
pub fn multi_margin_loss_backward_grad_input<T: Borrow<Tensor>, S: Into<Scalar>>(
&self,
grad_input: &Tensor,
grad_output: &Tensor,
target: &Tensor,
p: S,
margin: S,
weight: Option<T>,
reduction: crate::Reduction,
) -> Tensor {
self.f_multi_margin_loss_backward_grad_input(
grad_input,
grad_output,
target,
p,
margin,
weight,
reduction,
)
.unwrap()
}
pub fn multilabel_margin_loss(&self, target: &Tensor, reduction: crate::Reduction) -> Tensor {
self.f_multilabel_margin_loss(target, reduction).unwrap()
}
pub fn multilabel_margin_loss_backward(
&self,
grad_output: &Tensor,
target: &Tensor,
reduction: crate::Reduction,
is_target: &Tensor,
) -> Tensor {
self.f_multilabel_margin_loss_backward(grad_output, target, reduction, is_target).unwrap()
}
pub fn multilabel_margin_loss_backward_grad_input(
&self,
grad_input: &Tensor,
grad_output: &Tensor,
target: &Tensor,
reduction: crate::Reduction,
is_target: &Tensor,
) -> Tensor {
self.f_multilabel_margin_loss_backward_grad_input(
grad_input,
grad_output,
target,
reduction,
is_target,
)
.unwrap()
}
pub fn multilabel_margin_loss_out(
&self,
out: &Tensor,
target: &Tensor,
reduction: crate::Reduction,
) -> Tensor {
self.f_multilabel_margin_loss_out(out, target, reduction).unwrap()
}
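/// Draws `num_samples` indices from the multinomial distribution defined by
/// the non-negative (not necessarily normalized) weights along the last
/// dimension. Panics on error; see `f_multinomial`.
///
/// Sketch (assumes the crate is used as `tch` with libtorch available; not a doctest):
///
/// ```ignore
/// use tch::{Device, Kind, Tensor};
/// let weights = Tensor::ones(&[5], (Kind::Float, Device::Cpu)); // uniform weights
/// let idx = weights.multinomial(3, false); // 3 distinct indices in 0..5
/// ```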
pub fn multinomial(&self, num_samples: i64, replacement: bool) -> Tensor {
self.f_multinomial(num_samples, replacement).unwrap()
}
pub fn multinomial_out(&self, out: &Tensor, num_samples: i64, replacement: bool) -> Tensor {
self.f_multinomial_out(out, num_samples, replacement).unwrap()
}
pub fn multiply(&self, other: &Tensor) -> Tensor {
self.f_multiply(other).unwrap()
}
pub fn multiply_(&mut self, other: &Tensor) -> Tensor {
self.f_multiply_(other).unwrap()
}
pub fn multiply_out(&self, out: &Tensor, other: &Tensor) -> Tensor {
self.f_multiply_out(out, other).unwrap()
}
pub fn multiply_scalar<S: Into<Scalar>>(&self, other: S) -> Tensor {
self.f_multiply_scalar(other).unwrap()
}
pub fn multiply_scalar_<S: Into<Scalar>>(&mut self, other: S) -> Tensor {
self.f_multiply_scalar_(other).unwrap()
}
pub fn mv(&self, vec: &Tensor) -> Tensor {
self.f_mv(vec).unwrap()
}
pub fn mv_out(&self, out: &Tensor, vec: &Tensor) -> Tensor {
self.f_mv_out(out, vec).unwrap()
}
pub fn mvlgamma(&self, p: i64) -> Tensor {
self.f_mvlgamma(p).unwrap()
}
pub fn mvlgamma_(&mut self, p: i64) -> Tensor {
self.f_mvlgamma_(p).unwrap()
}
pub fn mvlgamma_out(&self, out: &Tensor, p: i64) -> Tensor {
self.f_mvlgamma_out(out, p).unwrap()
}
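/// Replaces `NaN`, `+inf`, and `-inf` values; passing `None` keeps the default
/// replacements (0 for NaN, the dtype's largest finite values for infinities).
/// Panics on error; see `f_nan_to_num`.
///
/// Sketch (assumes the crate is used as `tch` with libtorch available; not a doctest):
///
/// ```ignore
/// use tch::{Device, Kind, Tensor};
/// let x = Tensor::full(&[2], f64::NAN, (Kind::Float, Device::Cpu));
/// let y = x.nan_to_num(0.0, None::<f64>, None::<f64>); // NaNs become 0.0
/// ```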
pub fn nan_to_num(
&self,
nan: impl Into<Option<f64>>,
posinf: impl Into<Option<f64>>,
neginf: impl Into<Option<f64>>,
) -> Tensor {
self.f_nan_to_num(nan, posinf, neginf).unwrap()
}
pub fn nan_to_num_(
&mut self,
nan: impl Into<Option<f64>>,
posinf: impl Into<Option<f64>>,
neginf: impl Into<Option<f64>>,
) -> Tensor {
self.f_nan_to_num_(nan, posinf, neginf).unwrap()
}
pub fn nan_to_num_out(
&self,
out: &Tensor,
nan: impl Into<Option<f64>>,
posinf: impl Into<Option<f64>>,
neginf: impl Into<Option<f64>>,
) -> Tensor {
self.f_nan_to_num_out(out, nan, posinf, neginf).unwrap()
}
pub fn nanmean(
&self,
dim: impl IntListOption,
keepdim: bool,
dtype: impl Into<Option<Kind>>,
) -> Tensor {
self.f_nanmean(dim, keepdim, dtype).unwrap()
}
pub fn nanmean_out(
&self,
out: &Tensor,
dim: impl IntListOption,
keepdim: bool,
dtype: impl Into<Option<Kind>>,
) -> Tensor {
self.f_nanmean_out(out, dim, keepdim, dtype).unwrap()
}
pub fn nanmedian(&self) -> Tensor {
self.f_nanmedian().unwrap()
}
pub fn nanmedian_dim(&self, dim: i64, keepdim: bool) -> (Tensor, Tensor) {
self.f_nanmedian_dim(dim, keepdim).unwrap()
}
pub fn nanmedian_dim_values(
&self,
values: &Tensor,
indices: &Tensor,
dim: i64,
keepdim: bool,
) -> (Tensor, Tensor) {
self.f_nanmedian_dim_values(values, indices, dim, keepdim).unwrap()
}
pub fn nanmedian_out(&self, out: &Tensor) -> Tensor {
self.f_nanmedian_out(out).unwrap()
}
pub fn nanquantile(
&self,
q: &Tensor,
dim: impl Into<Option<i64>>,
keepdim: bool,
interpolation: &str,
) -> Tensor {
self.f_nanquantile(q, dim, keepdim, interpolation).unwrap()
}
pub fn nanquantile_out(
&self,
out: &Tensor,
q: &Tensor,
dim: impl Into<Option<i64>>,
keepdim: bool,
interpolation: &str,
) -> Tensor {
self.f_nanquantile_out(out, q, dim, keepdim, interpolation).unwrap()
}
pub fn nanquantile_scalar(
&self,
q: f64,
dim: impl Into<Option<i64>>,
keepdim: bool,
interpolation: &str,
) -> Tensor {
self.f_nanquantile_scalar(q, dim, keepdim, interpolation).unwrap()
}
pub fn nanquantile_scalar_out(
&self,
out: &Tensor,
q: f64,
dim: impl Into<Option<i64>>,
keepdim: bool,
interpolation: &str,
) -> Tensor {
self.f_nanquantile_scalar_out(out, q, dim, keepdim, interpolation).unwrap()
}
pub fn nansum(
&self,
dim: impl IntListOption,
keepdim: bool,
dtype: impl Into<Option<Kind>>,
) -> Tensor {
self.f_nansum(dim, keepdim, dtype).unwrap()
}
pub fn nansum_out(
&self,
out: &Tensor,
dim: impl IntListOption,
keepdim: bool,
dtype: impl Into<Option<Kind>>,
) -> Tensor {
self.f_nansum_out(out, dim, keepdim, dtype).unwrap()
}
pub fn narrow(&self, dim: i64, start: i64, length: i64) -> Tensor {
self.f_narrow(dim, start, length).unwrap()
}
pub fn narrow_copy(&self, dim: i64, start: i64, length: i64) -> Tensor {
self.f_narrow_copy(dim, start, length).unwrap()
}
pub fn narrow_copy_out(&self, out: &Tensor, dim: i64, start: i64, length: i64) -> Tensor {
self.f_narrow_copy_out(out, dim, start, length).unwrap()
}
pub fn narrow_tensor(&self, dim: i64, start: &Tensor, length: i64) -> Tensor {
self.f_narrow_tensor(dim, start, length).unwrap()
}
pub fn native_batch_norm<T: Borrow<Tensor>>(
&self,
weight: Option<T>,
bias: Option<T>,
running_mean: Option<T>,
running_var: Option<T>,
training: bool,
momentum: f64,
eps: f64,
) -> (Tensor, Tensor, Tensor) {
self.f_native_batch_norm(weight, bias, running_mean, running_var, training, momentum, eps)
.unwrap()
}
pub fn native_batch_norm_out<T: Borrow<Tensor>>(
&self,
out: &Tensor,
save_mean: &Tensor,
save_invstd: &Tensor,
weight: Option<T>,
bias: Option<T>,
running_mean: Option<T>,
running_var: Option<T>,
training: bool,
momentum: f64,
eps: f64,
) -> (Tensor, Tensor, Tensor) {
self.f_native_batch_norm_out(
out,
save_mean,
save_invstd,
weight,
bias,
running_mean,
running_var,
training,
momentum,
eps,
)
.unwrap()
}
pub fn native_channel_shuffle(&self, groups: i64) -> Tensor {
self.f_native_channel_shuffle(groups).unwrap()
}
pub fn native_dropout(&self, p: f64, train: bool) -> (Tensor, Tensor) {
self.f_native_dropout(p, train).unwrap()
}
pub fn native_dropout_backward(grad_output: &Tensor, mask: &Tensor, scale: f64) -> Tensor {
Tensor::f_native_dropout_backward(grad_output, mask, scale).unwrap()
}
pub fn native_dropout_backward_out(
out: &Tensor,
grad_output: &Tensor,
mask: &Tensor,
scale: f64,
) -> Tensor {
Tensor::f_native_dropout_backward_out(out, grad_output, mask, scale).unwrap()
}
pub fn native_dropout_out(
&self,
out0: &Tensor,
out1: &Tensor,
p: f64,
train: bool,
) -> (Tensor, Tensor) {
self.f_native_dropout_out(out0, out1, p, train).unwrap()
}
pub fn native_group_norm<T: Borrow<Tensor>>(
&self,
weight: Option<T>,
bias: Option<T>,
n: i64,
c: i64,
hxw: i64,
group: i64,
eps: f64,
) -> (Tensor, Tensor, Tensor) {
self.f_native_group_norm(weight, bias, n, c, hxw, group, eps).unwrap()
}
pub fn native_group_norm_out<T: Borrow<Tensor>>(
&self,
out0: &Tensor,
out1: &Tensor,
out2: &Tensor,
weight: Option<T>,
bias: Option<T>,
n: i64,
c: i64,
hxw: i64,
group: i64,
eps: f64,
) -> (Tensor, Tensor, Tensor) {
self.f_native_group_norm_out(out0, out1, out2, weight, bias, n, c, hxw, group, eps).unwrap()
}
pub fn native_layer_norm<T: Borrow<Tensor>>(
&self,
normalized_shape: impl IntList,
weight: Option<T>,
bias: Option<T>,
eps: f64,
) -> (Tensor, Tensor, Tensor) {
self.f_native_layer_norm(normalized_shape, weight, bias, eps).unwrap()
}
pub fn native_layer_norm_out<T: Borrow<Tensor>>(
&self,
out0: &Tensor,
out1: &Tensor,
out2: &Tensor,
normalized_shape: impl IntList,
weight: Option<T>,
bias: Option<T>,
eps: f64,
) -> (Tensor, Tensor, Tensor) {
self.f_native_layer_norm_out(out0, out1, out2, normalized_shape, weight, bias, eps).unwrap()
}
pub fn native_norm(&self) -> Tensor {
self.f_native_norm().unwrap()
}
pub fn native_norm_out(&self, out: &Tensor) -> Tensor {
self.f_native_norm_out(out).unwrap()
}
pub fn native_norm_scalaropt_dim_dtype<S: Into<Scalar>>(
&self,
p: S,
dim: impl IntList,
keepdim: bool,
dtype: impl Into<Option<Kind>>,
) -> Tensor {
self.f_native_norm_scalaropt_dim_dtype(p, dim, keepdim, dtype).unwrap()
}
pub fn native_norm_scalaropt_dim_dtype_out<S: Into<Scalar>>(
&self,
out: &Tensor,
p: S,
dim: impl IntList,
keepdim: bool,
dtype: impl Into<Option<Kind>>,
) -> Tensor {
self.f_native_norm_scalaropt_dim_dtype_out(out, p, dim, keepdim, dtype).unwrap()
}
pub fn ne<S: Into<Scalar>>(&self, other: S) -> Tensor {
self.f_ne(other).unwrap()
}
pub fn ne_<S: Into<Scalar>>(&mut self, other: S) -> Tensor {
self.f_ne_(other).unwrap()
}
pub fn ne_scalar_out<S: Into<Scalar>>(&self, out: &Tensor, other: S) -> Tensor {
self.f_ne_scalar_out(out, other).unwrap()
}
pub fn ne_tensor(&self, other: &Tensor) -> Tensor {
self.f_ne_tensor(other).unwrap()
}
pub fn ne_tensor_(&mut self, other: &Tensor) -> Tensor {
self.f_ne_tensor_(other).unwrap()
}
pub fn ne_tensor_out(&self, out: &Tensor, other: &Tensor) -> Tensor {
self.f_ne_tensor_out(out, other).unwrap()
}
pub fn neg(&self) -> Tensor {
self.f_neg().unwrap()
}
pub fn neg_(&mut self) -> Tensor {
self.f_neg_().unwrap()
}
pub fn neg_out(&self, out: &Tensor) -> Tensor {
self.f_neg_out(out).unwrap()
}
pub fn negative(&self) -> Tensor {
self.f_negative().unwrap()
}
pub fn negative_(&mut self) -> Tensor {
self.f_negative_().unwrap()
}
pub fn negative_out(&self, out: &Tensor) -> Tensor {
self.f_negative_out(out).unwrap()
}
pub fn nested_to_padded_tensor(&self, padding: f64, output_size: impl IntListOption) -> Tensor {
self.f_nested_to_padded_tensor(padding, output_size).unwrap()
}
pub fn new_empty(&self, size: impl IntList, options: (Kind, Device)) -> Tensor {
self.f_new_empty(size, options).unwrap()
}
pub fn new_empty_out(&self, out: &Tensor, size: impl IntList) -> Tensor {
self.f_new_empty_out(out, size).unwrap()
}
pub fn new_empty_strided(
&self,
size: impl IntList,
stride: impl IntList,
options: (Kind, Device),
) -> Tensor {
self.f_new_empty_strided(size, stride, options).unwrap()
}
pub fn new_empty_strided_out(
&self,
out: &Tensor,
size: impl IntList,
stride: impl IntList,
) -> Tensor {
self.f_new_empty_strided_out(out, size, stride).unwrap()
}
pub fn new_full<S: Into<Scalar>>(
&self,
size: impl IntList,
fill_value: S,
options: (Kind, Device),
) -> Tensor {
self.f_new_full(size, fill_value, options).unwrap()
}
pub fn new_full_out<S: Into<Scalar>>(
&self,
out: &Tensor,
size: impl IntList,
fill_value: S,
) -> Tensor {
self.f_new_full_out(out, size, fill_value).unwrap()
}
pub fn new_ones(&self, size: impl IntList, options: (Kind, Device)) -> Tensor {
self.f_new_ones(size, options).unwrap()
}
pub fn new_ones_out(&self, out: &Tensor, size: impl IntList) -> Tensor {
self.f_new_ones_out(out, size).unwrap()
}
pub fn new_zeros(&self, size: impl IntList, options: (Kind, Device)) -> Tensor {
self.f_new_zeros(size, options).unwrap()
}
pub fn new_zeros_out(&self, out: &Tensor, size: impl IntList) -> Tensor {
self.f_new_zeros_out(out, size).unwrap()
}
pub fn nextafter(&self, other: &Tensor) -> Tensor {
self.f_nextafter(other).unwrap()
}
pub fn nextafter_(&mut self, other: &Tensor) -> Tensor {
self.f_nextafter_(other).unwrap()
}
pub fn nextafter_out(&self, out: &Tensor, other: &Tensor) -> Tensor {
self.f_nextafter_out(out, other).unwrap()
}
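/// A minimal sketch of the negative log-likelihood loss wrapper. It assumes the `tch`
/// crate path, the crate-level `Reduction` enum, and the companion `log_softmax` wrapper
/// generated earlier in this file; `-100` mirrors the usual PyTorch `ignore_index`
/// default rather than anything specific to this binding.
///
/// ```no_run
/// use tch::{Device, Kind, Reduction, Tensor};
///
/// // NLL loss expects log-probabilities of shape [N, C] and class indices of shape [N].
/// let log_probs =
///     Tensor::randn(&[8, 5], (Kind::Float, Device::Cpu)).log_softmax(-1, Kind::Float);
/// let target = Tensor::randint(5, &[8], (Kind::Int64, Device::Cpu));
/// let loss = log_probs.g_nll_loss(&target, None::<Tensor>, Reduction::Mean, -100);
/// ```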
pub fn g_nll_loss<T: Borrow<Tensor>>(
&self,
target: &Tensor,
weight: Option<T>,
reduction: crate::Reduction,
ignore_index: i64,
) -> Tensor {
self.f_nll_loss(target, weight, reduction, ignore_index).unwrap()
}
pub fn nll_loss2d<T: Borrow<Tensor>>(
&self,
target: &Tensor,
weight: Option<T>,
reduction: crate::Reduction,
ignore_index: i64,
) -> Tensor {
self.f_nll_loss2d(target, weight, reduction, ignore_index).unwrap()
}
pub fn nll_loss2d_backward<T: Borrow<Tensor>>(
&self,
grad_output: &Tensor,
target: &Tensor,
weight: Option<T>,
reduction: crate::Reduction,
ignore_index: i64,
total_weight: &Tensor,
) -> Tensor {
self.f_nll_loss2d_backward(
grad_output,
target,
weight,
reduction,
ignore_index,
total_weight,
)
.unwrap()
}
pub fn nll_loss2d_backward_grad_input<T: Borrow<Tensor>>(
&self,
grad_input: &Tensor,
grad_output: &Tensor,
target: &Tensor,
weight: Option<T>,
reduction: crate::Reduction,
ignore_index: i64,
total_weight: &Tensor,
) -> Tensor {
self.f_nll_loss2d_backward_grad_input(
grad_input,
grad_output,
target,
weight,
reduction,
ignore_index,
total_weight,
)
.unwrap()
}
pub fn nll_loss2d_out<T: Borrow<Tensor>>(
&self,
out: &Tensor,
target: &Tensor,
weight: Option<T>,
reduction: crate::Reduction,
ignore_index: i64,
) -> Tensor {
self.f_nll_loss2d_out(out, target, weight, reduction, ignore_index).unwrap()
}
pub fn nll_loss_backward<T: Borrow<Tensor>>(
&self,
grad_output: &Tensor,
target: &Tensor,
weight: Option<T>,
reduction: crate::Reduction,
ignore_index: i64,
total_weight: &Tensor,
) -> Tensor {
self.f_nll_loss_backward(grad_output, target, weight, reduction, ignore_index, total_weight)
.unwrap()
}
pub fn nll_loss_backward_grad_input<T: Borrow<Tensor>>(
&self,
grad_input: &Tensor,
grad_output: &Tensor,
target: &Tensor,
weight: Option<T>,
reduction: crate::Reduction,
ignore_index: i64,
total_weight: &Tensor,
) -> Tensor {
self.f_nll_loss_backward_grad_input(
grad_input,
grad_output,
target,
weight,
reduction,
ignore_index,
total_weight,
)
.unwrap()
}
pub fn nll_loss_nd<T: Borrow<Tensor>>(
&self,
target: &Tensor,
weight: Option<T>,
reduction: crate::Reduction,
ignore_index: i64,
) -> Tensor {
self.f_nll_loss_nd(target, weight, reduction, ignore_index).unwrap()
}
pub fn nll_loss_out<T: Borrow<Tensor>>(
&self,
out: &Tensor,
target: &Tensor,
weight: Option<T>,
reduction: crate::Reduction,
ignore_index: i64,
) -> Tensor {
self.f_nll_loss_out(out, target, weight, reduction, ignore_index).unwrap()
}
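/// Hedged sketch of `nonzero`: it returns the indices of the non-zero elements as a
/// two-dimensional index tensor of shape `[count, ndim]`. Import path assumed as in the
/// other examples in this file.
///
/// ```no_run
/// use tch::{Device, Kind, Tensor};
///
/// // Every element of a ones tensor is non-zero, so this yields a [6, 2] index tensor.
/// let t = Tensor::ones(&[2, 3], (Kind::Float, Device::Cpu));
/// let idx = t.nonzero();
/// ```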
pub fn nonzero(&self) -> Tensor {
self.f_nonzero().unwrap()
}
pub fn nonzero_numpy(&self) -> Vec<Tensor> {
self.f_nonzero_numpy().unwrap()
}
pub fn nonzero_out(&self, out: &Tensor) -> Tensor {
self.f_nonzero_out(out).unwrap()
}
pub fn nonzero_static(&self, size: i64, fill_value: i64) -> Tensor {
self.f_nonzero_static(size, fill_value).unwrap()
}
pub fn nonzero_static_out(&self, out: &Tensor, size: i64, fill_value: i64) -> Tensor {
self.f_nonzero_static_out(out, size, fill_value).unwrap()
}
pub fn norm(&self) -> Tensor {
self.f_norm().unwrap()
}
pub fn norm_dtype_out<S: Into<Scalar>>(
&self,
out: &Tensor,
p: S,
dim: impl IntList,
keepdim: bool,
dtype: Kind,
) -> Tensor {
self.f_norm_dtype_out(out, p, dim, keepdim, dtype).unwrap()
}
pub fn norm_except_dim(v: &Tensor, pow: i64, dim: i64) -> Tensor {
Tensor::f_norm_except_dim(v, pow, dim).unwrap()
}
pub fn norm_out<S: Into<Scalar>>(
&self,
out: &Tensor,
p: S,
dim: impl IntList,
keepdim: bool,
) -> Tensor {
self.f_norm_out(out, p, dim, keepdim).unwrap()
}
pub fn norm_scalar_out(&self, out: &Tensor) -> Tensor {
self.f_norm_scalar_out(out).unwrap()
}
pub fn norm_scalaropt_dim<S: Into<Scalar>>(
&self,
p: S,
dim: impl IntList,
keepdim: bool,
) -> Tensor {
self.f_norm_scalaropt_dim(p, dim, keepdim).unwrap()
}
pub fn norm_scalaropt_dim_dtype<S: Into<Scalar>>(
&self,
p: S,
dim: impl IntList,
keepdim: bool,
dtype: Kind,
) -> Tensor {
self.f_norm_scalaropt_dim_dtype(p, dim, keepdim, dtype).unwrap()
}
pub fn norm_scalaropt_dtype<S: Into<Scalar>>(&self, p: S, dtype: Kind) -> Tensor {
self.f_norm_scalaropt_dtype(p, dtype).unwrap()
}
pub fn norm_scalaropt_dtype_out<S: Into<Scalar>>(
&self,
out: &Tensor,
p: S,
dtype: Kind,
) -> Tensor {
self.f_norm_scalaropt_dtype_out(out, p, dtype).unwrap()
}
pub fn normal_(&mut self, mean: f64, std: f64) -> Tensor {
self.f_normal_(mean, std).unwrap()
}
pub fn normal_functional(&self, mean: f64, std: f64) -> Tensor {
self.f_normal_functional(mean, std).unwrap()
}
pub fn not_equal<S: Into<Scalar>>(&self, other: S) -> Tensor {
self.f_not_equal(other).unwrap()
}
pub fn not_equal_<S: Into<Scalar>>(&mut self, other: S) -> Tensor {
self.f_not_equal_(other).unwrap()
}
pub fn not_equal_scalar_out<S: Into<Scalar>>(&self, out: &Tensor, other: S) -> Tensor {
self.f_not_equal_scalar_out(out, other).unwrap()
}
pub fn not_equal_tensor(&self, other: &Tensor) -> Tensor {
self.f_not_equal_tensor(other).unwrap()
}
pub fn not_equal_tensor_(&mut self, other: &Tensor) -> Tensor {
self.f_not_equal_tensor_(other).unwrap()
}
pub fn not_equal_tensor_out(&self, out: &Tensor, other: &Tensor) -> Tensor {
self.f_not_equal_tensor_out(out, other).unwrap()
}
pub fn nuclear_norm(&self, keepdim: bool) -> Tensor {
self.f_nuclear_norm(keepdim).unwrap()
}
pub fn nuclear_norm_dim(&self, dim: impl IntList, keepdim: bool) -> Tensor {
self.f_nuclear_norm_dim(dim, keepdim).unwrap()
}
pub fn nuclear_norm_dim_out(&self, out: &Tensor, dim: impl IntList, keepdim: bool) -> Tensor {
self.f_nuclear_norm_dim_out(out, dim, keepdim).unwrap()
}
pub fn nuclear_norm_out(&self, out: &Tensor, keepdim: bool) -> Tensor {
self.f_nuclear_norm_out(out, keepdim).unwrap()
}
pub fn numpy_t(&self) -> Tensor {
self.f_numpy_t().unwrap()
}
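/// Hedged sketch of `one_hot`: integer class labels become a one-hot encoded tensor with
/// a trailing dimension of `num_classes`. Assumes the `tch` crate path.
///
/// ```no_run
/// use tch::{Device, Kind, Tensor};
///
/// // Four labels drawn from [0, 5) expand to a [4, 5] one-hot tensor.
/// let labels = Tensor::randint(5, &[4], (Kind::Int64, Device::Cpu));
/// let encoded = labels.one_hot(5);
/// ```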
pub fn one_hot(&self, num_classes: i64) -> Tensor {
self.f_one_hot(num_classes).unwrap()
}
pub fn ones(size: impl IntList, options: (Kind, Device)) -> Tensor {
Tensor::f_ones(size, options).unwrap()
}
pub fn ones_like(&self) -> Tensor {
self.f_ones_like().unwrap()
}
pub fn ones_like_out(&self, out: &Tensor) -> Tensor {
self.f_ones_like_out(out).unwrap()
}
pub fn ones_out(out: &Tensor, size: impl IntList) -> Tensor {
Tensor::f_ones_out(out, size).unwrap()
}
pub fn orgqr(&self, input2: &Tensor) -> Tensor {
self.f_orgqr(input2).unwrap()
}
pub fn orgqr_out(&self, out: &Tensor, input2: &Tensor) -> Tensor {
self.f_orgqr_out(out, input2).unwrap()
}
pub fn ormqr(&self, input2: &Tensor, input3: &Tensor, left: bool, transpose: bool) -> Tensor {
self.f_ormqr(input2, input3, left, transpose).unwrap()
}
pub fn ormqr_out(
&self,
out: &Tensor,
input2: &Tensor,
input3: &Tensor,
left: bool,
transpose: bool,
) -> Tensor {
self.f_ormqr_out(out, input2, input3, left, transpose).unwrap()
}
pub fn outer(&self, vec2: &Tensor) -> Tensor {
self.f_outer(vec2).unwrap()
}
pub fn outer_out(&self, out: &Tensor, vec2: &Tensor) -> Tensor {
self.f_outer_out(out, vec2).unwrap()
}
pub fn output_nr(&self) -> i64 {
self.f_output_nr().unwrap()
}
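/// Hedged sketch of the generic `pad` wrapper: `pad` lists (left, right) amounts for the
/// trailing dimensions, `mode` names the padding scheme, and `value` is only used for
/// constant padding. The `"constant"` mode string follows the usual PyTorch convention
/// and is an assumption of this example, not something this binding enforces.
///
/// ```no_run
/// use tch::{Device, Kind, Tensor};
///
/// // Pad the last dimension with one zero on the left and two on the right: [1, 3] -> [1, 6].
/// let t = Tensor::ones(&[1, 3], (Kind::Float, Device::Cpu));
/// let padded = t.pad(&[1, 2], "constant", 0.0);
/// ```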
pub fn pad(&self, pad: impl IntList, mode: &str, value: impl Into<Option<f64>>) -> Tensor {
self.f_pad(pad, mode, value).unwrap()
}
pub fn pad_sequence<T: Borrow<Tensor>>(
sequences: &[T],
batch_first: bool,
padding_value: f64,
) -> Tensor {
Tensor::f_pad_sequence(sequences, batch_first, padding_value).unwrap()
}
pub fn pairwise_distance(x1: &Tensor, x2: &Tensor, p: f64, eps: f64, keepdim: bool) -> Tensor {
Tensor::f_pairwise_distance(x1, x2, p, eps, keepdim).unwrap()
}
pub fn pdist(&self, p: f64) -> Tensor {
self.f_pdist(p).unwrap()
}
pub fn permute(&self, dims: impl IntList) -> Tensor {
self.f_permute(dims).unwrap()
}
pub fn permute_copy(&self, dims: impl IntList) -> Tensor {
self.f_permute_copy(dims).unwrap()
}
pub fn permute_copy_out(&self, out: &Tensor, dims: impl IntList) -> Tensor {
self.f_permute_copy_out(out, dims).unwrap()
}
pub fn pin_memory(&self, device: Device) -> Tensor {
self.f_pin_memory(device).unwrap()
}
pub fn pinverse(&self, rcond: f64) -> Tensor {
self.f_pinverse(rcond).unwrap()
}
pub fn pixel_shuffle(&self, upscale_factor: i64) -> Tensor {
self.f_pixel_shuffle(upscale_factor).unwrap()
}
pub fn pixel_shuffle_out(&self, out: &Tensor, upscale_factor: i64) -> Tensor {
self.f_pixel_shuffle_out(out, upscale_factor).unwrap()
}
pub fn pixel_unshuffle(&self, downscale_factor: i64) -> Tensor {
self.f_pixel_unshuffle(downscale_factor).unwrap()
}
pub fn pixel_unshuffle_out(&self, out: &Tensor, downscale_factor: i64) -> Tensor {
self.f_pixel_unshuffle_out(out, downscale_factor).unwrap()
}
pub fn poisson(&self) -> Tensor {
self.f_poisson().unwrap()
}
pub fn poisson_nll_loss(
&self,
target: &Tensor,
log_input: bool,
full: bool,
eps: f64,
reduction: crate::Reduction,
) -> Tensor {
self.f_poisson_nll_loss(target, log_input, full, eps, reduction).unwrap()
}
pub fn poisson_out(&self, out: &Tensor) -> Tensor {
self.f_poisson_out(out).unwrap()
}
pub fn polar(abs: &Tensor, angle: &Tensor) -> Tensor {
Tensor::f_polar(abs, angle).unwrap()
}
pub fn polar_out(out: &Tensor, abs: &Tensor, angle: &Tensor) -> Tensor {
Tensor::f_polar_out(out, abs, angle).unwrap()
}
pub fn polygamma(&self, n: i64) -> Tensor {
self.f_polygamma(n).unwrap()
}
pub fn polygamma_(&mut self, n: i64) -> Tensor {
self.f_polygamma_(n).unwrap()
}
pub fn polygamma_out(&self, out: &Tensor, n: i64) -> Tensor {
self.f_polygamma_out(out, n).unwrap()
}
pub fn positive(&self) -> Tensor {
self.f_positive().unwrap()
}
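/// Hedged sketch of the `pow` family: `pow` raises `self` elementwise by a tensor
/// exponent, while `pow_tensor_scalar` further below takes a scalar exponent. Crate path
/// assumed.
///
/// ```no_run
/// use tch::{Device, Kind, Tensor};
///
/// let base = Tensor::rand(&[3], (Kind::Float, Device::Cpu));
/// // Elementwise power with a tensor exponent of the same shape.
/// let raised = base.pow(&Tensor::rand(&[3], (Kind::Float, Device::Cpu)));
/// // Scalar exponent: square every element.
/// let squared = base.pow_tensor_scalar(2.0);
/// ```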
pub fn pow(&self, exponent: &Tensor) -> Tensor {
self.f_pow(exponent).unwrap()
}
pub fn pow_<S: Into<Scalar>>(&mut self, exponent: S) -> Tensor {
self.f_pow_(exponent).unwrap()
}
pub fn pow_scalar<S: Into<Scalar>>(self_scalar: S, exponent: &Tensor) -> Tensor {
Tensor::f_pow_scalar(self_scalar, exponent).unwrap()
}
pub fn pow_scalar_out<S: Into<Scalar>>(
out: &Tensor,
self_scalar: S,
exponent: &Tensor,
) -> Tensor {
Tensor::f_pow_scalar_out(out, self_scalar, exponent).unwrap()
}
pub fn pow_tensor_(&mut self, exponent: &Tensor) -> Tensor {
self.f_pow_tensor_(exponent).unwrap()
}
pub fn pow_tensor_scalar<S: Into<Scalar>>(&self, exponent: S) -> Tensor {
self.f_pow_tensor_scalar(exponent).unwrap()
}
pub fn pow_tensor_scalar_out<S: Into<Scalar>>(&self, out: &Tensor, exponent: S) -> Tensor {
self.f_pow_tensor_scalar_out(out, exponent).unwrap()
}
pub fn pow_tensor_tensor_out(&self, out: &Tensor, exponent: &Tensor) -> Tensor {
self.f_pow_tensor_tensor_out(out, exponent).unwrap()
}
pub fn prelu(&self, weight: &Tensor) -> Tensor {
self.f_prelu(weight).unwrap()
}
pub fn prod(&self, dtype: impl Into<Option<Kind>>) -> Tensor {
self.f_prod(dtype).unwrap()
}
pub fn prod_dim_int(&self, dim: i64, keepdim: bool, dtype: impl Into<Option<Kind>>) -> Tensor {
self.f_prod_dim_int(dim, keepdim, dtype).unwrap()
}
pub fn prod_int_out(
&self,
out: &Tensor,
dim: i64,
keepdim: bool,
dtype: impl Into<Option<Kind>>,
) -> Tensor {
self.f_prod_int_out(out, dim, keepdim, dtype).unwrap()
}
pub fn prod_out(&self, out: &Tensor, dtype: impl Into<Option<Kind>>) -> Tensor {
self.f_prod_out(out, dtype).unwrap()
}
pub fn put(&self, index: &Tensor, source: &Tensor, accumulate: bool) -> Tensor {
self.f_put(index, source, accumulate).unwrap()
}
pub fn put_(&mut self, index: &Tensor, source: &Tensor, accumulate: bool) -> Tensor {
self.f_put_(index, source, accumulate).unwrap()
}
pub fn put_out(
&self,
out: &Tensor,
index: &Tensor,
source: &Tensor,
accumulate: bool,
) -> Tensor {
self.f_put_out(out, index, source, accumulate).unwrap()
}
pub fn q_per_channel_axis(&self) -> i64 {
self.f_q_per_channel_axis().unwrap()
}
pub fn q_per_channel_scales(&self) -> Tensor {
self.f_q_per_channel_scales().unwrap()
}
pub fn q_per_channel_scales_out(&self, out: &Tensor) -> Tensor {
self.f_q_per_channel_scales_out(out).unwrap()
}
pub fn q_per_channel_zero_points(&self) -> Tensor {
self.f_q_per_channel_zero_points().unwrap()
}
pub fn q_per_channel_zero_points_out(&self, out: &Tensor) -> Tensor {
self.f_q_per_channel_zero_points_out(out).unwrap()
}
pub fn q_scale(&self) -> f64 {
self.f_q_scale().unwrap()
}
pub fn q_zero_point(&self) -> i64 {
self.f_q_zero_point().unwrap()
}
pub fn qr(&self, some: bool) -> (Tensor, Tensor) {
self.f_qr(some).unwrap()
}
pub fn qr_q(&self, q: &Tensor, r: &Tensor, some: bool) -> (Tensor, Tensor) {
self.f_qr_q(q, r, some).unwrap()
}
pub fn quantile(
&self,
q: &Tensor,
dim: impl Into<Option<i64>>,
keepdim: bool,
interpolation: &str,
) -> Tensor {
self.f_quantile(q, dim, keepdim, interpolation).unwrap()
}
pub fn quantile_out(
&self,
out: &Tensor,
q: &Tensor,
dim: impl Into<Option<i64>>,
keepdim: bool,
interpolation: &str,
) -> Tensor {
self.f_quantile_out(out, q, dim, keepdim, interpolation).unwrap()
}
pub fn quantile_scalar(
&self,
q: f64,
dim: impl Into<Option<i64>>,
keepdim: bool,
interpolation: &str,
) -> Tensor {
self.f_quantile_scalar(q, dim, keepdim, interpolation).unwrap()
}
pub fn quantile_scalar_out(
&self,
out: &Tensor,
q: f64,
dim: impl Into<Option<i64>>,
keepdim: bool,
interpolation: &str,
) -> Tensor {
self.f_quantile_scalar_out(out, q, dim, keepdim, interpolation).unwrap()
}
pub fn quantize_per_channel(
&self,
scales: &Tensor,
zero_points: &Tensor,
axis: i64,
dtype: Kind,
) -> Tensor {
self.f_quantize_per_channel(scales, zero_points, axis, dtype).unwrap()
}
pub fn quantize_per_channel_out(
&self,
out: &Tensor,
scales: &Tensor,
zero_points: &Tensor,
axis: i64,
dtype: Kind,
) -> Tensor {
self.f_quantize_per_channel_out(out, scales, zero_points, axis, dtype).unwrap()
}
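/// Hedged sketch of `quantize_per_tensor`: a float tensor is mapped to a quantized one
/// using a single scale and zero point. `Kind::QUInt8` is assumed to be the quantized
/// dtype variant exposed by the surrounding crate, and the scale and zero point below
/// are arbitrary illustration values.
///
/// ```no_run
/// use tch::{Device, Kind, Tensor};
///
/// // Quantize values in [0, 1) with scale 0.1 and zero point 0.
/// let t = Tensor::rand(&[4], (Kind::Float, Device::Cpu));
/// let q = t.quantize_per_tensor(0.1, 0, Kind::QUInt8);
/// ```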
pub fn quantize_per_tensor(&self, scale: f64, zero_point: i64, dtype: Kind) -> Tensor {
self.f_quantize_per_tensor(scale, zero_point, dtype).unwrap()
}
pub fn quantize_per_tensor_dynamic(&self, dtype: Kind, reduce_range: bool) -> Tensor {
self.f_quantize_per_tensor_dynamic(dtype, reduce_range).unwrap()
}
pub fn quantize_per_tensor_dynamic_out(
&self,
out: &Tensor,
dtype: Kind,
reduce_range: bool,
) -> Tensor {
self.f_quantize_per_tensor_dynamic_out(out, dtype, reduce_range).unwrap()
}
pub fn quantize_per_tensor_out(
&self,
out: &Tensor,
scale: f64,
zero_point: i64,
dtype: Kind,
) -> Tensor {
self.f_quantize_per_tensor_out(out, scale, zero_point, dtype).unwrap()
}
pub fn quantize_per_tensor_tensor_qparams(
&self,
scale: &Tensor,
zero_point: &Tensor,
dtype: Kind,
) -> Tensor {
self.f_quantize_per_tensor_tensor_qparams(scale, zero_point, dtype).unwrap()
}
pub fn quantize_per_tensor_tensor_qparams_out(
&self,
out: &Tensor,
scale: &Tensor,
zero_point: &Tensor,
dtype: Kind,
) -> Tensor {
self.f_quantize_per_tensor_tensor_qparams_out(out, scale, zero_point, dtype).unwrap()
}
pub fn quantize_per_tensor_tensors<T: Borrow<Tensor>>(
tensors: &[T],
scales: &Tensor,
zero_points: &Tensor,
dtype: Kind,
) -> Vec<Tensor> {
Tensor::f_quantize_per_tensor_tensors(tensors, scales, zero_points, dtype).unwrap()
}
pub fn quantize_per_tensor_tensors_out<T: Borrow<Tensor>>(
out: &[T],
tensors: &[T],
scales: &Tensor,
zero_points: &Tensor,
dtype: Kind,
) {
Tensor::f_quantize_per_tensor_tensors_out(out, tensors, scales, zero_points, dtype).unwrap()
}
pub fn quantized_batch_norm<T: Borrow<Tensor>>(
&self,
weight: Option<T>,
bias: Option<T>,
mean: &Tensor,
var: &Tensor,
eps: f64,
output_scale: f64,
output_zero_point: i64,
) -> Tensor {
self.f_quantized_batch_norm(weight, bias, mean, var, eps, output_scale, output_zero_point)
.unwrap()
}
pub fn quantized_batch_norm_out<T: Borrow<Tensor>>(
&self,
out: &Tensor,
weight: Option<T>,
bias: Option<T>,
mean: &Tensor,
var: &Tensor,
eps: f64,
output_scale: f64,
output_zero_point: i64,
) -> Tensor {
self.f_quantized_batch_norm_out(
out,
weight,
bias,
mean,
var,
eps,
output_scale,
output_zero_point,
)
.unwrap()
}
pub fn quantized_gru_cell<S: Into<Scalar>>(
&self,
hx: &Tensor,
w_ih: &Tensor,
w_hh: &Tensor,
b_ih: &Tensor,
b_hh: &Tensor,
packed_ih: &Tensor,
packed_hh: &Tensor,
col_offsets_ih: &Tensor,
col_offsets_hh: &Tensor,
scale_ih: S,
scale_hh: S,
zero_point_ih: S,
zero_point_hh: S,
) -> Tensor {
self.f_quantized_gru_cell(
hx,
w_ih,
w_hh,
b_ih,
b_hh,
packed_ih,
packed_hh,
col_offsets_ih,
col_offsets_hh,
scale_ih,
scale_hh,
zero_point_ih,
zero_point_hh,
)
.unwrap()
}
pub fn quantized_lstm_cell<T: Borrow<Tensor>, S: Into<Scalar>>(
&self,
hx: &[T],
w_ih: &Tensor,
w_hh: &Tensor,
b_ih: &Tensor,
b_hh: &Tensor,
packed_ih: &Tensor,
packed_hh: &Tensor,
col_offsets_ih: &Tensor,
col_offsets_hh: &Tensor,
scale_ih: S,
scale_hh: S,
zero_point_ih: S,
zero_point_hh: S,
) -> (Tensor, Tensor) {
self.f_quantized_lstm_cell(
hx,
w_ih,
w_hh,
b_ih,
b_hh,
packed_ih,
packed_hh,
col_offsets_ih,
col_offsets_hh,
scale_ih,
scale_hh,
zero_point_ih,
zero_point_hh,
)
.unwrap()
}
pub fn quantized_max_pool1d(
&self,
kernel_size: impl IntList,
stride: impl IntList,
padding: impl IntList,
dilation: impl IntList,
ceil_mode: bool,
) -> Tensor {
self.f_quantized_max_pool1d(kernel_size, stride, padding, dilation, ceil_mode).unwrap()
}
pub fn quantized_max_pool1d_out(
&self,
out: &Tensor,
kernel_size: impl IntList,
stride: impl IntList,
padding: impl IntList,
dilation: impl IntList,
ceil_mode: bool,
) -> Tensor {
self.f_quantized_max_pool1d_out(out, kernel_size, stride, padding, dilation, ceil_mode)
.unwrap()
}
pub fn quantized_max_pool2d(
&self,
kernel_size: impl IntList,
stride: impl IntList,
padding: impl IntList,
dilation: impl IntList,
ceil_mode: bool,
) -> Tensor {
self.f_quantized_max_pool2d(kernel_size, stride, padding, dilation, ceil_mode).unwrap()
}
pub fn quantized_max_pool2d_out(
&self,
out: &Tensor,
kernel_size: impl IntList,
stride: impl IntList,
padding: impl IntList,
dilation: impl IntList,
ceil_mode: bool,
) -> Tensor {
self.f_quantized_max_pool2d_out(out, kernel_size, stride, padding, dilation, ceil_mode)
.unwrap()
}
pub fn quantized_max_pool3d(
&self,
kernel_size: impl IntList,
stride: impl IntList,
padding: impl IntList,
dilation: impl IntList,
ceil_mode: bool,
) -> Tensor {
self.f_quantized_max_pool3d(kernel_size, stride, padding, dilation, ceil_mode).unwrap()
}
pub fn quantized_max_pool3d_out(
&self,
out: &Tensor,
kernel_size: impl IntList,
stride: impl IntList,
padding: impl IntList,
dilation: impl IntList,
ceil_mode: bool,
) -> Tensor {
self.f_quantized_max_pool3d_out(out, kernel_size, stride, padding, dilation, ceil_mode)
.unwrap()
}
pub fn quantized_rnn_relu_cell<S: Into<Scalar>>(
&self,
hx: &Tensor,
w_ih: &Tensor,
w_hh: &Tensor,
b_ih: &Tensor,
b_hh: &Tensor,
packed_ih: &Tensor,
packed_hh: &Tensor,
col_offsets_ih: &Tensor,
col_offsets_hh: &Tensor,
scale_ih: S,
scale_hh: S,
zero_point_ih: S,
zero_point_hh: S,
) -> Tensor {
self.f_quantized_rnn_relu_cell(
hx,
w_ih,
w_hh,
b_ih,
b_hh,
packed_ih,
packed_hh,
col_offsets_ih,
col_offsets_hh,
scale_ih,
scale_hh,
zero_point_ih,
zero_point_hh,
)
.unwrap()
}
pub fn quantized_rnn_tanh_cell<S: Into<Scalar>>(
&self,
hx: &Tensor,
w_ih: &Tensor,
w_hh: &Tensor,
b_ih: &Tensor,
b_hh: &Tensor,
packed_ih: &Tensor,
packed_hh: &Tensor,
col_offsets_ih: &Tensor,
col_offsets_hh: &Tensor,
scale_ih: S,
scale_hh: S,
zero_point_ih: S,
zero_point_hh: S,
) -> Tensor {
self.f_quantized_rnn_tanh_cell(
hx,
w_ih,
w_hh,
b_ih,
b_hh,
packed_ih,
packed_hh,
col_offsets_ih,
col_offsets_hh,
scale_ih,
scale_hh,
zero_point_ih,
zero_point_hh,
)
.unwrap()
}
pub fn rad2deg(&self) -> Tensor {
self.f_rad2deg().unwrap()
}
pub fn rad2deg_(&mut self) -> Tensor {
self.f_rad2deg_().unwrap()
}
pub fn rad2deg_out(&self, out: &Tensor) -> Tensor {
self.f_rad2deg_out(out).unwrap()
}
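/// Hedged sketch of the random constructors in this block: `rand` samples uniformly from
/// [0, 1) and `randn` from a standard normal, both taking a shape plus a `(Kind, Device)`
/// options tuple. Crate path assumed.
///
/// ```no_run
/// use tch::{Device, Kind, Tensor};
///
/// let uniform = Tensor::rand(&[2, 3], (Kind::Float, Device::Cpu)); // U(0, 1)
/// let normal = Tensor::randn(&[2, 3], (Kind::Float, Device::Cpu)); // N(0, 1)
/// ```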
pub fn rand(size: impl IntList, options: (Kind, Device)) -> Tensor {
Tensor::f_rand(size, options).unwrap()
}
pub fn rand_like(&self) -> Tensor {
self.f_rand_like().unwrap()
}
pub fn rand_like_out(&self, out: &Tensor) -> Tensor {
self.f_rand_like_out(out).unwrap()
}
pub fn rand_out(out: &Tensor, size: impl IntList) -> Tensor {
Tensor::f_rand_out(out, size).unwrap()
}
pub fn randint(high: i64, size: impl IntList, options: (Kind, Device)) -> Tensor {
Tensor::f_randint(high, size, options).unwrap()
}
pub fn randint_like(&self, high: i64) -> Tensor {
self.f_randint_like(high).unwrap()
}
pub fn randint_like_low_dtype(&self, low: i64, high: i64) -> Tensor {
self.f_randint_like_low_dtype(low, high).unwrap()
}
pub fn randint_like_low_dtype_out(&self, out: &Tensor, low: i64, high: i64) -> Tensor {
self.f_randint_like_low_dtype_out(out, low, high).unwrap()
}
pub fn randint_like_out(&self, out: &Tensor, high: i64) -> Tensor {
self.f_randint_like_out(out, high).unwrap()
}
pub fn randint_low(low: i64, high: i64, size: impl IntList, options: (Kind, Device)) -> Tensor {
Tensor::f_randint_low(low, high, size, options).unwrap()
}
pub fn randint_low_out(out: &Tensor, low: i64, high: i64, size: impl IntList) -> Tensor {
Tensor::f_randint_low_out(out, low, high, size).unwrap()
}
pub fn randint_out(out: &Tensor, high: i64, size: impl IntList) -> Tensor {
Tensor::f_randint_out(out, high, size).unwrap()
}
pub fn randn(size: impl IntList, options: (Kind, Device)) -> Tensor {
Tensor::f_randn(size, options).unwrap()
}
pub fn randn_like(&self) -> Tensor {
self.f_randn_like().unwrap()
}
pub fn randn_like_out(&self, out: &Tensor) -> Tensor {
self.f_randn_like_out(out).unwrap()
}
pub fn randn_out(out: &Tensor, size: impl IntList) -> Tensor {
Tensor::f_randn_out(out, size).unwrap()
}
pub fn random(&self) -> Tensor {
self.f_random().unwrap()
}
pub fn random_(&mut self) -> Tensor {
self.f_random_().unwrap()
}
pub fn random_from(&self, from: i64, to: impl Into<Option<i64>>) -> Tensor {
self.f_random_from(from, to).unwrap()
}
pub fn random_from_(&mut self, from: i64, to: impl Into<Option<i64>>) -> Tensor {
self.f_random_from_(from, to).unwrap()
}
pub fn random_from_out(&self, out: &Tensor, from: i64, to: impl Into<Option<i64>>) -> Tensor {
self.f_random_from_out(out, from, to).unwrap()
}
pub fn random_out(&self, out: &Tensor) -> Tensor {
self.f_random_out(out).unwrap()
}
pub fn random_to(&self, to: i64) -> Tensor {
self.f_random_to(to).unwrap()
}
pub fn random_to_(&mut self, to: i64) -> Tensor {
self.f_random_to_(to).unwrap()
}
pub fn random_to_out(&self, out: &Tensor, to: i64) -> Tensor {
self.f_random_to_out(out, to).unwrap()
}
pub fn randperm(n: i64, options: (Kind, Device)) -> Tensor {
Tensor::f_randperm(n, options).unwrap()
}
pub fn randperm_out(out: &Tensor, n: i64) -> Tensor {
Tensor::f_randperm_out(out, n).unwrap()
}
pub fn range<S: Into<Scalar>>(start: S, end: S, options: (Kind, Device)) -> Tensor {
Tensor::f_range(start, end, options).unwrap()
}
pub fn range_out<S: Into<Scalar>>(out: &Tensor, start: S, end: S) -> Tensor {
Tensor::f_range_out(out, start, end).unwrap()
}
pub fn range_out_<S: Into<Scalar>>(out: &Tensor, start: S, end: S) -> Tensor {
Tensor::f_range_out_(out, start, end).unwrap()
}
pub fn range_step<S: Into<Scalar>>(start: S, end: S, options: (Kind, Device)) -> Tensor {
Tensor::f_range_step(start, end, options).unwrap()
}
pub fn ravel(&self) -> Tensor {
self.f_ravel().unwrap()
}
pub fn real(&self) -> Tensor {
self.f_real().unwrap()
}
pub fn reciprocal(&self) -> Tensor {
self.f_reciprocal().unwrap()
}
pub fn reciprocal_(&mut self) -> Tensor {
self.f_reciprocal_().unwrap()
}
pub fn reciprocal_out(&self, out: &Tensor) -> Tensor {
self.f_reciprocal_out(out).unwrap()
}
pub fn reflection_pad1d(&self, padding: impl IntList) -> Tensor {
self.f_reflection_pad1d(padding).unwrap()
}
pub fn reflection_pad1d_backward(&self, grad_output: &Tensor, padding: impl IntList) -> Tensor {
self.f_reflection_pad1d_backward(grad_output, padding).unwrap()
}
pub fn reflection_pad1d_backward_grad_input(
&self,
grad_input: &Tensor,
grad_output: &Tensor,
padding: impl IntList,
) -> Tensor {
self.f_reflection_pad1d_backward_grad_input(grad_input, grad_output, padding).unwrap()
}
pub fn reflection_pad1d_out(&self, out: &Tensor, padding: impl IntList) -> Tensor {
self.f_reflection_pad1d_out(out, padding).unwrap()
}
pub fn reflection_pad2d(&self, padding: impl IntList) -> Tensor {
self.f_reflection_pad2d(padding).unwrap()
}
pub fn reflection_pad2d_backward(&self, grad_output: &Tensor, padding: impl IntList) -> Tensor {
self.f_reflection_pad2d_backward(grad_output, padding).unwrap()
}
pub fn reflection_pad2d_backward_grad_input(
&self,
grad_input: &Tensor,
grad_output: &Tensor,
padding: impl IntList,
) -> Tensor {
self.f_reflection_pad2d_backward_grad_input(grad_input, grad_output, padding).unwrap()
}
pub fn reflection_pad2d_out(&self, out: &Tensor, padding: impl IntList) -> Tensor {
self.f_reflection_pad2d_out(out, padding).unwrap()
}
pub fn reflection_pad3d(&self, padding: impl IntList) -> Tensor {
self.f_reflection_pad3d(padding).unwrap()
}
pub fn reflection_pad3d_backward(&self, grad_output: &Tensor, padding: impl IntList) -> Tensor {
self.f_reflection_pad3d_backward(grad_output, padding).unwrap()
}
pub fn reflection_pad3d_backward_grad_input(
&self,
grad_input: &Tensor,
grad_output: &Tensor,
padding: impl IntList,
) -> Tensor {
self.f_reflection_pad3d_backward_grad_input(grad_input, grad_output, padding).unwrap()
}
pub fn reflection_pad3d_out(&self, out: &Tensor, padding: impl IntList) -> Tensor {
self.f_reflection_pad3d_out(out, padding).unwrap()
}
pub fn relu(&self) -> Tensor {
self.f_relu().unwrap()
}
pub fn relu6(&self) -> Tensor {
self.f_relu6().unwrap()
}
pub fn relu6_(&mut self) -> Tensor {
self.f_relu6_().unwrap()
}
pub fn relu_(&mut self) -> Tensor {
self.f_relu_().unwrap()
}
pub fn relu_out(&self, out: &Tensor) -> Tensor {
self.f_relu_out(out).unwrap()
}
pub fn remainder<S: Into<Scalar>>(&self, other: S) -> Tensor {
self.f_remainder(other).unwrap()
}
pub fn remainder_<S: Into<Scalar>>(&mut self, other: S) -> Tensor {
self.f_remainder_(other).unwrap()
}
pub fn remainder_scalar_out<S: Into<Scalar>>(&self, out: &Tensor, other: S) -> Tensor {
self.f_remainder_scalar_out(out, other).unwrap()
}
pub fn remainder_scalar_tensor<S: Into<Scalar>>(self_scalar: S, other: &Tensor) -> Tensor {
Tensor::f_remainder_scalar_tensor(self_scalar, other).unwrap()
}
pub fn remainder_scalar_tensor_out<S: Into<Scalar>>(
out: &Tensor,
self_scalar: S,
other: &Tensor,
) -> Tensor {
Tensor::f_remainder_scalar_tensor_out(out, self_scalar, other).unwrap()
}
pub fn remainder_tensor(&self, other: &Tensor) -> Tensor {
self.f_remainder_tensor(other).unwrap()
}
pub fn remainder_tensor_(&mut self, other: &Tensor) -> Tensor {
self.f_remainder_tensor_(other).unwrap()
}
pub fn remainder_tensor_out(&self, out: &Tensor, other: &Tensor) -> Tensor {
self.f_remainder_tensor_out(out, other).unwrap()
}
pub fn renorm<S: Into<Scalar>>(&self, p: S, dim: i64, maxnorm: S) -> Tensor {
self.f_renorm(p, dim, maxnorm).unwrap()
}
pub fn renorm_<S: Into<Scalar>>(&mut self, p: S, dim: i64, maxnorm: S) -> Tensor {
self.f_renorm_(p, dim, maxnorm).unwrap()
}
pub fn renorm_out<S: Into<Scalar>>(&self, out: &Tensor, p: S, dim: i64, maxnorm: S) -> Tensor {
self.f_renorm_out(out, p, dim, maxnorm).unwrap()
}
pub fn repeat(&self, repeats: impl IntList) -> Tensor {
self.f_repeat(repeats).unwrap()
}
pub fn repeat_interleave(repeats: &Tensor, output_size: impl Into<Option<i64>>) -> Tensor {
Tensor::f_repeat_interleave(repeats, output_size).unwrap()
}
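/// Hedged sketch of `repeat_interleave_self_int`: each slice along `dim` is repeated
/// `repeats` times in place, so a `[2, 3]` tensor repeated twice along dim 0 becomes
/// `[4, 3]`. The explicit `i64` and `None::<i64>` annotations are only there to satisfy
/// the `Into<Option<i64>>` bounds.
///
/// ```no_run
/// use tch::{Device, Kind, Tensor};
///
/// let t = Tensor::ones(&[2, 3], (Kind::Float, Device::Cpu));
/// // Repeat every row twice along dimension 0.
/// let repeated = t.repeat_interleave_self_int(2, 0i64, None::<i64>);
/// ```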
pub fn repeat_interleave_self_int(
&self,
repeats: i64,
dim: impl Into<Option<i64>>,
output_size: impl Into<Option<i64>>,
) -> Tensor {
self.f_repeat_interleave_self_int(repeats, dim, output_size).unwrap()
}
pub fn repeat_interleave_self_tensor(
&self,
repeats: &Tensor,
dim: impl Into<Option<i64>>,
output_size: impl Into<Option<i64>>,
) -> Tensor {
self.f_repeat_interleave_self_tensor(repeats, dim, output_size).unwrap()
}
pub fn repeat_interleave_tensor_out(
out: &Tensor,
repeats: &Tensor,
output_size: impl Into<Option<i64>>,
) -> Tensor {
Tensor::f_repeat_interleave_tensor_out(out, repeats, output_size).unwrap()
}
pub fn repeat_out(&self, out: &Tensor, repeats: impl IntList) -> Tensor {
self.f_repeat_out(out, repeats).unwrap()
}
pub fn replication_pad1d(&self, padding: impl IntList) -> Tensor {
self.f_replication_pad1d(padding).unwrap()
}
pub fn replication_pad1d_backward(
&self,
grad_output: &Tensor,
padding: impl IntList,
) -> Tensor {
self.f_replication_pad1d_backward(grad_output, padding).unwrap()
}
pub fn replication_pad1d_backward_grad_input(
&self,
grad_input: &Tensor,
grad_output: &Tensor,
padding: impl IntList,
) -> Tensor {
self.f_replication_pad1d_backward_grad_input(grad_input, grad_output, padding).unwrap()
}
pub fn replication_pad1d_out(&self, out: &Tensor, padding: impl IntList) -> Tensor {
self.f_replication_pad1d_out(out, padding).unwrap()
}
pub fn replication_pad2d(&self, padding: impl IntList) -> Tensor {
self.f_replication_pad2d(padding).unwrap()
}
pub fn replication_pad2d_backward(
&self,
grad_output: &Tensor,
padding: impl IntList,
) -> Tensor {
self.f_replication_pad2d_backward(grad_output, padding).unwrap()
}
pub fn replication_pad2d_backward_grad_input(
&self,
grad_input: &Tensor,
grad_output: &Tensor,
padding: impl IntList,
) -> Tensor {
self.f_replication_pad2d_backward_grad_input(grad_input, grad_output, padding).unwrap()
}
pub fn replication_pad2d_out(&self, out: &Tensor, padding: impl IntList) -> Tensor {
self.f_replication_pad2d_out(out, padding).unwrap()
}
pub fn replication_pad3d(&self, padding: impl IntList) -> Tensor {
self.f_replication_pad3d(padding).unwrap()
}
pub fn replication_pad3d_backward(
&self,
grad_output: &Tensor,
padding: impl IntList,
) -> Tensor {
self.f_replication_pad3d_backward(grad_output, padding).unwrap()
}
pub fn replication_pad3d_backward_grad_input(
&self,
grad_input: &Tensor,
grad_output: &Tensor,
padding: impl IntList,
) -> Tensor {
self.f_replication_pad3d_backward_grad_input(grad_input, grad_output, padding).unwrap()
}
pub fn replication_pad3d_out(&self, out: &Tensor, padding: impl IntList) -> Tensor {
self.f_replication_pad3d_out(out, padding).unwrap()
}
pub fn requires_grad_(&mut self, requires_grad: bool) -> Tensor {
self.f_requires_grad_(requires_grad).unwrap()
}
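/// Hedged sketch of `reshape`: the element count must stay the same, and a view is
/// returned when the layout allows it (otherwise a copy). Crate path assumed.
///
/// ```no_run
/// use tch::{Device, Kind, Tensor};
///
/// // Twelve elements rearranged from [2, 6] to [3, 4].
/// let t = Tensor::ones(&[2, 6], (Kind::Float, Device::Cpu));
/// let r = t.reshape(&[3, 4]);
/// ```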
pub fn reshape(&self, shape: impl IntList) -> Tensor {
self.f_reshape(shape).unwrap()
}
pub fn reshape_as(&self, other: &Tensor) -> Tensor {
self.f_reshape_as(other).unwrap()
}
pub fn resize(&self, size: impl IntList) -> Tensor {
self.f_resize(size).unwrap()
}
pub fn resize_(&mut self, size: impl IntList) -> Tensor {
self.f_resize_(size).unwrap()
}
pub fn resize_as(&self, the_template: &Tensor) -> Tensor {
self.f_resize_as(the_template).unwrap()
}
pub fn resize_as_(&mut self, the_template: &Tensor) -> Tensor {
self.f_resize_as_(the_template).unwrap()
}
pub fn resize_as_out(&self, out: &Tensor, the_template: &Tensor) -> Tensor {
self.f_resize_as_out(out, the_template).unwrap()
}
pub fn resize_as_sparse(&self, the_template: &Tensor) -> Tensor {
self.f_resize_as_sparse(the_template).unwrap()
}
pub fn resize_as_sparse_(&mut self, the_template: &Tensor) -> Tensor {
self.f_resize_as_sparse_(the_template).unwrap()
}
pub fn resize_as_sparse_out(&self, out: &Tensor, the_template: &Tensor) -> Tensor {
self.f_resize_as_sparse_out(out, the_template).unwrap()
}
pub fn resize_out(&self, out: &Tensor, size: impl IntList) -> Tensor {
self.f_resize_out(out, size).unwrap()
}
pub fn resolve_conj(&self) -> Tensor {
self.f_resolve_conj().unwrap()
}
pub fn resolve_neg(&self) -> Tensor {
self.f_resolve_neg().unwrap()
}
pub fn retains_grad(&self) -> bool {
self.f_retains_grad().unwrap()
}
pub fn rnn_relu<T: Borrow<Tensor>>(
&self,
hx: &Tensor,
params: &[T],
has_biases: bool,
num_layers: i64,
dropout: f64,
train: bool,
bidirectional: bool,
batch_first: bool,
) -> (Tensor, Tensor) {
self.f_rnn_relu(
hx,
params,
has_biases,
num_layers,
dropout,
train,
bidirectional,
batch_first,
)
.unwrap()
}
pub fn rnn_relu_cell<T: Borrow<Tensor>>(
&self,
hx: &Tensor,
w_ih: &Tensor,
w_hh: &Tensor,
b_ih: Option<T>,
b_hh: Option<T>,
) -> Tensor {
self.f_rnn_relu_cell(hx, w_ih, w_hh, b_ih, b_hh).unwrap()
}
pub fn rnn_relu_data<T: Borrow<Tensor>>(
data: &Tensor,
batch_sizes: &Tensor,
hx: &Tensor,
params: &[T],
has_biases: bool,
num_layers: i64,
dropout: f64,
train: bool,
bidirectional: bool,
) -> (Tensor, Tensor) {
Tensor::f_rnn_relu_data(
data,
batch_sizes,
hx,
params,
has_biases,
num_layers,
dropout,
train,
bidirectional,
)
.unwrap()
}
pub fn rnn_tanh<T: Borrow<Tensor>>(
&self,
hx: &Tensor,
params: &[T],
has_biases: bool,
num_layers: i64,
dropout: f64,
train: bool,
bidirectional: bool,
batch_first: bool,
) -> (Tensor, Tensor) {
self.f_rnn_tanh(
hx,
params,
has_biases,
num_layers,
dropout,
train,
bidirectional,
batch_first,
)
.unwrap()
}
pub fn rnn_tanh_cell<T: Borrow<Tensor>>(
&self,
hx: &Tensor,
w_ih: &Tensor,
w_hh: &Tensor,
b_ih: Option<T>,
b_hh: Option<T>,
) -> Tensor {
self.f_rnn_tanh_cell(hx, w_ih, w_hh, b_ih, b_hh).unwrap()
}
pub fn rnn_tanh_data<T: Borrow<Tensor>>(
data: &Tensor,
batch_sizes: &Tensor,
hx: &Tensor,
params: &[T],
has_biases: bool,
num_layers: i64,
dropout: f64,
train: bool,
bidirectional: bool,
) -> (Tensor, Tensor) {
Tensor::f_rnn_tanh_data(
data,
batch_sizes,
hx,
params,
has_biases,
num_layers,
dropout,
train,
bidirectional,
)
.unwrap()
}
pub fn roll(&self, shifts: impl IntList, dims: impl IntList) -> Tensor {
self.f_roll(shifts, dims).unwrap()
}
pub fn roll_out(&self, out: &Tensor, shifts: impl IntList, dims: impl IntList) -> Tensor {
self.f_roll_out(out, shifts, dims).unwrap()
}
pub fn rot90(&self, k: i64, dims: impl IntList) -> Tensor {
self.f_rot90(k, dims).unwrap()
}
pub fn rot90_out(&self, out: &Tensor, k: i64, dims: impl IntList) -> Tensor {
self.f_rot90_out(out, k, dims).unwrap()
}
pub fn round(&self) -> Tensor {
self.f_round().unwrap()
}
pub fn round_(&mut self) -> Tensor {
self.f_round_().unwrap()
}
pub fn round_decimals(&self, decimals: i64) -> Tensor {
self.f_round_decimals(decimals).unwrap()
}
pub fn round_decimals_(&mut self, decimals: i64) -> Tensor {
self.f_round_decimals_(decimals).unwrap()
}
pub fn round_decimals_out(&self, out: &Tensor, decimals: i64) -> Tensor {
self.f_round_decimals_out(out, decimals).unwrap()
}
pub fn round_out(&self, out: &Tensor) -> Tensor {
self.f_round_out(out).unwrap()
}
pub fn row_indices(&self) -> Tensor {
self.f_row_indices().unwrap()
}
pub fn row_indices_copy(&self) -> Tensor {
self.f_row_indices_copy().unwrap()
}
pub fn row_indices_copy_out(&self, out: &Tensor) -> Tensor {
self.f_row_indices_copy_out(out).unwrap()
}
pub fn row_stack<T: Borrow<Tensor>>(tensors: &[T]) -> Tensor {
Tensor::f_row_stack(tensors).unwrap()
}
pub fn row_stack_out<T: Borrow<Tensor>>(out: &Tensor, tensors: &[T]) -> Tensor {
Tensor::f_row_stack_out(out, tensors).unwrap()
}
pub fn rrelu(&self, training: bool) -> Tensor {
self.f_rrelu(training).unwrap()
}
pub fn rrelu_(&mut self, training: bool) -> Tensor {
self.f_rrelu_(training).unwrap()
}
pub fn rrelu_with_noise(&self, noise: &Tensor, training: bool) -> Tensor {
self.f_rrelu_with_noise(noise, training).unwrap()
}
pub fn rrelu_with_noise_(&mut self, noise: &Tensor, training: bool) -> Tensor {
self.f_rrelu_with_noise_(noise, training).unwrap()
}
pub fn rrelu_with_noise_backward<S: Into<Scalar>>(
&self,
grad_output: &Tensor,
noise: &Tensor,
lower: S,
upper: S,
training: bool,
self_is_result: bool,
) -> Tensor {
self.f_rrelu_with_noise_backward(grad_output, noise, lower, upper, training, self_is_result)
.unwrap()
}
pub fn rrelu_with_noise_backward_out<S: Into<Scalar>>(
&self,
out: &Tensor,
grad_output: &Tensor,
noise: &Tensor,
lower: S,
upper: S,
training: bool,
self_is_result: bool,
) -> Tensor {
self.f_rrelu_with_noise_backward_out(
out,
grad_output,
noise,
lower,
upper,
training,
self_is_result,
)
.unwrap()
}
pub fn rrelu_with_noise_out(&self, out: &Tensor, noise: &Tensor, training: bool) -> Tensor {
self.f_rrelu_with_noise_out(out, noise, training).unwrap()
}
pub fn rsqrt(&self) -> Tensor {
self.f_rsqrt().unwrap()
}
pub fn rsqrt_(&mut self) -> Tensor {
self.f_rsqrt_().unwrap()
}
pub fn rsqrt_out(&self, out: &Tensor) -> Tensor {
self.f_rsqrt_out(out).unwrap()
}
pub fn rsub(&self, other: &Tensor) -> Tensor {
self.f_rsub(other).unwrap()
}
pub fn rsub_scalar<S: Into<Scalar>>(&self, other: S) -> Tensor {
self.f_rsub_scalar(other).unwrap()
}
pub fn rsub_scalar_out<S: Into<Scalar>>(&self, out: &Tensor, other: S) -> Tensor {
self.f_rsub_scalar_out(out, other).unwrap()
}
pub fn rsub_tensor_out(&self, out: &Tensor, other: &Tensor) -> Tensor {
self.f_rsub_tensor_out(out, other).unwrap()
}
pub fn scalar_tensor<S: Into<Scalar>>(s: S, options: (Kind, Device)) -> Tensor {
Tensor::f_scalar_tensor(s, options).unwrap()
}
pub fn scalar_tensor_out<S: Into<Scalar>>(out: &Tensor, s: S) -> Tensor {
Tensor::f_scalar_tensor_out(out, s).unwrap()
}
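/// Hedged sketch of `scaled_dot_product_attention`: query, key, and value share a
/// `[batch, heads, seq, head_dim]` layout, the attention mask is optional, and passing
/// `None::<f64>` for `scale` falls back to the default `1/sqrt(head_dim)` scaling. The
/// shapes below are illustration values only.
///
/// ```no_run
/// use tch::{Device, Kind, Tensor};
///
/// let q = Tensor::rand(&[2, 4, 8, 16], (Kind::Float, Device::Cpu));
/// let k = Tensor::rand(&[2, 4, 8, 16], (Kind::Float, Device::Cpu));
/// let v = Tensor::rand(&[2, 4, 8, 16], (Kind::Float, Device::Cpu));
/// // No mask, no dropout, non-causal attention with the default scale.
/// let out =
///     Tensor::scaled_dot_product_attention(&q, &k, &v, None::<Tensor>, 0.0, false, None::<f64>);
/// ```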
pub fn scaled_dot_product_attention<T: Borrow<Tensor>>(
query: &Tensor,
key: &Tensor,
value: &Tensor,
attn_mask: Option<T>,
dropout_p: f64,
is_causal: bool,
scale: impl Into<Option<f64>>,
) -> Tensor {
Tensor::f_scaled_dot_product_attention(
query, key, value, attn_mask, dropout_p, is_causal, scale,
)
.unwrap()
}
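/// Hedged sketch of `scatter`: values from `src` are written into a copy of `self` at the
/// positions named by `index` along `dim`; `scatter_add` just below accumulates instead
/// of overwriting. Index values must be valid indices of `self` along the chosen
/// dimension.
///
/// ```no_run
/// use tch::{Device, Kind, Tensor};
///
/// let base = Tensor::ones(&[3, 4], (Kind::Float, Device::Cpu));
/// // Row indices in [0, 3) decide where each element of `src` lands along dim 0.
/// let index = Tensor::randint(3, &[2, 4], (Kind::Int64, Device::Cpu));
/// let src = Tensor::rand(&[2, 4], (Kind::Float, Device::Cpu));
/// let scattered = base.scatter(0, &index, &src);
/// ```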
pub fn scatter(&self, dim: i64, index: &Tensor, src: &Tensor) -> Tensor {
self.f_scatter(dim, index, src).unwrap()
}
pub fn scatter_(&mut self, dim: i64, index: &Tensor, src: &Tensor) -> Tensor {
self.f_scatter_(dim, index, src).unwrap()
}
pub fn scatter_add(&self, dim: i64, index: &Tensor, src: &Tensor) -> Tensor {
self.f_scatter_add(dim, index, src).unwrap()
}
pub fn scatter_add_(&mut self, dim: i64, index: &Tensor, src: &Tensor) -> Tensor {
self.f_scatter_add_(dim, index, src).unwrap()
}
pub fn scatter_add_out(&self, out: &Tensor, dim: i64, index: &Tensor, src: &Tensor) -> Tensor {
self.f_scatter_add_out(out, dim, index, src).unwrap()
}
pub fn scatter_reduce(&self, dim: i64, index: &Tensor, src: &Tensor, reduce: &str) -> Tensor {
self.f_scatter_reduce(dim, index, src, reduce).unwrap()
}
pub fn scatter_reduce_(
&mut self,
dim: i64,
index: &Tensor,
src: &Tensor,
reduce: &str,
) -> Tensor {
self.f_scatter_reduce_(dim, index, src, reduce).unwrap()
}
pub fn scatter_reduce_out(
&self,
out: &Tensor,
dim: i64,
index: &Tensor,
src: &Tensor,
reduce: &str,
) -> Tensor {
self.f_scatter_reduce_out(out, dim, index, src, reduce).unwrap()
}
pub fn scatter_src_out(&self, out: &Tensor, dim: i64, index: &Tensor, src: &Tensor) -> Tensor {
self.f_scatter_src_out(out, dim, index, src).unwrap()
}
pub fn scatter_value<S: Into<Scalar>>(&self, dim: i64, index: &Tensor, value: S) -> Tensor {
self.f_scatter_value(dim, index, value).unwrap()
}
pub fn scatter_value_<S: Into<Scalar>>(
&mut self,
dim: i64,
index: &Tensor,
value: S,
) -> Tensor {
self.f_scatter_value_(dim, index, value).unwrap()
}
pub fn scatter_value_out<S: Into<Scalar>>(
&self,
out: &Tensor,
dim: i64,
index: &Tensor,
value: S,
) -> Tensor {
self.f_scatter_value_out(out, dim, index, value).unwrap()
}
pub fn scatter_value_reduce<S: Into<Scalar>>(
&self,
dim: i64,
index: &Tensor,
value: S,
reduce: &str,
) -> Tensor {
self.f_scatter_value_reduce(dim, index, value, reduce).unwrap()
}
pub fn scatter_value_reduce_<S: Into<Scalar>>(
&mut self,
dim: i64,
index: &Tensor,
value: S,
reduce: &str,
) -> Tensor {
self.f_scatter_value_reduce_(dim, index, value, reduce).unwrap()
}
pub fn scatter_value_reduce_out<S: Into<Scalar>>(
&self,
out: &Tensor,
dim: i64,
index: &Tensor,
value: S,
reduce: &str,
) -> Tensor {
self.f_scatter_value_reduce_out(out, dim, index, value, reduce).unwrap()
}
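/// Hedged sketch of `searchsorted`: the receiver holds the values to look up and
/// `sorted_sequence` is the sorted reference, with the result giving each value's
/// insertion index. Pairing `"left"` with `right = false` follows the usual PyTorch
/// semantics and is an assumption of this example.
///
/// ```no_run
/// use tch::{Device, Kind, Tensor};
///
/// // Sorted reference 0, 1, ..., 9 and five values in [0, 1) to locate within it.
/// let sorted = Tensor::range(0.0, 9.0, (Kind::Float, Device::Cpu));
/// let values = Tensor::rand(&[5], (Kind::Float, Device::Cpu));
/// let idx = values.searchsorted(&sorted, false, false, "left", None::<Tensor>);
/// ```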
pub fn searchsorted<T: Borrow<Tensor>>(
&self,
sorted_sequence: &Tensor,
out_int32: bool,
right: bool,
side: &str,
sorter: Option<T>,
) -> Tensor {
self.f_searchsorted(sorted_sequence, out_int32, right, side, sorter).unwrap()
}
pub fn searchsorted_scalar<T: Borrow<Tensor>, S: Into<Scalar>>(
sorted_sequence: &Tensor,
self_scalar: S,
out_int32: bool,
right: bool,
side: &str,
sorter: Option<T>,
) -> Tensor {
Tensor::f_searchsorted_scalar(sorted_sequence, self_scalar, out_int32, right, side, sorter)
.unwrap()
}
pub fn searchsorted_scalar_out<T: Borrow<Tensor>, S: Into<Scalar>>(
out: &Tensor,
sorted_sequence: &Tensor,
self_scalar: S,
out_int32: bool,
right: bool,
side: &str,
sorter: Option<T>,
) -> Tensor {
Tensor::f_searchsorted_scalar_out(
out,
sorted_sequence,
self_scalar,
out_int32,
right,
side,
sorter,
)
.unwrap()
}
pub fn searchsorted_tensor_out<T: Borrow<Tensor>>(
&self,
out: &Tensor,
sorted_sequence: &Tensor,
out_int32: bool,
right: bool,
side: &str,
sorter: Option<T>,
) -> Tensor {
self.f_searchsorted_tensor_out(out, sorted_sequence, out_int32, right, side, sorter)
.unwrap()
}
pub fn segment_reduce<T: Borrow<Tensor>, S: Into<Scalar>>(
data: &Tensor,
reduce: &str,
lengths: Option<T>,
indices: Option<T>,
offsets: Option<T>,
axis: i64,
unsafe_: bool,
initial: S,
) -> Tensor {
Tensor::f_segment_reduce(data, reduce, lengths, indices, offsets, axis, unsafe_, initial)
.unwrap()
}
pub fn segment_reduce_out<T: Borrow<Tensor>, S: Into<Scalar>>(
out: &Tensor,
data: &Tensor,
reduce: &str,
lengths: Option<T>,
indices: Option<T>,
offsets: Option<T>,
axis: i64,
unsafe_: bool,
initial: S,
) -> Tensor {
Tensor::f_segment_reduce_out(
out, data, reduce, lengths, indices, offsets, axis, unsafe_, initial,
)
.unwrap()
}
pub fn select(&self, dim: i64, index: i64) -> Tensor {
self.f_select(dim, index).unwrap()
}
pub fn select_backward(
grad_output: &Tensor,
input_sizes: impl IntList,
dim: i64,
index: i64,
) -> Tensor {
Tensor::f_select_backward(grad_output, input_sizes, dim, index).unwrap()
}
pub fn select_backward_out(
out: &Tensor,
grad_output: &Tensor,
input_sizes: impl IntList,
dim: i64,
index: i64,
) -> Tensor {
Tensor::f_select_backward_out(out, grad_output, input_sizes, dim, index).unwrap()
}
pub fn select_copy(&self, dim: i64, index: i64) -> Tensor {
self.f_select_copy(dim, index).unwrap()
}
pub fn select_copy_int_out(&self, out: &Tensor, dim: i64, index: i64) -> Tensor {
self.f_select_copy_int_out(out, dim, index).unwrap()
}
pub fn select_scatter(&self, src: &Tensor, dim: i64, index: i64) -> Tensor {
self.f_select_scatter(src, dim, index).unwrap()
}
pub fn select_scatter_out(&self, out: &Tensor, src: &Tensor, dim: i64, index: i64) -> Tensor {
self.f_select_scatter_out(out, src, dim, index).unwrap()
}
pub fn selu(&self) -> Tensor {
self.f_selu().unwrap()
}
pub fn selu_(&mut self) -> Tensor {
self.f_selu_().unwrap()
}
pub fn set(&self) -> Tensor {
self.f_set().unwrap()
}
pub fn set_(&mut self) -> Tensor {
self.f_set_().unwrap()
}
pub fn set_data(&mut self, new_data: &Tensor) {
self.f_set_data(new_data).unwrap()
}
pub fn set_out(&self, out: &Tensor) -> Tensor {
self.f_set_out(out).unwrap()
}
pub fn set_requires_grad(&self, r: bool) -> Tensor {
self.f_set_requires_grad(r).unwrap()
}
pub fn set_source_tensor(&self, source: &Tensor) -> Tensor {
self.f_set_source_tensor(source).unwrap()
}
pub fn set_source_tensor_(&mut self, source: &Tensor) -> Tensor {
self.f_set_source_tensor_(source).unwrap()
}
pub fn set_source_tensor_out(&self, out: &Tensor, source: &Tensor) -> Tensor {
self.f_set_source_tensor_out(out, source).unwrap()
}
pub fn set_source_tensor_storage_offset_(
&mut self,
source: &Tensor,
storage_offset: i64,
size: impl IntList,
stride: impl IntList,
) -> Tensor {
self.f_set_source_tensor_storage_offset_(source, storage_offset, size, stride).unwrap()
}
pub fn sgn(&self) -> Tensor {
self.f_sgn().unwrap()
}
pub fn sgn_(&mut self) -> Tensor {
self.f_sgn_().unwrap()
}
pub fn sgn_out(&self, out: &Tensor) -> Tensor {
self.f_sgn_out(out).unwrap()
}
pub fn sigmoid(&self) -> Tensor {
self.f_sigmoid().unwrap()
}
pub fn sigmoid_(&mut self) -> Tensor {
self.f_sigmoid_().unwrap()
}
pub fn sigmoid_backward(grad_output: &Tensor, output: &Tensor) -> Tensor {
Tensor::f_sigmoid_backward(grad_output, output).unwrap()
}
pub fn sigmoid_backward_grad_input(
grad_input: &Tensor,
grad_output: &Tensor,
output: &Tensor,
) -> Tensor {
Tensor::f_sigmoid_backward_grad_input(grad_input, grad_output, output).unwrap()
}
pub fn sigmoid_out(&self, out: &Tensor) -> Tensor {
self.f_sigmoid_out(out).unwrap()
}
pub fn sign(&self) -> Tensor {
self.f_sign().unwrap()
}
pub fn sign_(&mut self) -> Tensor {
self.f_sign_().unwrap()
}
pub fn sign_out(&self, out: &Tensor) -> Tensor {
self.f_sign_out(out).unwrap()
}
pub fn signbit(&self) -> Tensor {
self.f_signbit().unwrap()
}
pub fn signbit_out(&self, out: &Tensor) -> Tensor {
self.f_signbit_out(out).unwrap()
}
pub fn silu(&self) -> Tensor {
self.f_silu().unwrap()
}
pub fn silu_(&mut self) -> Tensor {
self.f_silu_().unwrap()
}
pub fn silu_backward(&self, grad_output: &Tensor) -> Tensor {
self.f_silu_backward(grad_output).unwrap()
}
pub fn silu_backward_grad_input(&self, grad_input: &Tensor, grad_output: &Tensor) -> Tensor {
self.f_silu_backward_grad_input(grad_input, grad_output).unwrap()
}
pub fn silu_out(&self, out: &Tensor) -> Tensor {
self.f_silu_out(out).unwrap()
}
pub fn sin(&self) -> Tensor {
self.f_sin().unwrap()
}
pub fn sin_(&mut self) -> Tensor {
self.f_sin_().unwrap()
}
pub fn sin_out(&self, out: &Tensor) -> Tensor {
self.f_sin_out(out).unwrap()
}
pub fn sinc(&self) -> Tensor {
self.f_sinc().unwrap()
}
pub fn sinc_(&mut self) -> Tensor {
self.f_sinc_().unwrap()
}
pub fn sinc_out(&self, out: &Tensor) -> Tensor {
self.f_sinc_out(out).unwrap()
}
pub fn sinh(&self) -> Tensor {
self.f_sinh().unwrap()
}
pub fn sinh_(&mut self) -> Tensor {
self.f_sinh_().unwrap()
}
pub fn sinh_out(&self, out: &Tensor) -> Tensor {
self.f_sinh_out(out).unwrap()
}
pub fn slice(
&self,
dim: i64,
start: impl Into<Option<i64>>,
end: impl Into<Option<i64>>,
step: i64,
) -> Tensor {
self.f_slice(dim, start, end, step).unwrap()
}
pub fn slice_backward(
grad_output: &Tensor,
input_sizes: impl IntList,
dim: i64,
start: i64,
end: i64,
step: i64,
) -> Tensor {
Tensor::f_slice_backward(grad_output, input_sizes, dim, start, end, step).unwrap()
}
pub fn slice_backward_out(
out: &Tensor,
grad_output: &Tensor,
input_sizes: impl IntList,
dim: i64,
start: i64,
end: i64,
step: i64,
) -> Tensor {
Tensor::f_slice_backward_out(out, grad_output, input_sizes, dim, start, end, step).unwrap()
}
pub fn slice_copy(
&self,
dim: i64,
start: impl Into<Option<i64>>,
end: impl Into<Option<i64>>,
step: i64,
) -> Tensor {
self.f_slice_copy(dim, start, end, step).unwrap()
}
pub fn slice_copy_tensor_out(
&self,
out: &Tensor,
dim: i64,
start: impl Into<Option<i64>>,
end: impl Into<Option<i64>>,
step: i64,
) -> Tensor {
self.f_slice_copy_tensor_out(out, dim, start, end, step).unwrap()
}
pub fn slice_scatter(
&self,
src: &Tensor,
dim: i64,
start: impl Into<Option<i64>>,
end: impl Into<Option<i64>>,
step: i64,
) -> Tensor {
self.f_slice_scatter(src, dim, start, end, step).unwrap()
}
pub fn slice_scatter_out(
&self,
out: &Tensor,
src: &Tensor,
dim: i64,
start: impl Into<Option<i64>>,
end: impl Into<Option<i64>>,
step: i64,
) -> Tensor {
self.f_slice_scatter_out(out, src, dim, start, end, step).unwrap()
}
pub fn slogdet(&self) -> (Tensor, Tensor) {
self.f_slogdet().unwrap()
}
pub fn slogdet_out(&self, sign: &Tensor, logabsdet: &Tensor) -> (Tensor, Tensor) {
self.f_slogdet_out(sign, logabsdet).unwrap()
}
pub fn slow_conv3d<T: Borrow<Tensor>>(
&self,
weight: &Tensor,
kernel_size: impl IntList,
bias: Option<T>,
stride: impl IntList,
padding: impl IntList,
) -> Tensor {
self.f_slow_conv3d(weight, kernel_size, bias, stride, padding).unwrap()
}
pub fn slow_conv3d_out<T: Borrow<Tensor>>(
&self,
out: &Tensor,
weight: &Tensor,
kernel_size: impl IntList,
bias: Option<T>,
stride: impl IntList,
padding: impl IntList,
) -> Tensor {
self.f_slow_conv3d_out(out, weight, kernel_size, bias, stride, padding).unwrap()
}
pub fn slow_conv_dilated2d<T: Borrow<Tensor>>(
&self,
weight: &Tensor,
kernel_size: impl IntList,
bias: Option<T>,
stride: impl IntList,
padding: impl IntList,
dilation: impl IntList,
) -> Tensor {
self.f_slow_conv_dilated2d(weight, kernel_size, bias, stride, padding, dilation).unwrap()
}
pub fn slow_conv_dilated2d_out<T: Borrow<Tensor>>(
&self,
out: &Tensor,
weight: &Tensor,
kernel_size: impl IntList,
bias: Option<T>,
stride: impl IntList,
padding: impl IntList,
dilation: impl IntList,
) -> Tensor {
self.f_slow_conv_dilated2d_out(out, weight, kernel_size, bias, stride, padding, dilation)
.unwrap()
}
pub fn slow_conv_dilated3d<T: Borrow<Tensor>>(
&self,
weight: &Tensor,
kernel_size: impl IntList,
bias: Option<T>,
stride: impl IntList,
padding: impl IntList,
dilation: impl IntList,
) -> Tensor {
self.f_slow_conv_dilated3d(weight, kernel_size, bias, stride, padding, dilation).unwrap()
}
pub fn slow_conv_dilated3d_out<T: Borrow<Tensor>>(
&self,
out: &Tensor,
weight: &Tensor,
kernel_size: impl IntList,
bias: Option<T>,
stride: impl IntList,
padding: impl IntList,
dilation: impl IntList,
) -> Tensor {
self.f_slow_conv_dilated3d_out(out, weight, kernel_size, bias, stride, padding, dilation)
.unwrap()
}
pub fn slow_conv_transpose2d<T: Borrow<Tensor>>(
&self,
weight: &Tensor,
kernel_size: impl IntList,
bias: Option<T>,
stride: impl IntList,
padding: impl IntList,
output_padding: impl IntList,
dilation: impl IntList,
) -> Tensor {
self.f_slow_conv_transpose2d(
weight,
kernel_size,
bias,
stride,
padding,
output_padding,
dilation,
)
.unwrap()
}
pub fn slow_conv_transpose2d_out<T: Borrow<Tensor>>(
&self,
out: &Tensor,
weight: &Tensor,
kernel_size: impl IntList,
bias: Option<T>,
stride: impl IntList,
padding: impl IntList,
output_padding: impl IntList,
dilation: impl IntList,
) -> Tensor {
self.f_slow_conv_transpose2d_out(
out,
weight,
kernel_size,
bias,
stride,
padding,
output_padding,
dilation,
)
.unwrap()
}
pub fn slow_conv_transpose3d<T: Borrow<Tensor>>(
&self,
weight: &Tensor,
kernel_size: impl IntList,
bias: Option<T>,
stride: impl IntList,
padding: impl IntList,
output_padding: impl IntList,
dilation: impl IntList,
) -> Tensor {
self.f_slow_conv_transpose3d(
weight,
kernel_size,
bias,
stride,
padding,
output_padding,
dilation,
)
.unwrap()
}
pub fn slow_conv_transpose3d_out<T: Borrow<Tensor>>(
&self,
out: &Tensor,
weight: &Tensor,
kernel_size: impl IntList,
bias: Option<T>,
stride: impl IntList,
padding: impl IntList,
output_padding: impl IntList,
dilation: impl IntList,
) -> Tensor {
self.f_slow_conv_transpose3d_out(
out,
weight,
kernel_size,
bias,
stride,
padding,
output_padding,
dilation,
)
.unwrap()
}
pub fn smm(&self, mat2: &Tensor) -> Tensor {
self.f_smm(mat2).unwrap()
}
pub fn smooth_l1_loss(
&self,
target: &Tensor,
reduction: crate::Reduction,
beta: f64,
) -> Tensor {
self.f_smooth_l1_loss(target, reduction, beta).unwrap()
}
pub fn smooth_l1_loss_backward(
&self,
grad_output: &Tensor,
target: &Tensor,
reduction: crate::Reduction,
beta: f64,
) -> Tensor {
self.f_smooth_l1_loss_backward(grad_output, target, reduction, beta).unwrap()
}
pub fn smooth_l1_loss_backward_grad_input(
&self,
grad_input: &Tensor,
grad_output: &Tensor,
target: &Tensor,
reduction: crate::Reduction,
beta: f64,
) -> Tensor {
self.f_smooth_l1_loss_backward_grad_input(grad_input, grad_output, target, reduction, beta)
.unwrap()
}
pub fn smooth_l1_loss_out(
&self,
out: &Tensor,
target: &Tensor,
reduction: crate::Reduction,
beta: f64,
) -> Tensor {
self.f_smooth_l1_loss_out(out, target, reduction, beta).unwrap()
}
pub fn soft_margin_loss(&self, target: &Tensor, reduction: crate::Reduction) -> Tensor {
self.f_soft_margin_loss(target, reduction).unwrap()
}
pub fn soft_margin_loss_backward(
&self,
grad_output: &Tensor,
target: &Tensor,
reduction: crate::Reduction,
) -> Tensor {
self.f_soft_margin_loss_backward(grad_output, target, reduction).unwrap()
}
pub fn soft_margin_loss_backward_grad_input(
&self,
grad_input: &Tensor,
grad_output: &Tensor,
target: &Tensor,
reduction: crate::Reduction,
) -> Tensor {
self.f_soft_margin_loss_backward_grad_input(grad_input, grad_output, target, reduction)
.unwrap()
}
pub fn soft_margin_loss_out(
&self,
out: &Tensor,
target: &Tensor,
reduction: crate::Reduction,
) -> Tensor {
self.f_soft_margin_loss_out(out, target, reduction).unwrap()
}
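/// Hedged sketch of `softmax`: probabilities along `dim` that sum to one, with an
/// optional output dtype (anything implementing `Into<Option<Kind>>`). Crate path assumed
/// as elsewhere.
///
/// ```no_run
/// use tch::{Device, Kind, Tensor};
///
/// // Softmax over the last dimension of a [2, 5] logit tensor.
/// let logits = Tensor::randn(&[2, 5], (Kind::Float, Device::Cpu));
/// let probs = logits.softmax(-1, Kind::Float);
/// ```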
pub fn softmax(&self, dim: i64, dtype: impl Into<Option<Kind>>) -> Tensor {
self.f_softmax(dim, dtype).unwrap()
}
pub fn softmax_int_out(
&self,
out: &Tensor,
dim: i64,
dtype: impl Into<Option<Kind>>,
) -> Tensor {
self.f_softmax_int_out(out, dim, dtype).unwrap()
}
pub fn softplus(&self) -> Tensor {
self.f_softplus().unwrap()
}
pub fn softplus_backward<S: Into<Scalar>>(
&self,
grad_output: &Tensor,
beta: S,
threshold: S,
) -> Tensor {
self.f_softplus_backward(grad_output, beta, threshold).unwrap()
}
pub fn softplus_backward_grad_input<S: Into<Scalar>>(
&self,
grad_input: &Tensor,
grad_output: &Tensor,
beta: S,
threshold: S,
) -> Tensor {
self.f_softplus_backward_grad_input(grad_input, grad_output, beta, threshold).unwrap()
}
pub fn softplus_out(&self, out: &Tensor) -> Tensor {
self.f_softplus_out(out).unwrap()
}
pub fn softshrink(&self) -> Tensor {
self.f_softshrink().unwrap()
}
pub fn softshrink_backward<S: Into<Scalar>>(&self, grad_output: &Tensor, lambd: S) -> Tensor {
self.f_softshrink_backward(grad_output, lambd).unwrap()
}
pub fn softshrink_backward_grad_input<S: Into<Scalar>>(
&self,
grad_input: &Tensor,
grad_output: &Tensor,
lambd: S,
) -> Tensor {
self.f_softshrink_backward_grad_input(grad_input, grad_output, lambd).unwrap()
}
pub fn softshrink_out(&self, out: &Tensor) -> Tensor {
self.f_softshrink_out(out).unwrap()
}
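/// Hedged sketch of `sort`: it returns the sorted values together with the indices that
/// produced them; `sort_stable` below additionally lets the caller request a stable
/// ordering.
///
/// ```no_run
/// use tch::{Device, Kind, Tensor};
///
/// // Ascending sort along the last dimension; `indices` maps back to the originals.
/// let t = Tensor::randn(&[3, 4], (Kind::Float, Device::Cpu));
/// let (values, indices) = t.sort(-1, false);
/// ```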
pub fn sort(&self, dim: i64, descending: bool) -> (Tensor, Tensor) {
self.f_sort(dim, descending).unwrap()
}
pub fn sort_stable(&self, stable: bool, dim: i64, descending: bool) -> (Tensor, Tensor) {
self.f_sort_stable(stable, dim, descending).unwrap()
}
pub fn sort_values(
&self,
values: &Tensor,
indices: &Tensor,
dim: i64,
descending: bool,
) -> (Tensor, Tensor) {
self.f_sort_values(values, indices, dim, descending).unwrap()
}
pub fn sort_values_stable(
&self,
values: &Tensor,
indices: &Tensor,
stable: bool,
dim: i64,
descending: bool,
) -> (Tensor, Tensor) {
self.f_sort_values_stable(values, indices, stable, dim, descending).unwrap()
}
pub fn sparse_bsc_tensor(
ccol_indices: &Tensor,
row_indices: &Tensor,
values: &Tensor,
options: (Kind, Device),
) -> Tensor {
Tensor::f_sparse_bsc_tensor(ccol_indices, row_indices, values, options).unwrap()
}
pub fn sparse_bsc_tensor_ccol_row_value_size(
ccol_indices: &Tensor,
row_indices: &Tensor,
values: &Tensor,
size: impl IntList,
options: (Kind, Device),
) -> Tensor {
Tensor::f_sparse_bsc_tensor_ccol_row_value_size(
ccol_indices,
row_indices,
values,
size,
options,
)
.unwrap()
}
pub fn sparse_bsr_tensor(
crow_indices: &Tensor,
col_indices: &Tensor,
values: &Tensor,
options: (Kind, Device),
) -> Tensor {
Tensor::f_sparse_bsr_tensor(crow_indices, col_indices, values, options).unwrap()
}
pub fn sparse_bsr_tensor_crow_col_value_size(
crow_indices: &Tensor,
col_indices: &Tensor,
values: &Tensor,
size: impl IntList,
options: (Kind, Device),
) -> Tensor {
Tensor::f_sparse_bsr_tensor_crow_col_value_size(
crow_indices,
col_indices,
values,
size,
options,
)
.unwrap()
}
pub fn sparse_compressed_tensor(
compressed_indices: &Tensor,
plain_indices: &Tensor,
values: &Tensor,
options: (Kind, Device),
) -> Tensor {
Tensor::f_sparse_compressed_tensor(compressed_indices, plain_indices, values, options)
.unwrap()
}
pub fn sparse_compressed_tensor_comp_plain_value_size(
compressed_indices: &Tensor,
plain_indices: &Tensor,
values: &Tensor,
size: impl IntList,
options: (Kind, Device),
) -> Tensor {
Tensor::f_sparse_compressed_tensor_comp_plain_value_size(
compressed_indices,
plain_indices,
values,
size,
options,
)
.unwrap()
}
pub fn sparse_coo_tensor(size: impl IntList, options: (Kind, Device)) -> Tensor {
Tensor::f_sparse_coo_tensor(size, options).unwrap()
}
pub fn sparse_coo_tensor_indices(
indices: &Tensor,
values: &Tensor,
options: (Kind, Device),
is_coalesced: bool,
) -> Tensor {
Tensor::f_sparse_coo_tensor_indices(indices, values, options, is_coalesced).unwrap()
}
pub fn sparse_coo_tensor_indices_size(
indices: &Tensor,
values: &Tensor,
size: impl IntList,
options: (Kind, Device),
is_coalesced: bool,
) -> Tensor {
Tensor::f_sparse_coo_tensor_indices_size(indices, values, size, options, is_coalesced)
.unwrap()
}
pub fn sparse_coo_tensor_size_out(out: &Tensor, size: impl IntList) -> Tensor {
Tensor::f_sparse_coo_tensor_size_out(out, size).unwrap()
}
pub fn sparse_csc_tensor(
ccol_indices: &Tensor,
row_indices: &Tensor,
values: &Tensor,
options: (Kind, Device),
) -> Tensor {
Tensor::f_sparse_csc_tensor(ccol_indices, row_indices, values, options).unwrap()
}
pub fn sparse_csc_tensor_ccol_row_value_size(
ccol_indices: &Tensor,
row_indices: &Tensor,
values: &Tensor,
size: impl IntList,
options: (Kind, Device),
) -> Tensor {
Tensor::f_sparse_csc_tensor_ccol_row_value_size(
ccol_indices,
row_indices,
values,
size,
options,
)
.unwrap()
}
pub fn sparse_csr_tensor(
crow_indices: &Tensor,
col_indices: &Tensor,
values: &Tensor,
options: (Kind, Device),
) -> Tensor {
Tensor::f_sparse_csr_tensor(crow_indices, col_indices, values, options).unwrap()
}
pub fn sparse_csr_tensor_crow_col_value_size(
crow_indices: &Tensor,
col_indices: &Tensor,
values: &Tensor,
size: impl IntList,
options: (Kind, Device),
) -> Tensor {
Tensor::f_sparse_csr_tensor_crow_col_value_size(
crow_indices,
col_indices,
values,
size,
options,
)
.unwrap()
}
pub fn sparse_dim(&self) -> i64 {
self.f_sparse_dim().unwrap()
}
pub fn sparse_mask(&self, mask: &Tensor) -> Tensor {
self.f_sparse_mask(mask).unwrap()
}
pub fn sparse_mask_out(&self, out: &Tensor, mask: &Tensor) -> Tensor {
self.f_sparse_mask_out(out, mask).unwrap()
}
pub fn sparse_resize(&self, size: impl IntList, sparse_dim: i64, dense_dim: i64) -> Tensor {
self.f_sparse_resize(size, sparse_dim, dense_dim).unwrap()
}
pub fn sparse_resize_(
&mut self,
size: impl IntList,
sparse_dim: i64,
dense_dim: i64,
) -> Tensor {
self.f_sparse_resize_(size, sparse_dim, dense_dim).unwrap()
}
pub fn sparse_resize_and_clear(
&self,
size: impl IntList,
sparse_dim: i64,
dense_dim: i64,
) -> Tensor {
self.f_sparse_resize_and_clear(size, sparse_dim, dense_dim).unwrap()
}
pub fn sparse_resize_and_clear_(
&mut self,
size: impl IntList,
sparse_dim: i64,
dense_dim: i64,
) -> Tensor {
self.f_sparse_resize_and_clear_(size, sparse_dim, dense_dim).unwrap()
}
pub fn sparse_resize_and_clear_out(
&self,
out: &Tensor,
size: impl IntList,
sparse_dim: i64,
dense_dim: i64,
) -> Tensor {
self.f_sparse_resize_and_clear_out(out, size, sparse_dim, dense_dim).unwrap()
}
pub fn sparse_resize_out(
&self,
out: &Tensor,
size: impl IntList,
sparse_dim: i64,
dense_dim: i64,
) -> Tensor {
self.f_sparse_resize_out(out, size, sparse_dim, dense_dim).unwrap()
}
pub fn sparse_sampled_addmm(&self, mat1: &Tensor, mat2: &Tensor) -> Tensor {
self.f_sparse_sampled_addmm(mat1, mat2).unwrap()
}
pub fn sparse_sampled_addmm_out(&self, out: &Tensor, mat1: &Tensor, mat2: &Tensor) -> Tensor {
self.f_sparse_sampled_addmm_out(out, mat1, mat2).unwrap()
}
pub fn special_airy_ai(x: &Tensor) -> Tensor {
Tensor::f_special_airy_ai(x).unwrap()
}
pub fn special_airy_ai_out(out: &Tensor, x: &Tensor) -> Tensor {
Tensor::f_special_airy_ai_out(out, x).unwrap()
}
pub fn special_bessel_j0(&self) -> Tensor {
self.f_special_bessel_j0().unwrap()
}
pub fn special_bessel_j0_out(&self, out: &Tensor) -> Tensor {
self.f_special_bessel_j0_out(out).unwrap()
}
pub fn special_bessel_j1(&self) -> Tensor {
self.f_special_bessel_j1().unwrap()
}
pub fn special_bessel_j1_out(&self, out: &Tensor) -> Tensor {
self.f_special_bessel_j1_out(out).unwrap()
}
pub fn special_bessel_y0(&self) -> Tensor {
self.f_special_bessel_y0().unwrap()
}
pub fn special_bessel_y0_out(&self, out: &Tensor) -> Tensor {
self.f_special_bessel_y0_out(out).unwrap()
}
pub fn special_bessel_y1(&self) -> Tensor {
self.f_special_bessel_y1().unwrap()
}
pub fn special_bessel_y1_out(&self, out: &Tensor) -> Tensor {
self.f_special_bessel_y1_out(out).unwrap()
}
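/// The `special_*_polynomial_*` family below follows one pattern: the base
/// function takes both `x` and `n` as tensors, the `*_n_scalar` variant fixes
/// the order `n` to a scalar, the `*_x_scalar` variant fixes the input `x` to a
/// scalar, and the `*_out` variants write into a caller-provided tensor.
///
/// Sketch only (not compiled here); assumes a `from_slice` constructor and the
/// usual crate-level re-export of `Tensor`:
///
/// ```ignore
/// let x = Tensor::from_slice(&[0.0f64, 0.5, 1.0]);
/// // T_3(x), the Chebyshev polynomial of the first kind, evaluated elementwise
/// // with the order given as a scalar.
/// let t3 = Tensor::special_chebyshev_polynomial_t_n_scalar(&x, 3i64);
/// ```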
pub fn special_chebyshev_polynomial_t(x: &Tensor, n: &Tensor) -> Tensor {
Tensor::f_special_chebyshev_polynomial_t(x, n).unwrap()
}
pub fn special_chebyshev_polynomial_t_n_scalar<S: Into<Scalar>>(x: &Tensor, n: S) -> Tensor {
Tensor::f_special_chebyshev_polynomial_t_n_scalar(x, n).unwrap()
}
pub fn special_chebyshev_polynomial_t_n_scalar_out<S: Into<Scalar>>(
out: &Tensor,
x: &Tensor,
n: S,
) -> Tensor {
Tensor::f_special_chebyshev_polynomial_t_n_scalar_out(out, x, n).unwrap()
}
pub fn special_chebyshev_polynomial_t_out(out: &Tensor, x: &Tensor, n: &Tensor) -> Tensor {
Tensor::f_special_chebyshev_polynomial_t_out(out, x, n).unwrap()
}
pub fn special_chebyshev_polynomial_t_x_scalar<S: Into<Scalar>>(x: S, n: &Tensor) -> Tensor {
Tensor::f_special_chebyshev_polynomial_t_x_scalar(x, n).unwrap()
}
pub fn special_chebyshev_polynomial_t_x_scalar_out<S: Into<Scalar>>(
out: &Tensor,
x: S,
n: &Tensor,
) -> Tensor {
Tensor::f_special_chebyshev_polynomial_t_x_scalar_out(out, x, n).unwrap()
}
pub fn special_chebyshev_polynomial_u(x: &Tensor, n: &Tensor) -> Tensor {
Tensor::f_special_chebyshev_polynomial_u(x, n).unwrap()
}
pub fn special_chebyshev_polynomial_u_n_scalar<S: Into<Scalar>>(x: &Tensor, n: S) -> Tensor {
Tensor::f_special_chebyshev_polynomial_u_n_scalar(x, n).unwrap()
}
pub fn special_chebyshev_polynomial_u_n_scalar_out<S: Into<Scalar>>(
out: &Tensor,
x: &Tensor,
n: S,
) -> Tensor {
Tensor::f_special_chebyshev_polynomial_u_n_scalar_out(out, x, n).unwrap()
}
pub fn special_chebyshev_polynomial_u_out(out: &Tensor, x: &Tensor, n: &Tensor) -> Tensor {
Tensor::f_special_chebyshev_polynomial_u_out(out, x, n).unwrap()
}
pub fn special_chebyshev_polynomial_u_x_scalar<S: Into<Scalar>>(x: S, n: &Tensor) -> Tensor {
Tensor::f_special_chebyshev_polynomial_u_x_scalar(x, n).unwrap()
}
pub fn special_chebyshev_polynomial_u_x_scalar_out<S: Into<Scalar>>(
out: &Tensor,
x: S,
n: &Tensor,
) -> Tensor {
Tensor::f_special_chebyshev_polynomial_u_x_scalar_out(out, x, n).unwrap()
}
pub fn special_chebyshev_polynomial_v(x: &Tensor, n: &Tensor) -> Tensor {
Tensor::f_special_chebyshev_polynomial_v(x, n).unwrap()
}
pub fn special_chebyshev_polynomial_v_n_scalar<S: Into<Scalar>>(x: &Tensor, n: S) -> Tensor {
Tensor::f_special_chebyshev_polynomial_v_n_scalar(x, n).unwrap()
}
pub fn special_chebyshev_polynomial_v_n_scalar_out<S: Into<Scalar>>(
out: &Tensor,
x: &Tensor,
n: S,
) -> Tensor {
Tensor::f_special_chebyshev_polynomial_v_n_scalar_out(out, x, n).unwrap()
}
pub fn special_chebyshev_polynomial_v_out(out: &Tensor, x: &Tensor, n: &Tensor) -> Tensor {
Tensor::f_special_chebyshev_polynomial_v_out(out, x, n).unwrap()
}
pub fn special_chebyshev_polynomial_v_x_scalar<S: Into<Scalar>>(x: S, n: &Tensor) -> Tensor {
Tensor::f_special_chebyshev_polynomial_v_x_scalar(x, n).unwrap()
}
pub fn special_chebyshev_polynomial_v_x_scalar_out<S: Into<Scalar>>(
out: &Tensor,
x: S,
n: &Tensor,
) -> Tensor {
Tensor::f_special_chebyshev_polynomial_v_x_scalar_out(out, x, n).unwrap()
}
pub fn special_chebyshev_polynomial_w(x: &Tensor, n: &Tensor) -> Tensor {
Tensor::f_special_chebyshev_polynomial_w(x, n).unwrap()
}
pub fn special_chebyshev_polynomial_w_n_scalar<S: Into<Scalar>>(x: &Tensor, n: S) -> Tensor {
Tensor::f_special_chebyshev_polynomial_w_n_scalar(x, n).unwrap()
}
pub fn special_chebyshev_polynomial_w_n_scalar_out<S: Into<Scalar>>(
out: &Tensor,
x: &Tensor,
n: S,
) -> Tensor {
Tensor::f_special_chebyshev_polynomial_w_n_scalar_out(out, x, n).unwrap()
}
pub fn special_chebyshev_polynomial_w_out(out: &Tensor, x: &Tensor, n: &Tensor) -> Tensor {
Tensor::f_special_chebyshev_polynomial_w_out(out, x, n).unwrap()
}
pub fn special_chebyshev_polynomial_w_x_scalar<S: Into<Scalar>>(x: S, n: &Tensor) -> Tensor {
Tensor::f_special_chebyshev_polynomial_w_x_scalar(x, n).unwrap()
}
pub fn special_chebyshev_polynomial_w_x_scalar_out<S: Into<Scalar>>(
out: &Tensor,
x: S,
n: &Tensor,
) -> Tensor {
Tensor::f_special_chebyshev_polynomial_w_x_scalar_out(out, x, n).unwrap()
}
pub fn special_digamma(&self) -> Tensor {
self.f_special_digamma().unwrap()
}
pub fn special_digamma_out(&self, out: &Tensor) -> Tensor {
self.f_special_digamma_out(out).unwrap()
}
pub fn special_entr(&self) -> Tensor {
self.f_special_entr().unwrap()
}
pub fn special_entr_out(&self, out: &Tensor) -> Tensor {
self.f_special_entr_out(out).unwrap()
}
pub fn special_erf(&self) -> Tensor {
self.f_special_erf().unwrap()
}
pub fn special_erf_out(&self, out: &Tensor) -> Tensor {
self.f_special_erf_out(out).unwrap()
}
pub fn special_erfc(&self) -> Tensor {
self.f_special_erfc().unwrap()
}
pub fn special_erfc_out(&self, out: &Tensor) -> Tensor {
self.f_special_erfc_out(out).unwrap()
}
pub fn special_erfcx(&self) -> Tensor {
self.f_special_erfcx().unwrap()
}
pub fn special_erfcx_out(&self, out: &Tensor) -> Tensor {
self.f_special_erfcx_out(out).unwrap()
}
pub fn special_erfinv(&self) -> Tensor {
self.f_special_erfinv().unwrap()
}
pub fn special_erfinv_out(&self, out: &Tensor) -> Tensor {
self.f_special_erfinv_out(out).unwrap()
}
pub fn special_exp2(&self) -> Tensor {
self.f_special_exp2().unwrap()
}
pub fn special_exp2_out(&self, out: &Tensor) -> Tensor {
self.f_special_exp2_out(out).unwrap()
}
pub fn special_expit(&self) -> Tensor {
self.f_special_expit().unwrap()
}
pub fn special_expit_out(&self, out: &Tensor) -> Tensor {
self.f_special_expit_out(out).unwrap()
}
pub fn special_expm1(&self) -> Tensor {
self.f_special_expm1().unwrap()
}
pub fn special_expm1_out(&self, out: &Tensor) -> Tensor {
self.f_special_expm1_out(out).unwrap()
}
pub fn special_gammainc(&self, other: &Tensor) -> Tensor {
self.f_special_gammainc(other).unwrap()
}
pub fn special_gammainc_out(&self, out: &Tensor, other: &Tensor) -> Tensor {
self.f_special_gammainc_out(out, other).unwrap()
}
pub fn special_gammaincc(&self, other: &Tensor) -> Tensor {
self.f_special_gammaincc(other).unwrap()
}
pub fn special_gammaincc_out(&self, out: &Tensor, other: &Tensor) -> Tensor {
self.f_special_gammaincc_out(out, other).unwrap()
}
pub fn special_gammaln(&self) -> Tensor {
self.f_special_gammaln().unwrap()
}
pub fn special_gammaln_out(&self, out: &Tensor) -> Tensor {
self.f_special_gammaln_out(out).unwrap()
}
pub fn special_hermite_polynomial_h(x: &Tensor, n: &Tensor) -> Tensor {
Tensor::f_special_hermite_polynomial_h(x, n).unwrap()
}
pub fn special_hermite_polynomial_h_n_scalar<S: Into<Scalar>>(x: &Tensor, n: S) -> Tensor {
Tensor::f_special_hermite_polynomial_h_n_scalar(x, n).unwrap()
}
pub fn special_hermite_polynomial_h_n_scalar_out<S: Into<Scalar>>(
out: &Tensor,
x: &Tensor,
n: S,
) -> Tensor {
Tensor::f_special_hermite_polynomial_h_n_scalar_out(out, x, n).unwrap()
}
pub fn special_hermite_polynomial_h_out(out: &Tensor, x: &Tensor, n: &Tensor) -> Tensor {
Tensor::f_special_hermite_polynomial_h_out(out, x, n).unwrap()
}
pub fn special_hermite_polynomial_h_x_scalar<S: Into<Scalar>>(x: S, n: &Tensor) -> Tensor {
Tensor::f_special_hermite_polynomial_h_x_scalar(x, n).unwrap()
}
pub fn special_hermite_polynomial_h_x_scalar_out<S: Into<Scalar>>(
out: &Tensor,
x: S,
n: &Tensor,
) -> Tensor {
Tensor::f_special_hermite_polynomial_h_x_scalar_out(out, x, n).unwrap()
}
pub fn special_hermite_polynomial_he(x: &Tensor, n: &Tensor) -> Tensor {
Tensor::f_special_hermite_polynomial_he(x, n).unwrap()
}
pub fn special_hermite_polynomial_he_n_scalar<S: Into<Scalar>>(x: &Tensor, n: S) -> Tensor {
Tensor::f_special_hermite_polynomial_he_n_scalar(x, n).unwrap()
}
pub fn special_hermite_polynomial_he_n_scalar_out<S: Into<Scalar>>(
out: &Tensor,
x: &Tensor,
n: S,
) -> Tensor {
Tensor::f_special_hermite_polynomial_he_n_scalar_out(out, x, n).unwrap()
}
pub fn special_hermite_polynomial_he_out(out: &Tensor, x: &Tensor, n: &Tensor) -> Tensor {
Tensor::f_special_hermite_polynomial_he_out(out, x, n).unwrap()
}
pub fn special_hermite_polynomial_he_x_scalar<S: Into<Scalar>>(x: S, n: &Tensor) -> Tensor {
Tensor::f_special_hermite_polynomial_he_x_scalar(x, n).unwrap()
}
pub fn special_hermite_polynomial_he_x_scalar_out<S: Into<Scalar>>(
out: &Tensor,
x: S,
n: &Tensor,
) -> Tensor {
Tensor::f_special_hermite_polynomial_he_x_scalar_out(out, x, n).unwrap()
}
pub fn special_i0(&self) -> Tensor {
self.f_special_i0().unwrap()
}
pub fn special_i0_out(&self, out: &Tensor) -> Tensor {
self.f_special_i0_out(out).unwrap()
}
pub fn special_i0e(&self) -> Tensor {
self.f_special_i0e().unwrap()
}
pub fn special_i0e_out(&self, out: &Tensor) -> Tensor {
self.f_special_i0e_out(out).unwrap()
}
pub fn special_i1(&self) -> Tensor {
self.f_special_i1().unwrap()
}
pub fn special_i1_out(&self, out: &Tensor) -> Tensor {
self.f_special_i1_out(out).unwrap()
}
pub fn special_i1e(&self) -> Tensor {
self.f_special_i1e().unwrap()
}
pub fn special_i1e_out(&self, out: &Tensor) -> Tensor {
self.f_special_i1e_out(out).unwrap()
}
pub fn special_laguerre_polynomial_l(x: &Tensor, n: &Tensor) -> Tensor {
Tensor::f_special_laguerre_polynomial_l(x, n).unwrap()
}
pub fn special_laguerre_polynomial_l_n_scalar<S: Into<Scalar>>(x: &Tensor, n: S) -> Tensor {
Tensor::f_special_laguerre_polynomial_l_n_scalar(x, n).unwrap()
}
pub fn special_laguerre_polynomial_l_n_scalar_out<S: Into<Scalar>>(
out: &Tensor,
x: &Tensor,
n: S,
) -> Tensor {
Tensor::f_special_laguerre_polynomial_l_n_scalar_out(out, x, n).unwrap()
}
pub fn special_laguerre_polynomial_l_out(out: &Tensor, x: &Tensor, n: &Tensor) -> Tensor {
Tensor::f_special_laguerre_polynomial_l_out(out, x, n).unwrap()
}
pub fn special_laguerre_polynomial_l_x_scalar<S: Into<Scalar>>(x: S, n: &Tensor) -> Tensor {
Tensor::f_special_laguerre_polynomial_l_x_scalar(x, n).unwrap()
}
pub fn special_laguerre_polynomial_l_x_scalar_out<S: Into<Scalar>>(
out: &Tensor,
x: S,
n: &Tensor,
) -> Tensor {
Tensor::f_special_laguerre_polynomial_l_x_scalar_out(out, x, n).unwrap()
}
pub fn special_legendre_polynomial_p(x: &Tensor, n: &Tensor) -> Tensor {
Tensor::f_special_legendre_polynomial_p(x, n).unwrap()
}
pub fn special_legendre_polynomial_p_n_scalar<S: Into<Scalar>>(x: &Tensor, n: S) -> Tensor {
Tensor::f_special_legendre_polynomial_p_n_scalar(x, n).unwrap()
}
pub fn special_legendre_polynomial_p_n_scalar_out<S: Into<Scalar>>(
out: &Tensor,
x: &Tensor,
n: S,
) -> Tensor {
Tensor::f_special_legendre_polynomial_p_n_scalar_out(out, x, n).unwrap()
}
pub fn special_legendre_polynomial_p_out(out: &Tensor, x: &Tensor, n: &Tensor) -> Tensor {
Tensor::f_special_legendre_polynomial_p_out(out, x, n).unwrap()
}
pub fn special_legendre_polynomial_p_x_scalar<S: Into<Scalar>>(x: S, n: &Tensor) -> Tensor {
Tensor::f_special_legendre_polynomial_p_x_scalar(x, n).unwrap()
}
pub fn special_legendre_polynomial_p_x_scalar_out<S: Into<Scalar>>(
out: &Tensor,
x: S,
n: &Tensor,
) -> Tensor {
Tensor::f_special_legendre_polynomial_p_x_scalar_out(out, x, n).unwrap()
}
pub fn special_log1p(&self) -> Tensor {
self.f_special_log1p().unwrap()
}
pub fn special_log1p_out(&self, out: &Tensor) -> Tensor {
self.f_special_log1p_out(out).unwrap()
}
pub fn special_log_ndtr(&self) -> Tensor {
self.f_special_log_ndtr().unwrap()
}
pub fn special_log_ndtr_out(&self, out: &Tensor) -> Tensor {
self.f_special_log_ndtr_out(out).unwrap()
}
pub fn special_log_softmax(&self, dim: i64, dtype: impl Into<Option<Kind>>) -> Tensor {
self.f_special_log_softmax(dim, dtype).unwrap()
}
pub fn special_logit(&self, eps: impl Into<Option<f64>>) -> Tensor {
self.f_special_logit(eps).unwrap()
}
pub fn special_logit_out(&self, out: &Tensor, eps: impl Into<Option<f64>>) -> Tensor {
self.f_special_logit_out(out, eps).unwrap()
}
pub fn special_logsumexp(&self, dim: impl IntList, keepdim: bool) -> Tensor {
self.f_special_logsumexp(dim, keepdim).unwrap()
}
pub fn special_logsumexp_out(&self, out: &Tensor, dim: impl IntList, keepdim: bool) -> Tensor {
self.f_special_logsumexp_out(out, dim, keepdim).unwrap()
}
pub fn special_modified_bessel_i0(&self) -> Tensor {
self.f_special_modified_bessel_i0().unwrap()
}
pub fn special_modified_bessel_i0_out(&self, out: &Tensor) -> Tensor {
self.f_special_modified_bessel_i0_out(out).unwrap()
}
pub fn special_modified_bessel_i1(&self) -> Tensor {
self.f_special_modified_bessel_i1().unwrap()
}
pub fn special_modified_bessel_i1_out(&self, out: &Tensor) -> Tensor {
self.f_special_modified_bessel_i1_out(out).unwrap()
}
pub fn special_modified_bessel_k0(&self) -> Tensor {
self.f_special_modified_bessel_k0().unwrap()
}
pub fn special_modified_bessel_k0_out(&self, out: &Tensor) -> Tensor {
self.f_special_modified_bessel_k0_out(out).unwrap()
}
pub fn special_modified_bessel_k1(&self) -> Tensor {
self.f_special_modified_bessel_k1().unwrap()
}
pub fn special_modified_bessel_k1_out(&self, out: &Tensor) -> Tensor {
self.f_special_modified_bessel_k1_out(out).unwrap()
}
pub fn special_multigammaln(&self, p: i64) -> Tensor {
self.f_special_multigammaln(p).unwrap()
}
pub fn special_multigammaln_out(&self, out: &Tensor, p: i64) -> Tensor {
self.f_special_multigammaln_out(out, p).unwrap()
}
pub fn special_ndtr(&self) -> Tensor {
self.f_special_ndtr().unwrap()
}
pub fn special_ndtr_out(&self, out: &Tensor) -> Tensor {
self.f_special_ndtr_out(out).unwrap()
}
pub fn special_ndtri(&self) -> Tensor {
self.f_special_ndtri().unwrap()
}
pub fn special_ndtri_out(&self, out: &Tensor) -> Tensor {
self.f_special_ndtri_out(out).unwrap()
}
pub fn special_polygamma(&self, n: i64) -> Tensor {
self.f_special_polygamma(n).unwrap()
}
pub fn special_polygamma_out(&self, out: &Tensor, n: i64) -> Tensor {
self.f_special_polygamma_out(out, n).unwrap()
}
pub fn special_psi(&self) -> Tensor {
self.f_special_psi().unwrap()
}
pub fn special_psi_out(&self, out: &Tensor) -> Tensor {
self.f_special_psi_out(out).unwrap()
}
pub fn special_round(&self, decimals: i64) -> Tensor {
self.f_special_round(decimals).unwrap()
}
pub fn special_round_out(&self, out: &Tensor, decimals: i64) -> Tensor {
self.f_special_round_out(out, decimals).unwrap()
}
pub fn special_scaled_modified_bessel_k0(x: &Tensor) -> Tensor {
Tensor::f_special_scaled_modified_bessel_k0(x).unwrap()
}
pub fn special_scaled_modified_bessel_k0_out(out: &Tensor, x: &Tensor) -> Tensor {
Tensor::f_special_scaled_modified_bessel_k0_out(out, x).unwrap()
}
pub fn special_scaled_modified_bessel_k1(x: &Tensor) -> Tensor {
Tensor::f_special_scaled_modified_bessel_k1(x).unwrap()
}
pub fn special_scaled_modified_bessel_k1_out(out: &Tensor, x: &Tensor) -> Tensor {
Tensor::f_special_scaled_modified_bessel_k1_out(out, x).unwrap()
}
pub fn special_shifted_chebyshev_polynomial_t(x: &Tensor, n: &Tensor) -> Tensor {
Tensor::f_special_shifted_chebyshev_polynomial_t(x, n).unwrap()
}
pub fn special_shifted_chebyshev_polynomial_t_n_scalar<S: Into<Scalar>>(
x: &Tensor,
n: S,
) -> Tensor {
Tensor::f_special_shifted_chebyshev_polynomial_t_n_scalar(x, n).unwrap()
}
pub fn special_shifted_chebyshev_polynomial_t_n_scalar_out<S: Into<Scalar>>(
out: &Tensor,
x: &Tensor,
n: S,
) -> Tensor {
Tensor::f_special_shifted_chebyshev_polynomial_t_n_scalar_out(out, x, n).unwrap()
}
pub fn special_shifted_chebyshev_polynomial_t_out(
out: &Tensor,
x: &Tensor,
n: &Tensor,
) -> Tensor {
Tensor::f_special_shifted_chebyshev_polynomial_t_out(out, x, n).unwrap()
}
pub fn special_shifted_chebyshev_polynomial_t_x_scalar<S: Into<Scalar>>(
x: S,
n: &Tensor,
) -> Tensor {
Tensor::f_special_shifted_chebyshev_polynomial_t_x_scalar(x, n).unwrap()
}
pub fn special_shifted_chebyshev_polynomial_t_x_scalar_out<S: Into<Scalar>>(
out: &Tensor,
x: S,
n: &Tensor,
) -> Tensor {
Tensor::f_special_shifted_chebyshev_polynomial_t_x_scalar_out(out, x, n).unwrap()
}
pub fn special_shifted_chebyshev_polynomial_u(x: &Tensor, n: &Tensor) -> Tensor {
Tensor::f_special_shifted_chebyshev_polynomial_u(x, n).unwrap()
}
pub fn special_shifted_chebyshev_polynomial_u_n_scalar<S: Into<Scalar>>(
x: &Tensor,
n: S,
) -> Tensor {
Tensor::f_special_shifted_chebyshev_polynomial_u_n_scalar(x, n).unwrap()
}
pub fn special_shifted_chebyshev_polynomial_u_n_scalar_out<S: Into<Scalar>>(
out: &Tensor,
x: &Tensor,
n: S,
) -> Tensor {
Tensor::f_special_shifted_chebyshev_polynomial_u_n_scalar_out(out, x, n).unwrap()
}
pub fn special_shifted_chebyshev_polynomial_u_out(
out: &Tensor,
x: &Tensor,
n: &Tensor,
) -> Tensor {
Tensor::f_special_shifted_chebyshev_polynomial_u_out(out, x, n).unwrap()
}
pub fn special_shifted_chebyshev_polynomial_u_x_scalar<S: Into<Scalar>>(
x: S,
n: &Tensor,
) -> Tensor {
Tensor::f_special_shifted_chebyshev_polynomial_u_x_scalar(x, n).unwrap()
}
pub fn special_shifted_chebyshev_polynomial_u_x_scalar_out<S: Into<Scalar>>(
out: &Tensor,
x: S,
n: &Tensor,
) -> Tensor {
Tensor::f_special_shifted_chebyshev_polynomial_u_x_scalar_out(out, x, n).unwrap()
}
pub fn special_shifted_chebyshev_polynomial_v(x: &Tensor, n: &Tensor) -> Tensor {
Tensor::f_special_shifted_chebyshev_polynomial_v(x, n).unwrap()
}
pub fn special_shifted_chebyshev_polynomial_v_n_scalar<S: Into<Scalar>>(
x: &Tensor,
n: S,
) -> Tensor {
Tensor::f_special_shifted_chebyshev_polynomial_v_n_scalar(x, n).unwrap()
}
pub fn special_shifted_chebyshev_polynomial_v_n_scalar_out<S: Into<Scalar>>(
out: &Tensor,
x: &Tensor,
n: S,
) -> Tensor {
Tensor::f_special_shifted_chebyshev_polynomial_v_n_scalar_out(out, x, n).unwrap()
}
pub fn special_shifted_chebyshev_polynomial_v_out(
out: &Tensor,
x: &Tensor,
n: &Tensor,
) -> Tensor {
Tensor::f_special_shifted_chebyshev_polynomial_v_out(out, x, n).unwrap()
}
pub fn special_shifted_chebyshev_polynomial_v_x_scalar<S: Into<Scalar>>(
x: S,
n: &Tensor,
) -> Tensor {
Tensor::f_special_shifted_chebyshev_polynomial_v_x_scalar(x, n).unwrap()
}
pub fn special_shifted_chebyshev_polynomial_v_x_scalar_out<S: Into<Scalar>>(
out: &Tensor,
x: S,
n: &Tensor,
) -> Tensor {
Tensor::f_special_shifted_chebyshev_polynomial_v_x_scalar_out(out, x, n).unwrap()
}
pub fn special_shifted_chebyshev_polynomial_w(x: &Tensor, n: &Tensor) -> Tensor {
Tensor::f_special_shifted_chebyshev_polynomial_w(x, n).unwrap()
}
pub fn special_shifted_chebyshev_polynomial_w_n_scalar<S: Into<Scalar>>(
x: &Tensor,
n: S,
) -> Tensor {
Tensor::f_special_shifted_chebyshev_polynomial_w_n_scalar(x, n).unwrap()
}
pub fn special_shifted_chebyshev_polynomial_w_n_scalar_out<S: Into<Scalar>>(
out: &Tensor,
x: &Tensor,
n: S,
) -> Tensor {
Tensor::f_special_shifted_chebyshev_polynomial_w_n_scalar_out(out, x, n).unwrap()
}
pub fn special_shifted_chebyshev_polynomial_w_out(
out: &Tensor,
x: &Tensor,
n: &Tensor,
) -> Tensor {
Tensor::f_special_shifted_chebyshev_polynomial_w_out(out, x, n).unwrap()
}
pub fn special_shifted_chebyshev_polynomial_w_x_scalar<S: Into<Scalar>>(
x: S,
n: &Tensor,
) -> Tensor {
Tensor::f_special_shifted_chebyshev_polynomial_w_x_scalar(x, n).unwrap()
}
pub fn special_shifted_chebyshev_polynomial_w_x_scalar_out<S: Into<Scalar>>(
out: &Tensor,
x: S,
n: &Tensor,
) -> Tensor {
Tensor::f_special_shifted_chebyshev_polynomial_w_x_scalar_out(out, x, n).unwrap()
}
pub fn special_sinc(&self) -> Tensor {
self.f_special_sinc().unwrap()
}
pub fn special_sinc_out(&self, out: &Tensor) -> Tensor {
self.f_special_sinc_out(out).unwrap()
}
pub fn special_softmax(&self, dim: i64, dtype: impl Into<Option<Kind>>) -> Tensor {
self.f_special_softmax(dim, dtype).unwrap()
}
pub fn special_spherical_bessel_j0(x: &Tensor) -> Tensor {
Tensor::f_special_spherical_bessel_j0(x).unwrap()
}
pub fn special_spherical_bessel_j0_out(out: &Tensor, x: &Tensor) -> Tensor {
Tensor::f_special_spherical_bessel_j0_out(out, x).unwrap()
}
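/// `special_xlog1py` computes `self * log1p(other)` elementwise. The
/// `*_other_scalar` / `*_self_scalar` variants fix one operand to a scalar, and
/// the `self_scalar` forms are associated functions because no tensor receiver
/// remains.
///
/// Sketch only (not compiled here); assumes a float tensor `x`:
///
/// ```ignore
/// let y = x.special_xlog1py_other_scalar(0.5);          // x * ln(1.5)
/// let z = Tensor::special_xlog1py_self_scalar(2.0, &x); // 2 * ln(1 + x)
/// ```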
pub fn special_xlog1py(&self, other: &Tensor) -> Tensor {
self.f_special_xlog1py(other).unwrap()
}
pub fn special_xlog1py_other_scalar<S: Into<Scalar>>(&self, other: S) -> Tensor {
self.f_special_xlog1py_other_scalar(other).unwrap()
}
pub fn special_xlog1py_other_scalar_out<S: Into<Scalar>>(
&self,
out: &Tensor,
other: S,
) -> Tensor {
self.f_special_xlog1py_other_scalar_out(out, other).unwrap()
}
pub fn special_xlog1py_out(&self, out: &Tensor, other: &Tensor) -> Tensor {
self.f_special_xlog1py_out(out, other).unwrap()
}
pub fn special_xlog1py_self_scalar<S: Into<Scalar>>(self_scalar: S, other: &Tensor) -> Tensor {
Tensor::f_special_xlog1py_self_scalar(self_scalar, other).unwrap()
}
pub fn special_xlog1py_self_scalar_out<S: Into<Scalar>>(
out: &Tensor,
self_scalar: S,
other: &Tensor,
) -> Tensor {
Tensor::f_special_xlog1py_self_scalar_out(out, self_scalar, other).unwrap()
}
pub fn special_xlogy(&self, other: &Tensor) -> Tensor {
self.f_special_xlogy(other).unwrap()
}
pub fn special_xlogy_other_scalar<S: Into<Scalar>>(&self, other: S) -> Tensor {
self.f_special_xlogy_other_scalar(other).unwrap()
}
pub fn special_xlogy_other_scalar_out<S: Into<Scalar>>(
&self,
out: &Tensor,
other: S,
) -> Tensor {
self.f_special_xlogy_other_scalar_out(out, other).unwrap()
}
pub fn special_xlogy_out(&self, out: &Tensor, other: &Tensor) -> Tensor {
self.f_special_xlogy_out(out, other).unwrap()
}
pub fn special_xlogy_self_scalar<S: Into<Scalar>>(self_scalar: S, other: &Tensor) -> Tensor {
Tensor::f_special_xlogy_self_scalar(self_scalar, other).unwrap()
}
pub fn special_xlogy_self_scalar_out<S: Into<Scalar>>(
out: &Tensor,
self_scalar: S,
other: &Tensor,
) -> Tensor {
Tensor::f_special_xlogy_self_scalar_out(out, self_scalar, other).unwrap()
}
pub fn special_zeta(&self, other: &Tensor) -> Tensor {
self.f_special_zeta(other).unwrap()
}
pub fn special_zeta_other_scalar<S: Into<Scalar>>(&self, other: S) -> Tensor {
self.f_special_zeta_other_scalar(other).unwrap()
}
pub fn special_zeta_other_scalar_out<S: Into<Scalar>>(&self, out: &Tensor, other: S) -> Tensor {
self.f_special_zeta_other_scalar_out(out, other).unwrap()
}
pub fn special_zeta_out(&self, out: &Tensor, other: &Tensor) -> Tensor {
self.f_special_zeta_out(out, other).unwrap()
}
pub fn special_zeta_self_scalar<S: Into<Scalar>>(self_scalar: S, other: &Tensor) -> Tensor {
Tensor::f_special_zeta_self_scalar(self_scalar, other).unwrap()
}
pub fn special_zeta_self_scalar_out<S: Into<Scalar>>(
out: &Tensor,
self_scalar: S,
other: &Tensor,
) -> Tensor {
Tensor::f_special_zeta_self_scalar_out(out, self_scalar, other).unwrap()
}
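/// `split` cuts a tensor into equally sized chunks of `split_size` along `dim`
/// (the last chunk may be smaller), while `split_with_sizes` takes an explicit
/// list of chunk lengths.
///
/// Sketch only (not compiled here); assumes a `[6, 4]`-shaped tensor `t` and
/// that an `&[i64]` slice is accepted for the size list:
///
/// ```ignore
/// let halves = t.split(3, 0);                          // two chunks of 3 rows each
/// let uneven = t.split_with_sizes(&[1i64, 5][..], 0);  // rows 0..1 and 1..6
/// ```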
pub fn split(&self, split_size: i64, dim: i64) -> Vec<Tensor> {
self.f_split(split_size, dim).unwrap()
}
pub fn split_copy(&self, split_size: i64, dim: i64) -> Vec<Tensor> {
self.f_split_copy(split_size, dim).unwrap()
}
pub fn split_copy_tensor_out<T: Borrow<Tensor>>(&self, out: &[T], split_size: i64, dim: i64) {
self.f_split_copy_tensor_out(out, split_size, dim).unwrap()
}
pub fn split_sizes(&self, split_size: impl IntList, dim: i64) -> Vec<Tensor> {
self.f_split_sizes(split_size, dim).unwrap()
}
pub fn split_with_sizes(&self, split_sizes: impl IntList, dim: i64) -> Vec<Tensor> {
self.f_split_with_sizes(split_sizes, dim).unwrap()
}
pub fn split_with_sizes_copy(&self, split_sizes: impl IntList, dim: i64) -> Vec<Tensor> {
self.f_split_with_sizes_copy(split_sizes, dim).unwrap()
}
pub fn split_with_sizes_copy_out<T: Borrow<Tensor>>(
&self,
out: &[T],
split_sizes: impl IntList,
dim: i64,
) {
self.f_split_with_sizes_copy_out(out, split_sizes, dim).unwrap()
}
pub fn sqrt(&self) -> Tensor {
self.f_sqrt().unwrap()
}
pub fn sqrt_(&mut self) -> Tensor {
self.f_sqrt_().unwrap()
}
pub fn sqrt_out(&self, out: &Tensor) -> Tensor {
self.f_sqrt_out(out).unwrap()
}
pub fn square(&self) -> Tensor {
self.f_square().unwrap()
}
pub fn square_(&mut self) -> Tensor {
self.f_square_().unwrap()
}
pub fn square_out(&self, out: &Tensor) -> Tensor {
self.f_square_out(out).unwrap()
}
pub fn squeeze(&self) -> Tensor {
self.f_squeeze().unwrap()
}
pub fn squeeze_(&mut self) -> Tensor {
self.f_squeeze_().unwrap()
}
pub fn squeeze_copy(&self) -> Tensor {
self.f_squeeze_copy().unwrap()
}
pub fn squeeze_copy_dim(&self, dim: i64) -> Tensor {
self.f_squeeze_copy_dim(dim).unwrap()
}
pub fn squeeze_copy_dim_out(&self, out: &Tensor, dim: i64) -> Tensor {
self.f_squeeze_copy_dim_out(out, dim).unwrap()
}
pub fn squeeze_copy_dims(&self, dim: impl IntList) -> Tensor {
self.f_squeeze_copy_dims(dim).unwrap()
}
pub fn squeeze_copy_dims_out(&self, out: &Tensor, dim: impl IntList) -> Tensor {
self.f_squeeze_copy_dims_out(out, dim).unwrap()
}
pub fn squeeze_copy_out(&self, out: &Tensor) -> Tensor {
self.f_squeeze_copy_out(out).unwrap()
}
pub fn squeeze_dim(&self, dim: i64) -> Tensor {
self.f_squeeze_dim(dim).unwrap()
}
pub fn squeeze_dim_(&mut self, dim: i64) -> Tensor {
self.f_squeeze_dim_(dim).unwrap()
}
pub fn squeeze_dims(&self, dim: impl IntList) -> Tensor {
self.f_squeeze_dims(dim).unwrap()
}
pub fn squeeze_dims_(&mut self, dim: impl IntList) -> Tensor {
self.f_squeeze_dims_(dim).unwrap()
}
pub fn sspaddmm(&self, mat1: &Tensor, mat2: &Tensor) -> Tensor {
self.f_sspaddmm(mat1, mat2).unwrap()
}
pub fn sspaddmm_out(&self, out: &Tensor, mat1: &Tensor, mat2: &Tensor) -> Tensor {
self.f_sspaddmm_out(out, mat1, mat2).unwrap()
}
pub fn stack<T: Borrow<Tensor>>(tensors: &[T], dim: i64) -> Tensor {
Tensor::f_stack(tensors, dim).unwrap()
}
pub fn stack_out<T: Borrow<Tensor>>(out: &Tensor, tensors: &[T], dim: i64) -> Tensor {
Tensor::f_stack_out(out, tensors, dim).unwrap()
}
pub fn std(&self, unbiased: bool) -> Tensor {
self.f_std(unbiased).unwrap()
}
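/// `std_correction` generalises the `unbiased` flag of `std`/`std_dim`: the
/// divisor is `N - correction`, so `correction = 0` matches `unbiased = false`
/// and `correction = 1` gives Bessel's correction.
///
/// Sketch only (not compiled here); assumes a floating-point tensor `t` and
/// that an `&[i64]` slice is accepted for the optional `dim` list:
///
/// ```ignore
/// // Sample standard deviation over dim 0, keeping the reduced dimension.
/// let sd = t.std_correction(&[0i64][..], 1i64, true);
/// ```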
pub fn std_correction<S: Into<Scalar>>(
&self,
dim: impl IntListOption,
correction: S,
keepdim: bool,
) -> Tensor {
self.f_std_correction(dim, correction, keepdim).unwrap()
}
pub fn std_correction_out<S: Into<Scalar>>(
&self,
out: &Tensor,
dim: impl IntListOption,
correction: S,
keepdim: bool,
) -> Tensor {
self.f_std_correction_out(out, dim, correction, keepdim).unwrap()
}
pub fn std_dim(&self, dim: impl IntListOption, unbiased: bool, keepdim: bool) -> Tensor {
self.f_std_dim(dim, unbiased, keepdim).unwrap()
}
pub fn std_mean(&self, unbiased: bool) -> (Tensor, Tensor) {
self.f_std_mean(unbiased).unwrap()
}
pub fn std_mean_correction<S: Into<Scalar>>(
&self,
dim: impl IntListOption,
correction: S,
keepdim: bool,
) -> (Tensor, Tensor) {
self.f_std_mean_correction(dim, correction, keepdim).unwrap()
}
pub fn std_mean_correction_out<S: Into<Scalar>>(
&self,
out0: &Tensor,
out1: &Tensor,
dim: impl IntListOption,
correction: S,
keepdim: bool,
) -> (Tensor, Tensor) {
self.f_std_mean_correction_out(out0, out1, dim, correction, keepdim).unwrap()
}
pub fn std_mean_dim(
&self,
dim: impl IntListOption,
unbiased: bool,
keepdim: bool,
) -> (Tensor, Tensor) {
self.f_std_mean_dim(dim, unbiased, keepdim).unwrap()
}
pub fn std_out(
&self,
out: &Tensor,
dim: impl IntListOption,
unbiased: bool,
keepdim: bool,
) -> Tensor {
self.f_std_out(out, dim, unbiased, keepdim).unwrap()
}
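/// Short-time Fourier transform. `window` is optional (passing `None::<Tensor>`
/// uses a rectangular window) and `return_complex` selects a complex-valued
/// result instead of the stacked real/imaginary layout; `stft_center` adds the
/// `center`/`pad_mode` controls.
///
/// Sketch only (not compiled here); assumes a 1-D float signal `signal`:
///
/// ```ignore
/// let spec = signal.stft(
///     256,            // n_fft
///     64i64,          // hop_length
///     None::<i64>,    // win_length
///     None::<Tensor>, // window
///     false,          // normalized
///     true,           // onesided
///     true,           // return_complex
/// );
/// ```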
pub fn stft<T: Borrow<Tensor>>(
&self,
n_fft: i64,
hop_length: impl Into<Option<i64>>,
win_length: impl Into<Option<i64>>,
window: Option<T>,
normalized: bool,
onesided: bool,
return_complex: bool,
) -> Tensor {
self.f_stft(n_fft, hop_length, win_length, window, normalized, onesided, return_complex)
.unwrap()
}
pub fn stft_center<T: Borrow<Tensor>>(
&self,
n_fft: i64,
hop_length: impl Into<Option<i64>>,
win_length: impl Into<Option<i64>>,
window: Option<T>,
center: bool,
pad_mode: &str,
normalized: bool,
onesided: bool,
return_complex: bool,
) -> Tensor {
self.f_stft_center(
n_fft,
hop_length,
win_length,
window,
center,
pad_mode,
normalized,
onesided,
return_complex,
)
.unwrap()
}
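/// The `g_` prefix marks generated arithmetic methods whose natural names
/// (`sub`, `sub_`, `sub_scalar`, ...) would clash with operator-style methods
/// defined elsewhere in the crate; they perform ordinary elementwise subtraction.
///
/// Sketch only (not compiled here); assumes two same-shaped tensors `a` and `b`:
///
/// ```ignore
/// let diff = a.g_sub(&b);                // a - b, allocating a new tensor
/// let shifted = a.g_sub_scalar(1.5);     // a - 1.5
/// ```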
pub fn g_sub(&self, other: &Tensor) -> Tensor {
self.f_sub(other).unwrap()
}
pub fn g_sub_(&mut self, other: &Tensor) -> Tensor {
self.f_sub_(other).unwrap()
}
pub fn sub_out(&self, out: &Tensor, other: &Tensor) -> Tensor {
self.f_sub_out(out, other).unwrap()
}
pub fn g_sub_scalar<S: Into<Scalar>>(&self, other: S) -> Tensor {
self.f_sub_scalar(other).unwrap()
}
pub fn g_sub_scalar_<S: Into<Scalar>>(&mut self, other: S) -> Tensor {
self.f_sub_scalar_(other).unwrap()
}
pub fn sub_scalar_out<S: Into<Scalar>>(&self, out: &Tensor, other: S) -> Tensor {
self.f_sub_scalar_out(out, other).unwrap()
}
pub fn subtract(&self, other: &Tensor) -> Tensor {
self.f_subtract(other).unwrap()
}
pub fn subtract_(&mut self, other: &Tensor) -> Tensor {
self.f_subtract_(other).unwrap()
}
pub fn subtract_out(&self, out: &Tensor, other: &Tensor) -> Tensor {
self.f_subtract_out(out, other).unwrap()
}
pub fn subtract_scalar<S: Into<Scalar>>(&self, other: S) -> Tensor {
self.f_subtract_scalar(other).unwrap()
}
pub fn subtract_scalar_<S: Into<Scalar>>(&mut self, other: S) -> Tensor {
self.f_subtract_scalar_(other).unwrap()
}
pub fn sum(&self, dtype: impl Into<Option<Kind>>) -> Tensor {
self.f_sum(dtype).unwrap()
}
pub fn sum_dim_intlist(
&self,
dim: impl IntListOption,
keepdim: bool,
dtype: impl Into<Option<Kind>>,
) -> Tensor {
self.f_sum_dim_intlist(dim, keepdim, dtype).unwrap()
}
pub fn sum_intlist_out(
&self,
out: &Tensor,
dim: impl IntListOption,
keepdim: bool,
dtype: impl Into<Option<Kind>>,
) -> Tensor {
self.f_sum_intlist_out(out, dim, keepdim, dtype).unwrap()
}
pub fn sum_out(&self, out: &Tensor, dtype: impl Into<Option<Kind>>) -> Tensor {
self.f_sum_out(out, dtype).unwrap()
}
pub fn sum_to_size(&self, size: impl IntList) -> Tensor {
self.f_sum_to_size(size).unwrap()
}
pub fn svd(&self, some: bool, compute_uv: bool) -> (Tensor, Tensor, Tensor) {
self.f_svd(some, compute_uv).unwrap()
}
pub fn svd_u(
&self,
u: &Tensor,
s: &Tensor,
v: &Tensor,
some: bool,
compute_uv: bool,
) -> (Tensor, Tensor, Tensor) {
self.f_svd_u(u, s, v, some, compute_uv).unwrap()
}
pub fn swapaxes(&self, axis0: i64, axis1: i64) -> Tensor {
self.f_swapaxes(axis0, axis1).unwrap()
}
pub fn swapaxes_(&mut self, axis0: i64, axis1: i64) -> Tensor {
self.f_swapaxes_(axis0, axis1).unwrap()
}
pub fn swapdims(&self, dim0: i64, dim1: i64) -> Tensor {
self.f_swapdims(dim0, dim1).unwrap()
}
pub fn swapdims_(&mut self, dim0: i64, dim1: i64) -> Tensor {
self.f_swapdims_(dim0, dim1).unwrap()
}
pub fn tr(&self) -> Tensor {
self.f_tr().unwrap()
}
pub fn t_(&mut self) -> Tensor {
self.f_t_().unwrap()
}
pub fn t_copy(&self) -> Tensor {
self.f_t_copy().unwrap()
}
pub fn t_copy_out(&self, out: &Tensor) -> Tensor {
self.f_t_copy_out(out).unwrap()
}
pub fn take(&self, index: &Tensor) -> Tensor {
self.f_take(index).unwrap()
}
pub fn take_along_dim(&self, indices: &Tensor, dim: impl Into<Option<i64>>) -> Tensor {
self.f_take_along_dim(indices, dim).unwrap()
}
pub fn take_along_dim_out(
&self,
out: &Tensor,
indices: &Tensor,
dim: impl Into<Option<i64>>,
) -> Tensor {
self.f_take_along_dim_out(out, indices, dim).unwrap()
}
pub fn take_out(&self, out: &Tensor, index: &Tensor) -> Tensor {
self.f_take_out(out, index).unwrap()
}
pub fn tan(&self) -> Tensor {
self.f_tan().unwrap()
}
pub fn tan_(&mut self) -> Tensor {
self.f_tan_().unwrap()
}
pub fn tan_out(&self, out: &Tensor) -> Tensor {
self.f_tan_out(out).unwrap()
}
pub fn tanh(&self) -> Tensor {
self.f_tanh().unwrap()
}
pub fn tanh_(&mut self) -> Tensor {
self.f_tanh_().unwrap()
}
pub fn tanh_backward(grad_output: &Tensor, output: &Tensor) -> Tensor {
Tensor::f_tanh_backward(grad_output, output).unwrap()
}
pub fn tanh_backward_grad_input(
grad_input: &Tensor,
grad_output: &Tensor,
output: &Tensor,
) -> Tensor {
Tensor::f_tanh_backward_grad_input(grad_input, grad_output, output).unwrap()
}
pub fn tanh_out(&self, out: &Tensor) -> Tensor {
self.f_tanh_out(out).unwrap()
}
pub fn tensor_split(&self, sections: i64, dim: i64) -> Vec<Tensor> {
self.f_tensor_split(sections, dim).unwrap()
}
pub fn tensor_split_indices(&self, indices: impl IntList, dim: i64) -> Vec<Tensor> {
self.f_tensor_split_indices(indices, dim).unwrap()
}
pub fn tensor_split_tensor_indices_or_sections(
&self,
tensor_indices_or_sections: &Tensor,
dim: i64,
) -> Vec<Tensor> {
self.f_tensor_split_tensor_indices_or_sections(tensor_indices_or_sections, dim).unwrap()
}
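/// Generalised tensor contraction: sums products over the dimensions listed in
/// `dims_self` (of `self`) and `dims_other` (of `other`), which must have equal
/// length and pairwise matching sizes.
///
/// Sketch only (not compiled here); assumes `a` of shape `[2, 3]`, `b` of shape
/// `[3, 4]`, and that `&[i64]` slices are accepted for the dimension lists:
///
/// ```ignore
/// // Contract a's dim 1 with b's dim 0: an ordinary matrix product, shape [2, 4].
/// let prod = a.tensordot(&b, &[1i64][..], &[0i64][..]);
/// ```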
pub fn tensordot(
&self,
other: &Tensor,
dims_self: impl IntList,
dims_other: impl IntList,
) -> Tensor {
self.f_tensordot(other, dims_self, dims_other).unwrap()
}
pub fn tensordot_out(
&self,
out: &Tensor,
other: &Tensor,
dims_self: impl IntList,
dims_other: impl IntList,
) -> Tensor {
self.f_tensordot_out(out, other, dims_self, dims_other).unwrap()
}
pub fn threshold<S: Into<Scalar>>(&self, threshold: S, value: S) -> Tensor {
self.f_threshold(threshold, value).unwrap()
}
pub fn threshold_<S: Into<Scalar>>(&mut self, threshold: S, value: S) -> Tensor {
self.f_threshold_(threshold, value).unwrap()
}
pub fn threshold_backward<S: Into<Scalar>>(
&self,
grad_output: &Tensor,
threshold: S,
) -> Tensor {
self.f_threshold_backward(grad_output, threshold).unwrap()
}
pub fn threshold_backward_grad_input<S: Into<Scalar>>(
&self,
grad_input: &Tensor,
grad_output: &Tensor,
threshold: S,
) -> Tensor {
self.f_threshold_backward_grad_input(grad_input, grad_output, threshold).unwrap()
}
pub fn threshold_out<S: Into<Scalar>>(&self, out: &Tensor, threshold: S, value: S) -> Tensor {
self.f_threshold_out(out, threshold, value).unwrap()
}
pub fn tile(&self, dims: impl IntList) -> Tensor {
self.f_tile(dims).unwrap()
}
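/// Device/dtype conversion helpers: `to` moves the tensor to a device,
/// `to_dtype` casts the element type (with `non_blocking` and `copy` controls),
/// and `to_dtype_layout` takes a `(Kind, Device)` pair like the factory
/// functions in this file.
///
/// Sketch only (not compiled here); assumes a tensor `t`:
///
/// ```ignore
/// let on_cpu = t.to(Device::Cpu);
/// let as_f64 = t.to_dtype(Kind::Double, /* non_blocking= */ false, /* copy= */ false);
/// ```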
pub fn to(&self, device: Device) -> Tensor {
self.f_to(device).unwrap()
}
pub fn to_dense(&self, dtype: impl Into<Option<Kind>>, masked_grad: bool) -> Tensor {
self.f_to_dense(dtype, masked_grad).unwrap()
}
pub fn to_dense_backward(&self, grad: &Tensor, masked_grad: bool) -> Tensor {
self.f_to_dense_backward(grad, masked_grad).unwrap()
}
pub fn to_device_(
&self,
device: Device,
dtype: Kind,
non_blocking: bool,
copy: bool,
) -> Tensor {
self.f_to_device_(device, dtype, non_blocking, copy).unwrap()
}
pub fn to_dtype(&self, dtype: Kind, non_blocking: bool, copy: bool) -> Tensor {
self.f_to_dtype(dtype, non_blocking, copy).unwrap()
}
pub fn to_dtype_layout(
&self,
options: (Kind, Device),
non_blocking: bool,
copy: bool,
) -> Tensor {
self.f_to_dtype_layout(options, non_blocking, copy).unwrap()
}
pub fn g_to_mkldnn(&self, dtype: impl Into<Option<Kind>>) -> Tensor {
self.f_to_mkldnn(dtype).unwrap()
}
pub fn to_mkldnn_backward(&self, grad: &Tensor) -> Tensor {
self.f_to_mkldnn_backward(grad).unwrap()
}
pub fn to_mkldnn_out(&self, out: &Tensor, dtype: impl Into<Option<Kind>>) -> Tensor {
self.f_to_mkldnn_out(out, dtype).unwrap()
}
pub fn to_other(&self, other: &Tensor, non_blocking: bool, copy: bool) -> Tensor {
self.f_to_other(other, non_blocking, copy).unwrap()
}
pub fn to_padded_tensor(&self, padding: f64, output_size: impl IntListOption) -> Tensor {
self.f_to_padded_tensor(padding, output_size).unwrap()
}
pub fn to_padded_tensor_out(
&self,
out: &Tensor,
padding: f64,
output_size: impl IntListOption,
) -> Tensor {
self.f_to_padded_tensor_out(out, padding, output_size).unwrap()
}
pub fn to_sparse(
&self,
layout: Option<Layout>,
blocksize: impl IntListOption,
dense_dim: impl Into<Option<i64>>,
) -> Tensor {
self.f_to_sparse(layout, blocksize, dense_dim).unwrap()
}
pub fn to_sparse_bsc(
&self,
blocksize: impl IntList,
dense_dim: impl Into<Option<i64>>,
) -> Tensor {
self.f_to_sparse_bsc(blocksize, dense_dim).unwrap()
}
pub fn to_sparse_bsr(
&self,
blocksize: impl IntList,
dense_dim: impl Into<Option<i64>>,
) -> Tensor {
self.f_to_sparse_bsr(blocksize, dense_dim).unwrap()
}
pub fn to_sparse_csc(&self, dense_dim: impl Into<Option<i64>>) -> Tensor {
self.f_to_sparse_csc(dense_dim).unwrap()
}
pub fn to_sparse_csr(&self, dense_dim: impl Into<Option<i64>>) -> Tensor {
self.f_to_sparse_csr(dense_dim).unwrap()
}
pub fn to_sparse_sparse_dim(&self, sparse_dim: i64) -> Tensor {
self.f_to_sparse_sparse_dim(sparse_dim).unwrap()
}
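/// Returns the `k` largest (or smallest, with `largest = false`) values along
/// `dim` together with their indices; `sorted` controls whether the returned
/// values are ordered.
///
/// Sketch only (not compiled here); assumes a 1-D tensor `t`:
///
/// ```ignore
/// let (values, indices) = t.topk(3, 0, /* largest= */ true, /* sorted= */ true);
/// ```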
pub fn topk(&self, k: i64, dim: i64, largest: bool, sorted: bool) -> (Tensor, Tensor) {
self.f_topk(k, dim, largest, sorted).unwrap()
}
pub fn topk_values(
&self,
values: &Tensor,
indices: &Tensor,
k: i64,
dim: i64,
largest: bool,
sorted: bool,
) -> (Tensor, Tensor) {
self.f_topk_values(values, indices, k, dim, largest, sorted).unwrap()
}
pub fn totype(&self, scalar_type: Kind) -> Tensor {
self.f_totype(scalar_type).unwrap()
}
pub fn trace(&self) -> Tensor {
self.f_trace().unwrap()
}
pub fn trace_backward(grad: &Tensor, sizes: impl IntList) -> Tensor {
Tensor::f_trace_backward(grad, sizes).unwrap()
}
pub fn trace_out(&self, out: &Tensor) -> Tensor {
self.f_trace_out(out).unwrap()
}
pub fn transpose(&self, dim0: i64, dim1: i64) -> Tensor {
self.f_transpose(dim0, dim1).unwrap()
}
pub fn transpose_(&mut self, dim0: i64, dim1: i64) -> Tensor {
self.f_transpose_(dim0, dim1).unwrap()
}
pub fn transpose_copy(&self, dim0: i64, dim1: i64) -> Tensor {
self.f_transpose_copy(dim0, dim1).unwrap()
}
pub fn transpose_copy_int_out(&self, out: &Tensor, dim0: i64, dim1: i64) -> Tensor {
self.f_transpose_copy_int_out(out, dim0, dim1).unwrap()
}
pub fn trapezoid(y: &Tensor, dim: i64) -> Tensor {
Tensor::f_trapezoid(y, dim).unwrap()
}
pub fn trapezoid_x(y: &Tensor, x: &Tensor, dim: i64) -> Tensor {
Tensor::f_trapezoid_x(y, x, dim).unwrap()
}
pub fn trapz(y: &Tensor, x: &Tensor, dim: i64) -> Tensor {
Tensor::f_trapz(y, x, dim).unwrap()
}
pub fn trapz_dx(y: &Tensor, dx: f64, dim: i64) -> Tensor {
Tensor::f_trapz_dx(y, dx, dim).unwrap()
}
pub fn triangular_solve(
&self,
a: &Tensor,
upper: bool,
transpose: bool,
unitriangular: bool,
) -> (Tensor, Tensor) {
self.f_triangular_solve(a, upper, transpose, unitriangular).unwrap()
}
pub fn triangular_solve_x(
&self,
x: &Tensor,
m: &Tensor,
a: &Tensor,
upper: bool,
transpose: bool,
unitriangular: bool,
) -> (Tensor, Tensor) {
self.f_triangular_solve_x(x, m, a, upper, transpose, unitriangular).unwrap()
}
pub fn tril(&self, diagonal: i64) -> Tensor {
self.f_tril(diagonal).unwrap()
}
pub fn tril_(&mut self, diagonal: i64) -> Tensor {
self.f_tril_(diagonal).unwrap()
}
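/// `tril_indices`/`triu_indices` build a `2 x N` index tensor covering the
/// lower / upper triangle of a `row x col` matrix, where `offset` shifts the
/// diagonal; `options` picks the integer kind and device, as with the other
/// factory functions in this file.
///
/// Sketch only (not compiled here):
///
/// ```ignore
/// // Indices of the 6 lower-triangular entries of a 3x3 matrix.
/// let idx = Tensor::tril_indices(3, 3, 0, (Kind::Int64, Device::Cpu));
/// ```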
pub fn tril_indices(row: i64, col: i64, offset: i64, options: (Kind, Device)) -> Tensor {
Tensor::f_tril_indices(row, col, offset, options).unwrap()
}
pub fn tril_indices_out(out: &Tensor, row: i64, col: i64, offset: i64) -> Tensor {
Tensor::f_tril_indices_out(out, row, col, offset).unwrap()
}
pub fn tril_out(&self, out: &Tensor, diagonal: i64) -> Tensor {
self.f_tril_out(out, diagonal).unwrap()
}
pub fn triplet_margin_loss(
anchor: &Tensor,
positive: &Tensor,
negative: &Tensor,
margin: f64,
p: f64,
eps: f64,
swap: bool,
reduction: crate::Reduction,
) -> Tensor {
Tensor::f_triplet_margin_loss(anchor, positive, negative, margin, p, eps, swap, reduction)
.unwrap()
}
pub fn triu(&self, diagonal: i64) -> Tensor {
self.f_triu(diagonal).unwrap()
}
pub fn triu_(&mut self, diagonal: i64) -> Tensor {
self.f_triu_(diagonal).unwrap()
}
pub fn triu_indices(row: i64, col: i64, offset: i64, options: (Kind, Device)) -> Tensor {
Tensor::f_triu_indices(row, col, offset, options).unwrap()
}
pub fn triu_indices_out(out: &Tensor, row: i64, col: i64, offset: i64) -> Tensor {
Tensor::f_triu_indices_out(out, row, col, offset).unwrap()
}
pub fn triu_out(&self, out: &Tensor, diagonal: i64) -> Tensor {
self.f_triu_out(out, diagonal).unwrap()
}
pub fn true_divide(&self, other: &Tensor) -> Tensor {
self.f_true_divide(other).unwrap()
}
pub fn true_divide_(&mut self, other: &Tensor) -> Tensor {
self.f_true_divide_(other).unwrap()
}
pub fn true_divide_out(&self, out: &Tensor, other: &Tensor) -> Tensor {
self.f_true_divide_out(out, other).unwrap()
}
pub fn true_divide_scalar<S: Into<Scalar>>(&self, other: S) -> Tensor {
self.f_true_divide_scalar(other).unwrap()
}
pub fn true_divide_scalar_<S: Into<Scalar>>(&mut self, other: S) -> Tensor {
self.f_true_divide_scalar_(other).unwrap()
}
pub fn trunc(&self) -> Tensor {
self.f_trunc().unwrap()
}
pub fn trunc_(&mut self) -> Tensor {
self.f_trunc_().unwrap()
}
pub fn trunc_out(&self, out: &Tensor) -> Tensor {
self.f_trunc_out(out).unwrap()
}
pub fn type_as(&self, other: &Tensor) -> Tensor {
self.f_type_as(other).unwrap()
}
pub fn unbind(&self, dim: i64) -> Vec<Tensor> {
self.f_unbind(dim).unwrap()
}
pub fn unbind_copy(&self, dim: i64) -> Vec<Tensor> {
self.f_unbind_copy(dim).unwrap()
}
pub fn unbind_copy_int_out<T: Borrow<Tensor>>(&self, out: &[T], dim: i64) {
self.f_unbind_copy_int_out(out, dim).unwrap()
}
pub fn unflatten(&self, dim: i64, sizes: impl IntList) -> Tensor {
self.f_unflatten(dim, sizes).unwrap()
}
pub fn unflatten_dense_tensors<T: Borrow<Tensor>>(flat: &Tensor, tensors: &[T]) -> Vec<Tensor> {
Tensor::f_unflatten_dense_tensors(flat, tensors).unwrap()
}
pub fn unfold(&self, dimension: i64, size: i64, step: i64) -> Tensor {
self.f_unfold(dimension, size, step).unwrap()
}
pub fn unfold_backward(
grad_in: &Tensor,
input_sizes: impl IntList,
dim: i64,
size: i64,
step: i64,
) -> Tensor {
Tensor::f_unfold_backward(grad_in, input_sizes, dim, size, step).unwrap()
}
pub fn unfold_backward_out(
out: &Tensor,
grad_in: &Tensor,
input_sizes: impl IntList,
dim: i64,
size: i64,
step: i64,
) -> Tensor {
Tensor::f_unfold_backward_out(out, grad_in, input_sizes, dim, size, step).unwrap()
}
pub fn unfold_copy(&self, dimension: i64, size: i64, step: i64) -> Tensor {
self.f_unfold_copy(dimension, size, step).unwrap()
}
pub fn unfold_copy_out(&self, out: &Tensor, dimension: i64, size: i64, step: i64) -> Tensor {
self.f_unfold_copy_out(out, dimension, size, step).unwrap()
}
pub fn uniform(&self, from: f64, to: f64) -> Tensor {
self.f_uniform(from, to).unwrap()
}
pub fn uniform_(&mut self, from: f64, to: f64) -> Tensor {
self.f_uniform_(from, to).unwrap()
}
pub fn uniform_out(&self, out: &Tensor, from: f64, to: f64) -> Tensor {
self.f_uniform_out(out, from, to).unwrap()
}
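/// Collapses consecutive repeated values, over the whole flattened tensor or
/// along `dim` when one is given; the returned triple is
/// `(values, inverse_indices, counts)`, with the last two only populated when
/// the corresponding flag is set.
///
/// Sketch only (not compiled here); assumes a 1-D tensor `t`:
///
/// ```ignore
/// let (vals, _inverse, counts) = t.unique_consecutive(false, true, None::<i64>);
/// ```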
pub fn unique_consecutive(
&self,
return_inverse: bool,
return_counts: bool,
dim: impl Into<Option<i64>>,
) -> (Tensor, Tensor, Tensor) {
self.f_unique_consecutive(return_inverse, return_counts, dim).unwrap()
}
pub fn unique_consecutive_out(
&self,
out0: &Tensor,
out1: &Tensor,
out2: &Tensor,
return_inverse: bool,
return_counts: bool,
dim: impl Into<Option<i64>>,
) -> (Tensor, Tensor, Tensor) {
self.f_unique_consecutive_out(out0, out1, out2, return_inverse, return_counts, dim).unwrap()
}
pub fn unique_dim(
&self,
dim: i64,
sorted: bool,
return_inverse: bool,
return_counts: bool,
) -> (Tensor, Tensor, Tensor) {
self.f_unique_dim(dim, sorted, return_inverse, return_counts).unwrap()
}
pub fn unique_dim_consecutive(
&self,
dim: i64,
return_inverse: bool,
return_counts: bool,
) -> (Tensor, Tensor, Tensor) {
self.f_unique_dim_consecutive(dim, return_inverse, return_counts).unwrap()
}
pub fn unique_dim_consecutive_out(
&self,
out0: &Tensor,
out1: &Tensor,
out2: &Tensor,
dim: i64,
return_inverse: bool,
return_counts: bool,
) -> (Tensor, Tensor, Tensor) {
self.f_unique_dim_consecutive_out(out0, out1, out2, dim, return_inverse, return_counts)
.unwrap()
}
pub fn unique_dim_out(
&self,
out0: &Tensor,
out1: &Tensor,
out2: &Tensor,
dim: i64,
sorted: bool,
return_inverse: bool,
return_counts: bool,
) -> (Tensor, Tensor, Tensor) {
self.f_unique_dim_out(out0, out1, out2, dim, sorted, return_inverse, return_counts).unwrap()
}
pub fn unsafe_chunk(&self, chunks: i64, dim: i64) -> Vec<Tensor> {
self.f_unsafe_chunk(chunks, dim).unwrap()
}
pub fn unsafe_split(&self, split_size: i64, dim: i64) -> Vec<Tensor> {
self.f_unsafe_split(split_size, dim).unwrap()
}
pub fn unsafe_split_tensor_out<T: Borrow<Tensor>>(&self, out: &[T], split_size: i64, dim: i64) {
self.f_unsafe_split_tensor_out(out, split_size, dim).unwrap()
}
pub fn unsafe_split_with_sizes(&self, split_sizes: impl IntList, dim: i64) -> Vec<Tensor> {
self.f_unsafe_split_with_sizes(split_sizes, dim).unwrap()
}
pub fn unsafe_split_with_sizes_out<T: Borrow<Tensor>>(
&self,
out: &[T],
split_sizes: impl IntList,
dim: i64,
) {
self.f_unsafe_split_with_sizes_out(out, split_sizes, dim).unwrap()
}
pub fn unsqueeze(&self, dim: i64) -> Tensor {
self.f_unsqueeze(dim).unwrap()
}
pub fn unsqueeze_(&mut self, dim: i64) -> Tensor {
self.f_unsqueeze_(dim).unwrap()
}
pub fn unsqueeze_copy(&self, dim: i64) -> Tensor {
self.f_unsqueeze_copy(dim).unwrap()
}
pub fn unsqueeze_copy_out(&self, out: &Tensor, dim: i64) -> Tensor {
self.f_unsqueeze_copy_out(out, dim).unwrap()
}
pub fn upsample_bicubic2d(
&self,
output_size: impl IntList,
align_corners: bool,
scales_h: impl Into<Option<f64>>,
scales_w: impl Into<Option<f64>>,
) -> Tensor {
self.f_upsample_bicubic2d(output_size, align_corners, scales_h, scales_w).unwrap()
}
pub fn upsample_bicubic2d_backward(
grad_output: &Tensor,
output_size: impl IntList,
input_size: impl IntList,
align_corners: bool,
scales_h: impl Into<Option<f64>>,
scales_w: impl Into<Option<f64>>,
) -> Tensor {
Tensor::f_upsample_bicubic2d_backward(
grad_output,
output_size,
input_size,
align_corners,
scales_h,
scales_w,
)
.unwrap()
}
pub fn upsample_bicubic2d_backward_grad_input(
grad_input: &Tensor,
grad_output: &Tensor,
output_size: impl IntList,
input_size: impl IntList,
align_corners: bool,
scales_h: impl Into<Option<f64>>,
scales_w: impl Into<Option<f64>>,
) -> Tensor {
Tensor::f_upsample_bicubic2d_backward_grad_input(
grad_input,
grad_output,
output_size,
input_size,
align_corners,
scales_h,
scales_w,
)
.unwrap()
}
pub fn upsample_bicubic2d_out(
&self,
out: &Tensor,
output_size: impl IntList,
align_corners: bool,
scales_h: impl Into<Option<f64>>,
scales_w: impl Into<Option<f64>>,
) -> Tensor {
self.f_upsample_bicubic2d_out(out, output_size, align_corners, scales_h, scales_w).unwrap()
}
pub fn upsample_bicubic2d_vec(
&self,
output_size: impl IntListOption,
align_corners: bool,
scale_factors: impl DoubleList,
) -> Tensor {
self.f_upsample_bicubic2d_vec(output_size, align_corners, scale_factors).unwrap()
}
pub fn upsample_bilinear2d(
&self,
output_size: impl IntList,
align_corners: bool,
scales_h: impl Into<Option<f64>>,
scales_w: impl Into<Option<f64>>,
) -> Tensor {
self.f_upsample_bilinear2d(output_size, align_corners, scales_h, scales_w).unwrap()
}
pub fn upsample_bilinear2d_backward(
grad_output: &Tensor,
output_size: impl IntList,
input_size: impl IntList,
align_corners: bool,
scales_h: impl Into<Option<f64>>,
scales_w: impl Into<Option<f64>>,
) -> Tensor {
Tensor::f_upsample_bilinear2d_backward(
grad_output,
output_size,
input_size,
align_corners,
scales_h,
scales_w,
)
.unwrap()
}
pub fn upsample_bilinear2d_backward_grad_input(
grad_input: &Tensor,
grad_output: &Tensor,
output_size: impl IntList,
input_size: impl IntList,
align_corners: bool,
scales_h: impl Into<Option<f64>>,
scales_w: impl Into<Option<f64>>,
) -> Tensor {
Tensor::f_upsample_bilinear2d_backward_grad_input(
grad_input,
grad_output,
output_size,
input_size,
align_corners,
scales_h,
scales_w,
)
.unwrap()
}
pub fn upsample_bilinear2d_out(
&self,
out: &Tensor,
output_size: impl IntList,
align_corners: bool,
scales_h: impl Into<Option<f64>>,
scales_w: impl Into<Option<f64>>,
) -> Tensor {
self.f_upsample_bilinear2d_out(out, output_size, align_corners, scales_h, scales_w).unwrap()
}
pub fn upsample_bilinear2d_vec(
&self,
output_size: impl IntListOption,
align_corners: bool,
scale_factors: impl DoubleList,
) -> Tensor {
self.f_upsample_bilinear2d_vec(output_size, align_corners, scale_factors).unwrap()
}
pub fn upsample_linear1d(
&self,
output_size: impl IntList,
align_corners: bool,
scales: impl Into<Option<f64>>,
) -> Tensor {
self.f_upsample_linear1d(output_size, align_corners, scales).unwrap()
}
pub fn upsample_linear1d_backward(
grad_output: &Tensor,
output_size: impl IntList,
input_size: impl IntList,
align_corners: bool,
scales: impl Into<Option<f64>>,
) -> Tensor {
Tensor::f_upsample_linear1d_backward(
grad_output,
output_size,
input_size,
align_corners,
scales,
)
.unwrap()
}
pub fn upsample_linear1d_backward_grad_input(
grad_input: &Tensor,
grad_output: &Tensor,
output_size: impl IntList,
input_size: impl IntList,
align_corners: bool,
scales: impl Into<Option<f64>>,
) -> Tensor {
Tensor::f_upsample_linear1d_backward_grad_input(
grad_input,
grad_output,
output_size,
input_size,
align_corners,
scales,
)
.unwrap()
}
pub fn upsample_linear1d_out(
&self,
out: &Tensor,
output_size: impl IntList,
align_corners: bool,
scales: impl Into<Option<f64>>,
) -> Tensor {
self.f_upsample_linear1d_out(out, output_size, align_corners, scales).unwrap()
}
pub fn upsample_linear1d_vec(
&self,
output_size: impl IntListOption,
align_corners: bool,
scale_factors: impl DoubleList,
) -> Tensor {
self.f_upsample_linear1d_vec(output_size, align_corners, scale_factors).unwrap()
}
pub fn upsample_nearest1d(
&self,
output_size: impl IntList,
scales: impl Into<Option<f64>>,
) -> Tensor {
self.f_upsample_nearest1d(output_size, scales).unwrap()
}
pub fn upsample_nearest1d_backward(
grad_output: &Tensor,
output_size: impl IntList,
input_size: impl IntList,
scales: impl Into<Option<f64>>,
) -> Tensor {
Tensor::f_upsample_nearest1d_backward(grad_output, output_size, input_size, scales).unwrap()
}
pub fn upsample_nearest1d_backward_grad_input(
grad_input: &Tensor,
grad_output: &Tensor,
output_size: impl IntList,
input_size: impl IntList,
scales: impl Into<Option<f64>>,
) -> Tensor {
Tensor::f_upsample_nearest1d_backward_grad_input(
grad_input,
grad_output,
output_size,
input_size,
scales,
)
.unwrap()
}
pub fn upsample_nearest1d_out(
&self,
out: &Tensor,
output_size: impl IntList,
scales: impl Into<Option<f64>>,
) -> Tensor {
self.f_upsample_nearest1d_out(out, output_size, scales).unwrap()
}
pub fn upsample_nearest1d_vec(
&self,
output_size: impl IntListOption,
scale_factors: impl DoubleList,
) -> Tensor {
self.f_upsample_nearest1d_vec(output_size, scale_factors).unwrap()
}
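/// The `upsample_*` family comes in two flavours: the plain form takes an
/// explicit `output_size`, while the `*_vec` form accepts an optional size plus
/// per-dimension `scale_factors`, mirroring `torch.nn.functional.interpolate`.
///
/// Sketch only (not compiled here); assumes an `[N, C, 32, 32]` tensor `img`
/// and that an `&[i64]` slice is accepted for the output size:
///
/// ```ignore
/// // Double the spatial resolution with nearest-neighbour interpolation.
/// let up = img.upsample_nearest2d(&[64i64, 64][..], None::<f64>, None::<f64>);
/// ```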
pub fn upsample_nearest2d(
&self,
output_size: impl IntList,
scales_h: impl Into<Option<f64>>,
scales_w: impl Into<Option<f64>>,
) -> Tensor {
self.f_upsample_nearest2d(output_size, scales_h, scales_w).unwrap()
}
pub fn upsample_nearest2d_backward(
grad_output: &Tensor,
output_size: impl IntList,
input_size: impl IntList,
scales_h: impl Into<Option<f64>>,
scales_w: impl Into<Option<f64>>,
) -> Tensor {
Tensor::f_upsample_nearest2d_backward(
grad_output,
output_size,
input_size,
scales_h,
scales_w,
)
.unwrap()
}
pub fn upsample_nearest2d_backward_grad_input(
grad_input: &Tensor,
grad_output: &Tensor,
output_size: impl IntList,
input_size: impl IntList,
scales_h: impl Into<Option<f64>>,
scales_w: impl Into<Option<f64>>,
) -> Tensor {
Tensor::f_upsample_nearest2d_backward_grad_input(
grad_input,
grad_output,
output_size,
input_size,
scales_h,
scales_w,
)
.unwrap()
}
pub fn upsample_nearest2d_out(
&self,
out: &Tensor,
output_size: impl IntList,
scales_h: impl Into<Option<f64>>,
scales_w: impl Into<Option<f64>>,
) -> Tensor {
self.f_upsample_nearest2d_out(out, output_size, scales_h, scales_w).unwrap()
}
pub fn upsample_nearest2d_vec(
&self,
output_size: impl IntListOption,
scale_factors: impl DoubleList,
) -> Tensor {
self.f_upsample_nearest2d_vec(output_size, scale_factors).unwrap()
}
pub fn upsample_nearest3d(
&self,
output_size: impl IntList,
scales_d: impl Into<Option<f64>>,
scales_h: impl Into<Option<f64>>,
scales_w: impl Into<Option<f64>>,
) -> Tensor {
self.f_upsample_nearest3d(output_size, scales_d, scales_h, scales_w).unwrap()
}
pub fn upsample_nearest3d_backward(
grad_output: &Tensor,
output_size: impl IntList,
input_size: impl IntList,
scales_d: impl Into<Option<f64>>,
scales_h: impl Into<Option<f64>>,
scales_w: impl Into<Option<f64>>,
) -> Tensor {
Tensor::f_upsample_nearest3d_backward(
grad_output,
output_size,
input_size,
scales_d,
scales_h,
scales_w,
)
.unwrap()
}
pub fn upsample_nearest3d_backward_grad_input(
grad_input: &Tensor,
grad_output: &Tensor,
output_size: impl IntList,
input_size: impl IntList,
scales_d: impl Into<Option<f64>>,
scales_h: impl Into<Option<f64>>,
scales_w: impl Into<Option<f64>>,
) -> Tensor {
Tensor::f_upsample_nearest3d_backward_grad_input(
grad_input,
grad_output,
output_size,
input_size,
scales_d,
scales_h,
scales_w,
)
.unwrap()
}
pub fn upsample_nearest3d_out(
&self,
out: &Tensor,
output_size: impl IntList,
scales_d: impl Into<Option<f64>>,
scales_h: impl Into<Option<f64>>,
scales_w: impl Into<Option<f64>>,
) -> Tensor {
self.f_upsample_nearest3d_out(out, output_size, scales_d, scales_h, scales_w).unwrap()
}
pub fn upsample_nearest3d_vec(
&self,
output_size: impl IntListOption,
scale_factors: impl DoubleList,
) -> Tensor {
self.f_upsample_nearest3d_vec(output_size, scale_factors).unwrap()
}
pub fn upsample_trilinear3d(
&self,
output_size: impl IntList,
align_corners: bool,
scales_d: impl Into<Option<f64>>,
scales_h: impl Into<Option<f64>>,
scales_w: impl Into<Option<f64>>,
) -> Tensor {
self.f_upsample_trilinear3d(output_size, align_corners, scales_d, scales_h, scales_w)
.unwrap()
}
pub fn upsample_trilinear3d_backward(
grad_output: &Tensor,
output_size: impl IntList,
input_size: impl IntList,
align_corners: bool,
scales_d: impl Into<Option<f64>>,
scales_h: impl Into<Option<f64>>,
scales_w: impl Into<Option<f64>>,
) -> Tensor {
Tensor::f_upsample_trilinear3d_backward(
grad_output,
output_size,
input_size,
align_corners,
scales_d,
scales_h,
scales_w,
)
.unwrap()
}
pub fn upsample_trilinear3d_backward_grad_input(
grad_input: &Tensor,
grad_output: &Tensor,
output_size: impl IntList,
input_size: impl IntList,
align_corners: bool,
scales_d: impl Into<Option<f64>>,
scales_h: impl Into<Option<f64>>,
scales_w: impl Into<Option<f64>>,
) -> Tensor {
Tensor::f_upsample_trilinear3d_backward_grad_input(
grad_input,
grad_output,
output_size,
input_size,
align_corners,
scales_d,
scales_h,
scales_w,
)
.unwrap()
}
pub fn upsample_trilinear3d_out(
&self,
out: &Tensor,
output_size: impl IntList,
align_corners: bool,
scales_d: impl Into<Option<f64>>,
scales_h: impl Into<Option<f64>>,
scales_w: impl Into<Option<f64>>,
) -> Tensor {
self.f_upsample_trilinear3d_out(
out,
output_size,
align_corners,
scales_d,
scales_h,
scales_w,
)
.unwrap()
}
pub fn upsample_trilinear3d_vec(
&self,
output_size: impl IntListOption,
align_corners: bool,
scale_factors: impl DoubleList,
) -> Tensor {
self.f_upsample_trilinear3d_vec(output_size, align_corners, scale_factors).unwrap()
}
pub fn value_selecting_reduction_backward(
grad: &Tensor,
dim: i64,
indices: &Tensor,
sizes: impl IntList,
keepdim: bool,
) -> Tensor {
Tensor::f_value_selecting_reduction_backward(grad, dim, indices, sizes, keepdim).unwrap()
}
pub fn values(&self) -> Tensor {
self.f_values().unwrap()
}
pub fn values_copy(&self) -> Tensor {
self.f_values_copy().unwrap()
}
pub fn values_copy_out(&self, out: &Tensor) -> Tensor {
self.f_values_copy_out(out).unwrap()
}
pub fn vander(x: &Tensor, n: impl Into<Option<i64>>, increasing: bool) -> Tensor {
Tensor::f_vander(x, n, increasing).unwrap()
}
pub fn var(&self, unbiased: bool) -> Tensor {
self.f_var(unbiased).unwrap()
}
pub fn var_correction<S: Into<Scalar>>(
&self,
dim: impl IntListOption,
correction: S,
keepdim: bool,
) -> Tensor {
self.f_var_correction(dim, correction, keepdim).unwrap()
}
pub fn var_correction_out<S: Into<Scalar>>(
&self,
out: &Tensor,
dim: impl IntListOption,
correction: S,
keepdim: bool,
) -> Tensor {
self.f_var_correction_out(out, dim, correction, keepdim).unwrap()
}
pub fn var_dim(&self, dim: impl IntListOption, unbiased: bool, keepdim: bool) -> Tensor {
self.f_var_dim(dim, unbiased, keepdim).unwrap()
}
pub fn var_mean(&self, unbiased: bool) -> (Tensor, Tensor) {
self.f_var_mean(unbiased).unwrap()
}
pub fn var_mean_correction<S: Into<Scalar>>(
&self,
dim: impl IntListOption,
correction: S,
keepdim: bool,
) -> (Tensor, Tensor) {
self.f_var_mean_correction(dim, correction, keepdim).unwrap()
}
pub fn var_mean_correction_out<S: Into<Scalar>>(
&self,
out0: &Tensor,
out1: &Tensor,
dim: impl IntListOption,
correction: S,
keepdim: bool,
) -> (Tensor, Tensor) {
self.f_var_mean_correction_out(out0, out1, dim, correction, keepdim).unwrap()
}
pub fn var_mean_dim(
&self,
dim: impl IntListOption,
unbiased: bool,
keepdim: bool,
) -> (Tensor, Tensor) {
self.f_var_mean_dim(dim, unbiased, keepdim).unwrap()
}
pub fn var_out(
&self,
out: &Tensor,
dim: impl IntListOption,
unbiased: bool,
keepdim: bool,
) -> Tensor {
self.f_var_out(out, dim, unbiased, keepdim).unwrap()
}
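    /// Dot product of two 1-D tensors; for complex inputs the elements of
    /// `self` are conjugated first (the `torch.vdot` convention).
    ///
    /// ```no_run
    /// use tch::{Device, Kind, Tensor};
    /// let a = Tensor::rand(&[4], (Kind::Float, Device::Cpu));
    /// let b = Tensor::rand(&[4], (Kind::Float, Device::Cpu));
    /// let d = a.vdot(&b); // 0-dimensional result
    /// ```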
pub fn vdot(&self, other: &Tensor) -> Tensor {
self.f_vdot(other).unwrap()
}
pub fn vdot_out(&self, out: &Tensor, other: &Tensor) -> Tensor {
self.f_vdot_out(out, other).unwrap()
}
pub fn view_(&self, size: impl IntList) -> Tensor {
self.f_view_(size).unwrap()
}
pub fn view_as(&self, other: &Tensor) -> Tensor {
self.f_view_as(other).unwrap()
}
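    /// Reinterprets a real tensor whose last dimension has size 2 as a complex
    /// tensor of the corresponding complex dtype, without copying data; the
    /// `_copy` variants below materialise the result instead of aliasing it.
    ///
    /// ```no_run
    /// use tch::{Device, Kind, Tensor};
    /// // The trailing dimension of size 2 holds (real, imaginary) pairs.
    /// let t = Tensor::rand(&[3, 2], (Kind::Float, Device::Cpu));
    /// let c = t.view_as_complex(); // shape [3], complex values
    /// ```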
pub fn view_as_complex(&self) -> Tensor {
self.f_view_as_complex().unwrap()
}
pub fn view_as_complex_copy(&self) -> Tensor {
self.f_view_as_complex_copy().unwrap()
}
pub fn view_as_complex_copy_out(&self, out: &Tensor) -> Tensor {
self.f_view_as_complex_copy_out(out).unwrap()
}
pub fn view_as_real(&self) -> Tensor {
self.f_view_as_real().unwrap()
}
pub fn view_as_real_copy(&self) -> Tensor {
self.f_view_as_real_copy().unwrap()
}
pub fn view_as_real_copy_out(&self, out: &Tensor) -> Tensor {
self.f_view_as_real_copy_out(out).unwrap()
}
pub fn view_copy(&self, size: impl IntList) -> Tensor {
self.f_view_copy(size).unwrap()
}
pub fn view_copy_dtype(&self, dtype: Kind) -> Tensor {
self.f_view_copy_dtype(dtype).unwrap()
}
pub fn view_copy_dtype_out(&self, out: &Tensor, dtype: Kind) -> Tensor {
self.f_view_copy_dtype_out(out, dtype).unwrap()
}
pub fn view_copy_out(&self, out: &Tensor, size: impl IntList) -> Tensor {
self.f_view_copy_out(out, size).unwrap()
}
pub fn view_dtype(&self, dtype: Kind) -> Tensor {
self.f_view_dtype(dtype).unwrap()
}
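    /// Splits the tensor vertically (along dimension 0) into `sections`
    /// equally sized chunks; `vsplit_array` takes explicit split indices
    /// instead. The input must have at least two dimensions.
    ///
    /// ```no_run
    /// use tch::{Device, Kind, Tensor};
    /// let t = Tensor::rand(&[6, 4], (Kind::Float, Device::Cpu));
    /// let parts = t.vsplit(3); // three chunks of shape [2, 4]
    /// assert_eq!(parts.len(), 3);
    /// ```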
pub fn vsplit(&self, sections: i64) -> Vec<Tensor> {
self.f_vsplit(sections).unwrap()
}
pub fn vsplit_array(&self, indices: impl IntList) -> Vec<Tensor> {
self.f_vsplit_array(indices).unwrap()
}
pub fn vstack<T: Borrow<Tensor>>(tensors: &[T]) -> Tensor {
Tensor::f_vstack(tensors).unwrap()
}
pub fn vstack_out<T: Borrow<Tensor>>(out: &Tensor, tensors: &[T]) -> Tensor {
Tensor::f_vstack_out(out, tensors).unwrap()
}
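    /// The trailing underscore here works around the Rust `where` keyword
    /// rather than marking an in-place operation. Like `torch.where(condition)`,
    /// the call returns one index tensor per dimension of `condition`, listing
    /// the coordinates of its non-zero elements.
    ///
    /// ```no_run
    /// use tch::{Device, Kind, Tensor};
    /// let mask = Tensor::rand(&[2, 3], (Kind::Float, Device::Cpu)).gt(0.5);
    /// let coords = Tensor::where_(&mask);
    /// assert_eq!(coords.len(), 2); // one coordinate tensor per dimension
    /// ```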
pub fn where_(condition: &Tensor) -> Vec<Tensor> {
Tensor::f_where_(condition).unwrap()
}
pub fn where_scalar<S: Into<Scalar>>(condition: &Tensor, self_scalar: S, other: S) -> Tensor {
Tensor::f_where_scalar(condition, self_scalar, other).unwrap()
}
pub fn where_scalarother<S: Into<Scalar>>(&self, condition: &Tensor, other: S) -> Tensor {
self.f_where_scalarother(condition, other).unwrap()
}
pub fn where_scalarself<S: Into<Scalar>>(
condition: &Tensor,
self_scalar: S,
other: &Tensor,
) -> Tensor {
Tensor::f_where_scalarself(condition, self_scalar, other).unwrap()
}
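    /// Element-wise select: the result takes values from `self` where
    /// `condition` is true and from `other` elsewhere, i.e.
    /// `torch.where(condition, self, other)`, broadcasting the three tensors
    /// as needed.
    ///
    /// ```no_run
    /// use tch::{Device, Kind, Tensor};
    /// let a = Tensor::rand(&[4], (Kind::Float, Device::Cpu));
    /// let b = Tensor::zeros(&[4], (Kind::Float, Device::Cpu));
    /// let picked = a.where_self(&a.gt(0.5), &b); // a where a > 0.5, else 0
    /// ```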
pub fn where_self(&self, condition: &Tensor, other: &Tensor) -> Tensor {
self.f_where_self(condition, other).unwrap()
}
pub fn where_self_out(&self, out: &Tensor, condition: &Tensor, other: &Tensor) -> Tensor {
self.f_where_self_out(out, condition, other).unwrap()
}
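    /// Element-wise `self * ln(other)`, following the `torch.xlogy` convention
    /// that `0 * ln(0)` is treated as `0`. `xlogy_` is the in-place form, and
    /// the `scalar_self` / `scalar_other` variants accept a scalar on either
    /// side of the product.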
pub fn xlogy(&self, other: &Tensor) -> Tensor {
self.f_xlogy(other).unwrap()
}
pub fn xlogy_(&mut self, other: &Tensor) -> Tensor {
self.f_xlogy_(other).unwrap()
}
pub fn xlogy_outscalar_other<S: Into<Scalar>>(&self, out: &Tensor, other: S) -> Tensor {
self.f_xlogy_outscalar_other(out, other).unwrap()
}
pub fn xlogy_outscalar_self<S: Into<Scalar>>(
out: &Tensor,
self_scalar: S,
other: &Tensor,
) -> Tensor {
Tensor::f_xlogy_outscalar_self(out, self_scalar, other).unwrap()
}
pub fn xlogy_outtensor(&self, out: &Tensor, other: &Tensor) -> Tensor {
self.f_xlogy_outtensor(out, other).unwrap()
}
pub fn xlogy_scalar_other<S: Into<Scalar>>(&self, other: S) -> Tensor {
self.f_xlogy_scalar_other(other).unwrap()
}
pub fn xlogy_scalar_other_<S: Into<Scalar>>(&mut self, other: S) -> Tensor {
self.f_xlogy_scalar_other_(other).unwrap()
}
pub fn xlogy_scalar_self<S: Into<Scalar>>(self_scalar: S, other: &Tensor) -> Tensor {
Tensor::f_xlogy_scalar_self(self_scalar, other).unwrap()
}
pub fn zero(&self) -> Tensor {
self.f_zero().unwrap()
}
pub fn zero_(&mut self) -> Tensor {
self.f_zero_().unwrap()
}
pub fn zero_out(&self, out: &Tensor) -> Tensor {
self.f_zero_out(out).unwrap()
}
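    /// Allocates a tensor of the given shape filled with zeros, using the
    /// requested kind and device; `zeros_like` above copies the shape and
    /// options of an existing tensor, and `zero_` clears one in place.
    ///
    /// ```no_run
    /// use tch::{Device, Kind, Tensor};
    /// let z = Tensor::zeros(&[2, 3], (Kind::Double, Device::Cpu));
    /// assert_eq!(z.size(), vec![2, 3]);
    /// ```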
pub fn zeros(size: impl IntList, options: (Kind, Device)) -> Tensor {
Tensor::f_zeros(size, options).unwrap()
}
pub fn zeros_like(&self) -> Tensor {
self.f_zeros_like().unwrap()
}
pub fn zeros_like_out(&self, out: &Tensor) -> Tensor {
self.f_zeros_like_out(out).unwrap()
}
pub fn zeros_out(out: &Tensor, size: impl IntList) -> Tensor {
Tensor::f_zeros_out(out, size).unwrap()
}
}