#![allow(clippy::all)]
use crate::{Device, Kind, Layout, Scalar, TchError, Tensor};
use std::borrow::Borrow;
use std::convert::Into;
use torch_sys::c_generated::*;
use torch_sys::*;
/// Converts a slice of optional tensor borrows into raw C tensor pointers,
/// substituting a null pointer for every `None` entry.
fn ptr_list_opt<T: Borrow<Tensor>>(l: &[Option<T>]) -> Vec<*mut C_tensor> {
    let mut ptrs = Vec::with_capacity(l.len());
    for item in l {
        let ptr = match item {
            Some(t) => t.borrow().c_tensor,
            None => std::ptr::null_mut(),
        };
        ptrs.push(ptr);
    }
    ptrs
}
/// Collects the raw C tensor pointer of every tensor borrow in the slice.
fn ptr_list<T: Borrow<Tensor>>(l: &[T]) -> Vec<*mut C_tensor> {
    let mut ptrs = Vec::with_capacity(l.len());
    for item in l {
        ptrs.push(item.borrow().c_tensor);
    }
    ptrs
}
impl Tensor {
// NOTE(review): generated-style FFI wrappers. Each method forwards its arguments,
// in order, to one `atg_*` C function; `unsafe_torch_err!` converts any C++ exception
// into a `TchError`, and the first pointer written into `c_tensors` becomes the
// returned `Tensor`. The `&mut self` receivers on the `_`-suffixed variants mirror
// the presumably in-place semantics of the underlying ops — the C side decides.
/// Bitwise-and of `self` with a scalar via `atg___and__`.
pub fn f_internal_and_<S: Into<Scalar>>(&mut self, other: S) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg___and__(
c_tensors.as_mut_ptr(),
self.c_tensor,
other.into().c_scalar
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Bitwise-and of `self` with another tensor via `atg___and__tensor_`.
pub fn f_internal_and_tensor_(&mut self, other: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg___and__tensor_(
c_tensors.as_mut_ptr(),
self.c_tensor,
other.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// In-place-style bitwise-and with a scalar via `atg___iand__`.
pub fn f_internal_iand_<S: Into<Scalar>>(&mut self, other: S) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg___iand__(
c_tensors.as_mut_ptr(),
self.c_tensor,
other.into().c_scalar
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// In-place-style bitwise-and with a tensor via `atg___iand__tensor_`.
pub fn f_internal_iand_tensor_(&mut self, other: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg___iand__tensor_(
c_tensors.as_mut_ptr(),
self.c_tensor,
other.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// In-place-style left shift by a scalar via `atg___ilshift__`.
pub fn f_internal_ilshift_<S: Into<Scalar>>(&mut self, other: S) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg___ilshift__(
c_tensors.as_mut_ptr(),
self.c_tensor,
other.into().c_scalar
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// In-place-style left shift by a tensor via `atg___ilshift__tensor_`.
pub fn f_internal_ilshift_tensor_(&mut self, other: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg___ilshift__tensor_(
c_tensors.as_mut_ptr(),
self.c_tensor,
other.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// In-place-style bitwise-or with a scalar via `atg___ior__`.
pub fn f_internal_ior_<S: Into<Scalar>>(&mut self, other: S) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg___ior__(
c_tensors.as_mut_ptr(),
self.c_tensor,
other.into().c_scalar
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// In-place-style bitwise-or with a tensor via `atg___ior__tensor_`.
pub fn f_internal_ior_tensor_(&mut self, other: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg___ior__tensor_(
c_tensors.as_mut_ptr(),
self.c_tensor,
other.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// In-place-style right shift by a scalar via `atg___irshift__`.
pub fn f_internal_irshift_<S: Into<Scalar>>(&mut self, other: S) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg___irshift__(
c_tensors.as_mut_ptr(),
self.c_tensor,
other.into().c_scalar
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// In-place-style right shift by a tensor via `atg___irshift__tensor_`.
pub fn f_internal_irshift_tensor_(&mut self, other: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg___irshift__tensor_(
c_tensors.as_mut_ptr(),
self.c_tensor,
other.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// In-place-style bitwise-xor with a scalar via `atg___ixor__`.
pub fn f_internal_ixor_<S: Into<Scalar>>(&mut self, other: S) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg___ixor__(
c_tensors.as_mut_ptr(),
self.c_tensor,
other.into().c_scalar
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// In-place-style bitwise-xor with a tensor via `atg___ixor__tensor_`.
pub fn f_internal_ixor_tensor_(&mut self, other: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg___ixor__tensor_(
c_tensors.as_mut_ptr(),
self.c_tensor,
other.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Left shift of `self` by a scalar via `atg___lshift__`.
pub fn f_internal_lshift_<S: Into<Scalar>>(&mut self, other: S) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg___lshift__(
c_tensors.as_mut_ptr(),
self.c_tensor,
other.into().c_scalar
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Out-variant of scalar left shift: result is written into `out`
/// (passed first to the C call) via `atg___lshift__scalar_out_`.
pub fn f_internal_lshift_scalar_out_<S: Into<Scalar>>(
&self,
out: &Tensor,
other: S,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg___lshift__scalar_out_(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
other.into().c_scalar
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Left shift of `self` by a tensor via `atg___lshift__tensor_`.
pub fn f_internal_lshift_tensor_(&mut self, other: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg___lshift__tensor_(
c_tensors.as_mut_ptr(),
self.c_tensor,
other.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Out-variant of tensor left shift via `atg___lshift__tensor_out_`.
pub fn f_internal_lshift_tensor_out_(
&self,
out: &Tensor,
other: &Tensor,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg___lshift__tensor_out_(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
other.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Bitwise-or of `self` with a scalar via `atg___or__`.
pub fn f_internal_or_<S: Into<Scalar>>(&mut self, other: S) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg___or__(c_tensors.as_mut_ptr(), self.c_tensor, other.into().c_scalar));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Bitwise-or of `self` with a tensor via `atg___or__tensor_`.
pub fn f_internal_or_tensor_(&mut self, other: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg___or__tensor_(c_tensors.as_mut_ptr(), self.c_tensor, other.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Right shift of `self` by a scalar via `atg___rshift__`.
pub fn f_internal_rshift_<S: Into<Scalar>>(&mut self, other: S) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg___rshift__(
c_tensors.as_mut_ptr(),
self.c_tensor,
other.into().c_scalar
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Out-variant of scalar right shift via `atg___rshift__scalar_out_`.
pub fn f_internal_rshift_scalar_out_<S: Into<Scalar>>(
&self,
out: &Tensor,
other: S,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg___rshift__scalar_out_(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
other.into().c_scalar
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Right shift of `self` by a tensor via `atg___rshift__tensor_`.
pub fn f_internal_rshift_tensor_(&mut self, other: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg___rshift__tensor_(
c_tensors.as_mut_ptr(),
self.c_tensor,
other.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Out-variant of tensor right shift via `atg___rshift__tensor_out_`.
pub fn f_internal_rshift_tensor_out_(
&self,
out: &Tensor,
other: &Tensor,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg___rshift__tensor_out_(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
other.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Bitwise-xor of `self` with a scalar via `atg___xor__`.
pub fn f_internal_xor_<S: Into<Scalar>>(&mut self, other: S) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg___xor__(
c_tensors.as_mut_ptr(),
self.c_tensor,
other.into().c_scalar
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Bitwise-xor of `self` with a tensor via `atg___xor__tensor_`.
pub fn f_internal_xor_tensor_(&mut self, other: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg___xor__tensor_(
c_tensors.as_mut_ptr(),
self.c_tensor,
other.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// 2-D adaptive average pooling of `self` to `output_size`
/// (passed to C as a pointer + i32 length pair) via `atg__adaptive_avg_pool2d`.
pub fn f_internal_adaptive_avg_pool2d(
&self,
output_size: impl IntList,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__adaptive_avg_pool2d(
c_tensors.as_mut_ptr(),
self.c_tensor,
output_size.as_ptr(),
output_size.len_i32()
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Backward pass for 2-D adaptive average pooling; note the C call takes
/// `grad_output` before `self` (the forward input).
pub fn f_internal_adaptive_avg_pool2d_backward(
&self,
grad_output: &Tensor,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__adaptive_avg_pool2d_backward(
c_tensors.as_mut_ptr(),
grad_output.c_tensor,
self.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Out-variant of the 2-D adaptive average pooling backward pass.
pub fn f_internal_adaptive_avg_pool2d_backward_out(
&self,
out: &Tensor,
grad_output: &Tensor,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__adaptive_avg_pool2d_backward_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
grad_output.c_tensor,
self.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Out-variant of 2-D adaptive average pooling.
pub fn f_internal_adaptive_avg_pool2d_out(
&self,
out: &Tensor,
output_size: impl IntList,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__adaptive_avg_pool2d_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
output_size.as_ptr(),
output_size.len_i32()
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// 3-D adaptive average pooling of `self` to `output_size`.
pub fn f_internal_adaptive_avg_pool3d(
&self,
output_size: impl IntList,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__adaptive_avg_pool3d(
c_tensors.as_mut_ptr(),
self.c_tensor,
output_size.as_ptr(),
output_size.len_i32()
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Backward pass for 3-D adaptive average pooling.
pub fn f_internal_adaptive_avg_pool3d_backward(
&self,
grad_output: &Tensor,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__adaptive_avg_pool3d_backward(
c_tensors.as_mut_ptr(),
grad_output.c_tensor,
self.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Out-variant of the 3-D adaptive average pooling backward pass.
pub fn f_internal_adaptive_avg_pool3d_backward_out(
&self,
out: &Tensor,
grad_output: &Tensor,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__adaptive_avg_pool3d_backward_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
grad_output.c_tensor,
self.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Out-variant of 3-D adaptive average pooling.
pub fn f_internal_adaptive_avg_pool3d_out(
&self,
out: &Tensor,
output_size: impl IntList,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__adaptive_avg_pool3d_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
output_size.as_ptr(),
output_size.len_i32()
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Inserts a batch dimension at `batch_dim` with vmap `level` via `atg__add_batch_dim`.
pub fn f_internal_add_batch_dim(&self, batch_dim: i64, level: i64) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__add_batch_dim(
c_tensors.as_mut_ptr(),
self.c_tensor,
batch_dim,
level
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fused add-then-relu of `self` and `other` via `atg__add_relu`.
pub fn f_internal_add_relu(&self, other: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__add_relu(c_tensors.as_mut_ptr(), self.c_tensor, other.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// In-place-style fused add-then-relu via `atg__add_relu_`.
pub fn f_internal_add_relu_(&mut self, other: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__add_relu_(c_tensors.as_mut_ptr(), self.c_tensor, other.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Out-variant of fused add-then-relu.
pub fn f_internal_add_relu_out(
&self,
out: &Tensor,
other: &Tensor,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__add_relu_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
other.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fused add-then-relu with a scalar addend.
pub fn f_internal_add_relu_scalar<S: Into<Scalar>>(
&self,
other: S,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__add_relu_scalar(
c_tensors.as_mut_ptr(),
self.c_tensor,
other.into().c_scalar
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// In-place-style fused add-then-relu with a scalar addend.
pub fn f_internal_add_relu_scalar_<S: Into<Scalar>>(
&mut self,
other: S,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__add_relu_scalar_(
c_tensors.as_mut_ptr(),
self.c_tensor,
other.into().c_scalar
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Out-variant of fused add-then-relu with a scalar addend.
pub fn f_internal_add_relu_scalar_out<S: Into<Scalar>>(
&self,
out: &Tensor,
other: S,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__add_relu_scalar_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
other.into().c_scalar
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fused addmm + activation: `use_gelu` selects GELU (1) over the default
/// activation (0); the bool is lowered to a C int.
pub fn f_internal_addmm_activation(
&self,
mat1: &Tensor,
mat2: &Tensor,
use_gelu: bool,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__addmm_activation(
c_tensors.as_mut_ptr(),
self.c_tensor,
mat1.c_tensor,
mat2.c_tensor,
if use_gelu { 1 } else { 0 }
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Out-variant of fused addmm + activation.
pub fn f_internal_addmm_activation_out(
&self,
out: &Tensor,
mat1: &Tensor,
mat2: &Tensor,
use_gelu: bool,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__addmm_activation_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
mat1.c_tensor,
mat2.c_tensor,
if use_gelu { 1 } else { 0 }
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Simultaneous min and max of `self`; the C call writes two output tensors,
/// returned here as a `(min, max)`-style pair (order fixed by the C API).
pub fn f_internal_aminmax(&self) -> Result<(Tensor, Tensor), TchError> {
let mut c_tensors = [std::ptr::null_mut(); 2];
unsafe_torch_err!(atg__aminmax(c_tensors.as_mut_ptr(), self.c_tensor));
Ok((Tensor { c_tensor: c_tensors[0] }, Tensor { c_tensor: c_tensors[1] }))
}
/// Min/max along dimension `dim`, optionally keeping the reduced dimension.
pub fn f_internal_aminmax_dim(
&self,
dim: i64,
keepdim: bool,
) -> Result<(Tensor, Tensor), TchError> {
let mut c_tensors = [std::ptr::null_mut(); 2];
unsafe_torch_err!(atg__aminmax_dim(
c_tensors.as_mut_ptr(),
self.c_tensor,
dim,
if keepdim { 1 } else { 0 }
));
Ok((Tensor { c_tensor: c_tensors[0] }, Tensor { c_tensor: c_tensors[1] }))
}
/// Out-variant of dimension-wise min/max writing into `out0`/`out1`.
pub fn f_internal_aminmax_dim_out(
&self,
out0: &Tensor,
out1: &Tensor,
dim: i64,
keepdim: bool,
) -> Result<(Tensor, Tensor), TchError> {
let mut c_tensors = [std::ptr::null_mut(); 2];
unsafe_torch_err!(atg__aminmax_dim_out(
c_tensors.as_mut_ptr(),
out0.c_tensor,
out1.c_tensor,
self.c_tensor,
dim,
if keepdim { 1 } else { 0 }
));
Ok((Tensor { c_tensor: c_tensors[0] }, Tensor { c_tensor: c_tensors[1] }))
}
/// Out-variant of whole-tensor min/max writing into `out0`/`out1`.
pub fn f_internal_aminmax_out(
&self,
out0: &Tensor,
out1: &Tensor,
) -> Result<(Tensor, Tensor), TchError> {
let mut c_tensors = [std::ptr::null_mut(); 2];
unsafe_torch_err!(atg__aminmax_out(
c_tensors.as_mut_ptr(),
out0.c_tensor,
out1.c_tensor,
self.c_tensor
));
Ok((Tensor { c_tensor: c_tensors[0] }, Tensor { c_tensor: c_tensors[1] }))
}
/// AMP (mixed precision) loss-scale update: forwards the scale tensor (`self`),
/// growth tracker, and inf-detection tensor plus the growth/backoff parameters.
pub fn f_internal_amp_update_scale(
&self,
growth_tracker: &Tensor,
found_inf: &Tensor,
scale_growth_factor: f64,
scale_backoff_factor: f64,
growth_interval: i64,
) -> Result<(Tensor, Tensor), TchError> {
let mut c_tensors = [std::ptr::null_mut(); 2];
unsafe_torch_err!(atg__amp_update_scale(
c_tensors.as_mut_ptr(),
self.c_tensor,
growth_tracker.c_tensor,
found_inf.c_tensor,
scale_growth_factor,
scale_backoff_factor,
growth_interval
));
Ok((Tensor { c_tensor: c_tensors[0] }, Tensor { c_tensor: c_tensors[1] }))
}
/// In-place-style AMP loss-scale update; returns a single tensor.
pub fn f_internal_amp_update_scale_(
&mut self,
growth_tracker: &Tensor,
found_inf: &Tensor,
scale_growth_factor: f64,
scale_backoff_factor: f64,
growth_interval: i64,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__amp_update_scale_(
c_tensors.as_mut_ptr(),
self.c_tensor,
growth_tracker.c_tensor,
found_inf.c_tensor,
scale_growth_factor,
scale_backoff_factor,
growth_interval
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Out-variant of the AMP loss-scale update.
pub fn f_internal_amp_update_scale_out(
&self,
out: &Tensor,
growth_tracker: &Tensor,
found_inf: &Tensor,
scale_growth_factor: f64,
scale_backoff_factor: f64,
growth_interval: i64,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__amp_update_scale_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
growth_tracker.c_tensor,
found_inf.c_tensor,
scale_growth_factor,
scale_backoff_factor,
growth_interval
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Associated function asserting tensor metadata on the C side; `size`/`stride`
/// are optional int lists, and a missing `dtype` is encoded as -1. Returns no
/// tensor — only success or a `TchError`.
pub fn f_internal_assert_tensor_metadata(
a: &Tensor,
size: impl IntListOption,
stride: impl IntListOption,
dtype: impl Into<Option<Kind>>,
) -> Result<(), TchError> {
unsafe_torch_err!(atg__assert_tensor_metadata(
a.c_tensor,
size.as_ptr(),
size.len_i32(),
stride.as_ptr(),
stride.len_i32(),
dtype.into().map_or(-1, |s| s.c_int())
));
Ok(())
}
/// Autocast helper: casts `self` back to full precision for the enabled
/// device classes (flags lowered to C ints).
pub fn f_internal_autocast_to_full_precision(
&self,
cuda_enabled: bool,
cpu_enabled: bool,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__autocast_to_full_precision(
c_tensors.as_mut_ptr(),
self.c_tensor,
if cuda_enabled { 1 } else { 0 },
if cpu_enabled { 1 } else { 0 }
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Autocast helper: casts `self` to the reduced-precision dtypes given per
/// device class (`cuda_dtype` / `cpu_dtype` lowered via `Kind::c_int`).
pub fn f_internal_autocast_to_reduced_precision(
&self,
cuda_enabled: bool,
cpu_enabled: bool,
cuda_dtype: Kind,
cpu_dtype: Kind,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__autocast_to_reduced_precision(
c_tensors.as_mut_ptr(),
self.c_tensor,
if cuda_enabled { 1 } else { 0 },
if cpu_enabled { 1 } else { 0 },
cuda_dtype.c_int(),
cpu_dtype.c_int()
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
// Legacy `_cast_*` family: each casts `self` to the named element type,
// with `non_blocking` lowered to a C int.
/// Cast to byte (u8) via `atg__cast_byte`.
pub fn f_internal_cast_byte(&self, non_blocking: bool) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__cast_byte(
c_tensors.as_mut_ptr(),
self.c_tensor,
if non_blocking { 1 } else { 0 }
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Cast to char (i8) via `atg__cast_char`.
pub fn f_internal_cast_char(&self, non_blocking: bool) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__cast_char(
c_tensors.as_mut_ptr(),
self.c_tensor,
if non_blocking { 1 } else { 0 }
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Cast to double (f64) via `atg__cast_double`.
pub fn f_internal_cast_double(&self, non_blocking: bool) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__cast_double(
c_tensors.as_mut_ptr(),
self.c_tensor,
if non_blocking { 1 } else { 0 }
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Cast to float (f32) via `atg__cast_float`.
pub fn f_internal_cast_float(&self, non_blocking: bool) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__cast_float(
c_tensors.as_mut_ptr(),
self.c_tensor,
if non_blocking { 1 } else { 0 }
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Cast to half (f16) via `atg__cast_half`.
pub fn f_internal_cast_half(&self, non_blocking: bool) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__cast_half(
c_tensors.as_mut_ptr(),
self.c_tensor,
if non_blocking { 1 } else { 0 }
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Cast to int (i32) via `atg__cast_int`.
pub fn f_internal_cast_int(&self, non_blocking: bool) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__cast_int(
c_tensors.as_mut_ptr(),
self.c_tensor,
if non_blocking { 1 } else { 0 }
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Cast to long (i64) via `atg__cast_long`.
pub fn f_internal_cast_long(&self, non_blocking: bool) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__cast_long(
c_tensors.as_mut_ptr(),
self.c_tensor,
if non_blocking { 1 } else { 0 }
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Cast to short (i16) via `atg__cast_short`.
pub fn f_internal_cast_short(&self, non_blocking: bool) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__cast_short(
c_tensors.as_mut_ptr(),
self.c_tensor,
if non_blocking { 1 } else { 0 }
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Associated function: backward pass of `cdist` with norm order `p`,
/// given the upstream gradient and the forward `cdist` result.
pub fn f_internal_cdist_backward(
grad: &Tensor,
x1: &Tensor,
x2: &Tensor,
p: f64,
cdist: &Tensor,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__cdist_backward(
c_tensors.as_mut_ptr(),
grad.c_tensor,
x1.c_tensor,
x2.c_tensor,
p,
cdist.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Out-variant of the `cdist` backward pass.
pub fn f_internal_cdist_backward_out(
out: &Tensor,
grad: &Tensor,
x1: &Tensor,
x2: &Tensor,
p: f64,
cdist: &Tensor,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__cdist_backward_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
grad.c_tensor,
x1.c_tensor,
x2.c_tensor,
p,
cdist.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Cholesky solve helper; `upper` selects the upper-triangular factor.
pub fn f_internal_cholesky_solve_helper(
&self,
a: &Tensor,
upper: bool,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__cholesky_solve_helper(
c_tensors.as_mut_ptr(),
self.c_tensor,
a.c_tensor,
if upper { 1 } else { 0 }
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Out-variant of the Cholesky solve helper.
pub fn f_internal_cholesky_solve_helper_out(
&self,
out: &Tensor,
a: &Tensor,
upper: bool,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__cholesky_solve_helper_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
a.c_tensor,
if upper { 1 } else { 0 }
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Coalesces a sparse tensor via `atg__coalesce`.
pub fn f_internal_coalesce(&self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__coalesce(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Out-variant of sparse coalescing.
pub fn f_internal_coalesce_out(&self, out: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__coalesce_out(c_tensors.as_mut_ptr(), out.c_tensor, self.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Sets/returns the coalesced flag via `atg__coalesced`.
pub fn f_internal_coalesced(&self, coalesced: bool) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__coalesced(
c_tensors.as_mut_ptr(),
self.c_tensor,
if coalesced { 1 } else { 0 }
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// In-place-style variant of setting the coalesced flag.
pub fn f_internal_coalesced_(&mut self, coalesced: bool) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__coalesced_(
c_tensors.as_mut_ptr(),
self.c_tensor,
if coalesced { 1 } else { 0 }
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Out-variant of setting the coalesced flag.
pub fn f_internal_coalesced_out(
&self,
out: &Tensor,
coalesced: bool,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__coalesced_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
if coalesced { 1 } else { 0 }
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Linear combination of `self`'s rows weighted by `coefficients`.
pub fn f_internal_compute_linear_combination(
&self,
coefficients: &Tensor,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__compute_linear_combination(
c_tensors.as_mut_ptr(),
self.c_tensor,
coefficients.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Out-variant of the linear combination.
pub fn f_internal_compute_linear_combination_out(
&self,
out: &Tensor,
coefficients: &Tensor,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__compute_linear_combination_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
coefficients.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Lazy conjugate view via `atg__conj`.
pub fn f_internal_conj(&self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__conj(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Materialized copy of the conjugate view via `atg__conj_copy`.
pub fn f_internal_conj_copy(&self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__conj_copy(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Out-variant of the conjugate copy.
pub fn f_internal_conj_copy_out(&self, out: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__conj_copy_out(c_tensors.as_mut_ptr(), out.c_tensor, self.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Physical (eager) conjugation via `atg__conj_physical`.
pub fn f_internal_conj_physical(&self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__conj_physical(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Out-variant of physical conjugation.
pub fn f_internal_conj_physical_out(&self, out: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__conj_physical_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Depthwise 2-D convolution; an absent `bias` is passed to C as a null
/// tensor pointer, and each int list is passed as pointer + i32 length.
pub fn f_internal_conv_depthwise2d<T: Borrow<Tensor>>(
&self,
weight: &Tensor,
kernel_size: impl IntList,
bias: Option<T>,
stride: impl IntList,
padding: impl IntList,
dilation: impl IntList,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__conv_depthwise2d(
c_tensors.as_mut_ptr(),
self.c_tensor,
weight.c_tensor,
kernel_size.as_ptr(),
kernel_size.len_i32(),
bias.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
stride.as_ptr(),
stride.len_i32(),
padding.as_ptr(),
padding.len_i32(),
dilation.as_ptr(),
dilation.len_i32()
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Out-variant of the depthwise 2-D convolution.
pub fn f_internal_conv_depthwise2d_out<T: Borrow<Tensor>>(
&self,
out: &Tensor,
weight: &Tensor,
kernel_size: impl IntList,
bias: Option<T>,
stride: impl IntList,
padding: impl IntList,
dilation: impl IntList,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__conv_depthwise2d_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
weight.c_tensor,
kernel_size.as_ptr(),
kernel_size.len_i32(),
bias.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
stride.as_ptr(),
stride.len_i32(),
padding.as_ptr(),
padding.len_i32(),
dilation.as_ptr(),
dilation.len_i32()
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Converts COO row indices (`self`) to CSR `crow_indices` for a matrix of
/// `size` rows; `out_int32` selects a 32-bit index dtype.
pub fn f_internal_convert_indices_from_coo_to_csr(
&self,
size: i64,
out_int32: bool,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__convert_indices_from_coo_to_csr(
c_tensors.as_mut_ptr(),
self.c_tensor,
size,
if out_int32 { 1 } else { 0 }
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Out-variant of the COO-to-CSR index conversion.
pub fn f_internal_convert_indices_from_coo_to_csr_out(
&self,
out: &Tensor,
size: i64,
out_int32: bool,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__convert_indices_from_coo_to_csr_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
size,
if out_int32 { 1 } else { 0 }
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Associated function converting CSR indices back to COO form; `transpose`
/// is forwarded as a C int flag.
pub fn f_internal_convert_indices_from_csr_to_coo(
crow_indices: &Tensor,
col_indices: &Tensor,
out_int32: bool,
transpose: bool,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__convert_indices_from_csr_to_coo(
c_tensors.as_mut_ptr(),
crow_indices.c_tensor,
col_indices.c_tensor,
if out_int32 { 1 } else { 0 },
if transpose { 1 } else { 0 }
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Out-variant of the CSR-to-COO index conversion.
pub fn f_internal_convert_indices_from_csr_to_coo_out(
out: &Tensor,
crow_indices: &Tensor,
col_indices: &Tensor,
out_int32: bool,
transpose: bool,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__convert_indices_from_csr_to_coo_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
crow_indices.c_tensor,
col_indices.c_tensor,
if out_int32 { 1 } else { 0 },
if transpose { 1 } else { 0 }
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// General convolution dispatcher (`_convolution`): forwards weight, optional
/// bias (null pointer when `None`), the stride/padding/dilation/output_padding
/// int lists, and the transposed/benchmark/deterministic/cudnn/tf32 flags.
pub fn f_internal_convolution<T: Borrow<Tensor>>(
&self,
weight: &Tensor,
bias: Option<T>,
stride: impl IntList,
padding: impl IntList,
dilation: impl IntList,
transposed: bool,
output_padding: impl IntList,
groups: i64,
benchmark: bool,
deterministic: bool,
cudnn_enabled: bool,
allow_tf32: bool,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__convolution(
c_tensors.as_mut_ptr(),
self.c_tensor,
weight.c_tensor,
bias.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
stride.as_ptr(),
stride.len_i32(),
padding.as_ptr(),
padding.len_i32(),
dilation.as_ptr(),
dilation.len_i32(),
if transposed { 1 } else { 0 },
output_padding.as_ptr(),
output_padding.len_i32(),
groups,
if benchmark { 1 } else { 0 },
if deterministic { 1 } else { 0 },
if cudnn_enabled { 1 } else { 0 },
if allow_tf32 { 1 } else { 0 }
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Deprecated convolution overload: identical to the above but without the
/// `allow_tf32` flag.
pub fn f_internal_convolution_deprecated<T: Borrow<Tensor>>(
&self,
weight: &Tensor,
bias: Option<T>,
stride: impl IntList,
padding: impl IntList,
dilation: impl IntList,
transposed: bool,
output_padding: impl IntList,
groups: i64,
benchmark: bool,
deterministic: bool,
cudnn_enabled: bool,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__convolution_deprecated(
c_tensors.as_mut_ptr(),
self.c_tensor,
weight.c_tensor,
bias.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
stride.as_ptr(),
stride.len_i32(),
padding.as_ptr(),
padding.len_i32(),
dilation.as_ptr(),
dilation.len_i32(),
if transposed { 1 } else { 0 },
output_padding.as_ptr(),
output_padding.len_i32(),
groups,
if benchmark { 1 } else { 0 },
if deterministic { 1 } else { 0 },
if cudnn_enabled { 1 } else { 0 }
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Convolution with a string padding mode (e.g. "same"/"valid" — TODO confirm
/// accepted values against the C API); the &str is passed as raw byte pointer
/// plus byte length.
pub fn f_internal_convolution_mode<T: Borrow<Tensor>>(
&self,
weight: &Tensor,
bias: Option<T>,
stride: impl IntList,
padding: &str,
dilation: impl IntList,
groups: i64,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__convolution_mode(
c_tensors.as_mut_ptr(),
self.c_tensor,
weight.c_tensor,
bias.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
stride.as_ptr(),
stride.len_i32(),
padding.as_ptr(),
padding.len() as i32,
dilation.as_ptr(),
dilation.len_i32(),
groups
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Out-variant of the general convolution dispatcher.
pub fn f_internal_convolution_out<T: Borrow<Tensor>>(
&self,
out: &Tensor,
weight: &Tensor,
bias: Option<T>,
stride: impl IntList,
padding: impl IntList,
dilation: impl IntList,
transposed: bool,
output_padding: impl IntList,
groups: i64,
benchmark: bool,
deterministic: bool,
cudnn_enabled: bool,
allow_tf32: bool,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__convolution_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
weight.c_tensor,
bias.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
stride.as_ptr(),
stride.len_i32(),
padding.as_ptr(),
padding.len_i32(),
dilation.as_ptr(),
dilation.len_i32(),
if transposed { 1 } else { 0 },
output_padding.as_ptr(),
output_padding.len_i32(),
groups,
if benchmark { 1 } else { 0 },
if deterministic { 1 } else { 0 },
if cudnn_enabled { 1 } else { 0 },
if allow_tf32 { 1 } else { 0 }
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Copies `self` into `dst` via `atg__copy_from`; `non_blocking` is lowered
/// to a C int.
pub fn f_internal_copy_from(
&self,
dst: &Tensor,
non_blocking: bool,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__copy_from(
c_tensors.as_mut_ptr(),
self.c_tensor,
dst.c_tensor,
if non_blocking { 1 } else { 0 }
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Copy-with-resize variant via `atg__copy_from_and_resize`.
pub fn f_internal_copy_from_and_resize(&self, dst: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__copy_from_and_resize(
c_tensors.as_mut_ptr(),
self.c_tensor,
dst.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Out-variant of copy-with-resize.
pub fn f_internal_copy_from_and_resize_out(
&self,
out: &Tensor,
dst: &Tensor,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__copy_from_and_resize_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
dst.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Out-variant of `_copy_from`.
pub fn f_internal_copy_from_out(
&self,
out: &Tensor,
dst: &Tensor,
non_blocking: bool,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__copy_from_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
dst.c_tensor,
if non_blocking { 1 } else { 0 }
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// cuSPARSELt compression of `self` via `atg__cslt_compress`.
pub fn f_internal_cslt_compress(&self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__cslt_compress(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Associated function: cuSPARSELt sparse matmul of a compressed matrix with
/// a dense one; optional `bias` becomes a null pointer when `None`.
pub fn f_internal_cslt_sparse_mm<T: Borrow<Tensor>>(
compressed_a: &Tensor,
dense_b: &Tensor,
bias: Option<T>,
transpose_result: bool,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__cslt_sparse_mm(
c_tensors.as_mut_ptr(),
compressed_a.c_tensor,
dense_b.c_tensor,
bias.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
if transpose_result { 1 } else { 0 }
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for `_ctc_loss`; returns the two output tensors of the
/// C call. Int lists are passed as (pointer, length) pairs.
pub fn f_internal_ctc_loss(
log_probs: &Tensor,
targets: &Tensor,
input_lengths: impl IntList,
target_lengths: impl IntList,
blank: i64,
zero_infinity: bool,
) -> Result<(Tensor, Tensor), TchError> {
let mut c_tensors = [std::ptr::null_mut(); 2];
unsafe_torch_err!(atg__ctc_loss(
c_tensors.as_mut_ptr(),
log_probs.c_tensor,
targets.c_tensor,
input_lengths.as_ptr(),
input_lengths.len_i32(),
target_lengths.as_ptr(),
target_lengths.len_i32(),
blank,
if zero_infinity { 1 } else { 0 }
));
Ok((Tensor { c_tensor: c_tensors[0] }, Tensor { c_tensor: c_tensors[1] }))
}
/// Fallible binding for `_ctc_loss_backward` (gradient of `_ctc_loss`).
pub fn f_internal_ctc_loss_backward(
grad: &Tensor,
log_probs: &Tensor,
targets: &Tensor,
input_lengths: impl IntList,
target_lengths: impl IntList,
neg_log_likelihood: &Tensor,
log_alpha: &Tensor,
blank: i64,
zero_infinity: bool,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__ctc_loss_backward(
c_tensors.as_mut_ptr(),
grad.c_tensor,
log_probs.c_tensor,
targets.c_tensor,
input_lengths.as_ptr(),
input_lengths.len_i32(),
target_lengths.as_ptr(),
target_lengths.len_i32(),
neg_log_likelihood.c_tensor,
log_alpha.c_tensor,
blank,
if zero_infinity { 1 } else { 0 }
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// `out=` variant of `_ctc_loss_backward`.
pub fn f_internal_ctc_loss_backward_out(
out: &Tensor,
grad: &Tensor,
log_probs: &Tensor,
targets: &Tensor,
input_lengths: impl IntList,
target_lengths: impl IntList,
neg_log_likelihood: &Tensor,
log_alpha: &Tensor,
blank: i64,
zero_infinity: bool,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__ctc_loss_backward_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
grad.c_tensor,
log_probs.c_tensor,
targets.c_tensor,
input_lengths.as_ptr(),
input_lengths.len_i32(),
target_lengths.as_ptr(),
target_lengths.len_i32(),
neg_log_likelihood.c_tensor,
log_alpha.c_tensor,
blank,
if zero_infinity { 1 } else { 0 }
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Variant of `_ctc_loss_backward` where lengths are given as tensors
/// rather than int lists.
pub fn f_internal_ctc_loss_backward_tensor(
grad: &Tensor,
log_probs: &Tensor,
targets: &Tensor,
input_lengths: &Tensor,
target_lengths: &Tensor,
neg_log_likelihood: &Tensor,
log_alpha: &Tensor,
blank: i64,
zero_infinity: bool,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__ctc_loss_backward_tensor(
c_tensors.as_mut_ptr(),
grad.c_tensor,
log_probs.c_tensor,
targets.c_tensor,
input_lengths.c_tensor,
target_lengths.c_tensor,
neg_log_likelihood.c_tensor,
log_alpha.c_tensor,
blank,
if zero_infinity { 1 } else { 0 }
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// `out=` variant of `_ctc_loss`, writing into `out0`/`out1`.
pub fn f_internal_ctc_loss_out(
out0: &Tensor,
out1: &Tensor,
log_probs: &Tensor,
targets: &Tensor,
input_lengths: impl IntList,
target_lengths: impl IntList,
blank: i64,
zero_infinity: bool,
) -> Result<(Tensor, Tensor), TchError> {
let mut c_tensors = [std::ptr::null_mut(); 2];
unsafe_torch_err!(atg__ctc_loss_out(
c_tensors.as_mut_ptr(),
out0.c_tensor,
out1.c_tensor,
log_probs.c_tensor,
targets.c_tensor,
input_lengths.as_ptr(),
input_lengths.len_i32(),
target_lengths.as_ptr(),
target_lengths.len_i32(),
blank,
if zero_infinity { 1 } else { 0 }
));
Ok((Tensor { c_tensor: c_tensors[0] }, Tensor { c_tensor: c_tensors[1] }))
}
/// Variant of `_ctc_loss` with tensor-valued lengths.
pub fn f_internal_ctc_loss_tensor(
log_probs: &Tensor,
targets: &Tensor,
input_lengths: &Tensor,
target_lengths: &Tensor,
blank: i64,
zero_infinity: bool,
) -> Result<(Tensor, Tensor), TchError> {
let mut c_tensors = [std::ptr::null_mut(); 2];
unsafe_torch_err!(atg__ctc_loss_tensor(
c_tensors.as_mut_ptr(),
log_probs.c_tensor,
targets.c_tensor,
input_lengths.c_tensor,
target_lengths.c_tensor,
blank,
if zero_infinity { 1 } else { 0 }
));
Ok((Tensor { c_tensor: c_tensors[0] }, Tensor { c_tensor: c_tensors[1] }))
}
/// `out=` variant of `_ctc_loss` with tensor-valued lengths.
pub fn f_internal_ctc_loss_tensor_out(
out0: &Tensor,
out1: &Tensor,
log_probs: &Tensor,
targets: &Tensor,
input_lengths: &Tensor,
target_lengths: &Tensor,
blank: i64,
zero_infinity: bool,
) -> Result<(Tensor, Tensor), TchError> {
let mut c_tensors = [std::ptr::null_mut(); 2];
unsafe_torch_err!(atg__ctc_loss_tensor_out(
c_tensors.as_mut_ptr(),
out0.c_tensor,
out1.c_tensor,
log_probs.c_tensor,
targets.c_tensor,
input_lengths.c_tensor,
target_lengths.c_tensor,
blank,
if zero_infinity { 1 } else { 0 }
));
Ok((Tensor { c_tensor: c_tensors[0] }, Tensor { c_tensor: c_tensors[1] }))
}
/// Fallible binding for the cuDNN-backed `_cudnn_ctc_loss`.
pub fn f_internal_cudnn_ctc_loss(
log_probs: &Tensor,
targets: &Tensor,
input_lengths: impl IntList,
target_lengths: impl IntList,
blank: i64,
deterministic: bool,
zero_infinity: bool,
) -> Result<(Tensor, Tensor), TchError> {
let mut c_tensors = [std::ptr::null_mut(); 2];
unsafe_torch_err!(atg__cudnn_ctc_loss(
c_tensors.as_mut_ptr(),
log_probs.c_tensor,
targets.c_tensor,
input_lengths.as_ptr(),
input_lengths.len_i32(),
target_lengths.as_ptr(),
target_lengths.len_i32(),
blank,
if deterministic { 1 } else { 0 },
if zero_infinity { 1 } else { 0 }
));
Ok((Tensor { c_tensor: c_tensors[0] }, Tensor { c_tensor: c_tensors[1] }))
}
/// `out=` variant of `_cudnn_ctc_loss`.
pub fn f_internal_cudnn_ctc_loss_out(
out0: &Tensor,
out1: &Tensor,
log_probs: &Tensor,
targets: &Tensor,
input_lengths: impl IntList,
target_lengths: impl IntList,
blank: i64,
deterministic: bool,
zero_infinity: bool,
) -> Result<(Tensor, Tensor), TchError> {
let mut c_tensors = [std::ptr::null_mut(); 2];
unsafe_torch_err!(atg__cudnn_ctc_loss_out(
c_tensors.as_mut_ptr(),
out0.c_tensor,
out1.c_tensor,
log_probs.c_tensor,
targets.c_tensor,
input_lengths.as_ptr(),
input_lengths.len_i32(),
target_lengths.as_ptr(),
target_lengths.len_i32(),
blank,
if deterministic { 1 } else { 0 },
if zero_infinity { 1 } else { 0 }
));
Ok((Tensor { c_tensor: c_tensors[0] }, Tensor { c_tensor: c_tensors[1] }))
}
/// Variant of `_cudnn_ctc_loss` with tensor-valued lengths.
pub fn f_internal_cudnn_ctc_loss_tensor(
log_probs: &Tensor,
targets: &Tensor,
input_lengths: &Tensor,
target_lengths: &Tensor,
blank: i64,
deterministic: bool,
zero_infinity: bool,
) -> Result<(Tensor, Tensor), TchError> {
let mut c_tensors = [std::ptr::null_mut(); 2];
unsafe_torch_err!(atg__cudnn_ctc_loss_tensor(
c_tensors.as_mut_ptr(),
log_probs.c_tensor,
targets.c_tensor,
input_lengths.c_tensor,
target_lengths.c_tensor,
blank,
if deterministic { 1 } else { 0 },
if zero_infinity { 1 } else { 0 }
));
Ok((Tensor { c_tensor: c_tensors[0] }, Tensor { c_tensor: c_tensors[1] }))
}
/// Fallible binding for `_cudnn_init_dropout_state`. The `(Kind, Device)`
/// options tuple is lowered to the two C int codes libtorch expects.
pub fn f_internal_cudnn_init_dropout_state(
dropout: f64,
train: bool,
dropout_seed: i64,
options: (Kind, Device),
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__cudnn_init_dropout_state(
c_tensors.as_mut_ptr(),
dropout,
if train { 1 } else { 0 },
dropout_seed,
options.0.c_int(),
options.1.c_int()
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// `out=` variant of `_cudnn_init_dropout_state` (options come from `out`).
pub fn f_internal_cudnn_init_dropout_state_out(
out: &Tensor,
dropout: f64,
train: bool,
dropout_seed: i64,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__cudnn_init_dropout_state_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
dropout,
if train { 1 } else { 0 },
dropout_seed
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for `_cudnn_rnn`, returning the five output tensors of
/// the C call. Tensor slices are passed as (pointer array, length); each
/// optional tensor becomes a null pointer when `None`.
///
/// NOTE(review): the `ptr_list(weight)` temporary lives until the end of
/// the enclosing statement, so its pointer stays valid for the FFI call.
pub fn f_internal_cudnn_rnn<T: Borrow<Tensor>>(
&self,
weight: &[T],
weight_stride0: i64,
weight_buf: Option<T>,
hx: &Tensor,
cx: Option<T>,
mode: i64,
hidden_size: i64,
proj_size: i64,
num_layers: i64,
batch_first: bool,
dropout: f64,
train: bool,
bidirectional: bool,
batch_sizes: impl IntList,
dropout_state: Option<T>,
) -> Result<(Tensor, Tensor, Tensor, Tensor, Tensor), TchError> {
let mut c_tensors = [std::ptr::null_mut(); 5];
unsafe_torch_err!(atg__cudnn_rnn(
c_tensors.as_mut_ptr(),
self.c_tensor,
ptr_list(weight).as_ptr(),
weight.len() as i32,
weight_stride0,
weight_buf.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
hx.c_tensor,
cx.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
mode,
hidden_size,
proj_size,
num_layers,
if batch_first { 1 } else { 0 },
dropout,
if train { 1 } else { 0 },
if bidirectional { 1 } else { 0 },
batch_sizes.as_ptr(),
batch_sizes.len_i32(),
dropout_state.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor)
));
Ok((
Tensor { c_tensor: c_tensors[0] },
Tensor { c_tensor: c_tensors[1] },
Tensor { c_tensor: c_tensors[2] },
Tensor { c_tensor: c_tensors[3] },
Tensor { c_tensor: c_tensors[4] },
))
}
/// Fallible binding for `_cudnn_rnn_flatten_weight`.
pub fn f_internal_cudnn_rnn_flatten_weight<T: Borrow<Tensor>>(
weight_arr: &[T],
weight_stride0: i64,
input_size: i64,
mode: i64,
hidden_size: i64,
proj_size: i64,
num_layers: i64,
batch_first: bool,
bidirectional: bool,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__cudnn_rnn_flatten_weight(
c_tensors.as_mut_ptr(),
ptr_list(weight_arr).as_ptr(),
weight_arr.len() as i32,
weight_stride0,
input_size,
mode,
hidden_size,
proj_size,
num_layers,
if batch_first { 1 } else { 0 },
if bidirectional { 1 } else { 0 }
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// `out=` variant of `_cudnn_rnn_flatten_weight`.
pub fn f_internal_cudnn_rnn_flatten_weight_out<T: Borrow<Tensor>>(
out: &Tensor,
weight_arr: &[T],
weight_stride0: i64,
input_size: i64,
mode: i64,
hidden_size: i64,
proj_size: i64,
num_layers: i64,
batch_first: bool,
bidirectional: bool,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__cudnn_rnn_flatten_weight_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
ptr_list(weight_arr).as_ptr(),
weight_arr.len() as i32,
weight_stride0,
input_size,
mode,
hidden_size,
proj_size,
num_layers,
if batch_first { 1 } else { 0 },
if bidirectional { 1 } else { 0 }
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// `out=` variant of `_cudnn_rnn`, writing the five results into
/// `out0`..`out4`; argument encoding matches `f_internal_cudnn_rnn`.
pub fn f_internal_cudnn_rnn_out<T: Borrow<Tensor>>(
&self,
out0: &Tensor,
out1: &Tensor,
out2: &Tensor,
out3: &Tensor,
out4: &Tensor,
weight: &[T],
weight_stride0: i64,
weight_buf: Option<T>,
hx: &Tensor,
cx: Option<T>,
mode: i64,
hidden_size: i64,
proj_size: i64,
num_layers: i64,
batch_first: bool,
dropout: f64,
train: bool,
bidirectional: bool,
batch_sizes: impl IntList,
dropout_state: Option<T>,
) -> Result<(Tensor, Tensor, Tensor, Tensor, Tensor), TchError> {
let mut c_tensors = [std::ptr::null_mut(); 5];
unsafe_torch_err!(atg__cudnn_rnn_out(
c_tensors.as_mut_ptr(),
out0.c_tensor,
out1.c_tensor,
out2.c_tensor,
out3.c_tensor,
out4.c_tensor,
self.c_tensor,
ptr_list(weight).as_ptr(),
weight.len() as i32,
weight_stride0,
weight_buf.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
hx.c_tensor,
cx.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
mode,
hidden_size,
proj_size,
num_layers,
if batch_first { 1 } else { 0 },
dropout,
if train { 1 } else { 0 },
if bidirectional { 1 } else { 0 },
batch_sizes.as_ptr(),
batch_sizes.len_i32(),
dropout_state.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor)
));
Ok((
Tensor { c_tensor: c_tensors[0] },
Tensor { c_tensor: c_tensors[1] },
Tensor { c_tensor: c_tensors[2] },
Tensor { c_tensor: c_tensors[3] },
Tensor { c_tensor: c_tensors[4] },
))
}
/// Fallible binding for `_debug_has_internal_overlap`; the C call returns
/// a plain `i64` rather than a tensor.
pub fn f_internal_debug_has_internal_overlap(&self) -> Result<i64, TchError> {
let return_;
unsafe_torch_err!(return_ = atg__debug_has_internal_overlap(self.c_tensor));
Ok(return_)
}
/// Fallible binding for `_dim_arange`.
pub fn f_internal_dim_arange(like: &Tensor, dim: i64) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__dim_arange(c_tensors.as_mut_ptr(), like.c_tensor, dim));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for `_dimI`; returns the C call's scalar `i64` result.
pub fn f_internal_dimi(&self) -> Result<i64, TchError> {
let return_;
unsafe_torch_err!(return_ = atg__dimi(self.c_tensor));
Ok(return_)
}
/// Fallible binding for `_dimV`; returns the C call's scalar `i64` result.
pub fn f_internal_dimv(&self) -> Result<i64, TchError> {
let return_;
unsafe_torch_err!(return_ = atg__dimv(self.c_tensor));
Ok(return_)
}
/// Fallible binding for `_dirichlet_grad`.
pub fn f_internal_dirichlet_grad(
x: &Tensor,
alpha: &Tensor,
total: &Tensor,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__dirichlet_grad(
c_tensors.as_mut_ptr(),
x.c_tensor,
alpha.c_tensor,
total.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// `out=` variant of `_dirichlet_grad`.
pub fn f_internal_dirichlet_grad_out(
out: &Tensor,
x: &Tensor,
alpha: &Tensor,
total: &Tensor,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__dirichlet_grad_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
x.c_tensor,
alpha.c_tensor,
total.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for `_efficient_attention_backward`. Optional tensors
/// become null pointers; optional scalars are encoded for the FFI layer as
/// a (value, is_none) pair — the value slot holds a placeholder (NaN / 0)
/// when the option is absent.
pub fn f_internal_efficient_attention_backward<T: Borrow<Tensor>>(
grad_out_: &Tensor,
query: &Tensor,
key: &Tensor,
value: &Tensor,
bias: Option<T>,
out: &Tensor,
cu_seqlens_q: Option<T>,
cu_seqlens_k: Option<T>,
max_seqlen_k: i64,
max_seqlen_q: i64,
logsumexp: &Tensor,
dropout_p: f64,
philox_seed: &Tensor,
philox_offset: &Tensor,
custom_mask_type: i64,
bias_requires_grad: bool,
scale: impl Into<Option<f64>>,
num_splits_key: impl Into<Option<i64>>,
) -> Result<(Tensor, Tensor, Tensor, Tensor), TchError> {
let scale = scale.into();
let num_splits_key = num_splits_key.into();
let mut c_tensors = [std::ptr::null_mut(); 4];
unsafe_torch_err!(atg__efficient_attention_backward(
c_tensors.as_mut_ptr(),
grad_out_.c_tensor,
query.c_tensor,
key.c_tensor,
value.c_tensor,
bias.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
out.c_tensor,
cu_seqlens_q.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
cu_seqlens_k.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
max_seqlen_k,
max_seqlen_q,
logsumexp.c_tensor,
dropout_p,
philox_seed.c_tensor,
philox_offset.c_tensor,
custom_mask_type,
if bias_requires_grad { 1 } else { 0 },
scale.unwrap_or(std::f64::NAN),
scale.is_none() as i8,
num_splits_key.unwrap_or(0i64),
num_splits_key.is_none() as i8
));
Ok((
Tensor { c_tensor: c_tensors[0] },
Tensor { c_tensor: c_tensors[1] },
Tensor { c_tensor: c_tensors[2] },
Tensor { c_tensor: c_tensors[3] },
))
}
/// Fallible binding for `_efficientzerotensor`.
pub fn f_internal_efficientzerotensor(
size: impl IntList,
options: (Kind, Device),
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__efficientzerotensor(
c_tensors.as_mut_ptr(),
size.as_ptr(),
size.len_i32(),
options.0.c_int(),
options.1.c_int()
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// `out=` variant of `_efficientzerotensor`.
pub fn f_internal_efficientzerotensor_out(
out: &Tensor,
size: impl IntList,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__efficientzerotensor_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
size.as_ptr(),
size.len_i32()
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for `_embedding_bag`, returning its four outputs.
pub fn f_internal_embedding_bag<T: Borrow<Tensor>>(
weight: &Tensor,
indices: &Tensor,
offsets: &Tensor,
scale_grad_by_freq: bool,
mode: i64,
sparse: bool,
per_sample_weights: Option<T>,
include_last_offset: bool,
padding_idx: i64,
) -> Result<(Tensor, Tensor, Tensor, Tensor), TchError> {
let mut c_tensors = [std::ptr::null_mut(); 4];
unsafe_torch_err!(atg__embedding_bag(
c_tensors.as_mut_ptr(),
weight.c_tensor,
indices.c_tensor,
offsets.c_tensor,
if scale_grad_by_freq { 1 } else { 0 },
mode,
if sparse { 1 } else { 0 },
per_sample_weights.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
if include_last_offset { 1 } else { 0 },
padding_idx
));
Ok((
Tensor { c_tensor: c_tensors[0] },
Tensor { c_tensor: c_tensors[1] },
Tensor { c_tensor: c_tensors[2] },
Tensor { c_tensor: c_tensors[3] },
))
}
/// Fallible binding for `_embedding_bag_backward`.
pub fn f_internal_embedding_bag_backward<T: Borrow<Tensor>>(
grad: &Tensor,
indices: &Tensor,
offsets: &Tensor,
offset2bag: &Tensor,
bag_size: &Tensor,
maximum_indices: &Tensor,
num_weights: i64,
scale_grad_by_freq: bool,
mode: i64,
sparse: bool,
per_sample_weights: Option<T>,
padding_idx: i64,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__embedding_bag_backward(
c_tensors.as_mut_ptr(),
grad.c_tensor,
indices.c_tensor,
offsets.c_tensor,
offset2bag.c_tensor,
bag_size.c_tensor,
maximum_indices.c_tensor,
num_weights,
if scale_grad_by_freq { 1 } else { 0 },
mode,
if sparse { 1 } else { 0 },
per_sample_weights.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
padding_idx
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for `_embedding_bag_dense_backward`.
pub fn f_internal_embedding_bag_dense_backward<T: Borrow<Tensor>>(
grad: &Tensor,
indices: &Tensor,
offset2bag: &Tensor,
bag_size: &Tensor,
maximum_indices: &Tensor,
num_weights: i64,
scale_grad_by_freq: bool,
mode: i64,
per_sample_weights: Option<T>,
padding_idx: i64,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__embedding_bag_dense_backward(
c_tensors.as_mut_ptr(),
grad.c_tensor,
indices.c_tensor,
offset2bag.c_tensor,
bag_size.c_tensor,
maximum_indices.c_tensor,
num_weights,
if scale_grad_by_freq { 1 } else { 0 },
mode,
per_sample_weights.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
padding_idx
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// `out=` variant of `_embedding_bag_dense_backward`.
pub fn f_internal_embedding_bag_dense_backward_out<T: Borrow<Tensor>>(
out: &Tensor,
grad: &Tensor,
indices: &Tensor,
offset2bag: &Tensor,
bag_size: &Tensor,
maximum_indices: &Tensor,
num_weights: i64,
scale_grad_by_freq: bool,
mode: i64,
per_sample_weights: Option<T>,
padding_idx: i64,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__embedding_bag_dense_backward_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
grad.c_tensor,
indices.c_tensor,
offset2bag.c_tensor,
bag_size.c_tensor,
maximum_indices.c_tensor,
num_weights,
if scale_grad_by_freq { 1 } else { 0 },
mode,
per_sample_weights.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
padding_idx
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for `_embedding_bag_forward_only` (same argument
/// encoding as `f_internal_embedding_bag`).
pub fn f_internal_embedding_bag_forward_only<T: Borrow<Tensor>>(
weight: &Tensor,
indices: &Tensor,
offsets: &Tensor,
scale_grad_by_freq: bool,
mode: i64,
sparse: bool,
per_sample_weights: Option<T>,
include_last_offset: bool,
padding_idx: i64,
) -> Result<(Tensor, Tensor, Tensor, Tensor), TchError> {
let mut c_tensors = [std::ptr::null_mut(); 4];
unsafe_torch_err!(atg__embedding_bag_forward_only(
c_tensors.as_mut_ptr(),
weight.c_tensor,
indices.c_tensor,
offsets.c_tensor,
if scale_grad_by_freq { 1 } else { 0 },
mode,
if sparse { 1 } else { 0 },
per_sample_weights.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
if include_last_offset { 1 } else { 0 },
padding_idx
));
Ok((
Tensor { c_tensor: c_tensors[0] },
Tensor { c_tensor: c_tensors[1] },
Tensor { c_tensor: c_tensors[2] },
Tensor { c_tensor: c_tensors[3] },
))
}
/// `out=` variant of `_embedding_bag_forward_only`.
pub fn f_internal_embedding_bag_forward_only_out<T: Borrow<Tensor>>(
out0: &Tensor,
out1: &Tensor,
out2: &Tensor,
out3: &Tensor,
weight: &Tensor,
indices: &Tensor,
offsets: &Tensor,
scale_grad_by_freq: bool,
mode: i64,
sparse: bool,
per_sample_weights: Option<T>,
include_last_offset: bool,
padding_idx: i64,
) -> Result<(Tensor, Tensor, Tensor, Tensor), TchError> {
let mut c_tensors = [std::ptr::null_mut(); 4];
unsafe_torch_err!(atg__embedding_bag_forward_only_out(
c_tensors.as_mut_ptr(),
out0.c_tensor,
out1.c_tensor,
out2.c_tensor,
out3.c_tensor,
weight.c_tensor,
indices.c_tensor,
offsets.c_tensor,
if scale_grad_by_freq { 1 } else { 0 },
mode,
if sparse { 1 } else { 0 },
per_sample_weights.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
if include_last_offset { 1 } else { 0 },
padding_idx
));
Ok((
Tensor { c_tensor: c_tensors[0] },
Tensor { c_tensor: c_tensors[1] },
Tensor { c_tensor: c_tensors[2] },
Tensor { c_tensor: c_tensors[3] },
))
}
/// `out=` variant of `_embedding_bag`.
pub fn f_internal_embedding_bag_out<T: Borrow<Tensor>>(
out0: &Tensor,
out1: &Tensor,
out2: &Tensor,
out3: &Tensor,
weight: &Tensor,
indices: &Tensor,
offsets: &Tensor,
scale_grad_by_freq: bool,
mode: i64,
sparse: bool,
per_sample_weights: Option<T>,
include_last_offset: bool,
padding_idx: i64,
) -> Result<(Tensor, Tensor, Tensor, Tensor), TchError> {
let mut c_tensors = [std::ptr::null_mut(); 4];
unsafe_torch_err!(atg__embedding_bag_out(
c_tensors.as_mut_ptr(),
out0.c_tensor,
out1.c_tensor,
out2.c_tensor,
out3.c_tensor,
weight.c_tensor,
indices.c_tensor,
offsets.c_tensor,
if scale_grad_by_freq { 1 } else { 0 },
mode,
if sparse { 1 } else { 0 },
per_sample_weights.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
if include_last_offset { 1 } else { 0 },
padding_idx
));
Ok((
Tensor { c_tensor: c_tensors[0] },
Tensor { c_tensor: c_tensors[1] },
Tensor { c_tensor: c_tensors[2] },
Tensor { c_tensor: c_tensors[3] },
))
}
/// Fallible binding for `_embedding_bag_per_sample_weights_backward`.
pub fn f_internal_embedding_bag_per_sample_weights_backward(
grad: &Tensor,
weight: &Tensor,
indices: &Tensor,
offsets: &Tensor,
offset2bag: &Tensor,
mode: i64,
padding_idx: i64,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__embedding_bag_per_sample_weights_backward(
c_tensors.as_mut_ptr(),
grad.c_tensor,
weight.c_tensor,
indices.c_tensor,
offsets.c_tensor,
offset2bag.c_tensor,
mode,
padding_idx
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// `out=` variant of `_embedding_bag_per_sample_weights_backward`.
pub fn f_internal_embedding_bag_per_sample_weights_backward_out(
out: &Tensor,
grad: &Tensor,
weight: &Tensor,
indices: &Tensor,
offsets: &Tensor,
offset2bag: &Tensor,
mode: i64,
padding_idx: i64,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__embedding_bag_per_sample_weights_backward_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
grad.c_tensor,
weight.c_tensor,
indices.c_tensor,
offsets.c_tensor,
offset2bag.c_tensor,
mode,
padding_idx
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for `_embedding_bag_sparse_backward`.
pub fn f_internal_embedding_bag_sparse_backward<T: Borrow<Tensor>>(
grad: &Tensor,
indices: &Tensor,
offsets: &Tensor,
offset2bag: &Tensor,
bag_size: &Tensor,
num_weights: i64,
scale_grad_by_freq: bool,
mode: i64,
per_sample_weights: Option<T>,
padding_idx: i64,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__embedding_bag_sparse_backward(
c_tensors.as_mut_ptr(),
grad.c_tensor,
indices.c_tensor,
offsets.c_tensor,
offset2bag.c_tensor,
bag_size.c_tensor,
num_weights,
if scale_grad_by_freq { 1 } else { 0 },
mode,
per_sample_weights.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
padding_idx
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for `_empty_affine_quantized`.
pub fn f_internal_empty_affine_quantized(
size: impl IntList,
options: (Kind, Device),
scale: f64,
zero_point: i64,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__empty_affine_quantized(
c_tensors.as_mut_ptr(),
size.as_ptr(),
size.len_i32(),
options.0.c_int(),
options.1.c_int(),
scale,
zero_point
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// `out=` variant of `_empty_affine_quantized`.
pub fn f_internal_empty_affine_quantized_out(
out: &Tensor,
size: impl IntList,
scale: f64,
zero_point: i64,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__empty_affine_quantized_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
size.as_ptr(),
size.len_i32(),
scale,
zero_point
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for `_empty_per_channel_affine_quantized`.
pub fn f_internal_empty_per_channel_affine_quantized(
size: impl IntList,
scales: &Tensor,
zero_points: &Tensor,
axis: i64,
options: (Kind, Device),
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__empty_per_channel_affine_quantized(
c_tensors.as_mut_ptr(),
size.as_ptr(),
size.len_i32(),
scales.c_tensor,
zero_points.c_tensor,
axis,
options.0.c_int(),
options.1.c_int()
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// `out=` variant of `_empty_per_channel_affine_quantized`.
pub fn f_internal_empty_per_channel_affine_quantized_out(
out: &Tensor,
size: impl IntList,
scales: &Tensor,
zero_points: &Tensor,
axis: i64,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__empty_per_channel_affine_quantized_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
size.as_ptr(),
size.len_i32(),
scales.c_tensor,
zero_points.c_tensor,
axis
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for `_euclidean_dist`.
pub fn f_internal_euclidean_dist(x1: &Tensor, x2: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__euclidean_dist(c_tensors.as_mut_ptr(), x1.c_tensor, x2.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// `out=` variant of `_euclidean_dist`.
pub fn f_internal_euclidean_dist_out(
out: &Tensor,
x1: &Tensor,
x2: &Tensor,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__euclidean_dist_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
x1.c_tensor,
x2.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for `_fake_quantize_learnable_per_channel_affine`.
pub fn f_internal_fake_quantize_learnable_per_channel_affine(
&self,
scale: &Tensor,
zero_point: &Tensor,
axis: i64,
quant_min: i64,
quant_max: i64,
grad_factor: f64,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__fake_quantize_learnable_per_channel_affine(
c_tensors.as_mut_ptr(),
self.c_tensor,
scale.c_tensor,
zero_point.c_tensor,
axis,
quant_min,
quant_max,
grad_factor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Backward of `_fake_quantize_learnable_per_channel_affine`; note that
/// `grad` precedes `self` in the generated C argument order.
pub fn f_internal_fake_quantize_learnable_per_channel_affine_backward(
&self,
grad: &Tensor,
scale: &Tensor,
zero_point: &Tensor,
axis: i64,
quant_min: i64,
quant_max: i64,
grad_factor: f64,
) -> Result<(Tensor, Tensor, Tensor), TchError> {
let mut c_tensors = [std::ptr::null_mut(); 3];
unsafe_torch_err!(atg__fake_quantize_learnable_per_channel_affine_backward(
c_tensors.as_mut_ptr(),
grad.c_tensor,
self.c_tensor,
scale.c_tensor,
zero_point.c_tensor,
axis,
quant_min,
quant_max,
grad_factor
));
Ok((
Tensor { c_tensor: c_tensors[0] },
Tensor { c_tensor: c_tensors[1] },
Tensor { c_tensor: c_tensors[2] },
))
}
/// `out=` variant of `_fake_quantize_learnable_per_channel_affine`.
pub fn f_internal_fake_quantize_learnable_per_channel_affine_out(
&self,
out: &Tensor,
scale: &Tensor,
zero_point: &Tensor,
axis: i64,
quant_min: i64,
quant_max: i64,
grad_factor: f64,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__fake_quantize_learnable_per_channel_affine_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
scale.c_tensor,
zero_point.c_tensor,
axis,
quant_min,
quant_max,
grad_factor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for `_fake_quantize_learnable_per_tensor_affine`.
pub fn f_internal_fake_quantize_learnable_per_tensor_affine(
&self,
scale: &Tensor,
zero_point: &Tensor,
quant_min: i64,
quant_max: i64,
grad_factor: f64,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__fake_quantize_learnable_per_tensor_affine(
c_tensors.as_mut_ptr(),
self.c_tensor,
scale.c_tensor,
zero_point.c_tensor,
quant_min,
quant_max,
grad_factor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Backward of `_fake_quantize_learnable_per_tensor_affine`; `grad`
/// precedes `self` in the generated C argument order.
pub fn f_internal_fake_quantize_learnable_per_tensor_affine_backward(
&self,
grad: &Tensor,
scale: &Tensor,
zero_point: &Tensor,
quant_min: i64,
quant_max: i64,
grad_factor: f64,
) -> Result<(Tensor, Tensor, Tensor), TchError> {
let mut c_tensors = [std::ptr::null_mut(); 3];
unsafe_torch_err!(atg__fake_quantize_learnable_per_tensor_affine_backward(
c_tensors.as_mut_ptr(),
grad.c_tensor,
self.c_tensor,
scale.c_tensor,
zero_point.c_tensor,
quant_min,
quant_max,
grad_factor
));
Ok((
Tensor { c_tensor: c_tensors[0] },
Tensor { c_tensor: c_tensors[1] },
Tensor { c_tensor: c_tensors[2] },
))
}
/// `out=` variant of `_fake_quantize_learnable_per_tensor_affine`.
pub fn f_internal_fake_quantize_learnable_per_tensor_affine_out(
&self,
out: &Tensor,
scale: &Tensor,
zero_point: &Tensor,
quant_min: i64,
quant_max: i64,
grad_factor: f64,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__fake_quantize_learnable_per_tensor_affine_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
scale.c_tensor,
zero_point.c_tensor,
quant_min,
quant_max,
grad_factor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for
/// `_fake_quantize_per_tensor_affine_cachemask_tensor_qparams`.
pub fn f_internal_fake_quantize_per_tensor_affine_cachemask_tensor_qparams(
&self,
scale: &Tensor,
zero_point: &Tensor,
fake_quant_enabled: &Tensor,
quant_min: i64,
quant_max: i64,
) -> Result<(Tensor, Tensor), TchError> {
let mut c_tensors = [std::ptr::null_mut(); 2];
unsafe_torch_err!(atg__fake_quantize_per_tensor_affine_cachemask_tensor_qparams(
c_tensors.as_mut_ptr(),
self.c_tensor,
scale.c_tensor,
zero_point.c_tensor,
fake_quant_enabled.c_tensor,
quant_min,
quant_max
));
Ok((Tensor { c_tensor: c_tensors[0] }, Tensor { c_tensor: c_tensors[1] }))
}
/// `out=` variant of
/// `_fake_quantize_per_tensor_affine_cachemask_tensor_qparams`.
pub fn f_internal_fake_quantize_per_tensor_affine_cachemask_tensor_qparams_out(
&self,
out0: &Tensor,
out1: &Tensor,
scale: &Tensor,
zero_point: &Tensor,
fake_quant_enabled: &Tensor,
quant_min: i64,
quant_max: i64,
) -> Result<(Tensor, Tensor), TchError> {
let mut c_tensors = [std::ptr::null_mut(); 2];
unsafe_torch_err!(atg__fake_quantize_per_tensor_affine_cachemask_tensor_qparams_out(
c_tensors.as_mut_ptr(),
out0.c_tensor,
out1.c_tensor,
self.c_tensor,
scale.c_tensor,
zero_point.c_tensor,
fake_quant_enabled.c_tensor,
quant_min,
quant_max
));
Ok((Tensor { c_tensor: c_tensors[0] }, Tensor { c_tensor: c_tensors[1] }))
}
/// Fallible binding for `_fft_c2c` (complex-to-complex FFT primitive).
pub fn f_internal_fft_c2c(
&self,
dim: impl IntList,
normalization: i64,
forward: bool,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__fft_c2c(
c_tensors.as_mut_ptr(),
self.c_tensor,
dim.as_ptr(),
dim.len_i32(),
normalization,
if forward { 1 } else { 0 }
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// `out=` variant of `_fft_c2c`.
pub fn f_internal_fft_c2c_out(
&self,
out: &Tensor,
dim: impl IntList,
normalization: i64,
forward: bool,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__fft_c2c_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
dim.as_ptr(),
dim.len_i32(),
normalization,
if forward { 1 } else { 0 }
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for `_fft_c2r` (complex-to-real FFT primitive).
pub fn f_internal_fft_c2r(
&self,
dim: impl IntList,
normalization: i64,
last_dim_size: i64,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__fft_c2r(
c_tensors.as_mut_ptr(),
self.c_tensor,
dim.as_ptr(),
dim.len_i32(),
normalization,
last_dim_size
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// `out=` variant of `_fft_c2r`.
pub fn f_internal_fft_c2r_out(
&self,
out: &Tensor,
dim: impl IntList,
normalization: i64,
last_dim_size: i64,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__fft_c2r_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
dim.as_ptr(),
dim.len_i32(),
normalization,
last_dim_size
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for `_fft_r2c` (real-to-complex FFT primitive).
pub fn f_internal_fft_r2c(
&self,
dim: impl IntList,
normalization: i64,
onesided: bool,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__fft_r2c(
c_tensors.as_mut_ptr(),
self.c_tensor,
dim.as_ptr(),
dim.len_i32(),
normalization,
if onesided { 1 } else { 0 }
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// `out=` variant of `_fft_r2c`.
pub fn f_internal_fft_r2c_out(
&self,
out: &Tensor,
dim: impl IntList,
normalization: i64,
onesided: bool,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__fft_r2c_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
dim.as_ptr(),
dim.len_i32(),
normalization,
if onesided { 1 } else { 0 }
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for `_fill_mem_eff_dropout_mask_`; the trailing `_`
/// follows ATen's in-place naming convention, hence `&mut self`.
pub fn f_internal_fill_mem_eff_dropout_mask_(
&mut self,
dropout_p: f64,
seed: i64,
offset: i64,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__fill_mem_eff_dropout_mask_(
c_tensors.as_mut_ptr(),
self.c_tensor,
dropout_p,
seed,
offset
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for `_flash_attention_backward`. The optional `scale`
/// is encoded as a (value, is_none) pair, with NaN as the placeholder
/// value when absent.
pub fn f_internal_flash_attention_backward(
grad_out: &Tensor,
query: &Tensor,
key: &Tensor,
value: &Tensor,
out: &Tensor,
logsumexp: &Tensor,
cum_seq_q: &Tensor,
cum_seq_k: &Tensor,
max_q: i64,
max_k: i64,
dropout_p: f64,
is_causal: bool,
philox_seed: &Tensor,
philox_offset: &Tensor,
scale: impl Into<Option<f64>>,
) -> Result<(Tensor, Tensor, Tensor), TchError> {
let scale = scale.into();
let mut c_tensors = [std::ptr::null_mut(); 3];
unsafe_torch_err!(atg__flash_attention_backward(
c_tensors.as_mut_ptr(),
grad_out.c_tensor,
query.c_tensor,
key.c_tensor,
value.c_tensor,
out.c_tensor,
logsumexp.c_tensor,
cum_seq_q.c_tensor,
cum_seq_k.c_tensor,
max_q,
max_k,
dropout_p,
if is_causal { 1 } else { 0 },
philox_seed.c_tensor,
philox_offset.c_tensor,
scale.unwrap_or(std::f64::NAN),
scale.is_none() as i8
));
Ok((
Tensor { c_tensor: c_tensors[0] },
Tensor { c_tensor: c_tensors[1] },
Tensor { c_tensor: c_tensors[2] },
))
}
/// Fallible binding for `_foobar` (an ATen testing/placeholder op).
pub fn f_internal_foobar(
&self,
arg1: bool,
arg2: bool,
arg3: bool,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__foobar(
c_tensors.as_mut_ptr(),
self.c_tensor,
if arg1 { 1 } else { 0 },
if arg2 { 1 } else { 0 },
if arg3 { 1 } else { 0 }
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// `out=` variant of `_foobar`.
pub fn f_internal_foobar_out(
&self,
out: &Tensor,
arg1: bool,
arg2: bool,
arg3: bool,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__foobar_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
if arg1 { 1 } else { 0 },
if arg2 { 1 } else { 0 },
if arg3 { 1 } else { 0 }
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
pub fn f_internal_functional_assert_async(
&self,
assert_msg: &str,
dep_token: &Tensor,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__functional_assert_async(
c_tensors.as_mut_ptr(),
self.c_tensor,
assert_msg.as_ptr(),
assert_msg.len() as i32,
dep_token.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
pub fn f_internal_functional_sym_constrain_range<S: Into<Scalar>>(
size: S,
min: impl Into<Option<i64>>,
max: impl Into<Option<i64>>,
dep_token: &Tensor,
) -> Result<Tensor, TchError> {
let min = min.into();
let max = max.into();
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__functional_sym_constrain_range(
c_tensors.as_mut_ptr(),
size.into().c_scalar,
min.unwrap_or(0i64),
min.is_none() as i8,
max.unwrap_or(0i64),
max.is_none() as i8,
dep_token.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
pub fn f_internal_functional_sym_constrain_range_for_size<S: Into<Scalar>>(
size: S,
min: impl Into<Option<i64>>,
max: impl Into<Option<i64>>,
dep_token: &Tensor,
) -> Result<Tensor, TchError> {
let min = min.into();
let max = max.into();
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__functional_sym_constrain_range_for_size(
c_tensors.as_mut_ptr(),
size.into().c_scalar,
min.unwrap_or(0i64),
min.is_none() as i8,
max.unwrap_or(0i64),
max.is_none() as i8,
dep_token.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
pub fn f_internal_fused_dropout(&self, p: f64) -> Result<(Tensor, Tensor), TchError> {
let mut c_tensors = [std::ptr::null_mut(); 2];
unsafe_torch_err!(atg__fused_dropout(c_tensors.as_mut_ptr(), self.c_tensor, p));
Ok((Tensor { c_tensor: c_tensors[0] }, Tensor { c_tensor: c_tensors[1] }))
}
/// Out-variant of [`Self::f_internal_fused_dropout`]: writes into `out0`/`out1`.
pub fn f_internal_fused_dropout_out(
&self,
out0: &Tensor,
out1: &Tensor,
p: f64,
) -> Result<(Tensor, Tensor), TchError> {
let mut c_tensors = [std::ptr::null_mut(); 2];
unsafe_torch_err!(atg__fused_dropout_out(
c_tensors.as_mut_ptr(),
out0.c_tensor,
out1.c_tensor,
self.c_tensor,
p
));
Ok((Tensor { c_tensor: c_tensors[0] }, Tensor { c_tensor: c_tensors[1] }))
}
/// Fallible binding for the ATen `_fused_moving_avg_obs_fq_helper` op
/// (fake-quantization helper used by quantization-aware training).
pub fn f_internal_fused_moving_avg_obs_fq_helper(
&self,
observer_on: &Tensor,
fake_quant_on: &Tensor,
running_min: &Tensor,
running_max: &Tensor,
scale: &Tensor,
zero_point: &Tensor,
averaging_const: f64,
quant_min: i64,
quant_max: i64,
ch_axis: i64,
per_row_fake_quant: bool,
symmetric_quant: bool,
) -> Result<(Tensor, Tensor), TchError> {
let mut c_tensors = [std::ptr::null_mut(); 2];
unsafe_torch_err!(atg__fused_moving_avg_obs_fq_helper(
c_tensors.as_mut_ptr(),
self.c_tensor,
observer_on.c_tensor,
fake_quant_on.c_tensor,
running_min.c_tensor,
running_max.c_tensor,
scale.c_tensor,
zero_point.c_tensor,
averaging_const,
quant_min,
quant_max,
ch_axis,
if per_row_fake_quant { 1 } else { 0 },
if symmetric_quant { 1 } else { 0 }
));
Ok((Tensor { c_tensor: c_tensors[0] }, Tensor { c_tensor: c_tensors[1] }))
}
/// Functional variant of the helper above; returns six tensors from the C call.
pub fn f_internal_fused_moving_avg_obs_fq_helper_functional(
&self,
observer_on: &Tensor,
fake_quant_on: &Tensor,
running_min: &Tensor,
running_max: &Tensor,
scale: &Tensor,
zero_point: &Tensor,
averaging_const: f64,
quant_min: i64,
quant_max: i64,
ch_axis: i64,
per_row_fake_quant: bool,
symmetric_quant: bool,
) -> Result<(Tensor, Tensor, Tensor, Tensor, Tensor, Tensor), TchError> {
let mut c_tensors = [std::ptr::null_mut(); 6];
unsafe_torch_err!(atg__fused_moving_avg_obs_fq_helper_functional(
c_tensors.as_mut_ptr(),
self.c_tensor,
observer_on.c_tensor,
fake_quant_on.c_tensor,
running_min.c_tensor,
running_max.c_tensor,
scale.c_tensor,
zero_point.c_tensor,
averaging_const,
quant_min,
quant_max,
ch_axis,
if per_row_fake_quant { 1 } else { 0 },
if symmetric_quant { 1 } else { 0 }
));
Ok((
Tensor { c_tensor: c_tensors[0] },
Tensor { c_tensor: c_tensors[1] },
Tensor { c_tensor: c_tensors[2] },
Tensor { c_tensor: c_tensors[3] },
Tensor { c_tensor: c_tensors[4] },
Tensor { c_tensor: c_tensors[5] },
))
}
/// Out-variant of [`Self::f_internal_fused_moving_avg_obs_fq_helper`].
pub fn f_internal_fused_moving_avg_obs_fq_helper_out(
&self,
out0: &Tensor,
out1: &Tensor,
observer_on: &Tensor,
fake_quant_on: &Tensor,
running_min: &Tensor,
running_max: &Tensor,
scale: &Tensor,
zero_point: &Tensor,
averaging_const: f64,
quant_min: i64,
quant_max: i64,
ch_axis: i64,
per_row_fake_quant: bool,
symmetric_quant: bool,
) -> Result<(Tensor, Tensor), TchError> {
let mut c_tensors = [std::ptr::null_mut(); 2];
unsafe_torch_err!(atg__fused_moving_avg_obs_fq_helper_out(
c_tensors.as_mut_ptr(),
out0.c_tensor,
out1.c_tensor,
self.c_tensor,
observer_on.c_tensor,
fake_quant_on.c_tensor,
running_min.c_tensor,
running_max.c_tensor,
scale.c_tensor,
zero_point.c_tensor,
averaging_const,
quant_min,
quant_max,
ch_axis,
if per_row_fake_quant { 1 } else { 0 },
if symmetric_quant { 1 } else { 0 }
));
Ok((Tensor { c_tensor: c_tensors[0] }, Tensor { c_tensor: c_tensors[1] }))
}
/// Fallible binding for `_fused_sdp_choice`; returns the raw `i64` the C op
/// yields (an SDP backend selector — TODO confirm exact enum meaning upstream).
/// Note this is an associated function: it takes `query` rather than `&self`.
pub fn f_internal_fused_sdp_choice<T: Borrow<Tensor>>(
query: &Tensor,
key: &Tensor,
value: &Tensor,
attn_mask: Option<T>,
dropout_p: f64,
is_causal: bool,
scale: impl Into<Option<f64>>,
) -> Result<i64, TchError> {
let scale = scale.into();
let return_;
unsafe_torch_err!(
return_ = atg__fused_sdp_choice(
query.c_tensor,
key.c_tensor,
value.c_tensor,
// Optional tensor is passed as a possibly-null raw pointer.
attn_mask.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
dropout_p,
if is_causal { 1 } else { 0 },
scale.unwrap_or(std::f64::NAN),
scale.is_none() as i8
)
);
Ok(return_)
}
/// Fallible binding for the ATen `_fw_primal` op (forward-mode AD primal view).
pub fn f_internal_fw_primal(&self, level: i64) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__fw_primal(c_tensors.as_mut_ptr(), self.c_tensor, level));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Copy variant of [`Self::f_internal_fw_primal`].
pub fn f_internal_fw_primal_copy(&self, level: i64) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__fw_primal_copy(c_tensors.as_mut_ptr(), self.c_tensor, level));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Out-variant of [`Self::f_internal_fw_primal_copy`]: writes into `out`.
pub fn f_internal_fw_primal_copy_out(
&self,
out: &Tensor,
level: i64,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__fw_primal_copy_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
level
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for the ATen `_gather_sparse_backward` op.
pub fn f_internal_gather_sparse_backward(
&self,
dim: i64,
index: &Tensor,
grad: &Tensor,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__gather_sparse_backward(
c_tensors.as_mut_ptr(),
self.c_tensor,
dim,
index.c_tensor,
grad.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for the ATen `_grid_sampler_2d_cpu_fallback` op.
pub fn f_internal_grid_sampler_2d_cpu_fallback(
&self,
grid: &Tensor,
interpolation_mode: i64,
padding_mode: i64,
align_corners: bool,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__grid_sampler_2d_cpu_fallback(
c_tensors.as_mut_ptr(),
self.c_tensor,
grid.c_tensor,
interpolation_mode,
padding_mode,
if align_corners { 1 } else { 0 }
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Backward of the CPU grid-sampler fallback; returns (grad_input, grad_grid).
pub fn f_internal_grid_sampler_2d_cpu_fallback_backward(
&self,
grad_output: &Tensor,
grid: &Tensor,
interpolation_mode: i64,
padding_mode: i64,
align_corners: bool,
) -> Result<(Tensor, Tensor), TchError> {
let mut c_tensors = [std::ptr::null_mut(); 2];
unsafe_torch_err!(atg__grid_sampler_2d_cpu_fallback_backward(
c_tensors.as_mut_ptr(),
grad_output.c_tensor,
self.c_tensor,
grid.c_tensor,
interpolation_mode,
padding_mode,
if align_corners { 1 } else { 0 }
));
Ok((Tensor { c_tensor: c_tensors[0] }, Tensor { c_tensor: c_tensors[1] }))
}
/// Out-variant of [`Self::f_internal_grid_sampler_2d_cpu_fallback`].
pub fn f_internal_grid_sampler_2d_cpu_fallback_out(
&self,
out: &Tensor,
grid: &Tensor,
interpolation_mode: i64,
padding_mode: i64,
align_corners: bool,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__grid_sampler_2d_cpu_fallback_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
grid.c_tensor,
interpolation_mode,
padding_mode,
if align_corners { 1 } else { 0 }
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Predicate binding `_has_compatible_shallow_copy_type`; the C op returns a
/// C int which is mapped to `bool` via `!= 0`.
pub fn f_internal_has_compatible_shallow_copy_type(
&self,
from: &Tensor,
) -> Result<bool, TchError> {
let return_;
unsafe_torch_err!(
return_ = atg__has_compatible_shallow_copy_type(self.c_tensor, from.c_tensor)
);
Ok(return_ != 0)
}
/// Predicate binding for the ATen `_has_same_storage_numel` op.
pub fn f_internal_has_same_storage_numel(&self, other: &Tensor) -> Result<bool, TchError> {
let return_;
unsafe_torch_err!(return_ = atg__has_same_storage_numel(self.c_tensor, other.c_tensor));
Ok(return_ != 0)
}
/// Fallible binding for `_histogramdd_bin_edges`, which returns a variable
/// number of tensors: the C side hands back a heap-allocated, null-terminated
/// pointer array that this wrapper drains and then releases with `libc::free`.
pub fn f_internal_histogramdd_bin_edges<T: Borrow<Tensor>>(
&self,
bins: impl IntList,
range: impl DoubleList,
weight: Option<T>,
density: bool,
) -> Result<Vec<Tensor>, TchError> {
let c_tensors = unsafe_torch_err!(atg__histogramdd_bin_edges(
self.c_tensor,
bins.as_ptr(),
bins.len_i32(),
range.as_ptr(),
range.len_i32(),
weight.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
if density { 1 } else { 0 }
));
let mut r__ = vec![];
let mut i = 0;
// Walk the C array until the null terminator, wrapping each raw pointer.
loop {
let c__ = unsafe { *c_tensors.add(i) };
if c__.is_null() {
break;
}
r__.push(Tensor { c_tensor: c__ });
i += 1;
}
// Free only the pointer array itself; ownership of the tensors moved to `r__`.
unsafe { libc::free(c_tensors as *mut libc::c_void) }
Ok(r__)
}
/// Out-variant of [`Self::f_internal_histogramdd_bin_edges`]: results are
/// written into the caller-supplied `out` tensors, so nothing is returned.
pub fn f_internal_histogramdd_bin_edges_out<T: Borrow<Tensor>>(
&self,
out: &[T],
bins: impl IntList,
range: impl DoubleList,
weight: Option<T>,
density: bool,
) -> Result<(), TchError> {
unsafe_torch_err!(atg__histogramdd_bin_edges_out(
// The temporary Vec from ptr_list lives until the end of this statement,
// which covers the duration of the C call.
ptr_list(out).as_ptr(),
out.len() as i32,
self.c_tensor,
bins.as_ptr(),
bins.len_i32(),
range.as_ptr(),
range.len_i32(),
weight.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
if density { 1 } else { 0 }
));
Ok(())
}
/// Fallible binding for the ATen `_histogramdd_from_bin_cts` op.
pub fn f_internal_histogramdd_from_bin_cts<T: Borrow<Tensor>>(
&self,
bins: impl IntList,
range: impl DoubleList,
weight: Option<T>,
density: bool,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__histogramdd_from_bin_cts(
c_tensors.as_mut_ptr(),
self.c_tensor,
bins.as_ptr(),
bins.len_i32(),
range.as_ptr(),
range.len_i32(),
weight.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
if density { 1 } else { 0 }
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Out-variant of [`Self::f_internal_histogramdd_from_bin_cts`].
pub fn f_internal_histogramdd_from_bin_cts_out<T: Borrow<Tensor>>(
&self,
out: &Tensor,
bins: impl IntList,
range: impl DoubleList,
weight: Option<T>,
density: bool,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__histogramdd_from_bin_cts_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
bins.as_ptr(),
bins.len_i32(),
range.as_ptr(),
range.len_i32(),
weight.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
if density { 1 } else { 0 }
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for `_histogramdd_from_bin_tensors`; `bins` is passed as a
/// pointer list plus explicit length.
pub fn f_internal_histogramdd_from_bin_tensors<T: Borrow<Tensor>>(
&self,
bins: &[T],
weight: Option<T>,
density: bool,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__histogramdd_from_bin_tensors(
c_tensors.as_mut_ptr(),
self.c_tensor,
ptr_list(bins).as_ptr(),
bins.len() as i32,
weight.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
if density { 1 } else { 0 }
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Out-variant of [`Self::f_internal_histogramdd_from_bin_tensors`].
pub fn f_internal_histogramdd_from_bin_tensors_out<T: Borrow<Tensor>>(
&self,
out: &Tensor,
bins: &[T],
weight: Option<T>,
density: bool,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__histogramdd_from_bin_tensors_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
ptr_list(bins).as_ptr(),
bins.len() as i32,
weight.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
if density { 1 } else { 0 }
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for `_index_put_impl`; optional index tensors are encoded
/// as possibly-null pointers via `ptr_list_opt`.
pub fn f_internal_index_put_impl<T: Borrow<Tensor>>(
&self,
indices: &[Option<T>],
values: &Tensor,
accumulate: bool,
unsafe_: bool,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__index_put_impl(
c_tensors.as_mut_ptr(),
self.c_tensor,
ptr_list_opt(indices).as_ptr(),
indices.len() as i32,
values.c_tensor,
if accumulate { 1 } else { 0 },
if unsafe_ { 1 } else { 0 }
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// In-place variant of [`Self::f_internal_index_put_impl`].
pub fn f_internal_index_put_impl_<T: Borrow<Tensor>>(
&mut self,
indices: &[Option<T>],
values: &Tensor,
accumulate: bool,
unsafe_: bool,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__index_put_impl_(
c_tensors.as_mut_ptr(),
self.c_tensor,
ptr_list_opt(indices).as_ptr(),
indices.len() as i32,
values.c_tensor,
if accumulate { 1 } else { 0 },
if unsafe_ { 1 } else { 0 }
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Out-variant of [`Self::f_internal_index_put_impl`].
pub fn f_internal_index_put_impl_out<T: Borrow<Tensor>>(
&self,
out: &Tensor,
indices: &[Option<T>],
values: &Tensor,
accumulate: bool,
unsafe_: bool,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__index_put_impl_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
ptr_list_opt(indices).as_ptr(),
indices.len() as i32,
values.c_tensor,
if accumulate { 1 } else { 0 },
if unsafe_ { 1 } else { 0 }
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for the ATen `_indices` op (sparse tensor indices).
pub fn f_internal_indices(&self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__indices(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Copy variant of [`Self::f_internal_indices`].
pub fn f_internal_indices_copy(&self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__indices_copy(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Out-variant of [`Self::f_internal_indices_copy`].
pub fn f_internal_indices_copy_out(&self, out: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__indices_copy_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for the ATen `_int_mm` op (integer matrix multiply).
pub fn f_internal_int_mm(&self, mat2: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__int_mm(c_tensors.as_mut_ptr(), self.c_tensor, mat2.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Out-variant of [`Self::f_internal_int_mm`].
pub fn f_internal_int_mm_out(&self, out: &Tensor, mat2: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__int_mm_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
mat2.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for the ATen `_is_all_true` op; result is a tensor, not a bool.
pub fn f_internal_is_all_true(&self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__is_all_true(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for the ATen `_is_any_true` op; result is a tensor, not a bool.
pub fn f_internal_is_any_true(&self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__is_any_true(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Predicate binding for the ATen `_is_zerotensor` op.
pub fn f_internal_is_zerotensor(&self) -> Result<bool, TchError> {
let return_;
unsafe_torch_err!(return_ = atg__is_zerotensor(self.c_tensor));
Ok(return_ != 0)
}
/// Fallible binding for `_linalg_check_errors`; purely a side-effecting check,
/// so success yields `()` and a libtorch error surfaces as `Err`.
pub fn f_internal_linalg_check_errors(
info: &Tensor,
api_name: &str,
is_matrix: bool,
) -> Result<(), TchError> {
unsafe_torch_err!(atg__linalg_check_errors(
info.c_tensor,
api_name.as_ptr(),
api_name.len() as i32,
if is_matrix { 1 } else { 0 }
));
Ok(())
}
/// Fallible binding for the ATen `_linalg_det` op; returns three tensors
/// (per upstream docs: result, LU, pivots — TODO confirm against native_functions.yaml).
pub fn f_internal_linalg_det(a: &Tensor) -> Result<(Tensor, Tensor, Tensor), TchError> {
let mut c_tensors = [std::ptr::null_mut(); 3];
unsafe_torch_err!(atg__linalg_det(c_tensors.as_mut_ptr(), a.c_tensor));
Ok((
Tensor { c_tensor: c_tensors[0] },
Tensor { c_tensor: c_tensors[1] },
Tensor { c_tensor: c_tensors[2] },
))
}
/// Out-variant of [`Self::f_internal_linalg_det`] writing into the three
/// caller-supplied tensors.
pub fn f_internal_linalg_det_result(
result: &Tensor,
lu: &Tensor,
pivots: &Tensor,
a: &Tensor,
) -> Result<(Tensor, Tensor, Tensor), TchError> {
let mut c_tensors = [std::ptr::null_mut(); 3];
unsafe_torch_err!(atg__linalg_det_result(
c_tensors.as_mut_ptr(),
result.c_tensor,
lu.c_tensor,
pivots.c_tensor,
a.c_tensor
));
Ok((
Tensor { c_tensor: c_tensors[0] },
Tensor { c_tensor: c_tensors[1] },
Tensor { c_tensor: c_tensors[2] },
))
}
/// Fallible binding for `_linalg_eigh`; `uplo` is passed as UTF-8 (ptr, len).
pub fn f_internal_linalg_eigh(
a: &Tensor,
uplo: &str,
compute_v: bool,
) -> Result<(Tensor, Tensor), TchError> {
let mut c_tensors = [std::ptr::null_mut(); 2];
unsafe_torch_err!(atg__linalg_eigh(
c_tensors.as_mut_ptr(),
a.c_tensor,
uplo.as_ptr(),
uplo.len() as i32,
if compute_v { 1 } else { 0 }
));
Ok((Tensor { c_tensor: c_tensors[0] }, Tensor { c_tensor: c_tensors[1] }))
}
/// Out-variant of [`Self::f_internal_linalg_eigh`].
pub fn f_internal_linalg_eigh_eigenvalues(
eigenvalues: &Tensor,
eigenvectors: &Tensor,
a: &Tensor,
uplo: &str,
compute_v: bool,
) -> Result<(Tensor, Tensor), TchError> {
let mut c_tensors = [std::ptr::null_mut(); 2];
unsafe_torch_err!(atg__linalg_eigh_eigenvalues(
c_tensors.as_mut_ptr(),
eigenvalues.c_tensor,
eigenvectors.c_tensor,
a.c_tensor,
uplo.as_ptr(),
uplo.len() as i32,
if compute_v { 1 } else { 0 }
));
Ok((Tensor { c_tensor: c_tensors[0] }, Tensor { c_tensor: c_tensors[1] }))
}
/// Fallible binding for the ATen `_linalg_slogdet` op; returns four tensors.
pub fn f_internal_linalg_slogdet(
a: &Tensor,
) -> Result<(Tensor, Tensor, Tensor, Tensor), TchError> {
let mut c_tensors = [std::ptr::null_mut(); 4];
unsafe_torch_err!(atg__linalg_slogdet(c_tensors.as_mut_ptr(), a.c_tensor));
Ok((
Tensor { c_tensor: c_tensors[0] },
Tensor { c_tensor: c_tensors[1] },
Tensor { c_tensor: c_tensors[2] },
Tensor { c_tensor: c_tensors[3] },
))
}
/// Out-variant of [`Self::f_internal_linalg_slogdet`] writing into the four
/// caller-supplied tensors.
pub fn f_internal_linalg_slogdet_sign(
sign: &Tensor,
logabsdet: &Tensor,
lu: &Tensor,
pivots: &Tensor,
a: &Tensor,
) -> Result<(Tensor, Tensor, Tensor, Tensor), TchError> {
let mut c_tensors = [std::ptr::null_mut(); 4];
unsafe_torch_err!(atg__linalg_slogdet_sign(
c_tensors.as_mut_ptr(),
sign.c_tensor,
logabsdet.c_tensor,
lu.c_tensor,
pivots.c_tensor,
a.c_tensor
));
Ok((
Tensor { c_tensor: c_tensors[0] },
Tensor { c_tensor: c_tensors[1] },
Tensor { c_tensor: c_tensors[2] },
Tensor { c_tensor: c_tensors[3] },
))
}
/// Fallible binding for the ATen `_linalg_solve_ex` op.
pub fn f_internal_linalg_solve_ex(
a: &Tensor,
b: &Tensor,
left: bool,
check_errors: bool,
) -> Result<(Tensor, Tensor, Tensor, Tensor), TchError> {
let mut c_tensors = [std::ptr::null_mut(); 4];
unsafe_torch_err!(atg__linalg_solve_ex(
c_tensors.as_mut_ptr(),
a.c_tensor,
b.c_tensor,
if left { 1 } else { 0 },
if check_errors { 1 } else { 0 }
));
Ok((
Tensor { c_tensor: c_tensors[0] },
Tensor { c_tensor: c_tensors[1] },
Tensor { c_tensor: c_tensors[2] },
Tensor { c_tensor: c_tensors[3] },
))
}
/// Out-variant of [`Self::f_internal_linalg_solve_ex`].
pub fn f_internal_linalg_solve_ex_result(
result: &Tensor,
lu: &Tensor,
pivots: &Tensor,
info: &Tensor,
a: &Tensor,
b: &Tensor,
left: bool,
check_errors: bool,
) -> Result<(Tensor, Tensor, Tensor, Tensor), TchError> {
let mut c_tensors = [std::ptr::null_mut(); 4];
unsafe_torch_err!(atg__linalg_solve_ex_result(
c_tensors.as_mut_ptr(),
result.c_tensor,
lu.c_tensor,
pivots.c_tensor,
info.c_tensor,
a.c_tensor,
b.c_tensor,
if left { 1 } else { 0 },
if check_errors { 1 } else { 0 }
));
Ok((
Tensor { c_tensor: c_tensors[0] },
Tensor { c_tensor: c_tensors[1] },
Tensor { c_tensor: c_tensors[2] },
Tensor { c_tensor: c_tensors[3] },
))
}
/// Fallible binding for `_linalg_svd`; the LAPACK `driver` name is passed as
/// UTF-8 (ptr, len). Returns (U, S, Vh).
pub fn f_internal_linalg_svd(
a: &Tensor,
full_matrices: bool,
compute_uv: bool,
driver: &str,
) -> Result<(Tensor, Tensor, Tensor), TchError> {
let mut c_tensors = [std::ptr::null_mut(); 3];
unsafe_torch_err!(atg__linalg_svd(
c_tensors.as_mut_ptr(),
a.c_tensor,
if full_matrices { 1 } else { 0 },
if compute_uv { 1 } else { 0 },
driver.as_ptr(),
driver.len() as i32
));
Ok((
Tensor { c_tensor: c_tensors[0] },
Tensor { c_tensor: c_tensors[1] },
Tensor { c_tensor: c_tensors[2] },
))
}
/// Out-variant of [`Self::f_internal_linalg_svd`] writing into `u`/`s`/`vh`.
pub fn f_internal_linalg_svd_u(
u: &Tensor,
s: &Tensor,
vh: &Tensor,
a: &Tensor,
full_matrices: bool,
compute_uv: bool,
driver: &str,
) -> Result<(Tensor, Tensor, Tensor), TchError> {
let mut c_tensors = [std::ptr::null_mut(); 3];
unsafe_torch_err!(atg__linalg_svd_u(
c_tensors.as_mut_ptr(),
u.c_tensor,
s.c_tensor,
vh.c_tensor,
a.c_tensor,
if full_matrices { 1 } else { 0 },
if compute_uv { 1 } else { 0 },
driver.as_ptr(),
driver.len() as i32
));
Ok((
Tensor { c_tensor: c_tensors[0] },
Tensor { c_tensor: c_tensors[1] },
Tensor { c_tensor: c_tensors[2] },
))
}
/// Fallible binding for the ATen `_log_softmax` op.
pub fn f_internal_log_softmax(
&self,
dim: i64,
half_to_float: bool,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__log_softmax(
c_tensors.as_mut_ptr(),
self.c_tensor,
dim,
if half_to_float { 1 } else { 0 }
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for `_log_softmax_backward_data`; the original input's
/// dtype is passed as its raw C enum value via `Kind::c_int`.
pub fn f_internal_log_softmax_backward_data(
grad_output: &Tensor,
output: &Tensor,
dim: i64,
input_dtype: Kind,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__log_softmax_backward_data(
c_tensors.as_mut_ptr(),
grad_output.c_tensor,
output.c_tensor,
dim,
input_dtype.c_int()
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Out-variant of [`Self::f_internal_log_softmax_backward_data`].
pub fn f_internal_log_softmax_backward_data_out(
out: &Tensor,
grad_output: &Tensor,
output: &Tensor,
dim: i64,
input_dtype: Kind,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__log_softmax_backward_data_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
grad_output.c_tensor,
output.c_tensor,
dim,
input_dtype.c_int()
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Out-variant of [`Self::f_internal_log_softmax`].
pub fn f_internal_log_softmax_out(
&self,
out: &Tensor,
dim: i64,
half_to_float: bool,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__log_softmax_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
dim,
if half_to_float { 1 } else { 0 }
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for the ATen `_logcumsumexp` op.
pub fn f_internal_logcumsumexp(&self, dim: i64) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__logcumsumexp(c_tensors.as_mut_ptr(), self.c_tensor, dim));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Out-variant of [`Self::f_internal_logcumsumexp`].
pub fn f_internal_logcumsumexp_out(&self, out: &Tensor, dim: i64) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__logcumsumexp_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
dim
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for the `_lstm_mps` op; `hx` and `params` tensor slices
/// are marshalled as (pointer array, length) pairs. Returns six tensors.
pub fn f_internal_lstm_mps<T: Borrow<Tensor>>(
&self,
hx: &[T],
params: &[T],
has_biases: bool,
num_layers: i64,
dropout: f64,
train: bool,
bidirectional: bool,
batch_first: bool,
) -> Result<(Tensor, Tensor, Tensor, Tensor, Tensor, Tensor), TchError> {
let mut c_tensors = [std::ptr::null_mut(); 6];
unsafe_torch_err!(atg__lstm_mps(
c_tensors.as_mut_ptr(),
self.c_tensor,
ptr_list(hx).as_ptr(),
hx.len() as i32,
ptr_list(params).as_ptr(),
params.len() as i32,
if has_biases { 1 } else { 0 },
num_layers,
dropout,
if train { 1 } else { 0 },
if bidirectional { 1 } else { 0 },
if batch_first { 1 } else { 0 }
));
Ok((
Tensor { c_tensor: c_tensors[0] },
Tensor { c_tensor: c_tensors[1] },
Tensor { c_tensor: c_tensors[2] },
Tensor { c_tensor: c_tensors[3] },
Tensor { c_tensor: c_tensors[4] },
Tensor { c_tensor: c_tensors[5] },
))
}
/// Out-variant of [`Self::f_internal_lstm_mps`] writing into `out0`..`out5`.
pub fn f_internal_lstm_mps_out<T: Borrow<Tensor>>(
&self,
out0: &Tensor,
out1: &Tensor,
out2: &Tensor,
out3: &Tensor,
out4: &Tensor,
out5: &Tensor,
hx: &[T],
params: &[T],
has_biases: bool,
num_layers: i64,
dropout: f64,
train: bool,
bidirectional: bool,
batch_first: bool,
) -> Result<(Tensor, Tensor, Tensor, Tensor, Tensor, Tensor), TchError> {
let mut c_tensors = [std::ptr::null_mut(); 6];
unsafe_torch_err!(atg__lstm_mps_out(
c_tensors.as_mut_ptr(),
out0.c_tensor,
out1.c_tensor,
out2.c_tensor,
out3.c_tensor,
out4.c_tensor,
out5.c_tensor,
self.c_tensor,
ptr_list(hx).as_ptr(),
hx.len() as i32,
ptr_list(params).as_ptr(),
params.len() as i32,
if has_biases { 1 } else { 0 },
num_layers,
dropout,
if train { 1 } else { 0 },
if bidirectional { 1 } else { 0 },
if batch_first { 1 } else { 0 }
));
Ok((
Tensor { c_tensor: c_tensors[0] },
Tensor { c_tensor: c_tensors[1] },
Tensor { c_tensor: c_tensors[2] },
Tensor { c_tensor: c_tensors[3] },
Tensor { c_tensor: c_tensors[4] },
Tensor { c_tensor: c_tensors[5] },
))
}
/// Fallible binding for the ATen `_lu_with_info` op; returns three tensors.
pub fn f_internal_lu_with_info(
&self,
pivot: bool,
check_errors: bool,
) -> Result<(Tensor, Tensor, Tensor), TchError> {
let mut c_tensors = [std::ptr::null_mut(); 3];
unsafe_torch_err!(atg__lu_with_info(
c_tensors.as_mut_ptr(),
self.c_tensor,
if pivot { 1 } else { 0 },
if check_errors { 1 } else { 0 }
));
Ok((
Tensor { c_tensor: c_tensors[0] },
Tensor { c_tensor: c_tensors[1] },
Tensor { c_tensor: c_tensors[2] },
))
}
/// Fallible binding for `_make_dep_token`; the `(Kind, Device)` pair is passed
/// as the raw C enum values for dtype and device.
pub fn f_internal_make_dep_token(options: (Kind, Device)) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__make_dep_token(
c_tensors.as_mut_ptr(),
options.0.c_int(),
options.1.c_int()
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for the ATen `_make_dual` op (forward-mode AD dual tensor).
pub fn f_internal_make_dual(
primal: &Tensor,
tangent: &Tensor,
level: i64,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__make_dual(
c_tensors.as_mut_ptr(),
primal.c_tensor,
tangent.c_tensor,
level
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Copy variant of [`Self::f_internal_make_dual`].
pub fn f_internal_make_dual_copy(
primal: &Tensor,
tangent: &Tensor,
level: i64,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__make_dual_copy(
c_tensors.as_mut_ptr(),
primal.c_tensor,
tangent.c_tensor,
level
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Out-variant of [`Self::f_internal_make_dual_copy`].
pub fn f_internal_make_dual_copy_out(
out: &Tensor,
primal: &Tensor,
tangent: &Tensor,
level: i64,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__make_dual_copy_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
primal.c_tensor,
tangent.c_tensor,
level
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for the ATen `_make_per_channel_quantized_tensor` op.
pub fn f_internal_make_per_channel_quantized_tensor(
&self,
scale: &Tensor,
zero_point: &Tensor,
axis: i64,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__make_per_channel_quantized_tensor(
c_tensors.as_mut_ptr(),
self.c_tensor,
scale.c_tensor,
zero_point.c_tensor,
axis
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Out-variant of [`Self::f_internal_make_per_channel_quantized_tensor`].
pub fn f_internal_make_per_channel_quantized_tensor_out(
&self,
out: &Tensor,
scale: &Tensor,
zero_point: &Tensor,
axis: i64,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__make_per_channel_quantized_tensor_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
scale.c_tensor,
zero_point.c_tensor,
axis
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for the ATen `_make_per_tensor_quantized_tensor` op.
pub fn f_internal_make_per_tensor_quantized_tensor(
&self,
scale: f64,
zero_point: i64,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__make_per_tensor_quantized_tensor(
c_tensors.as_mut_ptr(),
self.c_tensor,
scale,
zero_point
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Out-variant of [`Self::f_internal_make_per_tensor_quantized_tensor`].
pub fn f_internal_make_per_tensor_quantized_tensor_out(
&self,
out: &Tensor,
scale: f64,
zero_point: i64,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__make_per_tensor_quantized_tensor_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
scale,
zero_point
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for the ATen `_masked_scale` op.
pub fn f_internal_masked_scale(&self, mask: &Tensor, scale: f64) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__masked_scale(
c_tensors.as_mut_ptr(),
self.c_tensor,
mask.c_tensor,
scale
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Out-variant of [`Self::f_internal_masked_scale`].
pub fn f_internal_masked_scale_out(
&self,
out: &Tensor,
mask: &Tensor,
scale: f64,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__masked_scale_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
mask.c_tensor,
scale
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for `_masked_softmax`; optional `dim`/`mask_type` are
/// encoded as (value, is_none) pairs — 0 is a placeholder when absent.
pub fn f_internal_masked_softmax(
&self,
mask: &Tensor,
dim: impl Into<Option<i64>>,
mask_type: impl Into<Option<i64>>,
) -> Result<Tensor, TchError> {
let dim = dim.into();
let mask_type = mask_type.into();
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__masked_softmax(
c_tensors.as_mut_ptr(),
self.c_tensor,
mask.c_tensor,
dim.unwrap_or(0i64),
dim.is_none() as i8,
mask_type.unwrap_or(0i64),
mask_type.is_none() as i8
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for the ATen `_masked_softmax_backward` op.
pub fn f_internal_masked_softmax_backward(
grad_output: &Tensor,
output: &Tensor,
mask: &Tensor,
dim: impl Into<Option<i64>>,
) -> Result<Tensor, TchError> {
let dim = dim.into();
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__masked_softmax_backward(
c_tensors.as_mut_ptr(),
grad_output.c_tensor,
output.c_tensor,
mask.c_tensor,
dim.unwrap_or(0i64),
dim.is_none() as i8
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Out-variant of [`Self::f_internal_masked_softmax_backward`].
pub fn f_internal_masked_softmax_backward_out(
out: &Tensor,
grad_output: &Tensor,
output: &Tensor,
mask: &Tensor,
dim: impl Into<Option<i64>>,
) -> Result<Tensor, TchError> {
let dim = dim.into();
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__masked_softmax_backward_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
grad_output.c_tensor,
output.c_tensor,
mask.c_tensor,
dim.unwrap_or(0i64),
dim.is_none() as i8
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Out-variant of [`Self::f_internal_masked_softmax`].
pub fn f_internal_masked_softmax_out(
&self,
out: &Tensor,
mask: &Tensor,
dim: impl Into<Option<i64>>,
mask_type: impl Into<Option<i64>>,
) -> Result<Tensor, TchError> {
let dim = dim.into();
let mask_type = mask_type.into();
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__masked_softmax_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
mask.c_tensor,
dim.unwrap_or(0i64),
dim.is_none() as i8,
mask_type.unwrap_or(0i64),
mask_type.is_none() as i8
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for `_mkldnn_reshape`; `shape` is passed as (ptr, len).
pub fn f_internal_mkldnn_reshape(&self, shape: impl IntList) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__mkldnn_reshape(
c_tensors.as_mut_ptr(),
self.c_tensor,
shape.as_ptr(),
shape.len_i32()
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Out-variant of [`Self::f_internal_mkldnn_reshape`].
pub fn f_internal_mkldnn_reshape_out(
&self,
out: &Tensor,
shape: impl IntList,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__mkldnn_reshape_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
shape.as_ptr(),
shape.len_i32()
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for the ATen `_mkldnn_transpose` op.
pub fn f_internal_mkldnn_transpose(&self, dim0: i64, dim1: i64) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__mkldnn_transpose(c_tensors.as_mut_ptr(), self.c_tensor, dim0, dim1));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// In-place variant of [`Self::f_internal_mkldnn_transpose`].
pub fn f_internal_mkldnn_transpose_(
&mut self,
dim0: i64,
dim1: i64,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__mkldnn_transpose_(
c_tensors.as_mut_ptr(),
self.c_tensor,
dim0,
dim1
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Out-variant of [`Self::f_internal_mkldnn_transpose`].
pub fn f_internal_mkldnn_transpose_out(
&self,
out: &Tensor,
dim0: i64,
dim1: i64,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__mkldnn_transpose_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
dim0,
dim1
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
pub fn f_internal_mps_convolution<T: Borrow<Tensor>>(
&self,
weight: &Tensor,
bias: Option<T>,
padding: impl IntList,
stride: impl IntList,
dilation: impl IntList,
groups: i64,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__mps_convolution(
c_tensors.as_mut_ptr(),
self.c_tensor,
weight.c_tensor,
bias.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
padding.as_ptr(),
padding.len_i32(),
stride.as_ptr(),
stride.len_i32(),
dilation.as_ptr(),
dilation.len_i32(),
groups
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for the C function `atg__mps_convolution_out`; the
/// result is written into `out`. An absent `bias` is passed as a null
/// tensor pointer.
pub fn f_internal_mps_convolution_out<T: Borrow<Tensor>>(
&self,
out: &Tensor,
weight: &Tensor,
bias: Option<T>,
padding: impl IntList,
stride: impl IntList,
dilation: impl IntList,
groups: i64,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__mps_convolution_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
weight.c_tensor,
bias.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
padding.as_ptr(),
padding.len_i32(),
stride.as_ptr(),
stride.len_i32(),
dilation.as_ptr(),
dilation.len_i32(),
groups
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for the C function `atg__mps_convolution_transpose`.
pub fn f_internal_mps_convolution_transpose(
&self,
weight: &Tensor,
padding: impl IntList,
output_padding: impl IntList,
stride: impl IntList,
dilation: impl IntList,
groups: i64,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__mps_convolution_transpose(
c_tensors.as_mut_ptr(),
self.c_tensor,
weight.c_tensor,
padding.as_ptr(),
padding.len_i32(),
output_padding.as_ptr(),
output_padding.len_i32(),
stride.as_ptr(),
stride.len_i32(),
dilation.as_ptr(),
dilation.len_i32(),
groups
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for the C function `atg__mps_convolution_transpose_out`;
/// the result is written into `out` and a handle to it is returned.
pub fn f_internal_mps_convolution_transpose_out(
&self,
out: &Tensor,
weight: &Tensor,
padding: impl IntList,
output_padding: impl IntList,
stride: impl IntList,
dilation: impl IntList,
groups: i64,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__mps_convolution_transpose_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
weight.c_tensor,
padding.as_ptr(),
padding.len_i32(),
output_padding.as_ptr(),
output_padding.len_i32(),
stride.as_ptr(),
stride.len_i32(),
dilation.as_ptr(),
dilation.len_i32(),
groups
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for the C function `atg__native_batch_norm_legit`,
/// which fills three output tensor handles. Absent `weight`/`bias` are
/// passed as null tensor pointers; `training` is converted to a C int.
pub fn f_internal_native_batch_norm_legit<T: Borrow<Tensor>>(
&self,
weight: Option<T>,
bias: Option<T>,
running_mean: &Tensor,
running_var: &Tensor,
training: bool,
momentum: f64,
eps: f64,
) -> Result<(Tensor, Tensor, Tensor), TchError> {
let mut c_tensors = [std::ptr::null_mut(); 3];
unsafe_torch_err!(atg__native_batch_norm_legit(
c_tensors.as_mut_ptr(),
self.c_tensor,
weight.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
bias.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
running_mean.c_tensor,
running_var.c_tensor,
if training { 1 } else { 0 },
momentum,
eps
));
Ok((
Tensor { c_tensor: c_tensors[0] },
Tensor { c_tensor: c_tensors[1] },
Tensor { c_tensor: c_tensors[2] },
))
}
/// Fallible binding for the C function
/// `atg__native_batch_norm_legit_functional`, which fills five output
/// tensor handles. Absent `weight`/`bias` are passed as null pointers.
pub fn f_internal_native_batch_norm_legit_functional<T: Borrow<Tensor>>(
&self,
weight: Option<T>,
bias: Option<T>,
running_mean: &Tensor,
running_var: &Tensor,
training: bool,
momentum: f64,
eps: f64,
) -> Result<(Tensor, Tensor, Tensor, Tensor, Tensor), TchError> {
let mut c_tensors = [std::ptr::null_mut(); 5];
unsafe_torch_err!(atg__native_batch_norm_legit_functional(
c_tensors.as_mut_ptr(),
self.c_tensor,
weight.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
bias.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
running_mean.c_tensor,
running_var.c_tensor,
if training { 1 } else { 0 },
momentum,
eps
));
Ok((
Tensor { c_tensor: c_tensors[0] },
Tensor { c_tensor: c_tensors[1] },
Tensor { c_tensor: c_tensors[2] },
Tensor { c_tensor: c_tensors[3] },
Tensor { c_tensor: c_tensors[4] },
))
}
/// Fallible binding for the C function
/// `atg__native_batch_norm_legit_no_stats` (no running-stats arguments),
/// returning three tensors.
pub fn f_internal_native_batch_norm_legit_no_stats<T: Borrow<Tensor>>(
&self,
weight: Option<T>,
bias: Option<T>,
training: bool,
momentum: f64,
eps: f64,
) -> Result<(Tensor, Tensor, Tensor), TchError> {
let mut c_tensors = [std::ptr::null_mut(); 3];
unsafe_torch_err!(atg__native_batch_norm_legit_no_stats(
c_tensors.as_mut_ptr(),
self.c_tensor,
weight.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
bias.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
if training { 1 } else { 0 },
momentum,
eps
));
Ok((
Tensor { c_tensor: c_tensors[0] },
Tensor { c_tensor: c_tensors[1] },
Tensor { c_tensor: c_tensors[2] },
))
}
/// Fallible binding for the C function
/// `atg__native_batch_norm_legit_no_stats_out`; results are written into
/// `out`, `save_mean` and `save_invstd`, whose handles are returned.
pub fn f_internal_native_batch_norm_legit_no_stats_out<T: Borrow<Tensor>>(
&self,
out: &Tensor,
save_mean: &Tensor,
save_invstd: &Tensor,
weight: Option<T>,
bias: Option<T>,
training: bool,
momentum: f64,
eps: f64,
) -> Result<(Tensor, Tensor, Tensor), TchError> {
let mut c_tensors = [std::ptr::null_mut(); 3];
unsafe_torch_err!(atg__native_batch_norm_legit_no_stats_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
save_mean.c_tensor,
save_invstd.c_tensor,
self.c_tensor,
weight.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
bias.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
if training { 1 } else { 0 },
momentum,
eps
));
Ok((
Tensor { c_tensor: c_tensors[0] },
Tensor { c_tensor: c_tensors[1] },
Tensor { c_tensor: c_tensors[2] },
))
}
/// Fallible binding for the C function
/// `atg__native_batch_norm_legit_no_training` (no `training` flag in this
/// variant), returning three tensors.
pub fn f_internal_native_batch_norm_legit_no_training<T: Borrow<Tensor>>(
&self,
weight: Option<T>,
bias: Option<T>,
running_mean: &Tensor,
running_var: &Tensor,
momentum: f64,
eps: f64,
) -> Result<(Tensor, Tensor, Tensor), TchError> {
let mut c_tensors = [std::ptr::null_mut(); 3];
unsafe_torch_err!(atg__native_batch_norm_legit_no_training(
c_tensors.as_mut_ptr(),
self.c_tensor,
weight.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
bias.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
running_mean.c_tensor,
running_var.c_tensor,
momentum,
eps
));
Ok((
Tensor { c_tensor: c_tensors[0] },
Tensor { c_tensor: c_tensors[1] },
Tensor { c_tensor: c_tensors[2] },
))
}
/// Fallible binding for the C function
/// `atg__native_batch_norm_legit_no_training_out`; results are written
/// into `out0`/`out1`/`out2`, whose handles are returned.
pub fn f_internal_native_batch_norm_legit_no_training_out<T: Borrow<Tensor>>(
&self,
out0: &Tensor,
out1: &Tensor,
out2: &Tensor,
weight: Option<T>,
bias: Option<T>,
running_mean: &Tensor,
running_var: &Tensor,
momentum: f64,
eps: f64,
) -> Result<(Tensor, Tensor, Tensor), TchError> {
let mut c_tensors = [std::ptr::null_mut(); 3];
unsafe_torch_err!(atg__native_batch_norm_legit_no_training_out(
c_tensors.as_mut_ptr(),
out0.c_tensor,
out1.c_tensor,
out2.c_tensor,
self.c_tensor,
weight.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
bias.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
running_mean.c_tensor,
running_var.c_tensor,
momentum,
eps
));
Ok((
Tensor { c_tensor: c_tensors[0] },
Tensor { c_tensor: c_tensors[1] },
Tensor { c_tensor: c_tensors[2] },
))
}
/// Fallible binding for the C function `atg__native_batch_norm_legit_out`;
/// results are written into `out`, `save_mean` and `save_invstd`, whose
/// handles are returned.
pub fn f_internal_native_batch_norm_legit_out<T: Borrow<Tensor>>(
&self,
out: &Tensor,
save_mean: &Tensor,
save_invstd: &Tensor,
weight: Option<T>,
bias: Option<T>,
running_mean: &Tensor,
running_var: &Tensor,
training: bool,
momentum: f64,
eps: f64,
) -> Result<(Tensor, Tensor, Tensor), TchError> {
let mut c_tensors = [std::ptr::null_mut(); 3];
unsafe_torch_err!(atg__native_batch_norm_legit_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
save_mean.c_tensor,
save_invstd.c_tensor,
self.c_tensor,
weight.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
bias.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
running_mean.c_tensor,
running_var.c_tensor,
if training { 1 } else { 0 },
momentum,
eps
));
Ok((
Tensor { c_tensor: c_tensors[0] },
Tensor { c_tensor: c_tensors[1] },
Tensor { c_tensor: c_tensors[2] },
))
}
/// Fallible binding for the C function `atg__native_multi_head_attention`
/// (associated function: no `self` receiver), returning two tensors.
/// Optional `mask` is passed as a null pointer when absent; optional
/// `mask_type` is encoded as a (value, is-none flag) pair for the C API.
pub fn f_internal_native_multi_head_attention<T: Borrow<Tensor>>(
query: &Tensor,
key: &Tensor,
value: &Tensor,
embed_dim: i64,
num_head: i64,
qkv_weight: &Tensor,
qkv_bias: &Tensor,
proj_weight: &Tensor,
proj_bias: &Tensor,
mask: Option<T>,
need_weights: bool,
average_attn_weights: bool,
mask_type: impl Into<Option<i64>>,
) -> Result<(Tensor, Tensor), TchError> {
let mask_type = mask_type.into();
let mut c_tensors = [std::ptr::null_mut(); 2];
unsafe_torch_err!(atg__native_multi_head_attention(
c_tensors.as_mut_ptr(),
query.c_tensor,
key.c_tensor,
value.c_tensor,
embed_dim,
num_head,
qkv_weight.c_tensor,
qkv_bias.c_tensor,
proj_weight.c_tensor,
proj_bias.c_tensor,
mask.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
if need_weights { 1 } else { 0 },
if average_attn_weights { 1 } else { 0 },
// Optional i64: sentinel value plus explicit is-none flag.
mask_type.unwrap_or(0i64),
mask_type.is_none() as i8
));
Ok((Tensor { c_tensor: c_tensors[0] }, Tensor { c_tensor: c_tensors[1] }))
}
/// Fallible binding for the C function
/// `atg__native_multi_head_attention_out`; results are written into
/// `out0`/`out1`. Optional `mask_type` is encoded as a (value, is-none
/// flag) pair for the C API.
pub fn f_internal_native_multi_head_attention_out<T: Borrow<Tensor>>(
out0: &Tensor,
out1: &Tensor,
query: &Tensor,
key: &Tensor,
value: &Tensor,
embed_dim: i64,
num_head: i64,
qkv_weight: &Tensor,
qkv_bias: &Tensor,
proj_weight: &Tensor,
proj_bias: &Tensor,
mask: Option<T>,
need_weights: bool,
average_attn_weights: bool,
mask_type: impl Into<Option<i64>>,
) -> Result<(Tensor, Tensor), TchError> {
let mask_type = mask_type.into();
let mut c_tensors = [std::ptr::null_mut(); 2];
unsafe_torch_err!(atg__native_multi_head_attention_out(
c_tensors.as_mut_ptr(),
out0.c_tensor,
out1.c_tensor,
query.c_tensor,
key.c_tensor,
value.c_tensor,
embed_dim,
num_head,
qkv_weight.c_tensor,
qkv_bias.c_tensor,
proj_weight.c_tensor,
proj_bias.c_tensor,
mask.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
if need_weights { 1 } else { 0 },
if average_attn_weights { 1 } else { 0 },
mask_type.unwrap_or(0i64),
mask_type.is_none() as i8
));
Ok((Tensor { c_tensor: c_tensors[0] }, Tensor { c_tensor: c_tensors[1] }))
}
/// Fallible binding for the C function `atg__neg_view`.
pub fn f_internal_neg_view(&self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__neg_view(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for the C function `atg__neg_view_copy`.
pub fn f_internal_neg_view_copy(&self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__neg_view_copy(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for the C function `atg__neg_view_copy_out`; the
/// result is written into `out` and a handle to it is returned.
pub fn f_internal_neg_view_copy_out(&self, out: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__neg_view_copy_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for the C function `atg__nested_from_padded`
/// (associated function: no `self` receiver).
pub fn f_internal_nested_from_padded(
padded: &Tensor,
cpu_nested_shape_example: &Tensor,
fuse_transform_0213: bool,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__nested_from_padded(
c_tensors.as_mut_ptr(),
padded.c_tensor,
cpu_nested_shape_example.c_tensor,
if fuse_transform_0213 { 1 } else { 0 }
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for the C function
/// `atg__nested_from_padded_and_nested_example` (associated function).
pub fn f_internal_nested_from_padded_and_nested_example(
padded: &Tensor,
nt_example: &Tensor,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__nested_from_padded_and_nested_example(
c_tensors.as_mut_ptr(),
padded.c_tensor,
nt_example.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for the C function
/// `atg__nested_from_padded_and_nested_example_out`; the result is written
/// into `out` and a handle to it is returned.
pub fn f_internal_nested_from_padded_and_nested_example_out(
out: &Tensor,
padded: &Tensor,
nt_example: &Tensor,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__nested_from_padded_and_nested_example_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
padded.c_tensor,
nt_example.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for the C function `atg__nested_from_padded_out`; the
/// result is written into `out` and a handle to it is returned.
pub fn f_internal_nested_from_padded_out(
out: &Tensor,
padded: &Tensor,
cpu_nested_shape_example: &Tensor,
fuse_transform_0213: bool,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__nested_from_padded_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
padded.c_tensor,
cpu_nested_shape_example.c_tensor,
if fuse_transform_0213 { 1 } else { 0 }
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for the C function `atg__nested_select_backward`.
/// Note: `grad_output` is the first tensor argument to the C call, before
/// `self`.
pub fn f_internal_nested_select_backward(
&self,
grad_output: &Tensor,
dim: i64,
index: i64,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__nested_select_backward(
c_tensors.as_mut_ptr(),
grad_output.c_tensor,
self.c_tensor,
dim,
index
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for the C function `atg__nested_sum_backward`.
/// `dim` is an optional int list (`IntListOption`) passed as a
/// (pointer, length) pair.
pub fn f_internal_nested_sum_backward(
&self,
grad: &Tensor,
dim: impl IntListOption,
keepdim: bool,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__nested_sum_backward(
c_tensors.as_mut_ptr(),
grad.c_tensor,
self.c_tensor,
dim.as_ptr(),
dim.len_i32(),
if keepdim { 1 } else { 0 }
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for the C function `atg__nested_view_from_buffer`.
pub fn f_internal_nested_view_from_buffer(
&self,
nested_size: &Tensor,
nested_strides: &Tensor,
offsets: &Tensor,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__nested_view_from_buffer(
c_tensors.as_mut_ptr(),
self.c_tensor,
nested_size.c_tensor,
nested_strides.c_tensor,
offsets.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for the C function `atg__nested_view_from_buffer_copy`.
pub fn f_internal_nested_view_from_buffer_copy(
&self,
nested_size: &Tensor,
nested_strides: &Tensor,
offsets: &Tensor,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__nested_view_from_buffer_copy(
c_tensors.as_mut_ptr(),
self.c_tensor,
nested_size.c_tensor,
nested_strides.c_tensor,
offsets.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for the C function
/// `atg__nested_view_from_buffer_copy_out`; the result is written into
/// `out` and a handle to it is returned.
pub fn f_internal_nested_view_from_buffer_copy_out(
&self,
out: &Tensor,
nested_size: &Tensor,
nested_strides: &Tensor,
offsets: &Tensor,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__nested_view_from_buffer_copy_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
nested_size.c_tensor,
nested_strides.c_tensor,
offsets.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for the C function
/// `atg__new_zeros_with_same_feature_meta`.
pub fn f_internal_new_zeros_with_same_feature_meta(
&self,
other: &Tensor,
self_num_batch_dims: i64,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__new_zeros_with_same_feature_meta(
c_tensors.as_mut_ptr(),
self.c_tensor,
other.c_tensor,
self_num_batch_dims
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for the C function
/// `atg__new_zeros_with_same_feature_meta_out`; the result is written into
/// `out` and a handle to it is returned.
pub fn f_internal_new_zeros_with_same_feature_meta_out(
&self,
out: &Tensor,
other: &Tensor,
self_num_batch_dims: i64,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__new_zeros_with_same_feature_meta_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
other.c_tensor,
self_num_batch_dims
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for the C function `atg__nnpack_available`; the C
/// return value is interpreted as a boolean (non-zero = true).
pub fn f_internal_nnpack_available() -> Result<bool, TchError> {
let return_;
unsafe_torch_err!(return_ = atg__nnpack_available());
Ok(return_ != 0)
}
/// Fallible binding for the C function `atg__nnpack_spatial_convolution`.
/// An absent `bias` is passed as a null tensor pointer.
pub fn f_internal_nnpack_spatial_convolution<T: Borrow<Tensor>>(
&self,
weight: &Tensor,
bias: Option<T>,
padding: impl IntList,
stride: impl IntList,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__nnpack_spatial_convolution(
c_tensors.as_mut_ptr(),
self.c_tensor,
weight.c_tensor,
bias.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
padding.as_ptr(),
padding.len_i32(),
stride.as_ptr(),
stride.len_i32()
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for the C function
/// `atg__nnpack_spatial_convolution_out`; the result is written into `out`
/// and a handle to it is returned.
pub fn f_internal_nnpack_spatial_convolution_out<T: Borrow<Tensor>>(
&self,
out: &Tensor,
weight: &Tensor,
bias: Option<T>,
padding: impl IntList,
stride: impl IntList,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__nnpack_spatial_convolution_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
weight.c_tensor,
bias.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
padding.as_ptr(),
padding.len_i32(),
stride.as_ptr(),
stride.len_i32()
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for the C function `atg__nnz`, returning a plain
/// integer rather than a tensor handle.
pub fn f_internal_nnz(&self) -> Result<i64, TchError> {
let return_;
unsafe_torch_err!(return_ = atg__nnz(self.c_tensor));
Ok(return_)
}
/// Fallible binding for the C function `atg__pack_padded_sequence`,
/// returning two tensors.
pub fn f_internal_pack_padded_sequence(
&self,
lengths: &Tensor,
batch_first: bool,
) -> Result<(Tensor, Tensor), TchError> {
let mut c_tensors = [std::ptr::null_mut(); 2];
unsafe_torch_err!(atg__pack_padded_sequence(
c_tensors.as_mut_ptr(),
self.c_tensor,
lengths.c_tensor,
if batch_first { 1 } else { 0 }
));
Ok((Tensor { c_tensor: c_tensors[0] }, Tensor { c_tensor: c_tensors[1] }))
}
/// Fallible binding for the C function `atg__pack_padded_sequence_backward`
/// (associated function: no `self` receiver).
pub fn f_internal_pack_padded_sequence_backward(
grad: &Tensor,
input_size: impl IntList,
batch_sizes: &Tensor,
batch_first: bool,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__pack_padded_sequence_backward(
c_tensors.as_mut_ptr(),
grad.c_tensor,
input_size.as_ptr(),
input_size.len_i32(),
batch_sizes.c_tensor,
if batch_first { 1 } else { 0 }
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for the C function `atg__pack_padded_sequence_out`;
/// results are written into `out0`/`out1`, whose handles are returned.
pub fn f_internal_pack_padded_sequence_out(
&self,
out0: &Tensor,
out1: &Tensor,
lengths: &Tensor,
batch_first: bool,
) -> Result<(Tensor, Tensor), TchError> {
let mut c_tensors = [std::ptr::null_mut(); 2];
unsafe_torch_err!(atg__pack_padded_sequence_out(
c_tensors.as_mut_ptr(),
out0.c_tensor,
out1.c_tensor,
self.c_tensor,
lengths.c_tensor,
if batch_first { 1 } else { 0 }
));
Ok((Tensor { c_tensor: c_tensors[0] }, Tensor { c_tensor: c_tensors[1] }))
}
/// Fallible binding for the C function `atg__pad_circular`.
pub fn f_internal_pad_circular(&self, pad: impl IntList) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__pad_circular(
c_tensors.as_mut_ptr(),
self.c_tensor,
pad.as_ptr(),
pad.len_i32()
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for the C function `atg__pad_enum`. Optional `value`
/// is encoded as a (sentinel, is-none flag) pair for the C API.
pub fn f_internal_pad_enum(
&self,
pad: impl IntList,
mode: i64,
value: impl Into<Option<f64>>,
) -> Result<Tensor, TchError> {
let value = value.into();
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__pad_enum(
c_tensors.as_mut_ptr(),
self.c_tensor,
pad.as_ptr(),
pad.len_i32(),
mode,
// NaN is only a placeholder; the is-none flag below is authoritative.
value.unwrap_or(std::f64::NAN),
value.is_none() as i8
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for the C function `atg__pad_packed_sequence`
/// (associated function), returning two tensors. `padding_value` is
/// converted into a C scalar handle.
pub fn f_internal_pad_packed_sequence<S: Into<Scalar>>(
data: &Tensor,
batch_sizes: &Tensor,
batch_first: bool,
padding_value: S,
total_length: i64,
) -> Result<(Tensor, Tensor), TchError> {
let mut c_tensors = [std::ptr::null_mut(); 2];
unsafe_torch_err!(atg__pad_packed_sequence(
c_tensors.as_mut_ptr(),
data.c_tensor,
batch_sizes.c_tensor,
if batch_first { 1 } else { 0 },
padding_value.into().c_scalar,
total_length
));
Ok((Tensor { c_tensor: c_tensors[0] }, Tensor { c_tensor: c_tensors[1] }))
}
/// Fallible binding for the C function `atg__pdist_backward`. Note:
/// `grad` precedes `self` in the C argument order.
pub fn f_internal_pdist_backward(
&self,
grad: &Tensor,
p: f64,
pdist: &Tensor,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__pdist_backward(
c_tensors.as_mut_ptr(),
grad.c_tensor,
self.c_tensor,
p,
pdist.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for the C function `atg__pdist_backward_out`; the
/// result is written into `out` and a handle to it is returned.
pub fn f_internal_pdist_backward_out(
&self,
out: &Tensor,
grad: &Tensor,
p: f64,
pdist: &Tensor,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__pdist_backward_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
grad.c_tensor,
self.c_tensor,
p,
pdist.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for the C function `atg__pin_memory`; `device` is
/// passed as its C integer code.
pub fn f_internal_pin_memory(&self, device: Device) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__pin_memory(c_tensors.as_mut_ptr(), self.c_tensor, device.c_int()));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for the C function `atg__pin_memory_out`; the result
/// is written into `out` and a handle to it is returned.
pub fn f_internal_pin_memory_out(
&self,
out: &Tensor,
device: Device,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__pin_memory_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
device.c_int()
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for the C function `atg__prelu_kernel`.
pub fn f_internal_prelu_kernel(&self, weight: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__prelu_kernel(
c_tensors.as_mut_ptr(),
self.c_tensor,
weight.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for the C function `atg__prelu_kernel_backward`,
/// returning two tensors. Note: `grad_output` precedes `self` in the C
/// argument order.
pub fn f_internal_prelu_kernel_backward(
&self,
grad_output: &Tensor,
weight: &Tensor,
) -> Result<(Tensor, Tensor), TchError> {
let mut c_tensors = [std::ptr::null_mut(); 2];
unsafe_torch_err!(atg__prelu_kernel_backward(
c_tensors.as_mut_ptr(),
grad_output.c_tensor,
self.c_tensor,
weight.c_tensor
));
Ok((Tensor { c_tensor: c_tensors[0] }, Tensor { c_tensor: c_tensors[1] }))
}
/// Fallible binding for the C function `atg__propagate_xla_data`; this
/// call produces no output tensor, only a success/error status.
pub fn f_internal_propagate_xla_data(&self, output: &Tensor) -> Result<(), TchError> {
unsafe_torch_err!(atg__propagate_xla_data(self.c_tensor, output.c_tensor));
Ok(())
}
/// Fallible binding for the C function `atg__remove_batch_dim`.
pub fn f_internal_remove_batch_dim(
&self,
level: i64,
batch_size: i64,
out_dim: i64,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__remove_batch_dim(
c_tensors.as_mut_ptr(),
self.c_tensor,
level,
batch_size,
out_dim
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for the C function `atg__reshape_alias`; `size` and
/// `stride` cross the FFI boundary as (pointer, length) pairs.
pub fn f_internal_reshape_alias(
&self,
size: impl IntList,
stride: impl IntList,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__reshape_alias(
c_tensors.as_mut_ptr(),
self.c_tensor,
size.as_ptr(),
size.len_i32(),
stride.as_ptr(),
stride.len_i32()
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for the C function `atg__reshape_alias_copy`.
pub fn f_internal_reshape_alias_copy(
&self,
size: impl IntList,
stride: impl IntList,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__reshape_alias_copy(
c_tensors.as_mut_ptr(),
self.c_tensor,
size.as_ptr(),
size.len_i32(),
stride.as_ptr(),
stride.len_i32()
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for the C function `atg__reshape_alias_copy_out`; the
/// result is written into `out` and a handle to it is returned.
pub fn f_internal_reshape_alias_copy_out(
&self,
out: &Tensor,
size: impl IntList,
stride: impl IntList,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__reshape_alias_copy_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
size.as_ptr(),
size.len_i32(),
stride.as_ptr(),
stride.len_i32()
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for the C function `atg__reshape_copy`.
pub fn f_internal_reshape_copy(&self, size: impl IntList) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__reshape_copy(
c_tensors.as_mut_ptr(),
self.c_tensor,
size.as_ptr(),
size.len_i32()
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for the C function `atg__reshape_from_tensor`, where
/// the target shape is itself supplied as a tensor.
pub fn f_internal_reshape_from_tensor(&self, shape: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__reshape_from_tensor(
c_tensors.as_mut_ptr(),
self.c_tensor,
shape.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for the C function `atg__resize_output`.
pub fn f_internal_resize_output(
&self,
size: impl IntList,
device: Device,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__resize_output(
c_tensors.as_mut_ptr(),
self.c_tensor,
size.as_ptr(),
size.len_i32(),
device.c_int()
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for the C function `atg__resize_output_`. Takes
/// `&mut self` per the trailing-underscore (in-place) naming convention.
pub fn f_internal_resize_output_(
&mut self,
size: impl IntList,
device: Device,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__resize_output_(
c_tensors.as_mut_ptr(),
self.c_tensor,
size.as_ptr(),
size.len_i32(),
device.c_int()
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for the C function `atg__resize_output_out`; the
/// result is written into `out` and a handle to it is returned.
pub fn f_internal_resize_output_out(
&self,
out: &Tensor,
size: impl IntList,
device: Device,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__resize_output_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
size.as_ptr(),
size.len_i32(),
device.c_int()
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for the C function `atg__rowwise_prune` (associated
/// function), returning two tensors; the dtype is passed as its C integer
/// code.
pub fn f_internal_rowwise_prune(
weight: &Tensor,
mask: &Tensor,
compressed_indices_dtype: Kind,
) -> Result<(Tensor, Tensor), TchError> {
let mut c_tensors = [std::ptr::null_mut(); 2];
unsafe_torch_err!(atg__rowwise_prune(
c_tensors.as_mut_ptr(),
weight.c_tensor,
mask.c_tensor,
compressed_indices_dtype.c_int()
));
Ok((Tensor { c_tensor: c_tensors[0] }, Tensor { c_tensor: c_tensors[1] }))
}
/// Fallible binding for the C function `atg__sample_dirichlet`.
pub fn f_internal_sample_dirichlet(&self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__sample_dirichlet(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for the C function `atg__sample_dirichlet_out`; the
/// result is written into `out` and a handle to it is returned.
pub fn f_internal_sample_dirichlet_out(&self, out: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__sample_dirichlet_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for the C function `atg__saturate_weight_to_fp16`
/// (associated function: no `self` receiver).
pub fn f_internal_saturate_weight_to_fp16(weight: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__saturate_weight_to_fp16(c_tensors.as_mut_ptr(), weight.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for the C function
/// `atg__scaled_dot_product_attention_math` (associated function),
/// returning two tensors. Optional tensors are passed as null pointers;
/// the optional `scale` is encoded as a (sentinel, is-none flag) pair.
pub fn f_internal_scaled_dot_product_attention_math<T: Borrow<Tensor>>(
query: &Tensor,
key: &Tensor,
value: &Tensor,
attn_mask: Option<T>,
dropout_p: f64,
is_causal: bool,
dropout_mask: Option<T>,
scale: impl Into<Option<f64>>,
) -> Result<(Tensor, Tensor), TchError> {
let scale = scale.into();
let mut c_tensors = [std::ptr::null_mut(); 2];
unsafe_torch_err!(atg__scaled_dot_product_attention_math(
c_tensors.as_mut_ptr(),
query.c_tensor,
key.c_tensor,
value.c_tensor,
attn_mask.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
dropout_p,
if is_causal { 1 } else { 0 },
dropout_mask.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
scale.unwrap_or(std::f64::NAN),
scale.is_none() as i8
));
Ok((Tensor { c_tensor: c_tensors[0] }, Tensor { c_tensor: c_tensors[1] }))
}
/// Fallible binding for the C function
/// `atg__scaled_dot_product_efficient_attention` (associated function),
/// returning four tensors. The optional `scale` is encoded as a
/// (sentinel, is-none flag) pair for the C API.
pub fn f_internal_scaled_dot_product_efficient_attention<T: Borrow<Tensor>>(
query: &Tensor,
key: &Tensor,
value: &Tensor,
attn_bias: Option<T>,
compute_log_sumexp: bool,
dropout_p: f64,
is_causal: bool,
scale: impl Into<Option<f64>>,
) -> Result<(Tensor, Tensor, Tensor, Tensor), TchError> {
let scale = scale.into();
let mut c_tensors = [std::ptr::null_mut(); 4];
unsafe_torch_err!(atg__scaled_dot_product_efficient_attention(
c_tensors.as_mut_ptr(),
query.c_tensor,
key.c_tensor,
value.c_tensor,
attn_bias.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
if compute_log_sumexp { 1 } else { 0 },
dropout_p,
if is_causal { 1 } else { 0 },
scale.unwrap_or(std::f64::NAN),
scale.is_none() as i8
));
Ok((
Tensor { c_tensor: c_tensors[0] },
Tensor { c_tensor: c_tensors[1] },
Tensor { c_tensor: c_tensors[2] },
Tensor { c_tensor: c_tensors[3] },
))
}
/// Fallible binding for the C function
/// `atg__scaled_dot_product_flash_attention_backward` (associated
/// function), returning three tensors. The optional `scale` is encoded as
/// a (sentinel, is-none flag) pair for the C API.
pub fn f_internal_scaled_dot_product_flash_attention_backward(
grad_out: &Tensor,
query: &Tensor,
key: &Tensor,
value: &Tensor,
out: &Tensor,
logsumexp: &Tensor,
cum_seq_q: &Tensor,
cum_seq_k: &Tensor,
max_q: i64,
max_k: i64,
dropout_p: f64,
is_causal: bool,
philox_seed: &Tensor,
philox_offset: &Tensor,
scale: impl Into<Option<f64>>,
) -> Result<(Tensor, Tensor, Tensor), TchError> {
let scale = scale.into();
let mut c_tensors = [std::ptr::null_mut(); 3];
unsafe_torch_err!(atg__scaled_dot_product_flash_attention_backward(
c_tensors.as_mut_ptr(),
grad_out.c_tensor,
query.c_tensor,
key.c_tensor,
value.c_tensor,
out.c_tensor,
logsumexp.c_tensor,
cum_seq_q.c_tensor,
cum_seq_k.c_tensor,
max_q,
max_k,
dropout_p,
if is_causal { 1 } else { 0 },
philox_seed.c_tensor,
philox_offset.c_tensor,
scale.unwrap_or(std::f64::NAN),
scale.is_none() as i8
));
Ok((
Tensor { c_tensor: c_tensors[0] },
Tensor { c_tensor: c_tensors[1] },
Tensor { c_tensor: c_tensors[2] },
))
}
/// Fallible binding for the C function `atg__scaled_mm`, returning two
/// tensors. Optional tensors are passed as null pointers; an absent
/// `out_dtype` is encoded as -1.
pub fn f_internal_scaled_mm<T: Borrow<Tensor>>(
&self,
mat2: &Tensor,
bias: Option<T>,
out_dtype: impl Into<Option<Kind>>,
scale_a: Option<T>,
scale_b: Option<T>,
scale_result: Option<T>,
) -> Result<(Tensor, Tensor), TchError> {
let mut c_tensors = [std::ptr::null_mut(); 2];
unsafe_torch_err!(atg__scaled_mm(
c_tensors.as_mut_ptr(),
self.c_tensor,
mat2.c_tensor,
bias.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
// -1 is the C-side sentinel for "no dtype specified".
out_dtype.into().map_or(-1, |s| s.c_int()),
scale_a.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
scale_b.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
scale_result.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor)
));
Ok((Tensor { c_tensor: c_tensors[0] }, Tensor { c_tensor: c_tensors[1] }))
}
/// Fallible binding for the C function `atg__scaled_mm_out`; results are
/// written into `out`/`out_amax`, whose handles are returned. An absent
/// `out_dtype` is encoded as -1.
pub fn f_internal_scaled_mm_out<T: Borrow<Tensor>>(
&self,
out: &Tensor,
out_amax: &Tensor,
mat2: &Tensor,
bias: Option<T>,
out_dtype: impl Into<Option<Kind>>,
scale_a: Option<T>,
scale_b: Option<T>,
scale_result: Option<T>,
) -> Result<(Tensor, Tensor), TchError> {
let mut c_tensors = [std::ptr::null_mut(); 2];
unsafe_torch_err!(atg__scaled_mm_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
out_amax.c_tensor,
self.c_tensor,
mat2.c_tensor,
bias.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
out_dtype.into().map_or(-1, |s| s.c_int()),
scale_a.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
scale_b.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
scale_result.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor)
));
Ok((Tensor { c_tensor: c_tensors[0] }, Tensor { c_tensor: c_tensors[1] }))
}
/// Fallible binding for the C function `atg__scatter_reduce`. The
/// `reduce` string crosses the FFI boundary as a (pointer, byte-length)
/// pair.
pub fn f_internal_scatter_reduce(
&self,
dim: i64,
index: &Tensor,
src: &Tensor,
reduce: &str,
include_self: bool,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__scatter_reduce(
c_tensors.as_mut_ptr(),
self.c_tensor,
dim,
index.c_tensor,
src.c_tensor,
reduce.as_ptr(),
reduce.len() as i32,
if include_self { 1 } else { 0 }
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for the C function `atg__scatter_reduce_`. Takes
/// `&mut self` per the trailing-underscore (in-place) naming convention.
pub fn f_internal_scatter_reduce_(
&mut self,
dim: i64,
index: &Tensor,
src: &Tensor,
reduce: &str,
include_self: bool,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__scatter_reduce_(
c_tensors.as_mut_ptr(),
self.c_tensor,
dim,
index.c_tensor,
src.c_tensor,
reduce.as_ptr(),
reduce.len() as i32,
if include_self { 1 } else { 0 }
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for the C function `atg__scatter_reduce_two_out`; the
/// result is written into `out` and a handle to it is returned.
pub fn f_internal_scatter_reduce_two_out(
&self,
out: &Tensor,
dim: i64,
index: &Tensor,
src: &Tensor,
reduce: &str,
include_self: bool,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__scatter_reduce_two_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
dim,
index.c_tensor,
src.c_tensor,
reduce.as_ptr(),
reduce.len() as i32,
if include_self { 1 } else { 0 }
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for the C function `atg__segment_reduce_backward`
/// (associated function). Optional tensors are passed as null pointers;
/// `initial` is converted into a C scalar handle.
pub fn f_internal_segment_reduce_backward<T: Borrow<Tensor>, S: Into<Scalar>>(
grad: &Tensor,
output: &Tensor,
data: &Tensor,
reduce: &str,
lengths: Option<T>,
offsets: Option<T>,
axis: i64,
initial: S,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__segment_reduce_backward(
c_tensors.as_mut_ptr(),
grad.c_tensor,
output.c_tensor,
data.c_tensor,
reduce.as_ptr(),
reduce.len() as i32,
lengths.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
offsets.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
axis,
initial.into().c_scalar
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for the C function `atg__segment_reduce_backward_out`
/// (associated function); the result is written into `out` and a handle to
/// it is returned.
pub fn f_internal_segment_reduce_backward_out<T: Borrow<Tensor>, S: Into<Scalar>>(
out: &Tensor,
grad: &Tensor,
output: &Tensor,
data: &Tensor,
reduce: &str,
lengths: Option<T>,
offsets: Option<T>,
axis: i64,
initial: S,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__segment_reduce_backward_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
grad.c_tensor,
output.c_tensor,
data.c_tensor,
reduce.as_ptr(),
reduce.len() as i32,
lengths.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
offsets.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
axis,
initial.into().c_scalar
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for the C function `atg__shape_as_tensor`.
pub fn f_internal_shape_as_tensor(&self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__shape_as_tensor(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for the C function `atg__slow_conv2d_backward`;
/// results are written into `grad_input`/`grad_weight`/`grad_bias`, whose
/// handles are returned.
pub fn f_internal_slow_conv2d_backward(
&self,
grad_input: &Tensor,
grad_weight: &Tensor,
grad_bias: &Tensor,
grad_output: &Tensor,
weight: &Tensor,
kernel_size: impl IntList,
stride: impl IntList,
padding: impl IntList,
) -> Result<(Tensor, Tensor, Tensor), TchError> {
let mut c_tensors = [std::ptr::null_mut(); 3];
unsafe_torch_err!(atg__slow_conv2d_backward(
c_tensors.as_mut_ptr(),
grad_input.c_tensor,
grad_weight.c_tensor,
grad_bias.c_tensor,
grad_output.c_tensor,
self.c_tensor,
weight.c_tensor,
kernel_size.as_ptr(),
kernel_size.len_i32(),
stride.as_ptr(),
stride.len_i32(),
padding.as_ptr(),
padding.len_i32()
));
Ok((
Tensor { c_tensor: c_tensors[0] },
Tensor { c_tensor: c_tensors[1] },
Tensor { c_tensor: c_tensors[2] },
))
}
/// Fallible binding for the C function `atg__sobol_engine_draw`
/// (associated function), returning two tensors. An absent `dtype` is
/// encoded as -1.
pub fn f_internal_sobol_engine_draw(
quasi: &Tensor,
n: i64,
sobolstate: &Tensor,
dimension: i64,
num_generated: i64,
dtype: impl Into<Option<Kind>>,
) -> Result<(Tensor, Tensor), TchError> {
let mut c_tensors = [std::ptr::null_mut(); 2];
unsafe_torch_err!(atg__sobol_engine_draw(
c_tensors.as_mut_ptr(),
quasi.c_tensor,
n,
sobolstate.c_tensor,
dimension,
num_generated,
dtype.into().map_or(-1, |s| s.c_int())
));
Ok((Tensor { c_tensor: c_tensors[0] }, Tensor { c_tensor: c_tensors[1] }))
}
/// Fallible binding for the in-place C op `atg__sobol_engine_ff_` on `self`.
pub fn f_internal_sobol_engine_ff_(
&mut self,
n: i64,
sobolstate: &Tensor,
dimension: i64,
num_generated: i64,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__sobol_engine_ff_(
c_tensors.as_mut_ptr(),
self.c_tensor,
n,
sobolstate.c_tensor,
dimension,
num_generated
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for the in-place C op `atg__sobol_engine_initialize_state_` on `self`.
pub fn f_internal_sobol_engine_initialize_state_(
&mut self,
dimension: i64,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__sobol_engine_initialize_state_(
c_tensors.as_mut_ptr(),
self.c_tensor,
dimension
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for the in-place C op `atg__sobol_engine_scramble_` on `self`.
pub fn f_internal_sobol_engine_scramble_(
&mut self,
ltm: &Tensor,
dimension: i64,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__sobol_engine_scramble_(
c_tensors.as_mut_ptr(),
self.c_tensor,
ltm.c_tensor,
dimension
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for `atg__softmax`; booleans are lowered to 0/1 for the C ABI.
pub fn f_internal_softmax(&self, dim: i64, half_to_float: bool) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__softmax(
c_tensors.as_mut_ptr(),
self.c_tensor,
dim,
if half_to_float { 1 } else { 0 }
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for `atg__softmax_backward_data`.
pub fn f_internal_softmax_backward_data(
grad_output: &Tensor,
output: &Tensor,
dim: i64,
input_dtype: Kind,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__softmax_backward_data(
c_tensors.as_mut_ptr(),
grad_output.c_tensor,
output.c_tensor,
dim,
input_dtype.c_int()
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Out-variant fallible binding for `atg__softmax_backward_data_out`, writing into `grad_input`.
pub fn f_internal_softmax_backward_data_out(
grad_input: &Tensor,
grad_output: &Tensor,
output: &Tensor,
dim: i64,
input_dtype: Kind,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__softmax_backward_data_out(
c_tensors.as_mut_ptr(),
grad_input.c_tensor,
grad_output.c_tensor,
output.c_tensor,
dim,
input_dtype.c_int()
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Out-variant fallible binding for `atg__softmax_out`, writing into `out`.
pub fn f_internal_softmax_out(
&self,
out: &Tensor,
dim: i64,
half_to_float: bool,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__softmax_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
dim,
if half_to_float { 1 } else { 0 }
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for `atg__sparse_addmm` with `self` as the first tensor argument.
pub fn f_internal_sparse_addmm(
&self,
mat1: &Tensor,
mat2: &Tensor,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__sparse_addmm(
c_tensors.as_mut_ptr(),
self.c_tensor,
mat1.c_tensor,
mat2.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Out-variant fallible binding for `atg__sparse_addmm_out`, writing into `out`.
pub fn f_internal_sparse_addmm_out(
&self,
out: &Tensor,
mat1: &Tensor,
mat2: &Tensor,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__sparse_addmm_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
mat1.c_tensor,
mat2.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for `atg__sparse_broadcast_to`; `size` crosses as a (ptr, len) pair.
pub fn f_internal_sparse_broadcast_to(&self, size: impl IntList) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__sparse_broadcast_to(
c_tensors.as_mut_ptr(),
self.c_tensor,
size.as_ptr(),
size.len_i32()
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for `atg__sparse_broadcast_to_copy`.
pub fn f_internal_sparse_broadcast_to_copy(
&self,
size: impl IntList,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__sparse_broadcast_to_copy(
c_tensors.as_mut_ptr(),
self.c_tensor,
size.as_ptr(),
size.len_i32()
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Out-variant fallible binding for `atg__sparse_broadcast_to_copy_out`, writing into `out`.
pub fn f_internal_sparse_broadcast_to_copy_out(
&self,
out: &Tensor,
size: impl IntList,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__sparse_broadcast_to_copy_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
size.as_ptr(),
size.len_i32()
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for `atg__sparse_bsc_tensor_unsafe`; `options` is the usual
/// (Kind, Device) pair lowered to two C ints.
pub fn f_internal_sparse_bsc_tensor_unsafe(
ccol_indices: &Tensor,
row_indices: &Tensor,
values: &Tensor,
size: impl IntList,
options: (Kind, Device),
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__sparse_bsc_tensor_unsafe(
c_tensors.as_mut_ptr(),
ccol_indices.c_tensor,
row_indices.c_tensor,
values.c_tensor,
size.as_ptr(),
size.len_i32(),
options.0.c_int(),
options.1.c_int()
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for `atg__sparse_bsr_tensor_unsafe`.
pub fn f_internal_sparse_bsr_tensor_unsafe(
crow_indices: &Tensor,
col_indices: &Tensor,
values: &Tensor,
size: impl IntList,
options: (Kind, Device),
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__sparse_bsr_tensor_unsafe(
c_tensors.as_mut_ptr(),
crow_indices.c_tensor,
col_indices.c_tensor,
values.c_tensor,
size.as_ptr(),
size.len_i32(),
options.0.c_int(),
options.1.c_int()
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for `atg__sparse_compressed_tensor_unsafe`.
pub fn f_internal_sparse_compressed_tensor_unsafe(
compressed_indices: &Tensor,
plain_indices: &Tensor,
values: &Tensor,
size: impl IntList,
options: (Kind, Device),
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__sparse_compressed_tensor_unsafe(
c_tensors.as_mut_ptr(),
compressed_indices.c_tensor,
plain_indices.c_tensor,
values.c_tensor,
size.as_ptr(),
size.len_i32(),
options.0.c_int(),
options.1.c_int()
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for `atg__sparse_coo_tensor_unsafe`.
pub fn f_internal_sparse_coo_tensor_unsafe(
indices: &Tensor,
values: &Tensor,
size: impl IntList,
options: (Kind, Device),
is_coalesced: bool,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__sparse_coo_tensor_unsafe(
c_tensors.as_mut_ptr(),
indices.c_tensor,
values.c_tensor,
size.as_ptr(),
size.len_i32(),
options.0.c_int(),
options.1.c_int(),
if is_coalesced { 1 } else { 0 }
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for `atg__sparse_coo_tensor_with_dims`.
pub fn f_internal_sparse_coo_tensor_with_dims(
sparse_dim: i64,
dense_dim: i64,
size: impl IntList,
options: (Kind, Device),
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__sparse_coo_tensor_with_dims(
c_tensors.as_mut_ptr(),
sparse_dim,
dense_dim,
size.as_ptr(),
size.len_i32(),
options.0.c_int(),
options.1.c_int()
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for `atg__sparse_coo_tensor_with_dims_and_tensors`.
pub fn f_internal_sparse_coo_tensor_with_dims_and_tensors(
sparse_dim: i64,
dense_dim: i64,
size: impl IntList,
indices: &Tensor,
values: &Tensor,
options: (Kind, Device),
is_coalesced: bool,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__sparse_coo_tensor_with_dims_and_tensors(
c_tensors.as_mut_ptr(),
sparse_dim,
dense_dim,
size.as_ptr(),
size.len_i32(),
indices.c_tensor,
values.c_tensor,
options.0.c_int(),
options.1.c_int(),
if is_coalesced { 1 } else { 0 }
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Out-variant fallible binding for `atg__sparse_coo_tensor_with_dims_and_tensors_out`,
/// writing into `out`.
pub fn f_internal_sparse_coo_tensor_with_dims_and_tensors_out(
out: &Tensor,
sparse_dim: i64,
dense_dim: i64,
size: impl IntList,
indices: &Tensor,
values: &Tensor,
is_coalesced: bool,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__sparse_coo_tensor_with_dims_and_tensors_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
sparse_dim,
dense_dim,
size.as_ptr(),
size.len_i32(),
indices.c_tensor,
values.c_tensor,
if is_coalesced { 1 } else { 0 }
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Out-variant fallible binding for `atg__sparse_coo_tensor_with_dims_out`, writing into `out`.
pub fn f_internal_sparse_coo_tensor_with_dims_out(
out: &Tensor,
sparse_dim: i64,
dense_dim: i64,
size: impl IntList,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__sparse_coo_tensor_with_dims_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
sparse_dim,
dense_dim,
size.as_ptr(),
size.len_i32()
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for `atg__sparse_csc_tensor_unsafe`.
pub fn f_internal_sparse_csc_tensor_unsafe(
ccol_indices: &Tensor,
row_indices: &Tensor,
values: &Tensor,
size: impl IntList,
options: (Kind, Device),
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__sparse_csc_tensor_unsafe(
c_tensors.as_mut_ptr(),
ccol_indices.c_tensor,
row_indices.c_tensor,
values.c_tensor,
size.as_ptr(),
size.len_i32(),
options.0.c_int(),
options.1.c_int()
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for `atg__sparse_csr_prod`; a missing `dtype` is encoded as -1.
pub fn f_internal_sparse_csr_prod(
&self,
dim: impl IntList,
keepdim: bool,
dtype: impl Into<Option<Kind>>,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__sparse_csr_prod(
c_tensors.as_mut_ptr(),
self.c_tensor,
dim.as_ptr(),
dim.len_i32(),
if keepdim { 1 } else { 0 },
dtype.into().map_or(-1, |s| s.c_int())
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Out-variant fallible binding for `atg__sparse_csr_prod_dim_dtype_out`, writing into `out`.
pub fn f_internal_sparse_csr_prod_dim_dtype_out(
&self,
out: &Tensor,
dim: impl IntList,
keepdim: bool,
dtype: impl Into<Option<Kind>>,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__sparse_csr_prod_dim_dtype_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
dim.as_ptr(),
dim.len_i32(),
if keepdim { 1 } else { 0 },
dtype.into().map_or(-1, |s| s.c_int())
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for `atg__sparse_csr_sum`; a missing `dtype` is encoded as -1.
pub fn f_internal_sparse_csr_sum(
&self,
dim: impl IntList,
keepdim: bool,
dtype: impl Into<Option<Kind>>,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__sparse_csr_sum(
c_tensors.as_mut_ptr(),
self.c_tensor,
dim.as_ptr(),
dim.len_i32(),
if keepdim { 1 } else { 0 },
dtype.into().map_or(-1, |s| s.c_int())
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Out-variant fallible binding for `atg__sparse_csr_sum_dim_dtype_out`, writing into `out`.
pub fn f_internal_sparse_csr_sum_dim_dtype_out(
&self,
out: &Tensor,
dim: impl IntList,
keepdim: bool,
dtype: impl Into<Option<Kind>>,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__sparse_csr_sum_dim_dtype_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
dim.as_ptr(),
dim.len_i32(),
if keepdim { 1 } else { 0 },
dtype.into().map_or(-1, |s| s.c_int())
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for `atg__sparse_csr_tensor_unsafe`.
pub fn f_internal_sparse_csr_tensor_unsafe(
crow_indices: &Tensor,
col_indices: &Tensor,
values: &Tensor,
size: impl IntList,
options: (Kind, Device),
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__sparse_csr_tensor_unsafe(
c_tensors.as_mut_ptr(),
crow_indices.c_tensor,
col_indices.c_tensor,
values.c_tensor,
size.as_ptr(),
size.len_i32(),
options.0.c_int(),
options.1.c_int()
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for `atg__sparse_log_softmax`.
pub fn f_internal_sparse_log_softmax(
&self,
dim: i64,
half_to_float: bool,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__sparse_log_softmax(
c_tensors.as_mut_ptr(),
self.c_tensor,
dim,
if half_to_float { 1 } else { 0 }
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for `atg__sparse_log_softmax_backward_data`; note `self` is
/// passed as the trailing argument of the C call.
pub fn f_internal_sparse_log_softmax_backward_data(
&self,
grad_output: &Tensor,
output: &Tensor,
dim: i64,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__sparse_log_softmax_backward_data(
c_tensors.as_mut_ptr(),
grad_output.c_tensor,
output.c_tensor,
dim,
self.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Out-variant fallible binding for `atg__sparse_log_softmax_backward_data_out`,
/// writing into `out`; `self` is the trailing C argument.
pub fn f_internal_sparse_log_softmax_backward_data_out(
&self,
out: &Tensor,
grad_output: &Tensor,
output: &Tensor,
dim: i64,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__sparse_log_softmax_backward_data_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
grad_output.c_tensor,
output.c_tensor,
dim,
self.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for `atg__sparse_log_softmax_int`; a missing `dtype` is encoded as -1.
pub fn f_internal_sparse_log_softmax_int(
&self,
dim: i64,
dtype: impl Into<Option<Kind>>,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__sparse_log_softmax_int(
c_tensors.as_mut_ptr(),
self.c_tensor,
dim,
dtype.into().map_or(-1, |s| s.c_int())
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Out-variant fallible binding for `atg__sparse_log_softmax_out`, writing into `out`.
pub fn f_internal_sparse_log_softmax_out(
&self,
out: &Tensor,
dim: i64,
half_to_float: bool,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__sparse_log_softmax_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
dim,
if half_to_float { 1 } else { 0 }
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for `atg__sparse_mask_projection`.
pub fn f_internal_sparse_mask_projection(
&self,
mask: &Tensor,
accumulate_matches: bool,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__sparse_mask_projection(
c_tensors.as_mut_ptr(),
self.c_tensor,
mask.c_tensor,
if accumulate_matches { 1 } else { 0 }
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Out-variant fallible binding for `atg__sparse_mask_projection_out`, writing into `out`.
pub fn f_internal_sparse_mask_projection_out(
&self,
out: &Tensor,
mask: &Tensor,
accumulate_matches: bool,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__sparse_mask_projection_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
mask.c_tensor,
if accumulate_matches { 1 } else { 0 }
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for `atg__sparse_mm` on a (sparse, dense) tensor pair.
pub fn f_internal_sparse_mm(sparse: &Tensor, dense: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__sparse_mm(c_tensors.as_mut_ptr(), sparse.c_tensor, dense.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for `atg__sparse_mm_reduce`; `reduce` crosses as (ptr, byte-length).
pub fn f_internal_sparse_mm_reduce(
sparse: &Tensor,
dense: &Tensor,
reduce: &str,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__sparse_mm_reduce(
c_tensors.as_mut_ptr(),
sparse.c_tensor,
dense.c_tensor,
reduce.as_ptr(),
reduce.len() as i32
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for `atg__sparse_mm_reduce_impl`; returns the two result tensors.
pub fn f_internal_sparse_mm_reduce_impl(
&self,
other: &Tensor,
reduce: &str,
) -> Result<(Tensor, Tensor), TchError> {
let mut c_tensors = [std::ptr::null_mut(); 2];
unsafe_torch_err!(atg__sparse_mm_reduce_impl(
c_tensors.as_mut_ptr(),
self.c_tensor,
other.c_tensor,
reduce.as_ptr(),
reduce.len() as i32
));
Ok((Tensor { c_tensor: c_tensors[0] }, Tensor { c_tensor: c_tensors[1] }))
}
/// Fallible binding for `atg__sparse_semi_structured_linear`; a `None` bias becomes a
/// null tensor pointer and `activation` crosses as (ptr, byte-length).
pub fn f_internal_sparse_semi_structured_linear<T: Borrow<Tensor>>(
&self,
weight: &Tensor,
meta: &Tensor,
bias: Option<T>,
activation: &str,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__sparse_semi_structured_linear(
c_tensors.as_mut_ptr(),
self.c_tensor,
weight.c_tensor,
meta.c_tensor,
bias.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
activation.as_ptr(),
activation.len() as i32
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for `atg__sparse_softmax`.
pub fn f_internal_sparse_softmax(
&self,
dim: i64,
half_to_float: bool,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__sparse_softmax(
c_tensors.as_mut_ptr(),
self.c_tensor,
dim,
if half_to_float { 1 } else { 0 }
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for `atg__sparse_softmax_backward_data`; `self` is the trailing
/// argument of the C call.
pub fn f_internal_sparse_softmax_backward_data(
&self,
grad_output: &Tensor,
output: &Tensor,
dim: i64,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__sparse_softmax_backward_data(
c_tensors.as_mut_ptr(),
grad_output.c_tensor,
output.c_tensor,
dim,
self.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Out-variant fallible binding for `atg__sparse_softmax_backward_data_out`, writing
/// into `out`; `self` is the trailing C argument.
pub fn f_internal_sparse_softmax_backward_data_out(
&self,
out: &Tensor,
grad_output: &Tensor,
output: &Tensor,
dim: i64,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__sparse_softmax_backward_data_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
grad_output.c_tensor,
output.c_tensor,
dim,
self.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for `atg__sparse_softmax_int`; a missing `dtype` is encoded as -1.
pub fn f_internal_sparse_softmax_int(
&self,
dim: i64,
dtype: impl Into<Option<Kind>>,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__sparse_softmax_int(
c_tensors.as_mut_ptr(),
self.c_tensor,
dim,
dtype.into().map_or(-1, |s| s.c_int())
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Out-variant fallible binding for `atg__sparse_softmax_out`, writing into `out`.
pub fn f_internal_sparse_softmax_out(
&self,
out: &Tensor,
dim: i64,
half_to_float: bool,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__sparse_softmax_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
dim,
if half_to_float { 1 } else { 0 }
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for `atg__sparse_sparse_matmul`.
pub fn f_internal_sparse_sparse_matmul(&self, other: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__sparse_sparse_matmul(
c_tensors.as_mut_ptr(),
self.c_tensor,
other.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Out-variant fallible binding for `atg__sparse_sparse_matmul_out`, writing into `out`.
pub fn f_internal_sparse_sparse_matmul_out(
&self,
out: &Tensor,
other: &Tensor,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__sparse_sparse_matmul_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
other.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for `atg__sparse_sum` applied to `self`.
pub fn f_internal_sparse_sum(&self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__sparse_sum(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for `atg__sparse_sum_backward`; `grad` precedes `self` in the C call.
pub fn f_internal_sparse_sum_backward(
&self,
grad: &Tensor,
dim: impl IntList,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__sparse_sum_backward(
c_tensors.as_mut_ptr(),
grad.c_tensor,
self.c_tensor,
dim.as_ptr(),
dim.len_i32()
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Out-variant fallible binding for `atg__sparse_sum_backward_out`, writing into `out`.
pub fn f_internal_sparse_sum_backward_out(
&self,
out: &Tensor,
grad: &Tensor,
dim: impl IntList,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__sparse_sum_backward_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
grad.c_tensor,
self.c_tensor,
dim.as_ptr(),
dim.len_i32()
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for `atg__sparse_sum_dim`.
pub fn f_internal_sparse_sum_dim(&self, dim: impl IntList) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__sparse_sum_dim(
c_tensors.as_mut_ptr(),
self.c_tensor,
dim.as_ptr(),
dim.len_i32()
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for `atg__sparse_sum_dim_dtype` with an explicit (non-optional) dtype.
pub fn f_internal_sparse_sum_dim_dtype(
&self,
dim: impl IntList,
dtype: Kind,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__sparse_sum_dim_dtype(
c_tensors.as_mut_ptr(),
self.c_tensor,
dim.as_ptr(),
dim.len_i32(),
dtype.c_int()
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Out-variant fallible binding for `atg__sparse_sum_dim_out`, writing into `out`.
pub fn f_internal_sparse_sum_dim_out(
&self,
out: &Tensor,
dim: impl IntList,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__sparse_sum_dim_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
dim.as_ptr(),
dim.len_i32()
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for `atg__sparse_sum_dtype`.
pub fn f_internal_sparse_sum_dtype(&self, dtype: Kind) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__sparse_sum_dtype(
c_tensors.as_mut_ptr(),
self.c_tensor,
dtype.c_int()
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for `atg__spdiags`; a missing `layout` is encoded as -1.
pub fn f_internal_spdiags(
diagonals: &Tensor,
offsets: &Tensor,
shape: impl IntList,
layout: Option<Layout>,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__spdiags(
c_tensors.as_mut_ptr(),
diagonals.c_tensor,
offsets.c_tensor,
shape.as_ptr(),
shape.len_i32(),
layout.map_or(-1, |s| s.to_i8())
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Out-variant fallible binding for `atg__spdiags_out`, writing into `out`;
/// a missing `layout` is encoded as -1.
pub fn f_internal_spdiags_out(
out: &Tensor,
diagonals: &Tensor,
offsets: &Tensor,
shape: impl IntList,
layout: Option<Layout>,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__spdiags_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
diagonals.c_tensor,
offsets.c_tensor,
shape.as_ptr(),
shape.len_i32(),
layout.map_or(-1, |s| s.to_i8())
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for `atg__stack`; the tensor slice is lowered to a temporary
/// array of raw pointers (the `ptr_list` Vec outlives the C call within this statement).
pub fn f_internal_stack<T: Borrow<Tensor>>(
tensors: &[T],
dim: i64,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__stack(
c_tensors.as_mut_ptr(),
ptr_list(tensors).as_ptr(),
tensors.len() as i32,
dim
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Out-variant fallible binding for `atg__stack_out`, writing into `out`.
pub fn f_internal_stack_out<T: Borrow<Tensor>>(
out: &Tensor,
tensors: &[T],
dim: i64,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__stack_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
ptr_list(tensors).as_ptr(),
tensors.len() as i32,
dim
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for `atg__standard_gamma` applied to `self`.
pub fn f_internal_standard_gamma(&self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__standard_gamma(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for `atg__standard_gamma_grad`.
pub fn f_internal_standard_gamma_grad(&self, output: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__standard_gamma_grad(
c_tensors.as_mut_ptr(),
self.c_tensor,
output.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Out-variant fallible binding for `atg__standard_gamma_grad_out`, writing into `out`.
pub fn f_internal_standard_gamma_grad_out(
&self,
out: &Tensor,
output: &Tensor,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__standard_gamma_grad_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
output.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Out-variant fallible binding for `atg__standard_gamma_out`, writing into `out`.
pub fn f_internal_standard_gamma_out(&self, out: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__standard_gamma_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for the libtorch test op `atg__test_ambiguous_defaults`.
pub fn f_internal_test_ambiguous_defaults(
dummy: &Tensor,
a: i64,
b: i64,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__test_ambiguous_defaults(
c_tensors.as_mut_ptr(),
dummy.c_tensor,
a,
b
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for the libtorch test op `atg__test_ambiguous_defaults_b`;
/// `b` crosses as a (ptr, byte-length) pair.
pub fn f_internal_test_ambiguous_defaults_b(
dummy: &Tensor,
a: i64,
b: &str,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__test_ambiguous_defaults_b(
c_tensors.as_mut_ptr(),
dummy.c_tensor,
a,
b.as_ptr(),
b.len() as i32
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for the libtorch test op `atg__test_autograd_multiple_dispatch`.
pub fn f_internal_test_autograd_multiple_dispatch(&self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__test_autograd_multiple_dispatch(
c_tensors.as_mut_ptr(),
self.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Out-variant fallible binding for `atg__test_autograd_multiple_dispatch_fullcoverage_out`.
pub fn f_internal_test_autograd_multiple_dispatch_fullcoverage_out(
&self,
out: &Tensor,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__test_autograd_multiple_dispatch_fullcoverage_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for the libtorch test op `atg__test_autograd_multiple_dispatch_ntonly`.
pub fn f_internal_test_autograd_multiple_dispatch_ntonly(
&self,
b: bool,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__test_autograd_multiple_dispatch_ntonly(
c_tensors.as_mut_ptr(),
self.c_tensor,
if b { 1 } else { 0 }
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for the libtorch test op `atg__test_autograd_multiple_dispatch_view`.
pub fn f_internal_test_autograd_multiple_dispatch_view(&self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__test_autograd_multiple_dispatch_view(
c_tensors.as_mut_ptr(),
self.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for the libtorch test op `atg__test_autograd_multiple_dispatch_view_copy`.
pub fn f_internal_test_autograd_multiple_dispatch_view_copy(&self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__test_autograd_multiple_dispatch_view_copy(
c_tensors.as_mut_ptr(),
self.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Out-variant fallible binding for `atg__test_autograd_multiple_dispatch_view_copy_out`.
pub fn f_internal_test_autograd_multiple_dispatch_view_copy_out(
&self,
out: &Tensor,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__test_autograd_multiple_dispatch_view_copy_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for the libtorch test op `atg__test_check_tensor`.
pub fn f_internal_test_check_tensor(&self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__test_check_tensor(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for the libtorch test op `atg__test_functorch_fallback`.
pub fn f_internal_test_functorch_fallback(&self, other: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__test_functorch_fallback(
c_tensors.as_mut_ptr(),
self.c_tensor,
other.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Out-variant fallible binding for `atg__test_functorch_fallback_out`, writing into `out`.
pub fn f_internal_test_functorch_fallback_out(
&self,
out: &Tensor,
other: &Tensor,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__test_functorch_fallback_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
other.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for `atg__test_optional_filled_intlist`; the optional int list
/// crosses as a (ptr, len) pair via the `IntListOption` trait.
pub fn f_internal_test_optional_filled_intlist(
values: &Tensor,
addends: impl IntListOption,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__test_optional_filled_intlist(
c_tensors.as_mut_ptr(),
values.c_tensor,
addends.as_ptr(),
addends.len_i32()
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Out-variant fallible binding for `atg__test_optional_filled_intlist_out`, writing into `out`.
pub fn f_internal_test_optional_filled_intlist_out(
out: &Tensor,
values: &Tensor,
addends: impl IntListOption,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__test_optional_filled_intlist_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
values.c_tensor,
addends.as_ptr(),
addends.len_i32()
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for `atg__test_optional_floatlist`.
pub fn f_internal_test_optional_floatlist(
values: &Tensor,
addends: impl DoubleList,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__test_optional_floatlist(
c_tensors.as_mut_ptr(),
values.c_tensor,
addends.as_ptr(),
addends.len_i32()
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Out-variant fallible binding for `atg__test_optional_floatlist_out`, writing into `out`.
pub fn f_internal_test_optional_floatlist_out(
out: &Tensor,
values: &Tensor,
addends: impl DoubleList,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__test_optional_floatlist_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
values.c_tensor,
addends.as_ptr(),
addends.len_i32()
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for `atg__test_optional_intlist`.
pub fn f_internal_test_optional_intlist(
values: &Tensor,
addends: impl IntListOption,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__test_optional_intlist(
c_tensors.as_mut_ptr(),
values.c_tensor,
addends.as_ptr(),
addends.len_i32()
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Out-variant fallible binding for `atg__test_optional_intlist_out`, writing into `out`.
pub fn f_internal_test_optional_intlist_out(
out: &Tensor,
values: &Tensor,
addends: impl IntListOption,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__test_optional_intlist_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
values.c_tensor,
addends.as_ptr(),
addends.len_i32()
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for the libtorch test op `atg__test_serialization_subcmul`.
pub fn f_internal_test_serialization_subcmul(
&self,
other: &Tensor,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__test_serialization_subcmul(
c_tensors.as_mut_ptr(),
self.c_tensor,
other.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for the libtorch test op `atg__test_string_default`; both string
/// arguments cross as (ptr, byte-length) pairs.
pub fn f_internal_test_string_default(
dummy: &Tensor,
a: &str,
b: &str,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__test_string_default(
c_tensors.as_mut_ptr(),
dummy.c_tensor,
a.as_ptr(),
a.len() as i32,
b.as_ptr(),
b.len() as i32
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for the libtorch test op `atg__test_warn_in_autograd`.
pub fn f_internal_test_warn_in_autograd(&self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__test_warn_in_autograd(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Out-variant fallible binding for `atg__test_warn_in_autograd_out`, writing into `out`.
pub fn f_internal_test_warn_in_autograd_out(&self, out: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__test_warn_in_autograd_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for `atg__to_copy`; `options` is the (Kind, Device) pair lowered
/// to two C ints.
pub fn f_internal_to_copy(
&self,
options: (Kind, Device),
non_blocking: bool,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__to_copy(
c_tensors.as_mut_ptr(),
self.c_tensor,
options.0.c_int(),
options.1.c_int(),
if non_blocking { 1 } else { 0 }
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Out-variant fallible binding for `atg__to_copy_out`, writing into `out`.
pub fn f_internal_to_copy_out(
&self,
out: &Tensor,
non_blocking: bool,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__to_copy_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
if non_blocking { 1 } else { 0 }
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for `atg__to_cpu`. The C side returns a null-terminated, C-allocated
/// array of tensor pointers: each entry is wrapped into an owned `Tensor`, then the array
/// itself (not the tensors) is released with `libc::free`.
pub fn f_internal_to_cpu<T: Borrow<Tensor>>(tensors: &[T]) -> Result<Vec<Tensor>, TchError> {
let c_tensors =
unsafe_torch_err!(atg__to_cpu(ptr_list(tensors).as_ptr(), tensors.len() as i32));
let mut r__ = vec![];
let mut i = 0;
loop {
// Walk the returned array until the null terminator.
let c__ = unsafe { *c_tensors.add(i) };
if c__.is_null() {
break;
}
r__.push(Tensor { c_tensor: c__ });
i += 1;
}
unsafe { libc::free(c_tensors as *mut libc::c_void) }
Ok(r__)
}
/// Fallible binding for `atg__to_dense`; a missing `dtype` is encoded as -1.
pub fn f_internal_to_dense(
&self,
dtype: impl Into<Option<Kind>>,
masked_grad: bool,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__to_dense(
c_tensors.as_mut_ptr(),
self.c_tensor,
dtype.into().map_or(-1, |s| s.c_int()),
if masked_grad { 1 } else { 0 }
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Out-variant fallible binding for `atg__to_dense_out`, writing into `out`.
pub fn f_internal_to_dense_out(
&self,
out: &Tensor,
dtype: impl Into<Option<Kind>>,
masked_grad: bool,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__to_dense_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
dtype.into().map_or(-1, |s| s.c_int()),
if masked_grad { 1 } else { 0 }
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for `atg__to_sparse`. Optional `dense_dim` is lowered to a
/// (value, was-None) pair for the C API; a missing `layout` is encoded as -1.
pub fn f_internal_to_sparse(
&self,
layout: Option<Layout>,
blocksize: impl IntListOption,
dense_dim: impl Into<Option<i64>>,
) -> Result<Tensor, TchError> {
let dense_dim = dense_dim.into();
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__to_sparse(
c_tensors.as_mut_ptr(),
self.c_tensor,
layout.map_or(-1, |s| s.to_i8()),
blocksize.as_ptr(),
blocksize.len_i32(),
dense_dim.unwrap_or(0i64),
dense_dim.is_none() as i8
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for `atg__to_sparse_bsc`; optional `dense_dim` is lowered to a
/// (value, was-None) pair for the C API.
pub fn f_internal_to_sparse_bsc(
&self,
blocksize: impl IntList,
dense_dim: impl Into<Option<i64>>,
) -> Result<Tensor, TchError> {
let dense_dim = dense_dim.into();
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__to_sparse_bsc(
c_tensors.as_mut_ptr(),
self.c_tensor,
blocksize.as_ptr(),
blocksize.len_i32(),
dense_dim.unwrap_or(0i64),
dense_dim.is_none() as i8
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Out-variant fallible binding for `atg__to_sparse_bsc_out`, writing into `out`.
pub fn f_internal_to_sparse_bsc_out(
&self,
out: &Tensor,
blocksize: impl IntList,
dense_dim: impl Into<Option<i64>>,
) -> Result<Tensor, TchError> {
let dense_dim = dense_dim.into();
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__to_sparse_bsc_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
blocksize.as_ptr(),
blocksize.len_i32(),
dense_dim.unwrap_or(0i64),
dense_dim.is_none() as i8
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for `atg__to_sparse_bsr`; optional `dense_dim` is lowered to a
/// (value, was-None) pair for the C API.
pub fn f_internal_to_sparse_bsr(
&self,
blocksize: impl IntList,
dense_dim: impl Into<Option<i64>>,
) -> Result<Tensor, TchError> {
let dense_dim = dense_dim.into();
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__to_sparse_bsr(
c_tensors.as_mut_ptr(),
self.c_tensor,
blocksize.as_ptr(),
blocksize.len_i32(),
dense_dim.unwrap_or(0i64),
dense_dim.is_none() as i8
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Out-variant of `_to_sparse_bsr`; writes into `out`. A `None` `dense_dim` is
/// flagged by the trailing `is_none` byte (value slot is a placeholder `0`).
pub fn f_internal_to_sparse_bsr_out(
&self,
out: &Tensor,
blocksize: impl IntList,
dense_dim: impl Into<Option<i64>>,
) -> Result<Tensor, TchError> {
let dense_dim = dense_dim.into();
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__to_sparse_bsr_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
blocksize.as_ptr(),
blocksize.len_i32(),
dense_dim.unwrap_or(0i64),
dense_dim.is_none() as i8
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for ATen's `_to_sparse_csc`. A `None` `dense_dim` is
/// flagged by the trailing `is_none` byte (value slot is a placeholder `0`).
pub fn f_internal_to_sparse_csc(
&self,
dense_dim: impl Into<Option<i64>>,
) -> Result<Tensor, TchError> {
let dense_dim = dense_dim.into();
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__to_sparse_csc(
c_tensors.as_mut_ptr(),
self.c_tensor,
dense_dim.unwrap_or(0i64),
dense_dim.is_none() as i8
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Out-variant of `_to_sparse_csc`; writes into `out`. A `None` `dense_dim`
/// is flagged by the trailing `is_none` byte.
pub fn f_internal_to_sparse_csc_out(
&self,
out: &Tensor,
dense_dim: impl Into<Option<i64>>,
) -> Result<Tensor, TchError> {
let dense_dim = dense_dim.into();
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__to_sparse_csc_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
dense_dim.unwrap_or(0i64),
dense_dim.is_none() as i8
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for ATen's `_to_sparse_csr`. A `None` `dense_dim` is
/// flagged by the trailing `is_none` byte (value slot is a placeholder `0`).
pub fn f_internal_to_sparse_csr(
&self,
dense_dim: impl Into<Option<i64>>,
) -> Result<Tensor, TchError> {
let dense_dim = dense_dim.into();
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__to_sparse_csr(
c_tensors.as_mut_ptr(),
self.c_tensor,
dense_dim.unwrap_or(0i64),
dense_dim.is_none() as i8
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Out-variant of `_to_sparse_csr`; writes into `out`. A `None` `dense_dim`
/// is flagged by the trailing `is_none` byte.
pub fn f_internal_to_sparse_csr_out(
&self,
out: &Tensor,
dense_dim: impl Into<Option<i64>>,
) -> Result<Tensor, TchError> {
let dense_dim = dense_dim.into();
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__to_sparse_csr_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
dense_dim.unwrap_or(0i64),
dense_dim.is_none() as i8
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Out-variant of `_to_sparse`; writes into `out`. `layout: None` is encoded
/// as `-1`; a `None` `dense_dim` is flagged by the trailing `is_none` byte.
pub fn f_internal_to_sparse_out(
&self,
out: &Tensor,
layout: Option<Layout>,
blocksize: impl IntListOption,
dense_dim: impl Into<Option<i64>>,
) -> Result<Tensor, TchError> {
let dense_dim = dense_dim.into();
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__to_sparse_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
layout.map_or(-1, |s| s.to_i8()),
blocksize.as_ptr(),
blocksize.len_i32(),
dense_dim.unwrap_or(0i64),
dense_dim.is_none() as i8
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for ATen's `_to_sparse_semi_structured`; returns the two
/// tensors produced by the C call (compressed data + metadata — name-based,
/// TODO confirm against ATen docs).
pub fn f_internal_to_sparse_semi_structured(
dense: &Tensor,
) -> Result<(Tensor, Tensor), TchError> {
let mut c_tensors = [std::ptr::null_mut(); 2];
unsafe_torch_err!(atg__to_sparse_semi_structured(c_tensors.as_mut_ptr(), dense.c_tensor));
Ok((Tensor { c_tensor: c_tensors[0] }, Tensor { c_tensor: c_tensors[1] }))
}
/// Fallible binding for ATen's `_to_sparse.sparse_dim` overload.
pub fn f_internal_to_sparse_sparse_dim(&self, sparse_dim: i64) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__to_sparse_sparse_dim(
c_tensors.as_mut_ptr(),
self.c_tensor,
sparse_dim
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Out-variant of `_to_sparse.sparse_dim`; writes into `out`.
pub fn f_internal_to_sparse_sparse_dim_out(
&self,
out: &Tensor,
sparse_dim: i64,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__to_sparse_sparse_dim_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
sparse_dim
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for ATen's `_transform_bias_rescale_qkv`; returns the three
/// tensors produced by the C call (q/k/v projections — name-based, TODO confirm).
pub fn f_internal_transform_bias_rescale_qkv(
qkv: &Tensor,
qkv_bias: &Tensor,
num_heads: i64,
) -> Result<(Tensor, Tensor, Tensor), TchError> {
let mut c_tensors = [std::ptr::null_mut(); 3];
unsafe_torch_err!(atg__transform_bias_rescale_qkv(
c_tensors.as_mut_ptr(),
qkv.c_tensor,
qkv_bias.c_tensor,
num_heads
));
Ok((
Tensor { c_tensor: c_tensors[0] },
Tensor { c_tensor: c_tensors[1] },
Tensor { c_tensor: c_tensors[2] },
))
}
/// Out-variant of `_transform_bias_rescale_qkv`; writes into `out0..out2`.
pub fn f_internal_transform_bias_rescale_qkv_out(
out0: &Tensor,
out1: &Tensor,
out2: &Tensor,
qkv: &Tensor,
qkv_bias: &Tensor,
num_heads: i64,
) -> Result<(Tensor, Tensor, Tensor), TchError> {
let mut c_tensors = [std::ptr::null_mut(); 3];
unsafe_torch_err!(atg__transform_bias_rescale_qkv_out(
c_tensors.as_mut_ptr(),
out0.c_tensor,
out1.c_tensor,
out2.c_tensor,
qkv.c_tensor,
qkv_bias.c_tensor,
num_heads
));
Ok((
Tensor { c_tensor: c_tensors[0] },
Tensor { c_tensor: c_tensors[1] },
Tensor { c_tensor: c_tensors[2] },
))
}
/// Fallible binding for ATen's `_transformer_encoder_layer_fwd` fast path.
/// An absent `mask` becomes a null tensor pointer; an absent `mask_type` is
/// flagged by the trailing `is_none` byte. Bools are lowered to 0/1 ints.
pub fn f_internal_transformer_encoder_layer_fwd<T: Borrow<Tensor>>(
src: &Tensor,
embed_dim: i64,
num_heads: i64,
qkv_weight: &Tensor,
qkv_bias: &Tensor,
proj_weight: &Tensor,
proj_bias: &Tensor,
use_gelu: bool,
norm_first: bool,
eps: f64,
norm_weight_1: &Tensor,
norm_bias_1: &Tensor,
norm_weight_2: &Tensor,
norm_bias_2: &Tensor,
ffn_weight_1: &Tensor,
ffn_bias_1: &Tensor,
ffn_weight_2: &Tensor,
ffn_bias_2: &Tensor,
mask: Option<T>,
mask_type: impl Into<Option<i64>>,
) -> Result<Tensor, TchError> {
let mask_type = mask_type.into();
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__transformer_encoder_layer_fwd(
c_tensors.as_mut_ptr(),
src.c_tensor,
embed_dim,
num_heads,
qkv_weight.c_tensor,
qkv_bias.c_tensor,
proj_weight.c_tensor,
proj_bias.c_tensor,
if use_gelu { 1 } else { 0 },
if norm_first { 1 } else { 0 },
eps,
norm_weight_1.c_tensor,
norm_bias_1.c_tensor,
norm_weight_2.c_tensor,
norm_bias_2.c_tensor,
ffn_weight_1.c_tensor,
ffn_bias_1.c_tensor,
ffn_weight_2.c_tensor,
ffn_bias_2.c_tensor,
mask.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
mask_type.unwrap_or(0i64),
mask_type.is_none() as i8
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Out-variant of `_transformer_encoder_layer_fwd`; writes into `out`.
/// An absent `mask` becomes a null tensor pointer; an absent `mask_type` is
/// flagged by the trailing `is_none` byte. Bools are lowered to 0/1 ints.
pub fn f_internal_transformer_encoder_layer_fwd_out<T: Borrow<Tensor>>(
out: &Tensor,
src: &Tensor,
embed_dim: i64,
num_heads: i64,
qkv_weight: &Tensor,
qkv_bias: &Tensor,
proj_weight: &Tensor,
proj_bias: &Tensor,
use_gelu: bool,
norm_first: bool,
eps: f64,
norm_weight_1: &Tensor,
norm_bias_1: &Tensor,
norm_weight_2: &Tensor,
norm_bias_2: &Tensor,
ffn_weight_1: &Tensor,
ffn_bias_1: &Tensor,
ffn_weight_2: &Tensor,
ffn_bias_2: &Tensor,
mask: Option<T>,
mask_type: impl Into<Option<i64>>,
) -> Result<Tensor, TchError> {
let mask_type = mask_type.into();
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__transformer_encoder_layer_fwd_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
src.c_tensor,
embed_dim,
num_heads,
qkv_weight.c_tensor,
qkv_bias.c_tensor,
proj_weight.c_tensor,
proj_bias.c_tensor,
if use_gelu { 1 } else { 0 },
if norm_first { 1 } else { 0 },
eps,
norm_weight_1.c_tensor,
norm_bias_1.c_tensor,
norm_weight_2.c_tensor,
norm_bias_2.c_tensor,
ffn_weight_1.c_tensor,
ffn_bias_1.c_tensor,
ffn_weight_2.c_tensor,
ffn_bias_2.c_tensor,
mask.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
mask_type.unwrap_or(0i64),
mask_type.is_none() as i8
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for ATen's `_trilinear`; each int list is passed to C as
/// a (pointer, length) pair.
pub fn f_internal_trilinear(
i1: &Tensor,
i2: &Tensor,
i3: &Tensor,
expand1: impl IntList,
expand2: impl IntList,
expand3: impl IntList,
sumdim: impl IntList,
unroll_dim: i64,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__trilinear(
c_tensors.as_mut_ptr(),
i1.c_tensor,
i2.c_tensor,
i3.c_tensor,
expand1.as_ptr(),
expand1.len_i32(),
expand2.as_ptr(),
expand2.len_i32(),
expand3.as_ptr(),
expand3.len_i32(),
sumdim.as_ptr(),
sumdim.len_i32(),
unroll_dim
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Out-variant of `_trilinear`; writes into `out`. Each int list is passed to
/// C as a (pointer, length) pair.
pub fn f_internal_trilinear_out(
out: &Tensor,
i1: &Tensor,
i2: &Tensor,
i3: &Tensor,
expand1: impl IntList,
expand2: impl IntList,
expand3: impl IntList,
sumdim: impl IntList,
unroll_dim: i64,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__trilinear_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
i1.c_tensor,
i2.c_tensor,
i3.c_tensor,
expand1.as_ptr(),
expand1.len_i32(),
expand2.as_ptr(),
expand2.len_i32(),
expand3.as_ptr(),
expand3.len_i32(),
sumdim.as_ptr(),
sumdim.len_i32(),
unroll_dim
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for ATen's `_triton_multi_head_attention`. An absent
/// `mask` is passed to C as a null tensor pointer.
pub fn f_internal_triton_multi_head_attention<T: Borrow<Tensor>>(
query: &Tensor,
key: &Tensor,
value: &Tensor,
embed_dim: i64,
num_head: i64,
qkv_weight: &Tensor,
qkv_bias: &Tensor,
proj_weight: &Tensor,
proj_bias: &Tensor,
mask: Option<T>,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__triton_multi_head_attention(
c_tensors.as_mut_ptr(),
query.c_tensor,
key.c_tensor,
value.c_tensor,
embed_dim,
num_head,
qkv_weight.c_tensor,
qkv_bias.c_tensor,
proj_weight.c_tensor,
proj_bias.c_tensor,
mask.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor)
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Out-variant of `_triton_multi_head_attention`; writes into `out`. An absent
/// `mask` is passed to C as a null tensor pointer.
pub fn f_internal_triton_multi_head_attention_out<T: Borrow<Tensor>>(
out: &Tensor,
query: &Tensor,
key: &Tensor,
value: &Tensor,
embed_dim: i64,
num_head: i64,
qkv_weight: &Tensor,
qkv_bias: &Tensor,
proj_weight: &Tensor,
proj_bias: &Tensor,
mask: Option<T>,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__triton_multi_head_attention_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
query.c_tensor,
key.c_tensor,
value.c_tensor,
embed_dim,
num_head,
qkv_weight.c_tensor,
qkv_bias.c_tensor,
proj_weight.c_tensor,
proj_bias.c_tensor,
mask.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor)
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for ATen's `_triton_scaled_dot_attention`.
pub fn f_internal_triton_scaled_dot_attention(
q: &Tensor,
k: &Tensor,
v: &Tensor,
dropout_p: f64,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__triton_scaled_dot_attention(
c_tensors.as_mut_ptr(),
q.c_tensor,
k.c_tensor,
v.c_tensor,
dropout_p
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Out-variant of `_triton_scaled_dot_attention`; writes into `out`.
pub fn f_internal_triton_scaled_dot_attention_out(
out: &Tensor,
q: &Tensor,
k: &Tensor,
v: &Tensor,
dropout_p: f64,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__triton_scaled_dot_attention_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
q.c_tensor,
k.c_tensor,
v.c_tensor,
dropout_p
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for ATen's `_unique`; returns (values, inverse-indices —
/// name-based, TODO confirm). Bool flags are lowered to 0/1 ints.
pub fn f_internal_unique(
&self,
sorted: bool,
return_inverse: bool,
) -> Result<(Tensor, Tensor), TchError> {
let mut c_tensors = [std::ptr::null_mut(); 2];
unsafe_torch_err!(atg__unique(
c_tensors.as_mut_ptr(),
self.c_tensor,
if sorted { 1 } else { 0 },
if return_inverse { 1 } else { 0 }
));
Ok((Tensor { c_tensor: c_tensors[0] }, Tensor { c_tensor: c_tensors[1] }))
}
/// Fallible binding for ATen's `_unique2`; returns the three tensors produced
/// by the C call (values, inverse, counts — name-based, TODO confirm).
pub fn f_internal_unique2(
&self,
sorted: bool,
return_inverse: bool,
return_counts: bool,
) -> Result<(Tensor, Tensor, Tensor), TchError> {
let mut c_tensors = [std::ptr::null_mut(); 3];
unsafe_torch_err!(atg__unique2(
c_tensors.as_mut_ptr(),
self.c_tensor,
if sorted { 1 } else { 0 },
if return_inverse { 1 } else { 0 },
if return_counts { 1 } else { 0 }
));
Ok((
Tensor { c_tensor: c_tensors[0] },
Tensor { c_tensor: c_tensors[1] },
Tensor { c_tensor: c_tensors[2] },
))
}
/// Out-variant of `_unique2`; writes into `out0..out2`.
pub fn f_internal_unique2_out(
&self,
out0: &Tensor,
out1: &Tensor,
out2: &Tensor,
sorted: bool,
return_inverse: bool,
return_counts: bool,
) -> Result<(Tensor, Tensor, Tensor), TchError> {
let mut c_tensors = [std::ptr::null_mut(); 3];
unsafe_torch_err!(atg__unique2_out(
c_tensors.as_mut_ptr(),
out0.c_tensor,
out1.c_tensor,
out2.c_tensor,
self.c_tensor,
if sorted { 1 } else { 0 },
if return_inverse { 1 } else { 0 },
if return_counts { 1 } else { 0 }
));
Ok((
Tensor { c_tensor: c_tensors[0] },
Tensor { c_tensor: c_tensors[1] },
Tensor { c_tensor: c_tensors[2] },
))
}
/// Out-variant of `_unique`; writes into `out0`/`out1`.
pub fn f_internal_unique_out(
&self,
out0: &Tensor,
out1: &Tensor,
sorted: bool,
return_inverse: bool,
) -> Result<(Tensor, Tensor), TchError> {
let mut c_tensors = [std::ptr::null_mut(); 2];
unsafe_torch_err!(atg__unique_out(
c_tensors.as_mut_ptr(),
out0.c_tensor,
out1.c_tensor,
self.c_tensor,
if sorted { 1 } else { 0 },
if return_inverse { 1 } else { 0 }
));
Ok((Tensor { c_tensor: c_tensors[0] }, Tensor { c_tensor: c_tensors[1] }))
}
/// Fallible binding for ATen's `_unpack_dual`; returns the two tensors produced
/// by the C call (primal, tangent — name-based, TODO confirm).
pub fn f_internal_unpack_dual(dual: &Tensor, level: i64) -> Result<(Tensor, Tensor), TchError> {
let mut c_tensors = [std::ptr::null_mut(); 2];
unsafe_torch_err!(atg__unpack_dual(c_tensors.as_mut_ptr(), dual.c_tensor, level));
Ok((Tensor { c_tensor: c_tensors[0] }, Tensor { c_tensor: c_tensors[1] }))
}
/// Fallible binding for ATen's `_unsafe_index`; `indices` entries that are
/// `None` become null pointers in the list built by `ptr_list_opt`.
pub fn f_internal_unsafe_index<T: Borrow<Tensor>>(
&self,
indices: &[Option<T>],
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__unsafe_index(
c_tensors.as_mut_ptr(),
self.c_tensor,
ptr_list_opt(indices).as_ptr(),
indices.len() as i32
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for ATen's `_unsafe_index_put`; `None` index entries become
/// null pointers, `accumulate` is lowered to a 0/1 int.
pub fn f_internal_unsafe_index_put<T: Borrow<Tensor>>(
&self,
indices: &[Option<T>],
values: &Tensor,
accumulate: bool,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__unsafe_index_put(
c_tensors.as_mut_ptr(),
self.c_tensor,
ptr_list_opt(indices).as_ptr(),
indices.len() as i32,
values.c_tensor,
if accumulate { 1 } else { 0 }
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for ATen's `_unsafe_view`; `size` is passed as a
/// (pointer, length) pair.
pub fn f_internal_unsafe_view(&self, size: impl IntList) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__unsafe_view(
c_tensors.as_mut_ptr(),
self.c_tensor,
size.as_ptr(),
size.len_i32()
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Out-variant of `_unsafe_view`; writes into `out`.
pub fn f_internal_unsafe_view_out(
&self,
out: &Tensor,
size: impl IntList,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__unsafe_view_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
size.as_ptr(),
size.len_i32()
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for ATen's anti-aliased `_upsample_bicubic2d_aa`. Optional
/// `f64` scales use the NaN-placeholder + trailing `is_none` byte convention.
pub fn f_internal_upsample_bicubic2d_aa(
&self,
output_size: impl IntList,
align_corners: bool,
scales_h: impl Into<Option<f64>>,
scales_w: impl Into<Option<f64>>,
) -> Result<Tensor, TchError> {
let scales_h = scales_h.into();
let scales_w = scales_w.into();
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__upsample_bicubic2d_aa(
c_tensors.as_mut_ptr(),
self.c_tensor,
output_size.as_ptr(),
output_size.len_i32(),
if align_corners { 1 } else { 0 },
scales_h.unwrap_or(std::f64::NAN),
scales_h.is_none() as i8,
scales_w.unwrap_or(std::f64::NAN),
scales_w.is_none() as i8
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Backward pass for `_upsample_bicubic2d_aa`. Optional `f64` scales use the
/// NaN-placeholder + trailing `is_none` byte convention.
pub fn f_internal_upsample_bicubic2d_aa_backward(
grad_output: &Tensor,
output_size: impl IntList,
input_size: impl IntList,
align_corners: bool,
scales_h: impl Into<Option<f64>>,
scales_w: impl Into<Option<f64>>,
) -> Result<Tensor, TchError> {
let scales_h = scales_h.into();
let scales_w = scales_w.into();
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__upsample_bicubic2d_aa_backward(
c_tensors.as_mut_ptr(),
grad_output.c_tensor,
output_size.as_ptr(),
output_size.len_i32(),
input_size.as_ptr(),
input_size.len_i32(),
if align_corners { 1 } else { 0 },
scales_h.unwrap_or(std::f64::NAN),
scales_h.is_none() as i8,
scales_w.unwrap_or(std::f64::NAN),
scales_w.is_none() as i8
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Grad-input out-variant of `_upsample_bicubic2d_aa_backward`; writes into
/// `grad_input`. Optional scales use the NaN + `is_none` byte convention.
pub fn f_internal_upsample_bicubic2d_aa_backward_grad_input(
grad_input: &Tensor,
grad_output: &Tensor,
output_size: impl IntList,
input_size: impl IntList,
align_corners: bool,
scales_h: impl Into<Option<f64>>,
scales_w: impl Into<Option<f64>>,
) -> Result<Tensor, TchError> {
let scales_h = scales_h.into();
let scales_w = scales_w.into();
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__upsample_bicubic2d_aa_backward_grad_input(
c_tensors.as_mut_ptr(),
grad_input.c_tensor,
grad_output.c_tensor,
output_size.as_ptr(),
output_size.len_i32(),
input_size.as_ptr(),
input_size.len_i32(),
if align_corners { 1 } else { 0 },
scales_h.unwrap_or(std::f64::NAN),
scales_h.is_none() as i8,
scales_w.unwrap_or(std::f64::NAN),
scales_w.is_none() as i8
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Out-variant of `_upsample_bicubic2d_aa`; writes into `out`. Optional scales
/// use the NaN + `is_none` byte convention.
pub fn f_internal_upsample_bicubic2d_aa_out(
&self,
out: &Tensor,
output_size: impl IntList,
align_corners: bool,
scales_h: impl Into<Option<f64>>,
scales_w: impl Into<Option<f64>>,
) -> Result<Tensor, TchError> {
let scales_h = scales_h.into();
let scales_w = scales_w.into();
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__upsample_bicubic2d_aa_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
output_size.as_ptr(),
output_size.len_i32(),
if align_corners { 1 } else { 0 },
scales_h.unwrap_or(std::f64::NAN),
scales_h.is_none() as i8,
scales_w.unwrap_or(std::f64::NAN),
scales_w.is_none() as i8
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// `vec` overload of `_upsample_bicubic2d_aa`: takes an optional `output_size`
/// list and a `scale_factors` double list instead of per-axis scales.
pub fn f_internal_upsample_bicubic2d_aa_vec(
&self,
output_size: impl IntListOption,
align_corners: bool,
scale_factors: impl DoubleList,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__upsample_bicubic2d_aa_vec(
c_tensors.as_mut_ptr(),
self.c_tensor,
output_size.as_ptr(),
output_size.len_i32(),
if align_corners { 1 } else { 0 },
scale_factors.as_ptr(),
scale_factors.len_i32()
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for ATen's anti-aliased `_upsample_bilinear2d_aa`. Optional
/// `f64` scales use the NaN-placeholder + trailing `is_none` byte convention.
pub fn f_internal_upsample_bilinear2d_aa(
&self,
output_size: impl IntList,
align_corners: bool,
scales_h: impl Into<Option<f64>>,
scales_w: impl Into<Option<f64>>,
) -> Result<Tensor, TchError> {
let scales_h = scales_h.into();
let scales_w = scales_w.into();
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__upsample_bilinear2d_aa(
c_tensors.as_mut_ptr(),
self.c_tensor,
output_size.as_ptr(),
output_size.len_i32(),
if align_corners { 1 } else { 0 },
scales_h.unwrap_or(std::f64::NAN),
scales_h.is_none() as i8,
scales_w.unwrap_or(std::f64::NAN),
scales_w.is_none() as i8
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Backward pass for `_upsample_bilinear2d_aa`. Optional `f64` scales use the
/// NaN-placeholder + trailing `is_none` byte convention.
pub fn f_internal_upsample_bilinear2d_aa_backward(
grad_output: &Tensor,
output_size: impl IntList,
input_size: impl IntList,
align_corners: bool,
scales_h: impl Into<Option<f64>>,
scales_w: impl Into<Option<f64>>,
) -> Result<Tensor, TchError> {
let scales_h = scales_h.into();
let scales_w = scales_w.into();
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__upsample_bilinear2d_aa_backward(
c_tensors.as_mut_ptr(),
grad_output.c_tensor,
output_size.as_ptr(),
output_size.len_i32(),
input_size.as_ptr(),
input_size.len_i32(),
if align_corners { 1 } else { 0 },
scales_h.unwrap_or(std::f64::NAN),
scales_h.is_none() as i8,
scales_w.unwrap_or(std::f64::NAN),
scales_w.is_none() as i8
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Grad-input out-variant of `_upsample_bilinear2d_aa_backward`; writes into
/// `grad_input`. Optional scales use the NaN + `is_none` byte convention.
pub fn f_internal_upsample_bilinear2d_aa_backward_grad_input(
grad_input: &Tensor,
grad_output: &Tensor,
output_size: impl IntList,
input_size: impl IntList,
align_corners: bool,
scales_h: impl Into<Option<f64>>,
scales_w: impl Into<Option<f64>>,
) -> Result<Tensor, TchError> {
let scales_h = scales_h.into();
let scales_w = scales_w.into();
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__upsample_bilinear2d_aa_backward_grad_input(
c_tensors.as_mut_ptr(),
grad_input.c_tensor,
grad_output.c_tensor,
output_size.as_ptr(),
output_size.len_i32(),
input_size.as_ptr(),
input_size.len_i32(),
if align_corners { 1 } else { 0 },
scales_h.unwrap_or(std::f64::NAN),
scales_h.is_none() as i8,
scales_w.unwrap_or(std::f64::NAN),
scales_w.is_none() as i8
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Out-variant of `_upsample_bilinear2d_aa`; writes into `out`. Optional scales
/// use the NaN + `is_none` byte convention.
pub fn f_internal_upsample_bilinear2d_aa_out(
&self,
out: &Tensor,
output_size: impl IntList,
align_corners: bool,
scales_h: impl Into<Option<f64>>,
scales_w: impl Into<Option<f64>>,
) -> Result<Tensor, TchError> {
let scales_h = scales_h.into();
let scales_w = scales_w.into();
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__upsample_bilinear2d_aa_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
output_size.as_ptr(),
output_size.len_i32(),
if align_corners { 1 } else { 0 },
scales_h.unwrap_or(std::f64::NAN),
scales_h.is_none() as i8,
scales_w.unwrap_or(std::f64::NAN),
scales_w.is_none() as i8
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// `vec` overload of `_upsample_bilinear2d_aa`: takes an optional `output_size`
/// list and a `scale_factors` double list instead of per-axis scales.
pub fn f_internal_upsample_bilinear2d_aa_vec(
&self,
output_size: impl IntListOption,
align_corners: bool,
scale_factors: impl DoubleList,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__upsample_bilinear2d_aa_vec(
c_tensors.as_mut_ptr(),
self.c_tensor,
output_size.as_ptr(),
output_size.len_i32(),
if align_corners { 1 } else { 0 },
scale_factors.as_ptr(),
scale_factors.len_i32()
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for ATen's `_upsample_nearest_exact1d`. An absent `scales`
/// uses the NaN-placeholder + trailing `is_none` byte convention.
pub fn f_internal_upsample_nearest_exact1d(
&self,
output_size: impl IntList,
scales: impl Into<Option<f64>>,
) -> Result<Tensor, TchError> {
let scales = scales.into();
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__upsample_nearest_exact1d(
c_tensors.as_mut_ptr(),
self.c_tensor,
output_size.as_ptr(),
output_size.len_i32(),
scales.unwrap_or(std::f64::NAN),
scales.is_none() as i8
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Backward pass for `_upsample_nearest_exact1d`. An absent `scales` uses the
/// NaN-placeholder + trailing `is_none` byte convention.
pub fn f_internal_upsample_nearest_exact1d_backward(
grad_output: &Tensor,
output_size: impl IntList,
input_size: impl IntList,
scales: impl Into<Option<f64>>,
) -> Result<Tensor, TchError> {
let scales = scales.into();
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__upsample_nearest_exact1d_backward(
c_tensors.as_mut_ptr(),
grad_output.c_tensor,
output_size.as_ptr(),
output_size.len_i32(),
input_size.as_ptr(),
input_size.len_i32(),
scales.unwrap_or(std::f64::NAN),
scales.is_none() as i8
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Grad-input out-variant of `_upsample_nearest_exact1d_backward`; writes into
/// `grad_input`. An absent `scales` uses the NaN + `is_none` byte convention.
pub fn f_internal_upsample_nearest_exact1d_backward_grad_input(
grad_input: &Tensor,
grad_output: &Tensor,
output_size: impl IntList,
input_size: impl IntList,
scales: impl Into<Option<f64>>,
) -> Result<Tensor, TchError> {
let scales = scales.into();
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__upsample_nearest_exact1d_backward_grad_input(
c_tensors.as_mut_ptr(),
grad_input.c_tensor,
grad_output.c_tensor,
output_size.as_ptr(),
output_size.len_i32(),
input_size.as_ptr(),
input_size.len_i32(),
scales.unwrap_or(std::f64::NAN),
scales.is_none() as i8
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Out-variant of `_upsample_nearest_exact1d`; writes into `out`. An absent
/// `scales` uses the NaN + `is_none` byte convention.
pub fn f_internal_upsample_nearest_exact1d_out(
&self,
out: &Tensor,
output_size: impl IntList,
scales: impl Into<Option<f64>>,
) -> Result<Tensor, TchError> {
let scales = scales.into();
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__upsample_nearest_exact1d_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
output_size.as_ptr(),
output_size.len_i32(),
scales.unwrap_or(std::f64::NAN),
scales.is_none() as i8
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// `vec` overload of `_upsample_nearest_exact1d`: optional `output_size` list
/// plus a `scale_factors` double list.
pub fn f_internal_upsample_nearest_exact1d_vec(
&self,
output_size: impl IntListOption,
scale_factors: impl DoubleList,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__upsample_nearest_exact1d_vec(
c_tensors.as_mut_ptr(),
self.c_tensor,
output_size.as_ptr(),
output_size.len_i32(),
scale_factors.as_ptr(),
scale_factors.len_i32()
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for ATen's `_upsample_nearest_exact2d`. Optional per-axis
/// scales use the NaN-placeholder + trailing `is_none` byte convention.
pub fn f_internal_upsample_nearest_exact2d(
&self,
output_size: impl IntList,
scales_h: impl Into<Option<f64>>,
scales_w: impl Into<Option<f64>>,
) -> Result<Tensor, TchError> {
let scales_h = scales_h.into();
let scales_w = scales_w.into();
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__upsample_nearest_exact2d(
c_tensors.as_mut_ptr(),
self.c_tensor,
output_size.as_ptr(),
output_size.len_i32(),
scales_h.unwrap_or(std::f64::NAN),
scales_h.is_none() as i8,
scales_w.unwrap_or(std::f64::NAN),
scales_w.is_none() as i8
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Backward pass for `_upsample_nearest_exact2d`. Optional per-axis scales use
/// the NaN-placeholder + trailing `is_none` byte convention.
pub fn f_internal_upsample_nearest_exact2d_backward(
grad_output: &Tensor,
output_size: impl IntList,
input_size: impl IntList,
scales_h: impl Into<Option<f64>>,
scales_w: impl Into<Option<f64>>,
) -> Result<Tensor, TchError> {
let scales_h = scales_h.into();
let scales_w = scales_w.into();
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__upsample_nearest_exact2d_backward(
c_tensors.as_mut_ptr(),
grad_output.c_tensor,
output_size.as_ptr(),
output_size.len_i32(),
input_size.as_ptr(),
input_size.len_i32(),
scales_h.unwrap_or(std::f64::NAN),
scales_h.is_none() as i8,
scales_w.unwrap_or(std::f64::NAN),
scales_w.is_none() as i8
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Grad-input out-variant of `_upsample_nearest_exact2d_backward`; writes into
/// `grad_input`. Optional scales use the NaN + `is_none` byte convention.
pub fn f_internal_upsample_nearest_exact2d_backward_grad_input(
grad_input: &Tensor,
grad_output: &Tensor,
output_size: impl IntList,
input_size: impl IntList,
scales_h: impl Into<Option<f64>>,
scales_w: impl Into<Option<f64>>,
) -> Result<Tensor, TchError> {
let scales_h = scales_h.into();
let scales_w = scales_w.into();
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__upsample_nearest_exact2d_backward_grad_input(
c_tensors.as_mut_ptr(),
grad_input.c_tensor,
grad_output.c_tensor,
output_size.as_ptr(),
output_size.len_i32(),
input_size.as_ptr(),
input_size.len_i32(),
scales_h.unwrap_or(std::f64::NAN),
scales_h.is_none() as i8,
scales_w.unwrap_or(std::f64::NAN),
scales_w.is_none() as i8
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Out-variant of `_upsample_nearest_exact2d`; writes into `out`. Optional
/// scales use the NaN + `is_none` byte convention.
pub fn f_internal_upsample_nearest_exact2d_out(
&self,
out: &Tensor,
output_size: impl IntList,
scales_h: impl Into<Option<f64>>,
scales_w: impl Into<Option<f64>>,
) -> Result<Tensor, TchError> {
let scales_h = scales_h.into();
let scales_w = scales_w.into();
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__upsample_nearest_exact2d_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
output_size.as_ptr(),
output_size.len_i32(),
scales_h.unwrap_or(std::f64::NAN),
scales_h.is_none() as i8,
scales_w.unwrap_or(std::f64::NAN),
scales_w.is_none() as i8
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// `vec` overload of `_upsample_nearest_exact2d`: optional `output_size` list
/// plus a `scale_factors` double list.
pub fn f_internal_upsample_nearest_exact2d_vec(
&self,
output_size: impl IntListOption,
scale_factors: impl DoubleList,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__upsample_nearest_exact2d_vec(
c_tensors.as_mut_ptr(),
self.c_tensor,
output_size.as_ptr(),
output_size.len_i32(),
scale_factors.as_ptr(),
scale_factors.len_i32()
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for ATen's `_upsample_nearest_exact3d`. Optional per-axis
/// scales (d/h/w) use the NaN-placeholder + trailing `is_none` byte convention.
pub fn f_internal_upsample_nearest_exact3d(
&self,
output_size: impl IntList,
scales_d: impl Into<Option<f64>>,
scales_h: impl Into<Option<f64>>,
scales_w: impl Into<Option<f64>>,
) -> Result<Tensor, TchError> {
let scales_d = scales_d.into();
let scales_h = scales_h.into();
let scales_w = scales_w.into();
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__upsample_nearest_exact3d(
c_tensors.as_mut_ptr(),
self.c_tensor,
output_size.as_ptr(),
output_size.len_i32(),
scales_d.unwrap_or(std::f64::NAN),
scales_d.is_none() as i8,
scales_h.unwrap_or(std::f64::NAN),
scales_h.is_none() as i8,
scales_w.unwrap_or(std::f64::NAN),
scales_w.is_none() as i8
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Backward pass for `_upsample_nearest_exact3d`. Optional per-axis scales use
/// the NaN-placeholder + trailing `is_none` byte convention.
pub fn f_internal_upsample_nearest_exact3d_backward(
grad_output: &Tensor,
output_size: impl IntList,
input_size: impl IntList,
scales_d: impl Into<Option<f64>>,
scales_h: impl Into<Option<f64>>,
scales_w: impl Into<Option<f64>>,
) -> Result<Tensor, TchError> {
let scales_d = scales_d.into();
let scales_h = scales_h.into();
let scales_w = scales_w.into();
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__upsample_nearest_exact3d_backward(
c_tensors.as_mut_ptr(),
grad_output.c_tensor,
output_size.as_ptr(),
output_size.len_i32(),
input_size.as_ptr(),
input_size.len_i32(),
scales_d.unwrap_or(std::f64::NAN),
scales_d.is_none() as i8,
scales_h.unwrap_or(std::f64::NAN),
scales_h.is_none() as i8,
scales_w.unwrap_or(std::f64::NAN),
scales_w.is_none() as i8
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Grad-input out-variant of `_upsample_nearest_exact3d_backward`; writes into
/// `grad_input`. Optional scales use the NaN + `is_none` byte convention.
pub fn f_internal_upsample_nearest_exact3d_backward_grad_input(
grad_input: &Tensor,
grad_output: &Tensor,
output_size: impl IntList,
input_size: impl IntList,
scales_d: impl Into<Option<f64>>,
scales_h: impl Into<Option<f64>>,
scales_w: impl Into<Option<f64>>,
) -> Result<Tensor, TchError> {
let scales_d = scales_d.into();
let scales_h = scales_h.into();
let scales_w = scales_w.into();
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__upsample_nearest_exact3d_backward_grad_input(
c_tensors.as_mut_ptr(),
grad_input.c_tensor,
grad_output.c_tensor,
output_size.as_ptr(),
output_size.len_i32(),
input_size.as_ptr(),
input_size.len_i32(),
scales_d.unwrap_or(std::f64::NAN),
scales_d.is_none() as i8,
scales_h.unwrap_or(std::f64::NAN),
scales_h.is_none() as i8,
scales_w.unwrap_or(std::f64::NAN),
scales_w.is_none() as i8
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Out-variant of `_upsample_nearest_exact3d`; writes into `out`. Optional
/// scales use the NaN + `is_none` byte convention.
pub fn f_internal_upsample_nearest_exact3d_out(
&self,
out: &Tensor,
output_size: impl IntList,
scales_d: impl Into<Option<f64>>,
scales_h: impl Into<Option<f64>>,
scales_w: impl Into<Option<f64>>,
) -> Result<Tensor, TchError> {
let scales_d = scales_d.into();
let scales_h = scales_h.into();
let scales_w = scales_w.into();
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__upsample_nearest_exact3d_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
output_size.as_ptr(),
output_size.len_i32(),
scales_d.unwrap_or(std::f64::NAN),
scales_d.is_none() as i8,
scales_h.unwrap_or(std::f64::NAN),
scales_h.is_none() as i8,
scales_w.unwrap_or(std::f64::NAN),
scales_w.is_none() as i8
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// `vec` overload of `_upsample_nearest_exact3d`: optional `output_size` list
/// plus a `scale_factors` double list.
pub fn f_internal_upsample_nearest_exact3d_vec(
&self,
output_size: impl IntListOption,
scale_factors: impl DoubleList,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__upsample_nearest_exact3d_vec(
c_tensors.as_mut_ptr(),
self.c_tensor,
output_size.as_ptr(),
output_size.len_i32(),
scale_factors.as_ptr(),
scale_factors.len_i32()
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for ATen's `_use_cudnn_ctc_loss`; returns the C call's
/// nonzero-ness as a bool (presumably whether cuDNN's CTC loss kernel is
/// applicable — name-based, TODO confirm).
pub fn f_internal_use_cudnn_ctc_loss(
log_probs: &Tensor,
targets: &Tensor,
input_lengths: impl IntList,
target_lengths: impl IntList,
blank: i64,
) -> Result<bool, TchError> {
let return_;
unsafe_torch_err!(
return_ = atg__use_cudnn_ctc_loss(
log_probs.c_tensor,
targets.c_tensor,
input_lengths.as_ptr(),
input_lengths.len_i32(),
target_lengths.as_ptr(),
target_lengths.len_i32(),
blank
)
);
Ok(return_ != 0)
}
/// Tensor-lengths overload of `_use_cudnn_ctc_loss`; lengths are passed as
/// tensors rather than int lists. Returns the C result's nonzero-ness.
pub fn f_internal_use_cudnn_ctc_loss_tensor(
log_probs: &Tensor,
targets: &Tensor,
input_lengths: &Tensor,
target_lengths: &Tensor,
blank: i64,
) -> Result<bool, TchError> {
let return_;
unsafe_torch_err!(
return_ = atg__use_cudnn_ctc_loss_tensor(
log_probs.c_tensor,
targets.c_tensor,
input_lengths.c_tensor,
target_lengths.c_tensor,
blank
)
);
Ok(return_ != 0)
}
/// Fallible binding for ATen's `_use_cudnn_rnn_flatten_weight`; returns the
/// C call's nonzero-ness as a bool.
pub fn f_internal_use_cudnn_rnn_flatten_weight() -> Result<bool, TchError> {
let return_;
unsafe_torch_err!(return_ = atg__use_cudnn_rnn_flatten_weight());
Ok(return_ != 0)
}
/// Fallible binding for ATen's `_validate_compressed_sparse_indices`; returns
/// `Ok(())` on success — validation failures surface as `TchError`.
pub fn f_internal_validate_compressed_sparse_indices(
is_crow: bool,
compressed_idx: &Tensor,
plain_idx: &Tensor,
cdim: i64,
dim: i64,
nnz: i64,
) -> Result<(), TchError> {
unsafe_torch_err!(atg__validate_compressed_sparse_indices(
if is_crow { 1 } else { 0 },
compressed_idx.c_tensor,
plain_idx.c_tensor,
cdim,
dim,
nnz
));
Ok(())
}
/// Fallible binding for ATen's `_validate_sparse_bsc_tensor_args`; validation
/// failures surface as `TchError`, success yields `Ok(())`.
pub fn f_internal_validate_sparse_bsc_tensor_args(
ccol_indices: &Tensor,
row_indices: &Tensor,
values: &Tensor,
size: impl IntList,
) -> Result<(), TchError> {
unsafe_torch_err!(atg__validate_sparse_bsc_tensor_args(
ccol_indices.c_tensor,
row_indices.c_tensor,
values.c_tensor,
size.as_ptr(),
size.len_i32()
));
Ok(())
}
/// Fallible binding for ATen's `_validate_sparse_bsr_tensor_args`; validation
/// failures surface as `TchError`, success yields `Ok(())`.
pub fn f_internal_validate_sparse_bsr_tensor_args(
crow_indices: &Tensor,
col_indices: &Tensor,
values: &Tensor,
size: impl IntList,
) -> Result<(), TchError> {
unsafe_torch_err!(atg__validate_sparse_bsr_tensor_args(
crow_indices.c_tensor,
col_indices.c_tensor,
values.c_tensor,
size.as_ptr(),
size.len_i32()
));
Ok(())
}
/// Fallible binding for ATen's `_validate_sparse_compressed_tensor_args`; the
/// `layout` is lowered to its `i8` code. Validation failures surface as `TchError`.
pub fn f_internal_validate_sparse_compressed_tensor_args(
compressed_indices: &Tensor,
plain_indices: &Tensor,
values: &Tensor,
size: impl IntList,
layout: Layout,
) -> Result<(), TchError> {
unsafe_torch_err!(atg__validate_sparse_compressed_tensor_args(
compressed_indices.c_tensor,
plain_indices.c_tensor,
values.c_tensor,
size.as_ptr(),
size.len_i32(),
layout.to_i8()
));
Ok(())
}
/// Fallible binding for ATen's `_validate_sparse_csc_tensor_args`; validation
/// failures surface as `TchError`, success yields `Ok(())`.
pub fn f_internal_validate_sparse_csc_tensor_args(
ccol_indices: &Tensor,
row_indices: &Tensor,
values: &Tensor,
size: impl IntList,
) -> Result<(), TchError> {
unsafe_torch_err!(atg__validate_sparse_csc_tensor_args(
ccol_indices.c_tensor,
row_indices.c_tensor,
values.c_tensor,
size.as_ptr(),
size.len_i32()
));
Ok(())
}
/// Fallible binding for ATen's `_validate_sparse_csr_tensor_args`; validation
/// failures surface as `TchError`, success yields `Ok(())`.
pub fn f_internal_validate_sparse_csr_tensor_args(
crow_indices: &Tensor,
col_indices: &Tensor,
values: &Tensor,
size: impl IntList,
) -> Result<(), TchError> {
unsafe_torch_err!(atg__validate_sparse_csr_tensor_args(
crow_indices.c_tensor,
col_indices.c_tensor,
values.c_tensor,
size.as_ptr(),
size.len_i32()
));
Ok(())
}
/// Fallible binding for ATen's `_values` (values of a sparse tensor —
/// name-based, TODO confirm).
pub fn f_internal_values(&self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__values(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
pub fn f_internal_values_copy(&self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__values_copy(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
pub fn f_internal_values_copy_out(&self, out: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__values_copy_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
pub fn f_internal_version(&self) -> Result<i64, TchError> {
let return_;
unsafe_torch_err!(return_ = atg__version(self.c_tensor));
Ok(return_)
}
/// Fallible wrapper around the C `atg__weight_norm` op on tensors `v` and `g`
/// along dimension `dim`; returns the single resulting tensor.
pub fn f_internal_weight_norm(v: &Tensor, g: &Tensor, dim: i64) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__weight_norm(c_tensors.as_mut_ptr(), v.c_tensor, g.c_tensor, dim));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Wraps `atg__weight_norm_differentiable_backward`; the C op writes two output
/// handles, returned here as a tensor pair.
pub fn f_internal_weight_norm_differentiable_backward(
grad_w: &Tensor,
saved_v: &Tensor,
saved_g: &Tensor,
saved_norms: &Tensor,
dim: i64,
) -> Result<(Tensor, Tensor), TchError> {
// Two-slot array: the C op fills both output handles.
let mut c_tensors = [std::ptr::null_mut(); 2];
unsafe_torch_err!(atg__weight_norm_differentiable_backward(
c_tensors.as_mut_ptr(),
grad_w.c_tensor,
saved_v.c_tensor,
saved_g.c_tensor,
saved_norms.c_tensor,
dim
));
Ok((Tensor { c_tensor: c_tensors[0] }, Tensor { c_tensor: c_tensors[1] }))
}
/// Wraps `atg__weight_norm_interface`; returns the two tensors the C op produces.
pub fn f_internal_weight_norm_interface(
v: &Tensor,
g: &Tensor,
dim: i64,
) -> Result<(Tensor, Tensor), TchError> {
let mut c_tensors = [std::ptr::null_mut(); 2];
unsafe_torch_err!(atg__weight_norm_interface(
c_tensors.as_mut_ptr(),
v.c_tensor,
g.c_tensor,
dim
));
Ok((Tensor { c_tensor: c_tensors[0] }, Tensor { c_tensor: c_tensors[1] }))
}
/// Wraps `atg__weight_norm_interface_backward`; returns the two tensors produced.
pub fn f_internal_weight_norm_interface_backward(
grad_w: &Tensor,
saved_v: &Tensor,
saved_g: &Tensor,
saved_norms: &Tensor,
dim: i64,
) -> Result<(Tensor, Tensor), TchError> {
let mut c_tensors = [std::ptr::null_mut(); 2];
unsafe_torch_err!(atg__weight_norm_interface_backward(
c_tensors.as_mut_ptr(),
grad_w.c_tensor,
saved_v.c_tensor,
saved_g.c_tensor,
saved_norms.c_tensor,
dim
));
Ok((Tensor { c_tensor: c_tensors[0] }, Tensor { c_tensor: c_tensors[1] }))
}
/// `out`-variant of the interface backward: `out0`/`out1` are passed to the C op
/// as destination tensors ahead of the regular arguments.
pub fn f_internal_weight_norm_interface_backward_out(
out0: &Tensor,
out1: &Tensor,
grad_w: &Tensor,
saved_v: &Tensor,
saved_g: &Tensor,
saved_norms: &Tensor,
dim: i64,
) -> Result<(Tensor, Tensor), TchError> {
let mut c_tensors = [std::ptr::null_mut(); 2];
unsafe_torch_err!(atg__weight_norm_interface_backward_out(
c_tensors.as_mut_ptr(),
out0.c_tensor,
out1.c_tensor,
grad_w.c_tensor,
saved_v.c_tensor,
saved_g.c_tensor,
saved_norms.c_tensor,
dim
));
Ok((Tensor { c_tensor: c_tensors[0] }, Tensor { c_tensor: c_tensors[1] }))
}
/// `out`-variant of `f_internal_weight_norm_interface` with `out0`/`out1` as
/// destination tensors.
pub fn f_internal_weight_norm_interface_out(
out0: &Tensor,
out1: &Tensor,
v: &Tensor,
g: &Tensor,
dim: i64,
) -> Result<(Tensor, Tensor), TchError> {
let mut c_tensors = [std::ptr::null_mut(); 2];
unsafe_torch_err!(atg__weight_norm_interface_out(
c_tensors.as_mut_ptr(),
out0.c_tensor,
out1.c_tensor,
v.c_tensor,
g.c_tensor,
dim
));
Ok((Tensor { c_tensor: c_tensors[0] }, Tensor { c_tensor: c_tensors[1] }))
}
/// Fallible wrapper around the C `atg_abs` op; returns the resulting tensor or
/// the Torch error. The `_`-suffixed sibling is the in-place variant and the
/// `_out` sibling writes into a caller-supplied tensor — this pattern repeats
/// for every unary op below.
pub fn f_abs(&self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_abs(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// In-place variant: wraps `atg_abs_` (takes `&mut self`, still returns a handle).
pub fn f_abs_(&mut self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_abs_(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// `out`-variant: wraps `atg_abs_out` with `out` as the destination tensor.
pub fn f_abs_out(&self, out: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_abs_out(c_tensors.as_mut_ptr(), out.c_tensor, self.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Wraps `atg_absolute`.
pub fn f_absolute(&self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_absolute(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// In-place variant wrapping `atg_absolute_`.
pub fn f_absolute_(&mut self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_absolute_(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// `out`-variant wrapping `atg_absolute_out`.
pub fn f_absolute_out(&self, out: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_absolute_out(c_tensors.as_mut_ptr(), out.c_tensor, self.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Wraps `atg_acos`.
pub fn f_acos(&self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_acos(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// In-place variant wrapping `atg_acos_`.
pub fn f_acos_(&mut self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_acos_(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// `out`-variant wrapping `atg_acos_out`.
pub fn f_acos_out(&self, out: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_acos_out(c_tensors.as_mut_ptr(), out.c_tensor, self.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Wraps `atg_acosh`.
pub fn f_acosh(&self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_acosh(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// In-place variant wrapping `atg_acosh_`.
pub fn f_acosh_(&mut self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_acosh_(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// `out`-variant wrapping `atg_acosh_out`.
pub fn f_acosh_out(&self, out: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_acosh_out(c_tensors.as_mut_ptr(), out.c_tensor, self.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Wraps the C `atg_adaptive_avg_pool1d` op; `output_size` is marshalled as a
/// (pointer, length) pair, like every `IntList` argument in this file.
pub fn f_adaptive_avg_pool1d(&self, output_size: impl IntList) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_adaptive_avg_pool1d(
c_tensors.as_mut_ptr(),
self.c_tensor,
output_size.as_ptr(),
output_size.len_i32()
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Wraps `atg_adaptive_avg_pool2d`.
pub fn f_adaptive_avg_pool2d(&self, output_size: impl IntList) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_adaptive_avg_pool2d(
c_tensors.as_mut_ptr(),
self.c_tensor,
output_size.as_ptr(),
output_size.len_i32()
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// `out`-variant wrapping `atg_adaptive_avg_pool2d_out` with `out` as destination.
pub fn f_adaptive_avg_pool2d_out(
&self,
out: &Tensor,
output_size: impl IntList,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_adaptive_avg_pool2d_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
output_size.as_ptr(),
output_size.len_i32()
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Wraps `atg_adaptive_avg_pool3d`.
pub fn f_adaptive_avg_pool3d(&self, output_size: impl IntList) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_adaptive_avg_pool3d(
c_tensors.as_mut_ptr(),
self.c_tensor,
output_size.as_ptr(),
output_size.len_i32()
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Wraps `atg_adaptive_avg_pool3d_backward`; note the C op takes `grad_input`
/// first, then `grad_output`, then `self` — this order is fixed by the C API.
pub fn f_adaptive_avg_pool3d_backward(
&self,
grad_input: &Tensor,
grad_output: &Tensor,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_adaptive_avg_pool3d_backward(
c_tensors.as_mut_ptr(),
grad_input.c_tensor,
grad_output.c_tensor,
self.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// `out`-variant wrapping `atg_adaptive_avg_pool3d_out`.
pub fn f_adaptive_avg_pool3d_out(
&self,
out: &Tensor,
output_size: impl IntList,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_adaptive_avg_pool3d_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
output_size.as_ptr(),
output_size.len_i32()
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Wraps the C `atg_adaptive_max_pool1d` op; the C side fills two output
/// handles, returned here as a tensor pair.
pub fn f_adaptive_max_pool1d(
&self,
output_size: impl IntList,
) -> Result<(Tensor, Tensor), TchError> {
let mut c_tensors = [std::ptr::null_mut(); 2];
unsafe_torch_err!(atg_adaptive_max_pool1d(
c_tensors.as_mut_ptr(),
self.c_tensor,
output_size.as_ptr(),
output_size.len_i32()
));
Ok((Tensor { c_tensor: c_tensors[0] }, Tensor { c_tensor: c_tensors[1] }))
}
/// Wraps `atg_adaptive_max_pool2d`; returns the two tensors the C op produces.
pub fn f_adaptive_max_pool2d(
&self,
output_size: impl IntList,
) -> Result<(Tensor, Tensor), TchError> {
let mut c_tensors = [std::ptr::null_mut(); 2];
unsafe_torch_err!(atg_adaptive_max_pool2d(
c_tensors.as_mut_ptr(),
self.c_tensor,
output_size.as_ptr(),
output_size.len_i32()
));
Ok((Tensor { c_tensor: c_tensors[0] }, Tensor { c_tensor: c_tensors[1] }))
}
/// Wraps `atg_adaptive_max_pool2d_backward` (args: grad_output, self, indices).
pub fn f_adaptive_max_pool2d_backward(
&self,
grad_output: &Tensor,
indices: &Tensor,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_adaptive_max_pool2d_backward(
c_tensors.as_mut_ptr(),
grad_output.c_tensor,
self.c_tensor,
indices.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// `grad_input`-destination variant wrapping `atg_adaptive_max_pool2d_backward_grad_input`.
pub fn f_adaptive_max_pool2d_backward_grad_input(
&self,
grad_input: &Tensor,
grad_output: &Tensor,
indices: &Tensor,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_adaptive_max_pool2d_backward_grad_input(
c_tensors.as_mut_ptr(),
grad_input.c_tensor,
grad_output.c_tensor,
self.c_tensor,
indices.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// `out`-variant wrapping `atg_adaptive_max_pool2d_out`; `out` and `indices`
/// are the two destination tensors.
pub fn f_adaptive_max_pool2d_out(
&self,
out: &Tensor,
indices: &Tensor,
output_size: impl IntList,
) -> Result<(Tensor, Tensor), TchError> {
let mut c_tensors = [std::ptr::null_mut(); 2];
unsafe_torch_err!(atg_adaptive_max_pool2d_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
indices.c_tensor,
self.c_tensor,
output_size.as_ptr(),
output_size.len_i32()
));
Ok((Tensor { c_tensor: c_tensors[0] }, Tensor { c_tensor: c_tensors[1] }))
}
/// Wraps `atg_adaptive_max_pool3d`; returns the two tensors the C op produces.
pub fn f_adaptive_max_pool3d(
&self,
output_size: impl IntList,
) -> Result<(Tensor, Tensor), TchError> {
let mut c_tensors = [std::ptr::null_mut(); 2];
unsafe_torch_err!(atg_adaptive_max_pool3d(
c_tensors.as_mut_ptr(),
self.c_tensor,
output_size.as_ptr(),
output_size.len_i32()
));
Ok((Tensor { c_tensor: c_tensors[0] }, Tensor { c_tensor: c_tensors[1] }))
}
/// Wraps `atg_adaptive_max_pool3d_backward` (args: grad_output, self, indices).
pub fn f_adaptive_max_pool3d_backward(
&self,
grad_output: &Tensor,
indices: &Tensor,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_adaptive_max_pool3d_backward(
c_tensors.as_mut_ptr(),
grad_output.c_tensor,
self.c_tensor,
indices.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// `grad_input`-destination variant wrapping `atg_adaptive_max_pool3d_backward_grad_input`.
pub fn f_adaptive_max_pool3d_backward_grad_input(
&self,
grad_input: &Tensor,
grad_output: &Tensor,
indices: &Tensor,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_adaptive_max_pool3d_backward_grad_input(
c_tensors.as_mut_ptr(),
grad_input.c_tensor,
grad_output.c_tensor,
self.c_tensor,
indices.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// `out`-variant wrapping `atg_adaptive_max_pool3d_out` with two destinations.
pub fn f_adaptive_max_pool3d_out(
&self,
out: &Tensor,
indices: &Tensor,
output_size: impl IntList,
) -> Result<(Tensor, Tensor), TchError> {
let mut c_tensors = [std::ptr::null_mut(); 2];
unsafe_torch_err!(atg_adaptive_max_pool3d_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
indices.c_tensor,
self.c_tensor,
output_size.as_ptr(),
output_size.len_i32()
));
Ok((Tensor { c_tensor: c_tensors[0] }, Tensor { c_tensor: c_tensors[1] }))
}
/// Fallible wrapper around the C `atg_add` op on `self` and `other`; the `_`,
/// `_out` and `scalar` siblings below follow the same in-place/destination/scalar
/// pattern used throughout this generated file.
pub fn f_add(&self, other: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_add(c_tensors.as_mut_ptr(), self.c_tensor, other.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// In-place variant wrapping `atg_add_`.
pub fn f_add_(&mut self, other: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_add_(c_tensors.as_mut_ptr(), self.c_tensor, other.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// `out`-variant wrapping `atg_add_out` with `out` as destination.
pub fn f_add_out(&self, out: &Tensor, other: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_add_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
other.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Scalar variant wrapping `atg_add_scalar`; `other` is converted through
/// `Into<Scalar>` and passed as a raw `c_scalar` handle.
pub fn f_add_scalar<S: Into<Scalar>>(&self, other: S) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_add_scalar(
c_tensors.as_mut_ptr(),
self.c_tensor,
other.into().c_scalar
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// In-place scalar variant wrapping `atg_add_scalar_`.
pub fn f_add_scalar_<S: Into<Scalar>>(&mut self, other: S) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_add_scalar_(
c_tensors.as_mut_ptr(),
self.c_tensor,
other.into().c_scalar
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// `out` scalar variant wrapping `atg_add_scalar_out`.
pub fn f_add_scalar_out<S: Into<Scalar>>(
&self,
out: &Tensor,
other: S,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_add_scalar_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
other.into().c_scalar
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible wrapper around the C `atg_addbmm` op with `self`, `batch1`, `batch2`.
pub fn f_addbmm(&self, batch1: &Tensor, batch2: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_addbmm(
c_tensors.as_mut_ptr(),
self.c_tensor,
batch1.c_tensor,
batch2.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// In-place variant wrapping `atg_addbmm_`.
pub fn f_addbmm_(&mut self, batch1: &Tensor, batch2: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_addbmm_(
c_tensors.as_mut_ptr(),
self.c_tensor,
batch1.c_tensor,
batch2.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// `out`-variant wrapping `atg_addbmm_out` with `out` as destination.
pub fn f_addbmm_out(
&self,
out: &Tensor,
batch1: &Tensor,
batch2: &Tensor,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_addbmm_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
batch1.c_tensor,
batch2.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible wrapper around `atg_addcdiv` with `self`, `tensor1`, `tensor2`.
pub fn f_addcdiv(&self, tensor1: &Tensor, tensor2: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_addcdiv(
c_tensors.as_mut_ptr(),
self.c_tensor,
tensor1.c_tensor,
tensor2.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// In-place variant wrapping `atg_addcdiv_`.
pub fn f_addcdiv_(&mut self, tensor1: &Tensor, tensor2: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_addcdiv_(
c_tensors.as_mut_ptr(),
self.c_tensor,
tensor1.c_tensor,
tensor2.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// `out`-variant wrapping `atg_addcdiv_out`.
pub fn f_addcdiv_out(
&self,
out: &Tensor,
tensor1: &Tensor,
tensor2: &Tensor,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_addcdiv_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
tensor1.c_tensor,
tensor2.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible wrapper around `atg_addcmul` with `self`, `tensor1`, `tensor2`.
pub fn f_addcmul(&self, tensor1: &Tensor, tensor2: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_addcmul(
c_tensors.as_mut_ptr(),
self.c_tensor,
tensor1.c_tensor,
tensor2.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// In-place variant wrapping `atg_addcmul_`.
pub fn f_addcmul_(&mut self, tensor1: &Tensor, tensor2: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_addcmul_(
c_tensors.as_mut_ptr(),
self.c_tensor,
tensor1.c_tensor,
tensor2.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// `out`-variant wrapping `atg_addcmul_out`.
pub fn f_addcmul_out(
&self,
out: &Tensor,
tensor1: &Tensor,
tensor2: &Tensor,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_addcmul_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
tensor1.c_tensor,
tensor2.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible wrapper around the C `atg_addmm` op with `self`, `mat1`, `mat2`.
pub fn f_addmm(&self, mat1: &Tensor, mat2: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_addmm(
c_tensors.as_mut_ptr(),
self.c_tensor,
mat1.c_tensor,
mat2.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// In-place variant wrapping `atg_addmm_`.
pub fn f_addmm_(&mut self, mat1: &Tensor, mat2: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_addmm_(
c_tensors.as_mut_ptr(),
self.c_tensor,
mat1.c_tensor,
mat2.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// `out`-variant wrapping `atg_addmm_out` with `out` as destination.
pub fn f_addmm_out(
&self,
out: &Tensor,
mat1: &Tensor,
mat2: &Tensor,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_addmm_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
mat1.c_tensor,
mat2.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible wrapper around `atg_addmv` with `self`, `mat`, `vec`.
pub fn f_addmv(&self, mat: &Tensor, vec: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_addmv(
c_tensors.as_mut_ptr(),
self.c_tensor,
mat.c_tensor,
vec.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// In-place variant wrapping `atg_addmv_`.
pub fn f_addmv_(&mut self, mat: &Tensor, vec: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_addmv_(
c_tensors.as_mut_ptr(),
self.c_tensor,
mat.c_tensor,
vec.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// `out`-variant wrapping `atg_addmv_out`.
pub fn f_addmv_out(
&self,
out: &Tensor,
mat: &Tensor,
vec: &Tensor,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_addmv_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
mat.c_tensor,
vec.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible wrapper around `atg_addr` with `self`, `vec1`, `vec2`.
pub fn f_addr(&self, vec1: &Tensor, vec2: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_addr(
c_tensors.as_mut_ptr(),
self.c_tensor,
vec1.c_tensor,
vec2.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// In-place variant wrapping `atg_addr_`.
pub fn f_addr_(&mut self, vec1: &Tensor, vec2: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_addr_(
c_tensors.as_mut_ptr(),
self.c_tensor,
vec1.c_tensor,
vec2.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// `out`-variant wrapping `atg_addr_out`.
pub fn f_addr_out(
&self,
out: &Tensor,
vec1: &Tensor,
vec2: &Tensor,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_addr_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
vec1.c_tensor,
vec2.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible wrapper around the C `atg_adjoint` op on `self`.
pub fn f_adjoint(&self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_adjoint(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Wraps `atg_affine_grid_generator`; `align_corners` is encoded as a C int
/// (1/0), as for every boolean passed across this FFI boundary.
pub fn f_affine_grid_generator(
theta: &Tensor,
size: impl IntList,
align_corners: bool,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_affine_grid_generator(
c_tensors.as_mut_ptr(),
theta.c_tensor,
size.as_ptr(),
size.len_i32(),
if align_corners { 1 } else { 0 }
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Wraps `atg_affine_grid_generator_backward` with `grad` as the input tensor.
pub fn f_affine_grid_generator_backward(
grad: &Tensor,
size: impl IntList,
align_corners: bool,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_affine_grid_generator_backward(
c_tensors.as_mut_ptr(),
grad.c_tensor,
size.as_ptr(),
size.len_i32(),
if align_corners { 1 } else { 0 }
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// `out`-variant wrapping `atg_affine_grid_generator_out` with `out` as destination.
pub fn f_affine_grid_generator_out(
out: &Tensor,
theta: &Tensor,
size: impl IntList,
align_corners: bool,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_affine_grid_generator_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
theta.c_tensor,
size.as_ptr(),
size.len_i32(),
if align_corners { 1 } else { 0 }
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible wrapper around the C `atg_alias` op on `self`.
pub fn f_alias(&self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_alias(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Wraps `atg_alias_copy`.
pub fn f_alias_copy(&self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_alias_copy(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// `out`-variant wrapping `atg_alias_copy_out` with `out` as destination.
pub fn f_alias_copy_out(&self, out: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_alias_copy_out(c_tensors.as_mut_ptr(), out.c_tensor, self.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Wraps `atg_align_as` on `self` and `other`.
pub fn f_align_as(&self, other: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_align_as(c_tensors.as_mut_ptr(), self.c_tensor, other.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Wraps `atg_align_tensors`, which returns a C-allocated, null-terminated
/// array of tensor handles: the loop collects handles until the null sentinel,
/// then the array itself is released with `libc::free` (the handles are owned
/// by the returned `Tensor` values).
pub fn f_align_tensors<T: Borrow<Tensor>>(tensors: &[T]) -> Result<Vec<Tensor>, TchError> {
let c_tensors =
unsafe_torch_err!(atg_align_tensors(ptr_list(tensors).as_ptr(), tensors.len() as i32));
let mut r__ = vec![];
let mut i = 0;
loop {
let c__ = unsafe { *c_tensors.add(i) };
if c__.is_null() {
break;
}
r__.push(Tensor { c_tensor: c__ });
i += 1;
}
unsafe { libc::free(c_tensors as *mut libc::c_void) }
Ok(r__)
}
/// Fallible wrapper around the C `atg_all` op on `self`.
pub fn f_all(&self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_all(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Whole-tensor `out`-variant wrapping `atg_all_all_out`.
pub fn f_all_all_out(&self, out: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_all_all_out(c_tensors.as_mut_ptr(), out.c_tensor, self.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Per-dimension variant wrapping `atg_all_dim`; `keepdim` is passed as a C int.
pub fn f_all_dim(&self, dim: i64, keepdim: bool) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_all_dim(
c_tensors.as_mut_ptr(),
self.c_tensor,
dim,
if keepdim { 1 } else { 0 }
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Per-dimension `out`-variant wrapping `atg_all_out`.
pub fn f_all_out(&self, out: &Tensor, dim: i64, keepdim: bool) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_all_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
dim,
if keepdim { 1 } else { 0 }
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Wraps `atg_allclose` and converts its C int return into a Rust `bool`
/// (non-zero means true).
pub fn f_allclose(
&self,
other: &Tensor,
rtol: f64,
atol: f64,
equal_nan: bool,
) -> Result<bool, TchError> {
let return_;
unsafe_torch_err!(
return_ = atg_allclose(
self.c_tensor,
other.c_tensor,
rtol,
atol,
if equal_nan { 1 } else { 0 }
)
);
Ok(return_ != 0)
}
/// Fallible wrapper around the C `atg_alpha_dropout` op with probability `p`
/// and `train` encoded as a C int.
pub fn f_alpha_dropout(&self, p: f64, train: bool) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_alpha_dropout(
c_tensors.as_mut_ptr(),
self.c_tensor,
p,
if train { 1 } else { 0 }
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// In-place variant wrapping `atg_alpha_dropout_`.
pub fn f_alpha_dropout_(&mut self, p: f64, train: bool) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_alpha_dropout_(
c_tensors.as_mut_ptr(),
self.c_tensor,
p,
if train { 1 } else { 0 }
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Wraps `atg_amax` over the dimensions in `dim` (marshalled as ptr/len).
pub fn f_amax(&self, dim: impl IntList, keepdim: bool) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_amax(
c_tensors.as_mut_ptr(),
self.c_tensor,
dim.as_ptr(),
dim.len_i32(),
if keepdim { 1 } else { 0 }
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// `out`-variant wrapping `atg_amax_out`.
pub fn f_amax_out(
&self,
out: &Tensor,
dim: impl IntList,
keepdim: bool,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_amax_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
dim.as_ptr(),
dim.len_i32(),
if keepdim { 1 } else { 0 }
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Wraps `atg_amin` over the dimensions in `dim`.
pub fn f_amin(&self, dim: impl IntList, keepdim: bool) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_amin(
c_tensors.as_mut_ptr(),
self.c_tensor,
dim.as_ptr(),
dim.len_i32(),
if keepdim { 1 } else { 0 }
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// `out`-variant wrapping `atg_amin_out`.
pub fn f_amin_out(
&self,
out: &Tensor,
dim: impl IntList,
keepdim: bool,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_amin_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
dim.as_ptr(),
dim.len_i32(),
if keepdim { 1 } else { 0 }
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Wraps the C `atg_aminmax` op; an optional `dim` is marshalled as a value
/// plus an is-none flag (`dim.unwrap_or(0i64)` / `dim.is_none() as i8`), the
/// generated encoding for `Option<i64>` across this FFI boundary.
pub fn f_aminmax(
&self,
dim: impl Into<Option<i64>>,
keepdim: bool,
) -> Result<(Tensor, Tensor), TchError> {
let dim = dim.into();
let mut c_tensors = [std::ptr::null_mut(); 2];
unsafe_torch_err!(atg_aminmax(
c_tensors.as_mut_ptr(),
self.c_tensor,
dim.unwrap_or(0i64),
dim.is_none() as i8,
if keepdim { 1 } else { 0 }
));
Ok((Tensor { c_tensor: c_tensors[0] }, Tensor { c_tensor: c_tensors[1] }))
}
/// `out`-variant wrapping `atg_aminmax_out` with `min`/`max` as destinations.
pub fn f_aminmax_out(
&self,
min: &Tensor,
max: &Tensor,
dim: impl Into<Option<i64>>,
keepdim: bool,
) -> Result<(Tensor, Tensor), TchError> {
let dim = dim.into();
let mut c_tensors = [std::ptr::null_mut(); 2];
unsafe_torch_err!(atg_aminmax_out(
c_tensors.as_mut_ptr(),
min.c_tensor,
max.c_tensor,
self.c_tensor,
dim.unwrap_or(0i64),
dim.is_none() as i8,
if keepdim { 1 } else { 0 }
));
Ok((Tensor { c_tensor: c_tensors[0] }, Tensor { c_tensor: c_tensors[1] }))
}
/// Fallible wrapper around the C `atg_angle` op on `self`.
pub fn f_angle(&self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_angle(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// `out`-variant wrapping `atg_angle_out`.
pub fn f_angle_out(&self, out: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_angle_out(c_tensors.as_mut_ptr(), out.c_tensor, self.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible wrapper around the C `atg_any` op on `self`.
pub fn f_any(&self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_any(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Whole-tensor `out`-variant wrapping `atg_any_all_out`.
pub fn f_any_all_out(&self, out: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_any_all_out(c_tensors.as_mut_ptr(), out.c_tensor, self.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Per-dimension variant wrapping `atg_any_dim`.
pub fn f_any_dim(&self, dim: i64, keepdim: bool) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_any_dim(
c_tensors.as_mut_ptr(),
self.c_tensor,
dim,
if keepdim { 1 } else { 0 }
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Per-dimension `out`-variant wrapping `atg_any_out`.
pub fn f_any_out(&self, out: &Tensor, dim: i64, keepdim: bool) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_any_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
dim,
if keepdim { 1 } else { 0 }
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Wraps `atg_arange`; `options` is the (Kind, Device) pair passed to the C
/// side as two ints, the generated encoding for tensor-creation options.
pub fn f_arange<S: Into<Scalar>>(end: S, options: (Kind, Device)) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_arange(
c_tensors.as_mut_ptr(),
end.into().c_scalar,
options.0.c_int(),
options.1.c_int()
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Wraps `atg_arange_start` with explicit `start` and `end` scalars.
pub fn f_arange_start<S: Into<Scalar>>(
start: S,
end: S,
options: (Kind, Device),
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_arange_start(
c_tensors.as_mut_ptr(),
start.into().c_scalar,
end.into().c_scalar,
options.0.c_int(),
options.1.c_int()
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Wraps `atg_arange_start_step` with `start`, `end` and `step` scalars.
pub fn f_arange_start_step<S: Into<Scalar>>(
start: S,
end: S,
step: S,
options: (Kind, Device),
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_arange_start_step(
c_tensors.as_mut_ptr(),
start.into().c_scalar,
end.into().c_scalar,
step.into().c_scalar,
options.0.c_int(),
options.1.c_int()
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible wrapper around the C `atg_arccos` op; as elsewhere in this file the
/// `_` sibling is the in-place form and `_out` writes into a supplied tensor.
pub fn f_arccos(&self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_arccos(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// In-place variant wrapping `atg_arccos_`.
pub fn f_arccos_(&mut self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_arccos_(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// `out`-variant wrapping `atg_arccos_out`.
pub fn f_arccos_out(&self, out: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_arccos_out(c_tensors.as_mut_ptr(), out.c_tensor, self.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Wraps `atg_arccosh`.
pub fn f_arccosh(&self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_arccosh(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// In-place variant wrapping `atg_arccosh_`.
pub fn f_arccosh_(&mut self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_arccosh_(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// `out`-variant wrapping `atg_arccosh_out`.
pub fn f_arccosh_out(&self, out: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_arccosh_out(c_tensors.as_mut_ptr(), out.c_tensor, self.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Wraps `atg_arcsin`.
pub fn f_arcsin(&self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_arcsin(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// In-place variant wrapping `atg_arcsin_`.
pub fn f_arcsin_(&mut self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_arcsin_(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// `out`-variant wrapping `atg_arcsin_out`.
pub fn f_arcsin_out(&self, out: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_arcsin_out(c_tensors.as_mut_ptr(), out.c_tensor, self.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Wraps `atg_arcsinh`.
pub fn f_arcsinh(&self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_arcsinh(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// In-place variant wrapping `atg_arcsinh_`.
pub fn f_arcsinh_(&mut self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_arcsinh_(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// `out`-variant wrapping `atg_arcsinh_out`.
pub fn f_arcsinh_out(&self, out: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_arcsinh_out(c_tensors.as_mut_ptr(), out.c_tensor, self.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Wraps `atg_arctan`.
pub fn f_arctan(&self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_arctan(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Two-argument wrapper around `atg_arctan2` on `self` and `other`.
pub fn f_arctan2(&self, other: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_arctan2(c_tensors.as_mut_ptr(), self.c_tensor, other.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// In-place variant wrapping `atg_arctan2_`.
pub fn f_arctan2_(&mut self, other: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_arctan2_(c_tensors.as_mut_ptr(), self.c_tensor, other.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// `out`-variant wrapping `atg_arctan2_out`.
pub fn f_arctan2_out(&self, out: &Tensor, other: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_arctan2_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
other.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// In-place variant wrapping `atg_arctan_`.
pub fn f_arctan_(&mut self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_arctan_(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// `out`-variant wrapping `atg_arctan_out`.
pub fn f_arctan_out(&self, out: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_arctan_out(c_tensors.as_mut_ptr(), out.c_tensor, self.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Wraps `atg_arctanh`.
pub fn f_arctanh(&self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_arctanh(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// In-place variant wrapping `atg_arctanh_`.
pub fn f_arctanh_(&mut self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_arctanh_(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// `out`-variant wrapping `atg_arctanh_out`.
pub fn f_arctanh_out(&self, out: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_arctanh_out(c_tensors.as_mut_ptr(), out.c_tensor, self.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Wrapper over `atg_argmax`. The optional `dim` is marshalled for C as a value/flag pair:
/// `None` becomes `(0, is_none = 1)`, `Some(d)` becomes `(d, 0)`; `keepdim` is passed as 0/1.
pub fn f_argmax(&self, dim: impl Into<Option<i64>>, keepdim: bool) -> Result<Tensor, TchError> {
let dim = dim.into();
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_argmax(
c_tensors.as_mut_ptr(),
self.c_tensor,
dim.unwrap_or(0i64),
dim.is_none() as i8,
if keepdim { 1 } else { 0 }
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Wrapper over `atg_argmax_out` writing into `out`. Optional `dim` is marshalled as a
/// value/`is_none` pair (see `f_argmax`); `keepdim` is passed as 0/1.
pub fn f_argmax_out(
&self,
out: &Tensor,
dim: impl Into<Option<i64>>,
keepdim: bool,
) -> Result<Tensor, TchError> {
let dim = dim.into();
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_argmax_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
dim.unwrap_or(0i64),
dim.is_none() as i8,
if keepdim { 1 } else { 0 }
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Wrapper over `atg_argmin`. Optional `dim` is marshalled as a value/`is_none` pair
/// (`None` → `(0, 1)`, `Some(d)` → `(d, 0)`); `keepdim` is passed as 0/1.
pub fn f_argmin(&self, dim: impl Into<Option<i64>>, keepdim: bool) -> Result<Tensor, TchError> {
let dim = dim.into();
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_argmin(
c_tensors.as_mut_ptr(),
self.c_tensor,
dim.unwrap_or(0i64),
dim.is_none() as i8,
if keepdim { 1 } else { 0 }
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Wrapper over `atg_argmin_out` writing into `out`. Optional `dim` is marshalled as a
/// value/`is_none` pair (see `f_argmin`); `keepdim` is passed as 0/1.
pub fn f_argmin_out(
&self,
out: &Tensor,
dim: impl Into<Option<i64>>,
keepdim: bool,
) -> Result<Tensor, TchError> {
let dim = dim.into();
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_argmin_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
dim.unwrap_or(0i64),
dim.is_none() as i8,
if keepdim { 1 } else { 0 }
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Wrapper over `atg_argsort`; `descending` is converted to a C-style 0/1 flag.
pub fn f_argsort(&self, dim: i64, descending: bool) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_argsort(
c_tensors.as_mut_ptr(),
self.c_tensor,
dim,
if descending { 1 } else { 0 }
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Wrapper over `atg_argsort_stable`; `stable` and `descending` are converted to C-style 0/1 flags.
pub fn f_argsort_stable(
&self,
stable: bool,
dim: i64,
descending: bool,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_argsort_stable(
c_tensors.as_mut_ptr(),
self.c_tensor,
if stable { 1 } else { 0 },
dim,
if descending { 1 } else { 0 }
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Wrapper over `atg_argsort_stable_out` writing into `out`; bools are converted to 0/1 flags.
pub fn f_argsort_stable_out(
&self,
out: &Tensor,
stable: bool,
dim: i64,
descending: bool,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_argsort_stable_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
if stable { 1 } else { 0 },
dim,
if descending { 1 } else { 0 }
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible wrapper over the C binding `atg_argwhere`; returns the resulting tensor or a `TchError`.
pub fn f_argwhere(&self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_argwhere(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Wrapper over `atg_as_strided`. `size`/`stride` are passed as (ptr, len) pairs; the
/// optional `storage_offset` is marshalled as a value/`is_none` pair (`None` → `(0, 1)`).
pub fn f_as_strided(
&self,
size: impl IntList,
stride: impl IntList,
storage_offset: impl Into<Option<i64>>,
) -> Result<Tensor, TchError> {
let storage_offset = storage_offset.into();
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_as_strided(
c_tensors.as_mut_ptr(),
self.c_tensor,
size.as_ptr(),
size.len_i32(),
stride.as_ptr(),
stride.len_i32(),
storage_offset.unwrap_or(0i64),
storage_offset.is_none() as i8
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Wrapper over `atg_as_strided_` (takes `&mut self`; presumably in-place — trailing `_` in the
/// C name). Arguments are marshalled exactly as in `f_as_strided`.
pub fn f_as_strided_(
&mut self,
size: impl IntList,
stride: impl IntList,
storage_offset: impl Into<Option<i64>>,
) -> Result<Tensor, TchError> {
let storage_offset = storage_offset.into();
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_as_strided_(
c_tensors.as_mut_ptr(),
self.c_tensor,
size.as_ptr(),
size.len_i32(),
stride.as_ptr(),
stride.len_i32(),
storage_offset.unwrap_or(0i64),
storage_offset.is_none() as i8
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Wrapper over `atg_as_strided_copy`; argument marshalling matches `f_as_strided`.
pub fn f_as_strided_copy(
&self,
size: impl IntList,
stride: impl IntList,
storage_offset: impl Into<Option<i64>>,
) -> Result<Tensor, TchError> {
let storage_offset = storage_offset.into();
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_as_strided_copy(
c_tensors.as_mut_ptr(),
self.c_tensor,
size.as_ptr(),
size.len_i32(),
stride.as_ptr(),
stride.len_i32(),
storage_offset.unwrap_or(0i64),
storage_offset.is_none() as i8
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Wrapper over `atg_as_strided_copy_out` writing into `out`; marshalling matches `f_as_strided`.
pub fn f_as_strided_copy_out(
&self,
out: &Tensor,
size: impl IntList,
stride: impl IntList,
storage_offset: impl Into<Option<i64>>,
) -> Result<Tensor, TchError> {
let storage_offset = storage_offset.into();
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_as_strided_copy_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
size.as_ptr(),
size.len_i32(),
stride.as_ptr(),
stride.len_i32(),
storage_offset.unwrap_or(0i64),
storage_offset.is_none() as i8
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Wrapper over `atg_as_strided_scatter` taking a `src` tensor; marshalling matches `f_as_strided`.
pub fn f_as_strided_scatter(
&self,
src: &Tensor,
size: impl IntList,
stride: impl IntList,
storage_offset: impl Into<Option<i64>>,
) -> Result<Tensor, TchError> {
let storage_offset = storage_offset.into();
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_as_strided_scatter(
c_tensors.as_mut_ptr(),
self.c_tensor,
src.c_tensor,
size.as_ptr(),
size.len_i32(),
stride.as_ptr(),
stride.len_i32(),
storage_offset.unwrap_or(0i64),
storage_offset.is_none() as i8
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Wrapper over `atg_as_strided_scatter_out` writing into `out`; marshalling matches
/// `f_as_strided_scatter`.
pub fn f_as_strided_scatter_out(
&self,
out: &Tensor,
src: &Tensor,
size: impl IntList,
stride: impl IntList,
storage_offset: impl Into<Option<i64>>,
) -> Result<Tensor, TchError> {
let storage_offset = storage_offset.into();
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_as_strided_scatter_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
src.c_tensor,
size.as_ptr(),
size.len_i32(),
stride.as_ptr(),
stride.len_i32(),
storage_offset.unwrap_or(0i64),
storage_offset.is_none() as i8
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible wrapper over the C binding `atg_asin`; returns the resulting tensor or a `TchError`.
pub fn f_asin(&self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_asin(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Wrapper over `atg_asin_` (takes `&mut self`; presumably in-place — trailing `_` in the C name).
pub fn f_asin_(&mut self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_asin_(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Wrapper over `atg_asin_out`; `out` is the destination tensor passed to the C side.
pub fn f_asin_out(&self, out: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_asin_out(c_tensors.as_mut_ptr(), out.c_tensor, self.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible wrapper over the C binding `atg_asinh`; returns the resulting tensor or a `TchError`.
pub fn f_asinh(&self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_asinh(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Wrapper over `atg_asinh_` (takes `&mut self`; presumably in-place — trailing `_` in the C name).
pub fn f_asinh_(&mut self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_asinh_(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Wrapper over `atg_asinh_out`; `out` is the destination tensor passed to the C side.
pub fn f_asinh_out(&self, out: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_asinh_out(c_tensors.as_mut_ptr(), out.c_tensor, self.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible wrapper over the C binding `atg_atan`; returns the resulting tensor or a `TchError`.
pub fn f_atan(&self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_atan(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible wrapper over `atg_atan2` taking `self` and `other`; returns the result or a `TchError`.
pub fn f_atan2(&self, other: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_atan2(c_tensors.as_mut_ptr(), self.c_tensor, other.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Wrapper over `atg_atan2_` (takes `&mut self`; presumably in-place — trailing `_` in the C name).
pub fn f_atan2_(&mut self, other: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_atan2_(c_tensors.as_mut_ptr(), self.c_tensor, other.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Wrapper over `atg_atan2_out`; `out` is the destination tensor passed first to the C side.
pub fn f_atan2_out(&self, out: &Tensor, other: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_atan2_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
other.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Wrapper over `atg_atan_` (takes `&mut self`; presumably in-place — trailing `_` in the C name).
pub fn f_atan_(&mut self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_atan_(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Wrapper over `atg_atan_out`; `out` is the destination tensor passed to the C side.
pub fn f_atan_out(&self, out: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_atan_out(c_tensors.as_mut_ptr(), out.c_tensor, self.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible wrapper over the C binding `atg_atanh`; returns the resulting tensor or a `TchError`.
pub fn f_atanh(&self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_atanh(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Wrapper over `atg_atanh_` (takes `&mut self`; presumably in-place — trailing `_` in the C name).
pub fn f_atanh_(&mut self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_atanh_(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Wrapper over `atg_atanh_out`; `out` is the destination tensor passed to the C side.
pub fn f_atanh_out(&self, out: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_atanh_out(c_tensors.as_mut_ptr(), out.c_tensor, self.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible wrapper over the C binding `atg_atleast_1d`; returns the resulting tensor or a `TchError`.
pub fn f_atleast_1d(&self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_atleast_1d(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Applies `atg_atleast_1d_sequence` to a slice of tensors. The C side returns a
/// null-terminated array of tensor pointers; each is wrapped in a `Tensor`, then the
/// array itself is released with `libc::free`.
pub fn f_atleast_1d_sequence<T: Borrow<Tensor>>(
tensors: &[T],
) -> Result<Vec<Tensor>, TchError> {
let c_tensors = unsafe_torch_err!(atg_atleast_1d_sequence(
ptr_list(tensors).as_ptr(),
tensors.len() as i32
));
let mut r__ = vec![];
let mut i = 0;
loop {
// The returned array is terminated by a null pointer.
let c__ = unsafe { *c_tensors.add(i) };
if c__.is_null() {
break;
}
r__.push(Tensor { c_tensor: c__ });
i += 1;
}
// Free only the pointer array; the individual tensors are now owned by the wrappers above.
unsafe { libc::free(c_tensors as *mut libc::c_void) }
Ok(r__)
}
/// Fallible wrapper over the C binding `atg_atleast_2d`; returns the resulting tensor or a `TchError`.
pub fn f_atleast_2d(&self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_atleast_2d(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Applies `atg_atleast_2d_sequence` to a slice of tensors; the null-terminated result
/// array is walked, each pointer wrapped in a `Tensor`, then the array freed (see
/// `f_atleast_1d_sequence` for the same pattern).
pub fn f_atleast_2d_sequence<T: Borrow<Tensor>>(
tensors: &[T],
) -> Result<Vec<Tensor>, TchError> {
let c_tensors = unsafe_torch_err!(atg_atleast_2d_sequence(
ptr_list(tensors).as_ptr(),
tensors.len() as i32
));
let mut r__ = vec![];
let mut i = 0;
loop {
// Stop at the terminating null pointer.
let c__ = unsafe { *c_tensors.add(i) };
if c__.is_null() {
break;
}
r__.push(Tensor { c_tensor: c__ });
i += 1;
}
// Free only the pointer array; the tensors are owned by the wrappers above.
unsafe { libc::free(c_tensors as *mut libc::c_void) }
Ok(r__)
}
/// Fallible wrapper over the C binding `atg_atleast_3d`; returns the resulting tensor or a `TchError`.
pub fn f_atleast_3d(&self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_atleast_3d(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Applies `atg_atleast_3d_sequence` to a slice of tensors; the null-terminated result
/// array is walked, each pointer wrapped in a `Tensor`, then the array freed (see
/// `f_atleast_1d_sequence` for the same pattern).
pub fn f_atleast_3d_sequence<T: Borrow<Tensor>>(
tensors: &[T],
) -> Result<Vec<Tensor>, TchError> {
let c_tensors = unsafe_torch_err!(atg_atleast_3d_sequence(
ptr_list(tensors).as_ptr(),
tensors.len() as i32
));
let mut r__ = vec![];
let mut i = 0;
loop {
// Stop at the terminating null pointer.
let c__ = unsafe { *c_tensors.add(i) };
if c__.is_null() {
break;
}
r__.push(Tensor { c_tensor: c__ });
i += 1;
}
// Free only the pointer array; the tensors are owned by the wrappers above.
unsafe { libc::free(c_tensors as *mut libc::c_void) }
Ok(r__)
}
/// Wrapper over `atg_avg_pool1d`. Each `IntList` is passed as a (ptr, len) pair and the
/// bool flags are converted to C-style 0/1.
pub fn f_avg_pool1d(
&self,
kernel_size: impl IntList,
stride: impl IntList,
padding: impl IntList,
ceil_mode: bool,
count_include_pad: bool,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_avg_pool1d(
c_tensors.as_mut_ptr(),
self.c_tensor,
kernel_size.as_ptr(),
kernel_size.len_i32(),
stride.as_ptr(),
stride.len_i32(),
padding.as_ptr(),
padding.len_i32(),
if ceil_mode { 1 } else { 0 },
if count_include_pad { 1 } else { 0 }
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Wrapper over `atg_avg_pool2d`. `IntList`s are passed as (ptr, len) pairs, bools as 0/1,
/// and the optional `divisor_override` as a value/`is_none` pair (`None` → `(0, 1)`).
pub fn f_avg_pool2d(
&self,
kernel_size: impl IntList,
stride: impl IntList,
padding: impl IntList,
ceil_mode: bool,
count_include_pad: bool,
divisor_override: impl Into<Option<i64>>,
) -> Result<Tensor, TchError> {
let divisor_override = divisor_override.into();
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_avg_pool2d(
c_tensors.as_mut_ptr(),
self.c_tensor,
kernel_size.as_ptr(),
kernel_size.len_i32(),
stride.as_ptr(),
stride.len_i32(),
padding.as_ptr(),
padding.len_i32(),
if ceil_mode { 1 } else { 0 },
if count_include_pad { 1 } else { 0 },
divisor_override.unwrap_or(0i64),
divisor_override.is_none() as i8
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Wrapper over `atg_avg_pool2d_backward`; `grad_output` is passed before `self`.
/// Argument marshalling matches `f_avg_pool2d`.
pub fn f_avg_pool2d_backward(
&self,
grad_output: &Tensor,
kernel_size: impl IntList,
stride: impl IntList,
padding: impl IntList,
ceil_mode: bool,
count_include_pad: bool,
divisor_override: impl Into<Option<i64>>,
) -> Result<Tensor, TchError> {
let divisor_override = divisor_override.into();
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_avg_pool2d_backward(
c_tensors.as_mut_ptr(),
grad_output.c_tensor,
self.c_tensor,
kernel_size.as_ptr(),
kernel_size.len_i32(),
stride.as_ptr(),
stride.len_i32(),
padding.as_ptr(),
padding.len_i32(),
if ceil_mode { 1 } else { 0 },
if count_include_pad { 1 } else { 0 },
divisor_override.unwrap_or(0i64),
divisor_override.is_none() as i8
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Wrapper over `atg_avg_pool2d_backward_grad_input`; `grad_input` is the destination
/// tensor passed first. Argument marshalling matches `f_avg_pool2d`.
pub fn f_avg_pool2d_backward_grad_input(
&self,
grad_input: &Tensor,
grad_output: &Tensor,
kernel_size: impl IntList,
stride: impl IntList,
padding: impl IntList,
ceil_mode: bool,
count_include_pad: bool,
divisor_override: impl Into<Option<i64>>,
) -> Result<Tensor, TchError> {
let divisor_override = divisor_override.into();
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_avg_pool2d_backward_grad_input(
c_tensors.as_mut_ptr(),
grad_input.c_tensor,
grad_output.c_tensor,
self.c_tensor,
kernel_size.as_ptr(),
kernel_size.len_i32(),
stride.as_ptr(),
stride.len_i32(),
padding.as_ptr(),
padding.len_i32(),
if ceil_mode { 1 } else { 0 },
if count_include_pad { 1 } else { 0 },
divisor_override.unwrap_or(0i64),
divisor_override.is_none() as i8
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Wrapper over `atg_avg_pool2d_out` writing into `out`; marshalling matches `f_avg_pool2d`.
pub fn f_avg_pool2d_out(
&self,
out: &Tensor,
kernel_size: impl IntList,
stride: impl IntList,
padding: impl IntList,
ceil_mode: bool,
count_include_pad: bool,
divisor_override: impl Into<Option<i64>>,
) -> Result<Tensor, TchError> {
let divisor_override = divisor_override.into();
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_avg_pool2d_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
kernel_size.as_ptr(),
kernel_size.len_i32(),
stride.as_ptr(),
stride.len_i32(),
padding.as_ptr(),
padding.len_i32(),
if ceil_mode { 1 } else { 0 },
if count_include_pad { 1 } else { 0 },
divisor_override.unwrap_or(0i64),
divisor_override.is_none() as i8
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Wrapper over `atg_avg_pool3d`; argument marshalling matches `f_avg_pool2d`.
pub fn f_avg_pool3d(
&self,
kernel_size: impl IntList,
stride: impl IntList,
padding: impl IntList,
ceil_mode: bool,
count_include_pad: bool,
divisor_override: impl Into<Option<i64>>,
) -> Result<Tensor, TchError> {
let divisor_override = divisor_override.into();
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_avg_pool3d(
c_tensors.as_mut_ptr(),
self.c_tensor,
kernel_size.as_ptr(),
kernel_size.len_i32(),
stride.as_ptr(),
stride.len_i32(),
padding.as_ptr(),
padding.len_i32(),
if ceil_mode { 1 } else { 0 },
if count_include_pad { 1 } else { 0 },
divisor_override.unwrap_or(0i64),
divisor_override.is_none() as i8
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Wrapper over `atg_avg_pool3d_backward`; `grad_output` is passed before `self`.
/// Argument marshalling matches `f_avg_pool2d`.
pub fn f_avg_pool3d_backward(
&self,
grad_output: &Tensor,
kernel_size: impl IntList,
stride: impl IntList,
padding: impl IntList,
ceil_mode: bool,
count_include_pad: bool,
divisor_override: impl Into<Option<i64>>,
) -> Result<Tensor, TchError> {
let divisor_override = divisor_override.into();
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_avg_pool3d_backward(
c_tensors.as_mut_ptr(),
grad_output.c_tensor,
self.c_tensor,
kernel_size.as_ptr(),
kernel_size.len_i32(),
stride.as_ptr(),
stride.len_i32(),
padding.as_ptr(),
padding.len_i32(),
if ceil_mode { 1 } else { 0 },
if count_include_pad { 1 } else { 0 },
divisor_override.unwrap_or(0i64),
divisor_override.is_none() as i8
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Wrapper over `atg_avg_pool3d_backward_grad_input`; `grad_input` is the destination
/// tensor passed first. Argument marshalling matches `f_avg_pool2d`.
pub fn f_avg_pool3d_backward_grad_input(
&self,
grad_input: &Tensor,
grad_output: &Tensor,
kernel_size: impl IntList,
stride: impl IntList,
padding: impl IntList,
ceil_mode: bool,
count_include_pad: bool,
divisor_override: impl Into<Option<i64>>,
) -> Result<Tensor, TchError> {
let divisor_override = divisor_override.into();
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_avg_pool3d_backward_grad_input(
c_tensors.as_mut_ptr(),
grad_input.c_tensor,
grad_output.c_tensor,
self.c_tensor,
kernel_size.as_ptr(),
kernel_size.len_i32(),
stride.as_ptr(),
stride.len_i32(),
padding.as_ptr(),
padding.len_i32(),
if ceil_mode { 1 } else { 0 },
if count_include_pad { 1 } else { 0 },
divisor_override.unwrap_or(0i64),
divisor_override.is_none() as i8
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Wrapper over `atg_avg_pool3d_out` writing into `out`; marshalling matches `f_avg_pool2d`.
pub fn f_avg_pool3d_out(
&self,
out: &Tensor,
kernel_size: impl IntList,
stride: impl IntList,
padding: impl IntList,
ceil_mode: bool,
count_include_pad: bool,
divisor_override: impl Into<Option<i64>>,
) -> Result<Tensor, TchError> {
let divisor_override = divisor_override.into();
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_avg_pool3d_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
kernel_size.as_ptr(),
kernel_size.len_i32(),
stride.as_ptr(),
stride.len_i32(),
padding.as_ptr(),
padding.len_i32(),
if ceil_mode { 1 } else { 0 },
if count_include_pad { 1 } else { 0 },
divisor_override.unwrap_or(0i64),
divisor_override.is_none() as i8
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Wrapper over `atg_baddbmm`; `beta` and `alpha` are converted to C scalars via `Into<Scalar>`.
/// Note: both share the single type parameter `S`, so they must be the same scalar type.
pub fn f_baddbmm<S: Into<Scalar>>(
&self,
batch1: &Tensor,
batch2: &Tensor,
beta: S,
alpha: S,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_baddbmm(
c_tensors.as_mut_ptr(),
self.c_tensor,
batch1.c_tensor,
batch2.c_tensor,
beta.into().c_scalar,
alpha.into().c_scalar
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Wrapper over `atg_baddbmm_` (takes `&mut self`; presumably in-place — trailing `_` in the C name).
pub fn f_baddbmm_(&mut self, batch1: &Tensor, batch2: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_baddbmm_(
c_tensors.as_mut_ptr(),
self.c_tensor,
batch1.c_tensor,
batch2.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Wrapper over `atg_baddbmm_out`; `out` is the destination tensor passed first to the C side.
pub fn f_baddbmm_out(
&self,
out: &Tensor,
batch1: &Tensor,
batch2: &Tensor,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_baddbmm_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
batch1.c_tensor,
batch2.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Associated constructor wrapping `atg_bartlett_window`; `options` is a `(Kind, Device)`
/// pair converted to C ints for dtype and device.
pub fn f_bartlett_window(
window_length: i64,
options: (Kind, Device),
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_bartlett_window(
c_tensors.as_mut_ptr(),
window_length,
options.0.c_int(),
options.1.c_int()
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Associated function wrapping `atg_bartlett_window_out`; writes into `out` (so no
/// `(Kind, Device)` options are taken here).
pub fn f_bartlett_window_out(out: &Tensor, window_length: i64) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_bartlett_window_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
window_length
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Associated constructor wrapping `atg_bartlett_window_periodic`; `periodic` is passed
/// as a 0/1 flag and `options` as C ints for dtype and device.
pub fn f_bartlett_window_periodic(
window_length: i64,
periodic: bool,
options: (Kind, Device),
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_bartlett_window_periodic(
c_tensors.as_mut_ptr(),
window_length,
if periodic { 1 } else { 0 },
options.0.c_int(),
options.1.c_int()
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Associated function wrapping `atg_bartlett_window_periodic_out`; writes into `out`,
/// with `periodic` passed as a 0/1 flag.
pub fn f_bartlett_window_periodic_out(
out: &Tensor,
window_length: i64,
periodic: bool,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_bartlett_window_periodic_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
window_length,
if periodic { 1 } else { 0 }
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Wrapper over `atg_batch_norm`. Optional tensors (`weight`, `bias`, `running_mean`,
/// `running_var`) are passed as null pointers when `None`; bools become 0/1 flags.
pub fn f_batch_norm<T: Borrow<Tensor>>(
&self,
weight: Option<T>,
bias: Option<T>,
running_mean: Option<T>,
running_var: Option<T>,
training: bool,
momentum: f64,
eps: f64,
cudnn_enabled: bool,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_batch_norm(
c_tensors.as_mut_ptr(),
self.c_tensor,
weight.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
bias.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
running_mean.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
running_var.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
if training { 1 } else { 0 },
momentum,
eps,
if cudnn_enabled { 1 } else { 0 }
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Wrapper over `atg_batch_norm_backward_elemt`; the optional `weight` is passed as a
/// null pointer when `None`, and `grad_out` precedes `self` in the C call.
pub fn f_batch_norm_backward_elemt<T: Borrow<Tensor>>(
&self,
grad_out: &Tensor,
mean: &Tensor,
invstd: &Tensor,
weight: Option<T>,
sum_dy: &Tensor,
sum_dy_xmu: &Tensor,
count: &Tensor,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_batch_norm_backward_elemt(
c_tensors.as_mut_ptr(),
grad_out.c_tensor,
self.c_tensor,
mean.c_tensor,
invstd.c_tensor,
weight.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
sum_dy.c_tensor,
sum_dy_xmu.c_tensor,
count.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Wrapper over `atg_batch_norm_backward_elemt_out` writing into `out`; otherwise
/// marshalled like `f_batch_norm_backward_elemt`.
pub fn f_batch_norm_backward_elemt_out<T: Borrow<Tensor>>(
&self,
out: &Tensor,
grad_out: &Tensor,
mean: &Tensor,
invstd: &Tensor,
weight: Option<T>,
sum_dy: &Tensor,
sum_dy_xmu: &Tensor,
count: &Tensor,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_batch_norm_backward_elemt_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
grad_out.c_tensor,
self.c_tensor,
mean.c_tensor,
invstd.c_tensor,
weight.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
sum_dy.c_tensor,
sum_dy_xmu.c_tensor,
count.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Wrapper over `atg_batch_norm_backward_reduce`, which fills four output tensors;
/// optional `weight` is null when `None` and the `*_g` bools become 0/1 flags.
pub fn f_batch_norm_backward_reduce<T: Borrow<Tensor>>(
&self,
grad_out: &Tensor,
mean: &Tensor,
invstd: &Tensor,
weight: Option<T>,
input_g: bool,
weight_g: bool,
bias_g: bool,
) -> Result<(Tensor, Tensor, Tensor, Tensor), TchError> {
let mut c_tensors = [std::ptr::null_mut(); 4];
unsafe_torch_err!(atg_batch_norm_backward_reduce(
c_tensors.as_mut_ptr(),
grad_out.c_tensor,
self.c_tensor,
mean.c_tensor,
invstd.c_tensor,
weight.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
if input_g { 1 } else { 0 },
if weight_g { 1 } else { 0 },
if bias_g { 1 } else { 0 }
));
Ok((
Tensor { c_tensor: c_tensors[0] },
Tensor { c_tensor: c_tensors[1] },
Tensor { c_tensor: c_tensors[2] },
Tensor { c_tensor: c_tensors[3] },
))
}
/// Wrapper over `atg_batch_norm_backward_reduce_out` writing into `out0..out3`;
/// otherwise marshalled like `f_batch_norm_backward_reduce`.
pub fn f_batch_norm_backward_reduce_out<T: Borrow<Tensor>>(
&self,
out0: &Tensor,
out1: &Tensor,
out2: &Tensor,
out3: &Tensor,
grad_out: &Tensor,
mean: &Tensor,
invstd: &Tensor,
weight: Option<T>,
input_g: bool,
weight_g: bool,
bias_g: bool,
) -> Result<(Tensor, Tensor, Tensor, Tensor), TchError> {
let mut c_tensors = [std::ptr::null_mut(); 4];
unsafe_torch_err!(atg_batch_norm_backward_reduce_out(
c_tensors.as_mut_ptr(),
out0.c_tensor,
out1.c_tensor,
out2.c_tensor,
out3.c_tensor,
grad_out.c_tensor,
self.c_tensor,
mean.c_tensor,
invstd.c_tensor,
weight.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
if input_g { 1 } else { 0 },
if weight_g { 1 } else { 0 },
if bias_g { 1 } else { 0 }
));
Ok((
Tensor { c_tensor: c_tensors[0] },
Tensor { c_tensor: c_tensors[1] },
Tensor { c_tensor: c_tensors[2] },
Tensor { c_tensor: c_tensors[3] },
))
}
/// Wrapper over `atg_batch_norm_elemt`; optional `weight`/`bias` are null pointers when `None`.
pub fn f_batch_norm_elemt<T: Borrow<Tensor>>(
&self,
weight: Option<T>,
bias: Option<T>,
mean: &Tensor,
invstd: &Tensor,
eps: f64,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_batch_norm_elemt(
c_tensors.as_mut_ptr(),
self.c_tensor,
weight.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
bias.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
mean.c_tensor,
invstd.c_tensor,
eps
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Wrapper over `atg_batch_norm_elemt_out` writing into `out`; otherwise marshalled like
/// `f_batch_norm_elemt`.
pub fn f_batch_norm_elemt_out<T: Borrow<Tensor>>(
&self,
out: &Tensor,
weight: Option<T>,
bias: Option<T>,
mean: &Tensor,
invstd: &Tensor,
eps: f64,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_batch_norm_elemt_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
weight.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
bias.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
mean.c_tensor,
invstd.c_tensor,
eps
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Wrapper over `atg_batch_norm_gather_stats`, which fills two output tensors;
/// optional running stats are null pointers when `None`.
pub fn f_batch_norm_gather_stats<T: Borrow<Tensor>>(
&self,
mean: &Tensor,
invstd: &Tensor,
running_mean: Option<T>,
running_var: Option<T>,
momentum: f64,
eps: f64,
count: i64,
) -> Result<(Tensor, Tensor), TchError> {
let mut c_tensors = [std::ptr::null_mut(); 2];
unsafe_torch_err!(atg_batch_norm_gather_stats(
c_tensors.as_mut_ptr(),
self.c_tensor,
mean.c_tensor,
invstd.c_tensor,
running_mean.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
running_var.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
momentum,
eps,
count
));
Ok((Tensor { c_tensor: c_tensors[0] }, Tensor { c_tensor: c_tensors[1] }))
}
/// Wrapper over `atg_batch_norm_gather_stats_out` writing into `out0`/`out1`;
/// otherwise marshalled like `f_batch_norm_gather_stats`.
pub fn f_batch_norm_gather_stats_out<T: Borrow<Tensor>>(
&self,
out0: &Tensor,
out1: &Tensor,
mean: &Tensor,
invstd: &Tensor,
running_mean: Option<T>,
running_var: Option<T>,
momentum: f64,
eps: f64,
count: i64,
) -> Result<(Tensor, Tensor), TchError> {
let mut c_tensors = [std::ptr::null_mut(); 2];
unsafe_torch_err!(atg_batch_norm_gather_stats_out(
c_tensors.as_mut_ptr(),
out0.c_tensor,
out1.c_tensor,
self.c_tensor,
mean.c_tensor,
invstd.c_tensor,
running_mean.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
running_var.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
momentum,
eps,
count
));
Ok((Tensor { c_tensor: c_tensors[0] }, Tensor { c_tensor: c_tensors[1] }))
}
/// Wrapper over `atg_batch_norm_gather_stats_with_counts`; like
/// `f_batch_norm_gather_stats` but `counts` is a tensor instead of a scalar `i64`.
pub fn f_batch_norm_gather_stats_with_counts<T: Borrow<Tensor>>(
&self,
mean: &Tensor,
invstd: &Tensor,
running_mean: Option<T>,
running_var: Option<T>,
momentum: f64,
eps: f64,
counts: &Tensor,
) -> Result<(Tensor, Tensor), TchError> {
let mut c_tensors = [std::ptr::null_mut(); 2];
unsafe_torch_err!(atg_batch_norm_gather_stats_with_counts(
c_tensors.as_mut_ptr(),
self.c_tensor,
mean.c_tensor,
invstd.c_tensor,
running_mean.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
running_var.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
momentum,
eps,
counts.c_tensor
));
Ok((Tensor { c_tensor: c_tensors[0] }, Tensor { c_tensor: c_tensors[1] }))
}
/// Wrapper over `atg_batch_norm_gather_stats_with_counts_out` writing into `out0`/`out1`;
/// otherwise marshalled like `f_batch_norm_gather_stats_with_counts`.
pub fn f_batch_norm_gather_stats_with_counts_out<T: Borrow<Tensor>>(
&self,
out0: &Tensor,
out1: &Tensor,
mean: &Tensor,
invstd: &Tensor,
running_mean: Option<T>,
running_var: Option<T>,
momentum: f64,
eps: f64,
counts: &Tensor,
) -> Result<(Tensor, Tensor), TchError> {
let mut c_tensors = [std::ptr::null_mut(); 2];
unsafe_torch_err!(atg_batch_norm_gather_stats_with_counts_out(
c_tensors.as_mut_ptr(),
out0.c_tensor,
out1.c_tensor,
self.c_tensor,
mean.c_tensor,
invstd.c_tensor,
running_mean.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
running_var.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
momentum,
eps,
counts.c_tensor
));
Ok((Tensor { c_tensor: c_tensors[0] }, Tensor { c_tensor: c_tensors[1] }))
}
/// Wrapper over `atg_batch_norm_stats`, which fills two output tensors.
pub fn f_batch_norm_stats(&self, eps: f64) -> Result<(Tensor, Tensor), TchError> {
let mut c_tensors = [std::ptr::null_mut(); 2];
unsafe_torch_err!(atg_batch_norm_stats(c_tensors.as_mut_ptr(), self.c_tensor, eps));
Ok((Tensor { c_tensor: c_tensors[0] }, Tensor { c_tensor: c_tensors[1] }))
}
/// Wrapper over `atg_batch_norm_stats_out` writing into `out0`/`out1`.
pub fn f_batch_norm_stats_out(
&self,
out0: &Tensor,
out1: &Tensor,
eps: f64,
) -> Result<(Tensor, Tensor), TchError> {
let mut c_tensors = [std::ptr::null_mut(); 2];
unsafe_torch_err!(atg_batch_norm_stats_out(
c_tensors.as_mut_ptr(),
out0.c_tensor,
out1.c_tensor,
self.c_tensor,
eps
));
Ok((Tensor { c_tensor: c_tensors[0] }, Tensor { c_tensor: c_tensors[1] }))
}
/// Wrapper over `atg_batch_norm_update_stats`; optional running stats are null when `None`.
pub fn f_batch_norm_update_stats<T: Borrow<Tensor>>(
&self,
running_mean: Option<T>,
running_var: Option<T>,
momentum: f64,
) -> Result<(Tensor, Tensor), TchError> {
let mut c_tensors = [std::ptr::null_mut(); 2];
unsafe_torch_err!(atg_batch_norm_update_stats(
c_tensors.as_mut_ptr(),
self.c_tensor,
running_mean.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
running_var.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
momentum
));
Ok((Tensor { c_tensor: c_tensors[0] }, Tensor { c_tensor: c_tensors[1] }))
}
/// Wrapper over `atg_batch_norm_update_stats_out` writing into `out0`/`out1`;
/// otherwise marshalled like `f_batch_norm_update_stats`.
pub fn f_batch_norm_update_stats_out<T: Borrow<Tensor>>(
&self,
out0: &Tensor,
out1: &Tensor,
running_mean: Option<T>,
running_var: Option<T>,
momentum: f64,
) -> Result<(Tensor, Tensor), TchError> {
let mut c_tensors = [std::ptr::null_mut(); 2];
unsafe_torch_err!(atg_batch_norm_update_stats_out(
c_tensors.as_mut_ptr(),
out0.c_tensor,
out1.c_tensor,
self.c_tensor,
running_mean.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
running_var.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
momentum
));
Ok((Tensor { c_tensor: c_tensors[0] }, Tensor { c_tensor: c_tensors[1] }))
}
/// Fallible wrapper over the C binding `atg_bernoulli`; returns the resulting tensor or a `TchError`.
pub fn f_bernoulli(&self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_bernoulli(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Wrapper over `atg_bernoulli_` with a tensor of probabilities `p` (takes `&mut self`;
/// presumably in-place — trailing `_` in the C name).
pub fn f_bernoulli_(&mut self, p: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_bernoulli_(c_tensors.as_mut_ptr(), self.c_tensor, p.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Wrapper over `atg_bernoulli_float_` with a scalar probability `p` (takes `&mut self`;
/// presumably in-place — trailing `_` in the C name).
pub fn f_bernoulli_float_(&mut self, p: f64) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_bernoulli_float_(c_tensors.as_mut_ptr(), self.c_tensor, p));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Wrapper over `atg_bernoulli_p` with a scalar probability `p`; returns a new tensor.
pub fn f_bernoulli_p(&self, p: f64) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_bernoulli_p(c_tensors.as_mut_ptr(), self.c_tensor, p));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Wrapper over `atg_bernoulli_tensor` with a tensor of probabilities `p`; returns a new tensor.
pub fn f_bernoulli_tensor(&self, p: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_bernoulli_tensor(c_tensors.as_mut_ptr(), self.c_tensor, p.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Associated function wrapping `atg_bilinear`; the optional `bias` is passed as a null
/// pointer when `None`.
pub fn f_bilinear<T: Borrow<Tensor>>(
input1: &Tensor,
input2: &Tensor,
weight: &Tensor,
bias: Option<T>,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_bilinear(
c_tensors.as_mut_ptr(),
input1.c_tensor,
input2.c_tensor,
weight.c_tensor,
bias.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor)
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Wrapper over `atg_binary_cross_entropy`; optional `weight` is a null pointer when
/// `None` and `reduction` is converted with `to_int()`.
pub fn f_binary_cross_entropy<T: Borrow<Tensor>>(
&self,
target: &Tensor,
weight: Option<T>,
reduction: crate::Reduction,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_binary_cross_entropy(
c_tensors.as_mut_ptr(),
self.c_tensor,
target.c_tensor,
weight.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
reduction.to_int()
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Wrapper over `atg_binary_cross_entropy_backward`; `grad_output` precedes `self` in the
/// C call, and marshalling otherwise matches `f_binary_cross_entropy`.
pub fn f_binary_cross_entropy_backward<T: Borrow<Tensor>>(
&self,
grad_output: &Tensor,
target: &Tensor,
weight: Option<T>,
reduction: crate::Reduction,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_binary_cross_entropy_backward(
c_tensors.as_mut_ptr(),
grad_output.c_tensor,
self.c_tensor,
target.c_tensor,
weight.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
reduction.to_int()
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Wrapper over `atg_binary_cross_entropy_backward_grad_input`; `grad_input` is the
/// destination tensor passed first, otherwise like `f_binary_cross_entropy_backward`.
pub fn f_binary_cross_entropy_backward_grad_input<T: Borrow<Tensor>>(
&self,
grad_input: &Tensor,
grad_output: &Tensor,
target: &Tensor,
weight: Option<T>,
reduction: crate::Reduction,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_binary_cross_entropy_backward_grad_input(
c_tensors.as_mut_ptr(),
grad_input.c_tensor,
grad_output.c_tensor,
self.c_tensor,
target.c_tensor,
weight.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
reduction.to_int()
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
pub fn f_binary_cross_entropy_out<T: Borrow<Tensor>>(
&self,
out: &Tensor,
target: &Tensor,
weight: Option<T>,
reduction: crate::Reduction,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_binary_cross_entropy_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
target.c_tensor,
weight.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
reduction.to_int()
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
pub fn f_binary_cross_entropy_with_logits<T: Borrow<Tensor>>(
&self,
target: &Tensor,
weight: Option<T>,
pos_weight: Option<T>,
reduction: crate::Reduction,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_binary_cross_entropy_with_logits(
c_tensors.as_mut_ptr(),
self.c_tensor,
target.c_tensor,
weight.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
pos_weight.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
reduction.to_int()
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
pub fn f_binary_cross_entropy_with_logits_out<T: Borrow<Tensor>>(
&self,
out: &Tensor,
target: &Tensor,
weight: Option<T>,
pos_weight: Option<T>,
reduction: crate::Reduction,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_binary_cross_entropy_with_logits_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
target.c_tensor,
weight.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
pos_weight.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
reduction.to_int()
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
pub fn f_bincount<T: Borrow<Tensor>>(
&self,
weights: Option<T>,
minlength: i64,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_bincount(
c_tensors.as_mut_ptr(),
self.c_tensor,
weights.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
minlength
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
pub fn f_bincount_out<T: Borrow<Tensor>>(
&self,
out: &Tensor,
weights: Option<T>,
minlength: i64,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_bincount_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
weights.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
minlength
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for `atg_binomial`; associated function (no `self`).
pub fn f_binomial(count: &Tensor, prob: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_binomial(c_tensors.as_mut_ptr(), count.c_tensor, prob.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for `atg_binomial_out`; writes into `out`.
pub fn f_binomial_out(out: &Tensor, count: &Tensor, prob: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_binomial_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
count.c_tensor,
prob.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for `atg_bitwise_and` with a scalar right-hand side.
pub fn f_bitwise_and<S: Into<Scalar>>(&self, other: S) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_bitwise_and(
c_tensors.as_mut_ptr(),
self.c_tensor,
other.into().c_scalar
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for `atg_bitwise_and_` (trailing `_` marks the in-place libtorch variant; takes `&mut self`).
pub fn f_bitwise_and_<S: Into<Scalar>>(&mut self, other: S) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_bitwise_and_(
c_tensors.as_mut_ptr(),
self.c_tensor,
other.into().c_scalar
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for `atg_bitwise_and_scalar_out`; writes into `out`.
pub fn f_bitwise_and_scalar_out<S: Into<Scalar>>(
&self,
out: &Tensor,
other: S,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_bitwise_and_scalar_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
other.into().c_scalar
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for `atg_bitwise_and_scalar_tensor` (scalar on the left, tensor on the right).
pub fn f_bitwise_and_scalar_tensor<S: Into<Scalar>>(
self_scalar: S,
other: &Tensor,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_bitwise_and_scalar_tensor(
c_tensors.as_mut_ptr(),
self_scalar.into().c_scalar,
other.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for `atg_bitwise_and_scalar_tensor_out`; writes into `out`.
pub fn f_bitwise_and_scalar_tensor_out<S: Into<Scalar>>(
out: &Tensor,
self_scalar: S,
other: &Tensor,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_bitwise_and_scalar_tensor_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self_scalar.into().c_scalar,
other.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for `atg_bitwise_and_tensor` (tensor-tensor variant).
pub fn f_bitwise_and_tensor(&self, other: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_bitwise_and_tensor(
c_tensors.as_mut_ptr(),
self.c_tensor,
other.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for `atg_bitwise_and_tensor_` (in-place libtorch variant).
pub fn f_bitwise_and_tensor_(&mut self, other: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_bitwise_and_tensor_(
c_tensors.as_mut_ptr(),
self.c_tensor,
other.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for `atg_bitwise_and_tensor_out`; writes into `out`.
pub fn f_bitwise_and_tensor_out(
&self,
out: &Tensor,
other: &Tensor,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_bitwise_and_tensor_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
other.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for `atg_bitwise_left_shift` (tensor-tensor variant).
pub fn f_bitwise_left_shift(&self, other: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_bitwise_left_shift(
c_tensors.as_mut_ptr(),
self.c_tensor,
other.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for `atg_bitwise_left_shift_` (in-place libtorch variant; takes `&mut self`).
pub fn f_bitwise_left_shift_(&mut self, other: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_bitwise_left_shift_(
c_tensors.as_mut_ptr(),
self.c_tensor,
other.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for `atg_bitwise_left_shift_scalar_tensor` (scalar on the left).
pub fn f_bitwise_left_shift_scalar_tensor<S: Into<Scalar>>(
self_scalar: S,
other: &Tensor,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_bitwise_left_shift_scalar_tensor(
c_tensors.as_mut_ptr(),
self_scalar.into().c_scalar,
other.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for `atg_bitwise_left_shift_scalar_tensor_out`; writes into `out`.
pub fn f_bitwise_left_shift_scalar_tensor_out<S: Into<Scalar>>(
out: &Tensor,
self_scalar: S,
other: &Tensor,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_bitwise_left_shift_scalar_tensor_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self_scalar.into().c_scalar,
other.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for `atg_bitwise_left_shift_tensor_out`; writes into `out`.
pub fn f_bitwise_left_shift_tensor_out(
&self,
out: &Tensor,
other: &Tensor,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_bitwise_left_shift_tensor_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
other.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for `atg_bitwise_left_shift_tensor_scalar` (scalar shift amount).
pub fn f_bitwise_left_shift_tensor_scalar<S: Into<Scalar>>(
&self,
other: S,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_bitwise_left_shift_tensor_scalar(
c_tensors.as_mut_ptr(),
self.c_tensor,
other.into().c_scalar
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for `atg_bitwise_left_shift_tensor_scalar_` (in-place libtorch variant).
pub fn f_bitwise_left_shift_tensor_scalar_<S: Into<Scalar>>(
&mut self,
other: S,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_bitwise_left_shift_tensor_scalar_(
c_tensors.as_mut_ptr(),
self.c_tensor,
other.into().c_scalar
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for `atg_bitwise_left_shift_tensor_scalar_out`; writes into `out`.
pub fn f_bitwise_left_shift_tensor_scalar_out<S: Into<Scalar>>(
&self,
out: &Tensor,
other: S,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_bitwise_left_shift_tensor_scalar_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
other.into().c_scalar
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for `atg_bitwise_not`.
pub fn f_bitwise_not(&self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_bitwise_not(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for `atg_bitwise_not_` (in-place libtorch variant).
pub fn f_bitwise_not_(&mut self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_bitwise_not_(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for `atg_bitwise_not_out`; writes into `out`.
pub fn f_bitwise_not_out(&self, out: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_bitwise_not_out(c_tensors.as_mut_ptr(), out.c_tensor, self.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for `atg_bitwise_or` with a scalar right-hand side.
pub fn f_bitwise_or<S: Into<Scalar>>(&self, other: S) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_bitwise_or(
c_tensors.as_mut_ptr(),
self.c_tensor,
other.into().c_scalar
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for `atg_bitwise_or_` (in-place libtorch variant; takes `&mut self`).
pub fn f_bitwise_or_<S: Into<Scalar>>(&mut self, other: S) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_bitwise_or_(
c_tensors.as_mut_ptr(),
self.c_tensor,
other.into().c_scalar
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for `atg_bitwise_or_scalar_out`; writes into `out`.
pub fn f_bitwise_or_scalar_out<S: Into<Scalar>>(
&self,
out: &Tensor,
other: S,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_bitwise_or_scalar_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
other.into().c_scalar
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for `atg_bitwise_or_scalar_tensor` (scalar on the left).
pub fn f_bitwise_or_scalar_tensor<S: Into<Scalar>>(
self_scalar: S,
other: &Tensor,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_bitwise_or_scalar_tensor(
c_tensors.as_mut_ptr(),
self_scalar.into().c_scalar,
other.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for `atg_bitwise_or_scalar_tensor_out`; writes into `out`.
pub fn f_bitwise_or_scalar_tensor_out<S: Into<Scalar>>(
out: &Tensor,
self_scalar: S,
other: &Tensor,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_bitwise_or_scalar_tensor_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self_scalar.into().c_scalar,
other.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for `atg_bitwise_or_tensor` (tensor-tensor variant).
pub fn f_bitwise_or_tensor(&self, other: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_bitwise_or_tensor(
c_tensors.as_mut_ptr(),
self.c_tensor,
other.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for `atg_bitwise_or_tensor_` (in-place libtorch variant).
pub fn f_bitwise_or_tensor_(&mut self, other: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_bitwise_or_tensor_(
c_tensors.as_mut_ptr(),
self.c_tensor,
other.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for `atg_bitwise_or_tensor_out`; writes into `out`.
pub fn f_bitwise_or_tensor_out(
&self,
out: &Tensor,
other: &Tensor,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_bitwise_or_tensor_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
other.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for `atg_bitwise_right_shift` (tensor-tensor variant).
pub fn f_bitwise_right_shift(&self, other: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_bitwise_right_shift(
c_tensors.as_mut_ptr(),
self.c_tensor,
other.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for `atg_bitwise_right_shift_` (in-place libtorch variant; takes `&mut self`).
pub fn f_bitwise_right_shift_(&mut self, other: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_bitwise_right_shift_(
c_tensors.as_mut_ptr(),
self.c_tensor,
other.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for `atg_bitwise_right_shift_scalar_tensor` (scalar on the left).
pub fn f_bitwise_right_shift_scalar_tensor<S: Into<Scalar>>(
self_scalar: S,
other: &Tensor,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_bitwise_right_shift_scalar_tensor(
c_tensors.as_mut_ptr(),
self_scalar.into().c_scalar,
other.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for `atg_bitwise_right_shift_scalar_tensor_out`; writes into `out`.
pub fn f_bitwise_right_shift_scalar_tensor_out<S: Into<Scalar>>(
out: &Tensor,
self_scalar: S,
other: &Tensor,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_bitwise_right_shift_scalar_tensor_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self_scalar.into().c_scalar,
other.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for `atg_bitwise_right_shift_tensor_out`; writes into `out`.
pub fn f_bitwise_right_shift_tensor_out(
&self,
out: &Tensor,
other: &Tensor,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_bitwise_right_shift_tensor_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
other.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for `atg_bitwise_right_shift_tensor_scalar` (scalar shift amount).
pub fn f_bitwise_right_shift_tensor_scalar<S: Into<Scalar>>(
&self,
other: S,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_bitwise_right_shift_tensor_scalar(
c_tensors.as_mut_ptr(),
self.c_tensor,
other.into().c_scalar
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for `atg_bitwise_right_shift_tensor_scalar_` (in-place libtorch variant).
pub fn f_bitwise_right_shift_tensor_scalar_<S: Into<Scalar>>(
&mut self,
other: S,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_bitwise_right_shift_tensor_scalar_(
c_tensors.as_mut_ptr(),
self.c_tensor,
other.into().c_scalar
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for `atg_bitwise_right_shift_tensor_scalar_out`; writes into `out`.
pub fn f_bitwise_right_shift_tensor_scalar_out<S: Into<Scalar>>(
&self,
out: &Tensor,
other: S,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_bitwise_right_shift_tensor_scalar_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
other.into().c_scalar
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for `atg_bitwise_xor` with a scalar right-hand side.
pub fn f_bitwise_xor<S: Into<Scalar>>(&self, other: S) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_bitwise_xor(
c_tensors.as_mut_ptr(),
self.c_tensor,
other.into().c_scalar
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for `atg_bitwise_xor_` (in-place libtorch variant; takes `&mut self`).
pub fn f_bitwise_xor_<S: Into<Scalar>>(&mut self, other: S) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_bitwise_xor_(
c_tensors.as_mut_ptr(),
self.c_tensor,
other.into().c_scalar
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for `atg_bitwise_xor_scalar_out`; writes into `out`.
pub fn f_bitwise_xor_scalar_out<S: Into<Scalar>>(
&self,
out: &Tensor,
other: S,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_bitwise_xor_scalar_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
other.into().c_scalar
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for `atg_bitwise_xor_scalar_tensor` (scalar on the left).
pub fn f_bitwise_xor_scalar_tensor<S: Into<Scalar>>(
self_scalar: S,
other: &Tensor,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_bitwise_xor_scalar_tensor(
c_tensors.as_mut_ptr(),
self_scalar.into().c_scalar,
other.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for `atg_bitwise_xor_scalar_tensor_out`; writes into `out`.
pub fn f_bitwise_xor_scalar_tensor_out<S: Into<Scalar>>(
out: &Tensor,
self_scalar: S,
other: &Tensor,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_bitwise_xor_scalar_tensor_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self_scalar.into().c_scalar,
other.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for `atg_bitwise_xor_tensor` (tensor-tensor variant).
pub fn f_bitwise_xor_tensor(&self, other: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_bitwise_xor_tensor(
c_tensors.as_mut_ptr(),
self.c_tensor,
other.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for `atg_bitwise_xor_tensor_` (in-place libtorch variant).
pub fn f_bitwise_xor_tensor_(&mut self, other: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_bitwise_xor_tensor_(
c_tensors.as_mut_ptr(),
self.c_tensor,
other.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for `atg_bitwise_xor_tensor_out`; writes into `out`.
pub fn f_bitwise_xor_tensor_out(
&self,
out: &Tensor,
other: &Tensor,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_bitwise_xor_tensor_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
other.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for `atg_blackman_window`; `options` carries the target `(Kind, Device)` as C ints.
pub fn f_blackman_window(
window_length: i64,
options: (Kind, Device),
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_blackman_window(
c_tensors.as_mut_ptr(),
window_length,
options.0.c_int(),
options.1.c_int()
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for `atg_blackman_window_out`; writes into `out`.
pub fn f_blackman_window_out(out: &Tensor, window_length: i64) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_blackman_window_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
window_length
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for `atg_blackman_window_periodic`; `periodic` is lowered to a C int flag.
pub fn f_blackman_window_periodic(
window_length: i64,
periodic: bool,
options: (Kind, Device),
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_blackman_window_periodic(
c_tensors.as_mut_ptr(),
window_length,
if periodic { 1 } else { 0 },
options.0.c_int(),
options.1.c_int()
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for `atg_blackman_window_periodic_out`; writes into `out`.
pub fn f_blackman_window_periodic_out(
out: &Tensor,
window_length: i64,
periodic: bool,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_blackman_window_periodic_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
window_length,
if periodic { 1 } else { 0 }
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for `atg_block_diag`; the slice is marshalled as a pointer array plus an i32 length.
pub fn f_block_diag<T: Borrow<Tensor>>(tensors: &[T]) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_block_diag(
c_tensors.as_mut_ptr(),
ptr_list(tensors).as_ptr(),
tensors.len() as i32
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for `atg_block_diag_out`; writes into `out`.
pub fn f_block_diag_out<T: Borrow<Tensor>>(
out: &Tensor,
tensors: &[T],
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_block_diag_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
ptr_list(tensors).as_ptr(),
tensors.len() as i32
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for `atg_bmm` (batched matrix multiply against `mat2`).
pub fn f_bmm(&self, mat2: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_bmm(c_tensors.as_mut_ptr(), self.c_tensor, mat2.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for `atg_bmm_out`; writes into `out`.
pub fn f_bmm_out(&self, out: &Tensor, mat2: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_bmm_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
mat2.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for `atg_broadcast_tensors`. The C side returns a null-terminated
/// array of tensor pointers that this wrapper walks into a `Vec<Tensor>` and then frees
/// with `libc::free` (the array itself is C-allocated; ownership of each tensor moves to Rust).
pub fn f_broadcast_tensors<T: Borrow<Tensor>>(tensors: &[T]) -> Result<Vec<Tensor>, TchError> {
let c_tensors = unsafe_torch_err!(atg_broadcast_tensors(
ptr_list(tensors).as_ptr(),
tensors.len() as i32
));
let mut r__ = vec![];
let mut i = 0;
loop {
let c__ = unsafe { *c_tensors.add(i) };
if c__.is_null() {
break;
}
r__.push(Tensor { c_tensor: c__ });
i += 1;
}
unsafe { libc::free(c_tensors as *mut libc::c_void) }
Ok(r__)
}
/// Fallible binding for `atg_broadcast_to`; `size` is marshalled via the `IntList` helper trait.
pub fn f_broadcast_to(&self, size: impl IntList) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_broadcast_to(
c_tensors.as_mut_ptr(),
self.c_tensor,
size.as_ptr(),
size.len_i32()
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for `atg_bucketize`; bool flags are lowered to C int flags.
pub fn f_bucketize(
&self,
boundaries: &Tensor,
out_int32: bool,
right: bool,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_bucketize(
c_tensors.as_mut_ptr(),
self.c_tensor,
boundaries.c_tensor,
if out_int32 { 1 } else { 0 },
if right { 1 } else { 0 }
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for `atg_bucketize_scalar` (scalar value bucketized against `boundaries`).
pub fn f_bucketize_scalar<S: Into<Scalar>>(
self_scalar: S,
boundaries: &Tensor,
out_int32: bool,
right: bool,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_bucketize_scalar(
c_tensors.as_mut_ptr(),
self_scalar.into().c_scalar,
boundaries.c_tensor,
if out_int32 { 1 } else { 0 },
if right { 1 } else { 0 }
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for `atg_bucketize_scalar_out`; writes into `out`.
pub fn f_bucketize_scalar_out<S: Into<Scalar>>(
out: &Tensor,
self_scalar: S,
boundaries: &Tensor,
out_int32: bool,
right: bool,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_bucketize_scalar_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self_scalar.into().c_scalar,
boundaries.c_tensor,
if out_int32 { 1 } else { 0 },
if right { 1 } else { 0 }
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for `atg_bucketize_tensor_out`; writes into `out`.
pub fn f_bucketize_tensor_out(
&self,
out: &Tensor,
boundaries: &Tensor,
out_int32: bool,
right: bool,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_bucketize_tensor_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
boundaries.c_tensor,
if out_int32 { 1 } else { 0 },
if right { 1 } else { 0 }
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for `atg_can_cast`; the C int return is converted to `bool` (non-zero = true).
pub fn f_can_cast(from: Kind, to: Kind) -> Result<bool, TchError> {
let return_;
unsafe_torch_err!(return_ = atg_can_cast(from.c_int(), to.c_int()));
Ok(return_ != 0)
}
/// Fallible binding for `atg_cartesian_prod`; tensor slice marshalled as pointer array + i32 length.
pub fn f_cartesian_prod<T: Borrow<Tensor>>(tensors: &[T]) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_cartesian_prod(
c_tensors.as_mut_ptr(),
ptr_list(tensors).as_ptr(),
tensors.len() as i32
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for `atg_cat`; concatenates `tensors` along `dim`.
pub fn f_cat<T: Borrow<Tensor>>(tensors: &[T], dim: i64) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_cat(
c_tensors.as_mut_ptr(),
ptr_list(tensors).as_ptr(),
tensors.len() as i32,
dim
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for `atg_cat_out`; writes into `out`.
pub fn f_cat_out<T: Borrow<Tensor>>(
out: &Tensor,
tensors: &[T],
dim: i64,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_cat_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
ptr_list(tensors).as_ptr(),
tensors.len() as i32,
dim
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for `atg_cauchy` with location `median` and scale `sigma`.
pub fn f_cauchy(&self, median: f64, sigma: f64) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_cauchy(c_tensors.as_mut_ptr(), self.c_tensor, median, sigma));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for `atg_cauchy_` (in-place libtorch variant; takes `&mut self`).
pub fn f_cauchy_(&mut self, median: f64, sigma: f64) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_cauchy_(c_tensors.as_mut_ptr(), self.c_tensor, median, sigma));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for `atg_cauchy_out`; writes into `out`.
pub fn f_cauchy_out(&self, out: &Tensor, median: f64, sigma: f64) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_cauchy_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
median,
sigma
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for `atg_ccol_indices`.
pub fn f_ccol_indices(&self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_ccol_indices(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for `atg_ccol_indices_copy`.
pub fn f_ccol_indices_copy(&self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_ccol_indices_copy(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for `atg_ccol_indices_copy_out`; writes into `out`.
pub fn f_ccol_indices_copy_out(&self, out: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_ccol_indices_copy_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for `atg_cdist`. The optional `compute_mode` is marshalled as a value
/// plus an `is_none` flag (0 placeholder when absent), following the generator's option convention.
pub fn f_cdist(
x1: &Tensor,
x2: &Tensor,
p: f64,
compute_mode: impl Into<Option<i64>>,
) -> Result<Tensor, TchError> {
let compute_mode = compute_mode.into();
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_cdist(
c_tensors.as_mut_ptr(),
x1.c_tensor,
x2.c_tensor,
p,
compute_mode.unwrap_or(0i64),
compute_mode.is_none() as i8
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for `atg_ceil`.
pub fn f_ceil(&self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_ceil(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for `atg_ceil_` (in-place libtorch variant; takes `&mut self`).
pub fn f_ceil_(&mut self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_ceil_(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for `atg_ceil_out`; writes into `out`.
pub fn f_ceil_out(&self, out: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_ceil_out(c_tensors.as_mut_ptr(), out.c_tensor, self.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for `atg_celu`.
pub fn f_celu(&self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_celu(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for `atg_celu_` (in-place libtorch variant).
pub fn f_celu_(&mut self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_celu_(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for `atg_celu_out`; writes into `out`.
pub fn f_celu_out(&self, out: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_celu_out(c_tensors.as_mut_ptr(), out.c_tensor, self.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for `atg_chain_matmul`; matrix slice marshalled as pointer array + i32 length.
pub fn f_chain_matmul<T: Borrow<Tensor>>(matrices: &[T]) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_chain_matmul(
c_tensors.as_mut_ptr(),
ptr_list(matrices).as_ptr(),
matrices.len() as i32
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for `atg_chain_matmul_out`; writes into `out`.
pub fn f_chain_matmul_out<T: Borrow<Tensor>>(
out: &Tensor,
matrices: &[T],
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_chain_matmul_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
ptr_list(matrices).as_ptr(),
matrices.len() as i32
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for `atg_chalf`.
pub fn f_chalf(&self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_chalf(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for `atg_channel_shuffle` with `groups` channel groups.
pub fn f_channel_shuffle(&self, groups: i64) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_channel_shuffle(c_tensors.as_mut_ptr(), self.c_tensor, groups));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for `atg_channel_shuffle_out`; writes into `out`.
pub fn f_channel_shuffle_out(&self, out: &Tensor, groups: i64) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_channel_shuffle_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
groups
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for `atg_cholesky`; `upper` is lowered to a C int flag.
pub fn f_cholesky(&self, upper: bool) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_cholesky(
c_tensors.as_mut_ptr(),
self.c_tensor,
if upper { 1 } else { 0 }
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for `atg_cholesky_inverse`.
pub fn f_cholesky_inverse(&self, upper: bool) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_cholesky_inverse(
c_tensors.as_mut_ptr(),
self.c_tensor,
if upper { 1 } else { 0 }
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for `atg_cholesky_inverse_out`; writes into `out`.
pub fn f_cholesky_inverse_out(&self, out: &Tensor, upper: bool) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_cholesky_inverse_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
if upper { 1 } else { 0 }
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for `atg_cholesky_out`; writes into `out`.
pub fn f_cholesky_out(&self, out: &Tensor, upper: bool) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_cholesky_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
if upper { 1 } else { 0 }
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for `atg_cholesky_solve`.
pub fn f_cholesky_solve(&self, input2: &Tensor, upper: bool) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_cholesky_solve(
c_tensors.as_mut_ptr(),
self.c_tensor,
input2.c_tensor,
if upper { 1 } else { 0 }
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for `atg_cholesky_solve_out`; writes into `out`.
pub fn f_cholesky_solve_out(
&self,
out: &Tensor,
input2: &Tensor,
upper: bool,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_cholesky_solve_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
input2.c_tensor,
if upper { 1 } else { 0 }
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for `atg_choose_qparams_optimized`; the C call fills two output tensors.
pub fn f_choose_qparams_optimized(
&self,
numel: i64,
n_bins: i64,
ratio: f64,
bit_width: i64,
) -> Result<(Tensor, Tensor), TchError> {
let mut c_tensors = [std::ptr::null_mut(); 2];
unsafe_torch_err!(atg_choose_qparams_optimized(
c_tensors.as_mut_ptr(),
self.c_tensor,
numel,
n_bins,
ratio,
bit_width
));
Ok((Tensor { c_tensor: c_tensors[0] }, Tensor { c_tensor: c_tensors[1] }))
}
/// Fallible binding for `atg_chunk`. The C side returns a null-terminated pointer array
/// that is walked into a `Vec<Tensor>` and then freed with `libc::free`.
pub fn f_chunk(&self, chunks: i64, dim: i64) -> Result<Vec<Tensor>, TchError> {
let c_tensors = unsafe_torch_err!(atg_chunk(self.c_tensor, chunks, dim));
let mut r__ = vec![];
let mut i = 0;
loop {
let c__ = unsafe { *c_tensors.add(i) };
if c__.is_null() {
break;
}
r__.push(Tensor { c_tensor: c__ });
i += 1;
}
unsafe { libc::free(c_tensors as *mut libc::c_void) }
Ok(r__)
}
pub fn f_clamp<S: Into<Scalar>>(&self, min: S, max: S) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_clamp(
c_tensors.as_mut_ptr(),
self.c_tensor,
min.into().c_scalar,
max.into().c_scalar
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
pub fn f_clamp_<S: Into<Scalar>>(&mut self, min: S, max: S) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_clamp_(
c_tensors.as_mut_ptr(),
self.c_tensor,
min.into().c_scalar,
max.into().c_scalar
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
pub fn f_clamp_max<S: Into<Scalar>>(&self, max: S) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_clamp_max(
c_tensors.as_mut_ptr(),
self.c_tensor,
max.into().c_scalar
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
pub fn f_clamp_max_<S: Into<Scalar>>(&mut self, max: S) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_clamp_max_(
c_tensors.as_mut_ptr(),
self.c_tensor,
max.into().c_scalar
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
pub fn f_clamp_max_out<S: Into<Scalar>>(
&self,
out: &Tensor,
max: S,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_clamp_max_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
max.into().c_scalar
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
pub fn f_clamp_max_tensor(&self, max: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_clamp_max_tensor(
c_tensors.as_mut_ptr(),
self.c_tensor,
max.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
pub fn f_clamp_max_tensor_(&mut self, max: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_clamp_max_tensor_(
c_tensors.as_mut_ptr(),
self.c_tensor,
max.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
pub fn f_clamp_max_tensor_out(&self, out: &Tensor, max: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_clamp_max_tensor_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
max.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
pub fn f_clamp_min<S: Into<Scalar>>(&self, min: S) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_clamp_min(
c_tensors.as_mut_ptr(),
self.c_tensor,
min.into().c_scalar
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
pub fn f_clamp_min_<S: Into<Scalar>>(&mut self, min: S) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_clamp_min_(
c_tensors.as_mut_ptr(),
self.c_tensor,
min.into().c_scalar
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// `out`-variant of `clamp_min`; result is written into `out`.
pub fn f_clamp_min_out<S: Into<Scalar>>(
&self,
out: &Tensor,
min: S,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_clamp_min_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
min.into().c_scalar
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for the ATen `clamp_min.Tensor` overload (tensor-valued `min`).
pub fn f_clamp_min_tensor(&self, min: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_clamp_min_tensor(
c_tensors.as_mut_ptr(),
self.c_tensor,
min.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// In-place variant of `clamp_min.Tensor`; mutates `self` via the C API.
pub fn f_clamp_min_tensor_(&mut self, min: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_clamp_min_tensor_(
c_tensors.as_mut_ptr(),
self.c_tensor,
min.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// `out`-variant of `clamp_min.Tensor`; result is written into `out`.
pub fn f_clamp_min_tensor_out(&self, out: &Tensor, min: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_clamp_min_tensor_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
min.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// `out`-variant of `clamp` with scalar `min`/`max` bounds; writes into `out`.
/// NOTE(review): both bounds share one generic `S`, so mixed scalar types
/// (e.g. i64 min with f64 max) need an explicit conversion at the call site.
pub fn f_clamp_out<S: Into<Scalar>>(
&self,
out: &Tensor,
min: S,
max: S,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_clamp_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
min.into().c_scalar,
max.into().c_scalar
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for the ATen `clamp.Tensor` overload; either bound may be
/// `None`, which is passed to C as a null tensor pointer.
pub fn f_clamp_tensor<T: Borrow<Tensor>>(
&self,
min: Option<T>,
max: Option<T>,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_clamp_tensor(
c_tensors.as_mut_ptr(),
self.c_tensor,
min.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
max.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor)
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// In-place variant of `clamp.Tensor`; `None` bounds become null tensor pointers.
pub fn f_clamp_tensor_<T: Borrow<Tensor>>(
&mut self,
min: Option<T>,
max: Option<T>,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_clamp_tensor_(
c_tensors.as_mut_ptr(),
self.c_tensor,
min.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
max.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor)
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// `out`-variant of `clamp.Tensor`; optional bounds, result written into `out`.
pub fn f_clamp_tensor_out<T: Borrow<Tensor>>(
&self,
out: &Tensor,
min: Option<T>,
max: Option<T>,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_clamp_tensor_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
min.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
max.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor)
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for the ATen `clip` op (alias family of `clamp`), scalar bounds.
pub fn f_clip<S: Into<Scalar>>(&self, min: S, max: S) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_clip(
c_tensors.as_mut_ptr(),
self.c_tensor,
min.into().c_scalar,
max.into().c_scalar
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// In-place variant of `clip` with scalar bounds; mutates `self`.
pub fn f_clip_<S: Into<Scalar>>(&mut self, min: S, max: S) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_clip_(
c_tensors.as_mut_ptr(),
self.c_tensor,
min.into().c_scalar,
max.into().c_scalar
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// `out`-variant of `clip` with scalar bounds; result written into `out`.
pub fn f_clip_out<S: Into<Scalar>>(
&self,
out: &Tensor,
min: S,
max: S,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_clip_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
min.into().c_scalar,
max.into().c_scalar
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// `clip.Tensor` overload; `None` bounds are encoded as null tensor pointers.
pub fn f_clip_tensor<T: Borrow<Tensor>>(
&self,
min: Option<T>,
max: Option<T>,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_clip_tensor(
c_tensors.as_mut_ptr(),
self.c_tensor,
min.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
max.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor)
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// In-place variant of `clip.Tensor`; mutates `self`, optional tensor bounds.
pub fn f_clip_tensor_<T: Borrow<Tensor>>(
&mut self,
min: Option<T>,
max: Option<T>,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_clip_tensor_(
c_tensors.as_mut_ptr(),
self.c_tensor,
min.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
max.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor)
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// `out`-variant of `clip.Tensor`; optional bounds, result written into `out`.
pub fn f_clip_tensor_out<T: Borrow<Tensor>>(
&self,
out: &Tensor,
min: Option<T>,
max: Option<T>,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_clip_tensor_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
min.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
max.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor)
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Binding for the ATen `clone.out` entry point: `out` comes first per the
/// generated C ABI (this is the out-variant, not plain `clone`).
pub fn f_clone(&self, out: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_clone(c_tensors.as_mut_ptr(), out.c_tensor, self.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for the ATen `coalesce` op (sparse-tensor related).
pub fn f_coalesce(&self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_coalesce(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for the ATen `col2im` op. Each int-list argument is
/// marshalled as a (pointer, i32 length) pair in the C call.
pub fn f_col2im(
&self,
output_size: impl IntList,
kernel_size: impl IntList,
dilation: impl IntList,
padding: impl IntList,
stride: impl IntList,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_col2im(
c_tensors.as_mut_ptr(),
self.c_tensor,
output_size.as_ptr(),
output_size.len_i32(),
kernel_size.as_ptr(),
kernel_size.len_i32(),
dilation.as_ptr(),
dilation.len_i32(),
padding.as_ptr(),
padding.len_i32(),
stride.as_ptr(),
stride.len_i32()
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// `out`-variant of `col2im`; identical marshalling, result written into `out`.
pub fn f_col2im_out(
&self,
out: &Tensor,
output_size: impl IntList,
kernel_size: impl IntList,
dilation: impl IntList,
padding: impl IntList,
stride: impl IntList,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_col2im_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
output_size.as_ptr(),
output_size.len_i32(),
kernel_size.as_ptr(),
kernel_size.len_i32(),
dilation.as_ptr(),
dilation.len_i32(),
padding.as_ptr(),
padding.len_i32(),
stride.as_ptr(),
stride.len_i32()
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for the ATen `col_indices` op (sparse CSR accessor).
pub fn f_col_indices(&self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_col_indices(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for the ATen `col_indices_copy` op.
pub fn f_col_indices_copy(&self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_col_indices_copy(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// `out`-variant of `col_indices_copy`; result written into `out`.
pub fn f_col_indices_copy_out(&self, out: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_col_indices_copy_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for the ATen `column_stack` op. The `ptr_list` temporary
/// lives until the end of the macro statement, so `.as_ptr()` stays valid.
pub fn f_column_stack<T: Borrow<Tensor>>(tensors: &[T]) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_column_stack(
c_tensors.as_mut_ptr(),
ptr_list(tensors).as_ptr(),
tensors.len() as i32
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// `out`-variant of `column_stack`; result written into `out`.
pub fn f_column_stack_out<T: Borrow<Tensor>>(
out: &Tensor,
tensors: &[T],
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_column_stack_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
ptr_list(tensors).as_ptr(),
tensors.len() as i32
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for the ATen `combinations` op; the bool is lowered to the
/// C ABI's 0/1 int convention.
pub fn f_combinations(&self, r: i64, with_replacement: bool) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_combinations(
c_tensors.as_mut_ptr(),
self.c_tensor,
r,
if with_replacement { 1 } else { 0 }
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for the ATen `complex` op (build complex from real/imag parts).
pub fn f_complex(real: &Tensor, imag: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_complex(c_tensors.as_mut_ptr(), real.c_tensor, imag.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// `out`-variant of `complex`; result written into `out`.
pub fn f_complex_out(out: &Tensor, real: &Tensor, imag: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_complex_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
real.c_tensor,
imag.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for the ATen `concat` op over a slice of tensors along `dim`.
pub fn f_concat<T: Borrow<Tensor>>(tensors: &[T], dim: i64) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_concat(
c_tensors.as_mut_ptr(),
ptr_list(tensors).as_ptr(),
tensors.len() as i32,
dim
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// `out`-variant of `concat`; result written into `out`.
pub fn f_concat_out<T: Borrow<Tensor>>(
out: &Tensor,
tensors: &[T],
dim: i64,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_concat_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
ptr_list(tensors).as_ptr(),
tensors.len() as i32,
dim
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for the ATen `concatenate` op (numpy-named alias family).
pub fn f_concatenate<T: Borrow<Tensor>>(tensors: &[T], dim: i64) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_concatenate(
c_tensors.as_mut_ptr(),
ptr_list(tensors).as_ptr(),
tensors.len() as i32,
dim
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// `out`-variant of `concatenate`; result written into `out`.
pub fn f_concatenate_out<T: Borrow<Tensor>>(
out: &Tensor,
tensors: &[T],
dim: i64,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_concatenate_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
ptr_list(tensors).as_ptr(),
tensors.len() as i32,
dim
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for the ATen `conj` op.
pub fn f_conj(&self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_conj(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for the ATen `conj_physical` op.
pub fn f_conj_physical(&self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_conj_physical(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// In-place variant of `conj_physical`; mutates `self`.
pub fn f_conj_physical_(&mut self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_conj_physical_(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// `out`-variant of `conj_physical`; result written into `out`.
pub fn f_conj_physical_out(&self, out: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_conj_physical_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for the ATen `constant_pad_nd` op; `pad` marshalled as
/// (pointer, i32 length).
pub fn f_constant_pad_nd(&self, pad: impl IntList) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_constant_pad_nd(
c_tensors.as_mut_ptr(),
self.c_tensor,
pad.as_ptr(),
pad.len_i32()
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// `out`-variant of `constant_pad_nd`; result written into `out`.
pub fn f_constant_pad_nd_out(
&self,
out: &Tensor,
pad: impl IntList,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_constant_pad_nd_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
pad.as_ptr(),
pad.len_i32()
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for the ATen `contiguous` op.
pub fn f_contiguous(&self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_contiguous(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for the ATen `conv1d` op. An absent `bias` is passed as a
/// null tensor pointer; int-lists go through the (ptr, i32 len) convention.
pub fn f_conv1d<T: Borrow<Tensor>>(
&self,
weight: &Tensor,
bias: Option<T>,
stride: impl IntList,
padding: impl IntList,
dilation: impl IntList,
groups: i64,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_conv1d(
c_tensors.as_mut_ptr(),
self.c_tensor,
weight.c_tensor,
bias.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
stride.as_ptr(),
stride.len_i32(),
padding.as_ptr(),
padding.len_i32(),
dilation.as_ptr(),
dilation.len_i32(),
groups
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// `conv1d.padding` overload: `padding` is a string mode (e.g. "same"/"valid"
/// in ATen) marshalled as a byte pointer plus byte length.
pub fn f_conv1d_padding<T: Borrow<Tensor>>(
&self,
weight: &Tensor,
bias: Option<T>,
stride: impl IntList,
padding: &str,
dilation: impl IntList,
groups: i64,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_conv1d_padding(
c_tensors.as_mut_ptr(),
self.c_tensor,
weight.c_tensor,
bias.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
stride.as_ptr(),
stride.len_i32(),
padding.as_ptr(),
padding.len() as i32,
dilation.as_ptr(),
dilation.len_i32(),
groups
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for the ATen `conv2d` op; `None` bias becomes a null pointer.
pub fn f_conv2d<T: Borrow<Tensor>>(
&self,
weight: &Tensor,
bias: Option<T>,
stride: impl IntList,
padding: impl IntList,
dilation: impl IntList,
groups: i64,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_conv2d(
c_tensors.as_mut_ptr(),
self.c_tensor,
weight.c_tensor,
bias.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
stride.as_ptr(),
stride.len_i32(),
padding.as_ptr(),
padding.len_i32(),
dilation.as_ptr(),
dilation.len_i32(),
groups
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// `conv2d.padding` overload with a string padding mode (byte ptr + byte len).
pub fn f_conv2d_padding<T: Borrow<Tensor>>(
&self,
weight: &Tensor,
bias: Option<T>,
stride: impl IntList,
padding: &str,
dilation: impl IntList,
groups: i64,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_conv2d_padding(
c_tensors.as_mut_ptr(),
self.c_tensor,
weight.c_tensor,
bias.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
stride.as_ptr(),
stride.len_i32(),
padding.as_ptr(),
padding.len() as i32,
dilation.as_ptr(),
dilation.len_i32(),
groups
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for the ATen `conv3d` op; `None` bias becomes a null pointer.
pub fn f_conv3d<T: Borrow<Tensor>>(
&self,
weight: &Tensor,
bias: Option<T>,
stride: impl IntList,
padding: impl IntList,
dilation: impl IntList,
groups: i64,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_conv3d(
c_tensors.as_mut_ptr(),
self.c_tensor,
weight.c_tensor,
bias.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
stride.as_ptr(),
stride.len_i32(),
padding.as_ptr(),
padding.len_i32(),
dilation.as_ptr(),
dilation.len_i32(),
groups
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// `conv3d.padding` overload with a string padding mode (byte ptr + byte len).
pub fn f_conv3d_padding<T: Borrow<Tensor>>(
&self,
weight: &Tensor,
bias: Option<T>,
stride: impl IntList,
padding: &str,
dilation: impl IntList,
groups: i64,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_conv3d_padding(
c_tensors.as_mut_ptr(),
self.c_tensor,
weight.c_tensor,
bias.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
stride.as_ptr(),
stride.len_i32(),
padding.as_ptr(),
padding.len() as i32,
dilation.as_ptr(),
dilation.len_i32(),
groups
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for the ATen `conv_depthwise3d` op; note `kernel_size`
/// precedes `bias` in the C argument order.
pub fn f_conv_depthwise3d<T: Borrow<Tensor>>(
&self,
weight: &Tensor,
kernel_size: impl IntList,
bias: Option<T>,
stride: impl IntList,
padding: impl IntList,
dilation: impl IntList,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_conv_depthwise3d(
c_tensors.as_mut_ptr(),
self.c_tensor,
weight.c_tensor,
kernel_size.as_ptr(),
kernel_size.len_i32(),
bias.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
stride.as_ptr(),
stride.len_i32(),
padding.as_ptr(),
padding.len_i32(),
dilation.as_ptr(),
dilation.len_i32()
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// `out`-variant of `conv_depthwise3d`; result written into `out`.
pub fn f_conv_depthwise3d_out<T: Borrow<Tensor>>(
&self,
out: &Tensor,
weight: &Tensor,
kernel_size: impl IntList,
bias: Option<T>,
stride: impl IntList,
padding: impl IntList,
dilation: impl IntList,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_conv_depthwise3d_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
weight.c_tensor,
kernel_size.as_ptr(),
kernel_size.len_i32(),
bias.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
stride.as_ptr(),
stride.len_i32(),
padding.as_ptr(),
padding.len_i32(),
dilation.as_ptr(),
dilation.len_i32()
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for the ATen `conv_tbc` op (time-batch-channel conv).
pub fn f_conv_tbc(&self, weight: &Tensor, bias: &Tensor, pad: i64) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_conv_tbc(
c_tensors.as_mut_ptr(),
self.c_tensor,
weight.c_tensor,
bias.c_tensor,
pad
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for the ATen `conv_tbc_backward` op; the C shim fills
/// three output slots (gradients), returned here as a 3-tuple.
pub fn f_conv_tbc_backward(
&self,
input: &Tensor,
weight: &Tensor,
bias: &Tensor,
pad: i64,
) -> Result<(Tensor, Tensor, Tensor), TchError> {
let mut c_tensors = [std::ptr::null_mut(); 3];
unsafe_torch_err!(atg_conv_tbc_backward(
c_tensors.as_mut_ptr(),
self.c_tensor,
input.c_tensor,
weight.c_tensor,
bias.c_tensor,
pad
));
Ok((
Tensor { c_tensor: c_tensors[0] },
Tensor { c_tensor: c_tensors[1] },
Tensor { c_tensor: c_tensors[2] },
))
}
/// `out`-variant of `conv_tbc`; result written into `out`.
pub fn f_conv_tbc_out(
&self,
out: &Tensor,
weight: &Tensor,
bias: &Tensor,
pad: i64,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_conv_tbc_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
weight.c_tensor,
bias.c_tensor,
pad
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for the ATen `conv_transpose1d` op; note `groups` sits
/// between `output_padding` and `dilation` in the C argument order.
pub fn f_conv_transpose1d<T: Borrow<Tensor>>(
&self,
weight: &Tensor,
bias: Option<T>,
stride: impl IntList,
padding: impl IntList,
output_padding: impl IntList,
groups: i64,
dilation: impl IntList,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_conv_transpose1d(
c_tensors.as_mut_ptr(),
self.c_tensor,
weight.c_tensor,
bias.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
stride.as_ptr(),
stride.len_i32(),
padding.as_ptr(),
padding.len_i32(),
output_padding.as_ptr(),
output_padding.len_i32(),
groups,
dilation.as_ptr(),
dilation.len_i32()
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for the ATen `conv_transpose2d` op (same ABI layout as 1d).
pub fn f_conv_transpose2d<T: Borrow<Tensor>>(
&self,
weight: &Tensor,
bias: Option<T>,
stride: impl IntList,
padding: impl IntList,
output_padding: impl IntList,
groups: i64,
dilation: impl IntList,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_conv_transpose2d(
c_tensors.as_mut_ptr(),
self.c_tensor,
weight.c_tensor,
bias.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
stride.as_ptr(),
stride.len_i32(),
padding.as_ptr(),
padding.len_i32(),
output_padding.as_ptr(),
output_padding.len_i32(),
groups,
dilation.as_ptr(),
dilation.len_i32()
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for the ATen `conv_transpose3d` op (same ABI layout as 1d/2d).
pub fn f_conv_transpose3d<T: Borrow<Tensor>>(
&self,
weight: &Tensor,
bias: Option<T>,
stride: impl IntList,
padding: impl IntList,
output_padding: impl IntList,
groups: i64,
dilation: impl IntList,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_conv_transpose3d(
c_tensors.as_mut_ptr(),
self.c_tensor,
weight.c_tensor,
bias.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
stride.as_ptr(),
stride.len_i32(),
padding.as_ptr(),
padding.len_i32(),
output_padding.as_ptr(),
output_padding.len_i32(),
groups,
dilation.as_ptr(),
dilation.len_i32()
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for the generic ATen `convolution` op; `transposed` is
/// lowered to the C ABI's 0/1 int convention.
pub fn f_convolution<T: Borrow<Tensor>>(
&self,
weight: &Tensor,
bias: Option<T>,
stride: impl IntList,
padding: impl IntList,
dilation: impl IntList,
transposed: bool,
output_padding: impl IntList,
groups: i64,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_convolution(
c_tensors.as_mut_ptr(),
self.c_tensor,
weight.c_tensor,
bias.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
stride.as_ptr(),
stride.len_i32(),
padding.as_ptr(),
padding.len_i32(),
dilation.as_ptr(),
dilation.len_i32(),
if transposed { 1 } else { 0 },
output_padding.as_ptr(),
output_padding.len_i32(),
groups
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// `out`-variant of the generic `convolution` op; result written into `out`.
pub fn f_convolution_out<T: Borrow<Tensor>>(
&self,
out: &Tensor,
weight: &Tensor,
bias: Option<T>,
stride: impl IntList,
padding: impl IntList,
dilation: impl IntList,
transposed: bool,
output_padding: impl IntList,
groups: i64,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_convolution_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
weight.c_tensor,
bias.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
stride.as_ptr(),
stride.len_i32(),
padding.as_ptr(),
padding.len_i32(),
dilation.as_ptr(),
dilation.len_i32(),
if transposed { 1 } else { 0 },
output_padding.as_ptr(),
output_padding.len_i32(),
groups
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for the ATen `convolution_overrideable` op (backend hook
/// variant of `convolution`); same marshalling as `f_convolution`.
pub fn f_convolution_overrideable<T: Borrow<Tensor>>(
&self,
weight: &Tensor,
bias: Option<T>,
stride: impl IntList,
padding: impl IntList,
dilation: impl IntList,
transposed: bool,
output_padding: impl IntList,
groups: i64,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_convolution_overrideable(
c_tensors.as_mut_ptr(),
self.c_tensor,
weight.c_tensor,
bias.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
stride.as_ptr(),
stride.len_i32(),
padding.as_ptr(),
padding.len_i32(),
dilation.as_ptr(),
dilation.len_i32(),
if transposed { 1 } else { 0 },
output_padding.as_ptr(),
output_padding.len_i32(),
groups
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// `out`-variant of `convolution_overrideable`; result written into `out`.
pub fn f_convolution_overrideable_out<T: Borrow<Tensor>>(
&self,
out: &Tensor,
weight: &Tensor,
bias: Option<T>,
stride: impl IntList,
padding: impl IntList,
dilation: impl IntList,
transposed: bool,
output_padding: impl IntList,
groups: i64,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_convolution_overrideable_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
weight.c_tensor,
bias.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
stride.as_ptr(),
stride.len_i32(),
padding.as_ptr(),
padding.len_i32(),
dilation.as_ptr(),
dilation.len_i32(),
if transposed { 1 } else { 0 },
output_padding.as_ptr(),
output_padding.len_i32(),
groups
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for the ATen `copy_sparse_to_sparse` op; `non_blocking`
/// lowered to 0/1 for the C ABI.
pub fn f_copy_sparse_to_sparse(
&self,
src: &Tensor,
non_blocking: bool,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_copy_sparse_to_sparse(
c_tensors.as_mut_ptr(),
self.c_tensor,
src.c_tensor,
if non_blocking { 1 } else { 0 }
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// In-place variant of `copy_sparse_to_sparse`; mutates `self`.
pub fn f_copy_sparse_to_sparse_(
&mut self,
src: &Tensor,
non_blocking: bool,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_copy_sparse_to_sparse_(
c_tensors.as_mut_ptr(),
self.c_tensor,
src.c_tensor,
if non_blocking { 1 } else { 0 }
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// `out`-variant of `copy_sparse_to_sparse`; result written into `out`.
pub fn f_copy_sparse_to_sparse_out(
&self,
out: &Tensor,
src: &Tensor,
non_blocking: bool,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_copy_sparse_to_sparse_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
src.c_tensor,
if non_blocking { 1 } else { 0 }
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for the ATen `copysign.Tensor` op.
pub fn f_copysign(&self, other: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_copysign(c_tensors.as_mut_ptr(), self.c_tensor, other.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// In-place variant of `copysign.Tensor`; mutates `self`.
pub fn f_copysign_(&mut self, other: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_copysign_(c_tensors.as_mut_ptr(), self.c_tensor, other.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// `out`-variant of `copysign.Tensor`; result written into `out`.
pub fn f_copysign_out(&self, out: &Tensor, other: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_copysign_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
other.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for the ATen `copysign.Scalar` overload.
pub fn f_copysign_scalar<S: Into<Scalar>>(&self, other: S) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_copysign_scalar(
c_tensors.as_mut_ptr(),
self.c_tensor,
other.into().c_scalar
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// In-place variant of `copysign.Scalar`; mutates `self`.
pub fn f_copysign_scalar_<S: Into<Scalar>>(&mut self, other: S) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_copysign_scalar_(
c_tensors.as_mut_ptr(),
self.c_tensor,
other.into().c_scalar
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// `out`-variant of `copysign.Scalar`; result written into `out`.
pub fn f_copysign_scalar_out<S: Into<Scalar>>(
&self,
out: &Tensor,
other: S,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_copysign_scalar_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
other.into().c_scalar
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for the ATen `corrcoef` op.
pub fn f_corrcoef(&self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_corrcoef(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for the ATen `cos` op.
pub fn f_cos(&self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_cos(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// In-place variant of `cos`; mutates `self`.
pub fn f_cos_(&mut self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_cos_(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// `out`-variant of `cos`; result written into `out`.
pub fn f_cos_out(&self, out: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_cos_out(c_tensors.as_mut_ptr(), out.c_tensor, self.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for the ATen `cosh` op.
pub fn f_cosh(&self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_cosh(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// In-place variant of `cosh`; mutates `self`.
pub fn f_cosh_(&mut self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_cosh_(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// `out`-variant of `cosh`; result written into `out`.
pub fn f_cosh_out(&self, out: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_cosh_out(c_tensors.as_mut_ptr(), out.c_tensor, self.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for the ATen `cosine_embedding_loss` op; `reduction` is
/// lowered to libtorch's integer reduction code via `to_int()`.
pub fn f_cosine_embedding_loss(
input1: &Tensor,
input2: &Tensor,
target: &Tensor,
margin: f64,
reduction: crate::Reduction,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_cosine_embedding_loss(
c_tensors.as_mut_ptr(),
input1.c_tensor,
input2.c_tensor,
target.c_tensor,
margin,
reduction.to_int()
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for the ATen `cosine_similarity` op.
pub fn f_cosine_similarity(
x1: &Tensor,
x2: &Tensor,
dim: i64,
eps: f64,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_cosine_similarity(
c_tensors.as_mut_ptr(),
x1.c_tensor,
x2.c_tensor,
dim,
eps
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for the ATen `count_nonzero` op. Optional `dim` is
/// encoded as (value-or-0, is_none flag) per the generated nullable-int ABI.
pub fn f_count_nonzero(&self, dim: impl Into<Option<i64>>) -> Result<Tensor, TchError> {
let dim = dim.into();
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_count_nonzero(
c_tensors.as_mut_ptr(),
self.c_tensor,
dim.unwrap_or(0i64),
dim.is_none() as i8
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// `count_nonzero.dim_IntList` overload; dims marshalled as (ptr, i32 len).
pub fn f_count_nonzero_dim_intlist(&self, dim: impl IntList) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_count_nonzero_dim_intlist(
c_tensors.as_mut_ptr(),
self.c_tensor,
dim.as_ptr(),
dim.len_i32()
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// `out`-variant of `count_nonzero.dim_IntList`; result written into `out`.
pub fn f_count_nonzero_dim_intlist_out(
&self,
out: &Tensor,
dim: impl IntList,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_count_nonzero_dim_intlist_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
dim.as_ptr(),
dim.len_i32()
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// `out`-variant of `count_nonzero` with optional `dim` (nullable-int ABI).
pub fn f_count_nonzero_out(
&self,
out: &Tensor,
dim: impl Into<Option<i64>>,
) -> Result<Tensor, TchError> {
let dim = dim.into();
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_count_nonzero_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
dim.unwrap_or(0i64),
dim.is_none() as i8
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for the ATen `cov` op; absent weight tensors are passed
/// as null pointers.
pub fn f_cov<T: Borrow<Tensor>>(
&self,
correction: i64,
fweights: Option<T>,
aweights: Option<T>,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_cov(
c_tensors.as_mut_ptr(),
self.c_tensor,
correction,
fweights.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
aweights.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor)
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for the ATen `cross` op; optional `dim` uses the
/// (value-or-0, is_none flag) nullable-int ABI.
pub fn f_cross(&self, other: &Tensor, dim: impl Into<Option<i64>>) -> Result<Tensor, TchError> {
let dim = dim.into();
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_cross(
c_tensors.as_mut_ptr(),
self.c_tensor,
other.c_tensor,
dim.unwrap_or(0i64),
dim.is_none() as i8
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for the ATen `cross_entropy_loss` op; `None` class weight
/// becomes a null pointer, `reduction` is lowered via `to_int()`.
pub fn f_cross_entropy_loss<T: Borrow<Tensor>>(
&self,
target: &Tensor,
weight: Option<T>,
reduction: crate::Reduction,
ignore_index: i64,
label_smoothing: f64,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_cross_entropy_loss(
c_tensors.as_mut_ptr(),
self.c_tensor,
target.c_tensor,
weight.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
reduction.to_int(),
ignore_index,
label_smoothing
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// `out`-variant of `cross`; optional `dim`, result written into `out`.
pub fn f_cross_out(
&self,
out: &Tensor,
other: &Tensor,
dim: impl Into<Option<i64>>,
) -> Result<Tensor, TchError> {
let dim = dim.into();
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_cross_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
other.c_tensor,
dim.unwrap_or(0i64),
dim.is_none() as i8
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for the ATen `crow_indices` op (sparse CSR accessor).
pub fn f_crow_indices(&self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_crow_indices(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for the ATen `crow_indices_copy` op.
pub fn f_crow_indices_copy(&self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_crow_indices_copy(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// `out`-variant of `crow_indices_copy`; result written into `out`.
pub fn f_crow_indices_copy_out(&self, out: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_crow_indices_copy_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for the ATen `ctc_loss.IntList` overload (lengths given
/// as host int-lists rather than tensors).
pub fn f_ctc_loss(
log_probs: &Tensor,
targets: &Tensor,
input_lengths: impl IntList,
target_lengths: impl IntList,
blank: i64,
reduction: crate::Reduction,
zero_infinity: bool,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_ctc_loss(
c_tensors.as_mut_ptr(),
log_probs.c_tensor,
targets.c_tensor,
input_lengths.as_ptr(),
input_lengths.len_i32(),
target_lengths.as_ptr(),
target_lengths.len_i32(),
blank,
reduction.to_int(),
if zero_infinity { 1 } else { 0 }
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// `ctc_loss.Tensor` overload (lengths supplied as tensors).
pub fn f_ctc_loss_tensor(
log_probs: &Tensor,
targets: &Tensor,
input_lengths: &Tensor,
target_lengths: &Tensor,
blank: i64,
reduction: crate::Reduction,
zero_infinity: bool,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_ctc_loss_tensor(
c_tensors.as_mut_ptr(),
log_probs.c_tensor,
targets.c_tensor,
input_lengths.c_tensor,
target_lengths.c_tensor,
blank,
reduction.to_int(),
if zero_infinity { 1 } else { 0 }
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for the cuDNN `cudnn_affine_grid_generator` op
/// (n/c/h/w are the generated C shim's scalar dimension arguments).
pub fn f_cudnn_affine_grid_generator(
theta: &Tensor,
n: i64,
c: i64,
h: i64,
w: i64,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_cudnn_affine_grid_generator(
c_tensors.as_mut_ptr(),
theta.c_tensor,
n,
c,
h,
w
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
pub fn f_cudnn_affine_grid_generator_backward(
grad: &Tensor,
n: i64,
c: i64,
h: i64,
w: i64,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_cudnn_affine_grid_generator_backward(
c_tensors.as_mut_ptr(),
grad.c_tensor,
n,
c,
h,
w
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
pub fn f_cudnn_affine_grid_generator_backward_out(
out: &Tensor,
grad: &Tensor,
n: i64,
c: i64,
h: i64,
w: i64,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_cudnn_affine_grid_generator_backward_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
grad.c_tensor,
n,
c,
h,
w
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
pub fn f_cudnn_affine_grid_generator_out(
out: &Tensor,
theta: &Tensor,
n: i64,
c: i64,
h: i64,
w: i64,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_cudnn_affine_grid_generator_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
theta.c_tensor,
n,
c,
h,
w
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for `cudnn_batch_norm`, returning four result tensors.
/// Optional tensor arguments are marshalled as null pointers when `None` —
/// the convention every `Option<T: Borrow<Tensor>>` parameter in this file
/// follows.
pub fn f_cudnn_batch_norm<T: Borrow<Tensor>>(
&self,
weight: &Tensor,
bias: Option<T>,
running_mean: Option<T>,
running_var: Option<T>,
training: bool,
exponential_average_factor: f64,
epsilon: f64,
) -> Result<(Tensor, Tensor, Tensor, Tensor), TchError> {
// Four output slots: the C call fills all of them.
let mut c_tensors = [std::ptr::null_mut(); 4];
unsafe_torch_err!(atg_cudnn_batch_norm(
c_tensors.as_mut_ptr(),
self.c_tensor,
weight.c_tensor,
bias.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
running_mean.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
running_var.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
if training { 1 } else { 0 },
exponential_average_factor,
epsilon
));
Ok((
Tensor { c_tensor: c_tensors[0] },
Tensor { c_tensor: c_tensors[1] },
Tensor { c_tensor: c_tensors[2] },
Tensor { c_tensor: c_tensors[3] },
))
}
/// Backward pass of `cudnn_batch_norm`; yields three gradient tensors.
pub fn f_cudnn_batch_norm_backward<T: Borrow<Tensor>>(
&self,
grad_output: &Tensor,
weight: &Tensor,
running_mean: Option<T>,
running_var: Option<T>,
save_mean: Option<T>,
save_var: Option<T>,
epsilon: f64,
reservespace: &Tensor,
) -> Result<(Tensor, Tensor, Tensor), TchError> {
let mut c_tensors = [std::ptr::null_mut(); 3];
unsafe_torch_err!(atg_cudnn_batch_norm_backward(
c_tensors.as_mut_ptr(),
self.c_tensor,
grad_output.c_tensor,
weight.c_tensor,
running_mean.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
running_var.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
save_mean.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
save_var.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
epsilon,
reservespace.c_tensor
));
Ok((
Tensor { c_tensor: c_tensors[0] },
Tensor { c_tensor: c_tensors[1] },
Tensor { c_tensor: c_tensors[2] },
))
}
/// Out-variant of the batch-norm backward pass (`out0..out2` supplied by
/// the caller, passed ahead of the inputs).
pub fn f_cudnn_batch_norm_backward_out<T: Borrow<Tensor>>(
&self,
out0: &Tensor,
out1: &Tensor,
out2: &Tensor,
grad_output: &Tensor,
weight: &Tensor,
running_mean: Option<T>,
running_var: Option<T>,
save_mean: Option<T>,
save_var: Option<T>,
epsilon: f64,
reservespace: &Tensor,
) -> Result<(Tensor, Tensor, Tensor), TchError> {
let mut c_tensors = [std::ptr::null_mut(); 3];
unsafe_torch_err!(atg_cudnn_batch_norm_backward_out(
c_tensors.as_mut_ptr(),
out0.c_tensor,
out1.c_tensor,
out2.c_tensor,
self.c_tensor,
grad_output.c_tensor,
weight.c_tensor,
running_mean.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
running_var.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
save_mean.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
save_var.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
epsilon,
reservespace.c_tensor
));
Ok((
Tensor { c_tensor: c_tensors[0] },
Tensor { c_tensor: c_tensors[1] },
Tensor { c_tensor: c_tensors[2] },
))
}
/// Out-variant of `cudnn_batch_norm` with four caller-supplied outputs.
pub fn f_cudnn_batch_norm_out<T: Borrow<Tensor>>(
&self,
out0: &Tensor,
out1: &Tensor,
out2: &Tensor,
out3: &Tensor,
weight: &Tensor,
bias: Option<T>,
running_mean: Option<T>,
running_var: Option<T>,
training: bool,
exponential_average_factor: f64,
epsilon: f64,
) -> Result<(Tensor, Tensor, Tensor, Tensor), TchError> {
let mut c_tensors = [std::ptr::null_mut(); 4];
unsafe_torch_err!(atg_cudnn_batch_norm_out(
c_tensors.as_mut_ptr(),
out0.c_tensor,
out1.c_tensor,
out2.c_tensor,
out3.c_tensor,
self.c_tensor,
weight.c_tensor,
bias.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
running_mean.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
running_var.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
if training { 1 } else { 0 },
exponential_average_factor,
epsilon
));
Ok((
Tensor { c_tensor: c_tensors[0] },
Tensor { c_tensor: c_tensors[1] },
Tensor { c_tensor: c_tensors[2] },
Tensor { c_tensor: c_tensors[3] },
))
}
/// Fallible binding for `cudnn_convolution`. Geometry arguments
/// (`padding`/`stride`/`dilation`) are `IntList`s marshalled as
/// (pointer, i32 length) pairs; the three trailing flags become 0/1 ints.
pub fn f_cudnn_convolution(
&self,
weight: &Tensor,
padding: impl IntList,
stride: impl IntList,
dilation: impl IntList,
groups: i64,
benchmark: bool,
deterministic: bool,
allow_tf32: bool,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_cudnn_convolution(
c_tensors.as_mut_ptr(),
self.c_tensor,
weight.c_tensor,
padding.as_ptr(),
padding.len_i32(),
stride.as_ptr(),
stride.len_i32(),
dilation.as_ptr(),
dilation.len_i32(),
groups,
if benchmark { 1 } else { 0 },
if deterministic { 1 } else { 0 },
if allow_tf32 { 1 } else { 0 }
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fused convolution + add + relu binding; `alpha` is a scalar converted
/// via `Into<Scalar>`, `bias` is optional (null when `None`).
pub fn f_cudnn_convolution_add_relu<T: Borrow<Tensor>, S: Into<Scalar>>(
&self,
weight: &Tensor,
z: &Tensor,
alpha: S,
bias: Option<T>,
stride: impl IntList,
padding: impl IntList,
dilation: impl IntList,
groups: i64,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_cudnn_convolution_add_relu(
c_tensors.as_mut_ptr(),
self.c_tensor,
weight.c_tensor,
z.c_tensor,
alpha.into().c_scalar,
bias.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
stride.as_ptr(),
stride.len_i32(),
padding.as_ptr(),
padding.len_i32(),
dilation.as_ptr(),
dilation.len_i32(),
groups
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Out-variant of the fused convolution + add + relu binding.
pub fn f_cudnn_convolution_add_relu_out<T: Borrow<Tensor>, S: Into<Scalar>>(
&self,
out: &Tensor,
weight: &Tensor,
z: &Tensor,
alpha: S,
bias: Option<T>,
stride: impl IntList,
padding: impl IntList,
dilation: impl IntList,
groups: i64,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_cudnn_convolution_add_relu_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
weight.c_tensor,
z.c_tensor,
alpha.into().c_scalar,
bias.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
stride.as_ptr(),
stride.len_i32(),
padding.as_ptr(),
padding.len_i32(),
dilation.as_ptr(),
dilation.len_i32(),
groups
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Out-variant of `cudnn_convolution`.
pub fn f_cudnn_convolution_out(
&self,
out: &Tensor,
weight: &Tensor,
padding: impl IntList,
stride: impl IntList,
dilation: impl IntList,
groups: i64,
benchmark: bool,
deterministic: bool,
allow_tf32: bool,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_cudnn_convolution_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
weight.c_tensor,
padding.as_ptr(),
padding.len_i32(),
stride.as_ptr(),
stride.len_i32(),
dilation.as_ptr(),
dilation.len_i32(),
groups,
if benchmark { 1 } else { 0 },
if deterministic { 1 } else { 0 },
if allow_tf32 { 1 } else { 0 }
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fused convolution + relu binding (no residual/add input).
pub fn f_cudnn_convolution_relu<T: Borrow<Tensor>>(
&self,
weight: &Tensor,
bias: Option<T>,
stride: impl IntList,
padding: impl IntList,
dilation: impl IntList,
groups: i64,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_cudnn_convolution_relu(
c_tensors.as_mut_ptr(),
self.c_tensor,
weight.c_tensor,
bias.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
stride.as_ptr(),
stride.len_i32(),
padding.as_ptr(),
padding.len_i32(),
dilation.as_ptr(),
dilation.len_i32(),
groups
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Out-variant of the fused convolution + relu binding.
pub fn f_cudnn_convolution_relu_out<T: Borrow<Tensor>>(
&self,
out: &Tensor,
weight: &Tensor,
bias: Option<T>,
stride: impl IntList,
padding: impl IntList,
dilation: impl IntList,
groups: i64,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_cudnn_convolution_relu_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
weight.c_tensor,
bias.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
stride.as_ptr(),
stride.len_i32(),
padding.as_ptr(),
padding.len_i32(),
dilation.as_ptr(),
dilation.len_i32(),
groups
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Transposed-convolution binding; adds an `output_padding` IntList to the
/// geometry arguments of `cudnn_convolution`.
pub fn f_cudnn_convolution_transpose(
&self,
weight: &Tensor,
padding: impl IntList,
output_padding: impl IntList,
stride: impl IntList,
dilation: impl IntList,
groups: i64,
benchmark: bool,
deterministic: bool,
allow_tf32: bool,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_cudnn_convolution_transpose(
c_tensors.as_mut_ptr(),
self.c_tensor,
weight.c_tensor,
padding.as_ptr(),
padding.len_i32(),
output_padding.as_ptr(),
output_padding.len_i32(),
stride.as_ptr(),
stride.len_i32(),
dilation.as_ptr(),
dilation.len_i32(),
groups,
if benchmark { 1 } else { 0 },
if deterministic { 1 } else { 0 },
if allow_tf32 { 1 } else { 0 }
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Out-variant of the transposed-convolution binding.
pub fn f_cudnn_convolution_transpose_out(
&self,
out: &Tensor,
weight: &Tensor,
padding: impl IntList,
output_padding: impl IntList,
stride: impl IntList,
dilation: impl IntList,
groups: i64,
benchmark: bool,
deterministic: bool,
allow_tf32: bool,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_cudnn_convolution_transpose_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
weight.c_tensor,
padding.as_ptr(),
padding.len_i32(),
output_padding.as_ptr(),
output_padding.len_i32(),
stride.as_ptr(),
stride.len_i32(),
dilation.as_ptr(),
dilation.len_i32(),
groups,
if benchmark { 1 } else { 0 },
if deterministic { 1 } else { 0 },
if allow_tf32 { 1 } else { 0 }
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for `cudnn_grid_sampler`.
pub fn f_cudnn_grid_sampler(&self, grid: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_cudnn_grid_sampler(
c_tensors.as_mut_ptr(),
self.c_tensor,
grid.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Backward pass of `cudnn_grid_sampler`; returns two gradient tensors.
pub fn f_cudnn_grid_sampler_backward(
&self,
grid: &Tensor,
grad_output: &Tensor,
) -> Result<(Tensor, Tensor), TchError> {
let mut c_tensors = [std::ptr::null_mut(); 2];
unsafe_torch_err!(atg_cudnn_grid_sampler_backward(
c_tensors.as_mut_ptr(),
self.c_tensor,
grid.c_tensor,
grad_output.c_tensor
));
Ok((Tensor { c_tensor: c_tensors[0] }, Tensor { c_tensor: c_tensors[1] }))
}
/// Out-variant of the grid-sampler backward pass.
pub fn f_cudnn_grid_sampler_backward_out(
&self,
out0: &Tensor,
out1: &Tensor,
grid: &Tensor,
grad_output: &Tensor,
) -> Result<(Tensor, Tensor), TchError> {
let mut c_tensors = [std::ptr::null_mut(); 2];
unsafe_torch_err!(atg_cudnn_grid_sampler_backward_out(
c_tensors.as_mut_ptr(),
out0.c_tensor,
out1.c_tensor,
self.c_tensor,
grid.c_tensor,
grad_output.c_tensor
));
Ok((Tensor { c_tensor: c_tensors[0] }, Tensor { c_tensor: c_tensors[1] }))
}
/// Out-variant of `cudnn_grid_sampler`.
pub fn f_cudnn_grid_sampler_out(
&self,
out: &Tensor,
grid: &Tensor,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_cudnn_grid_sampler_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
grid.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for `cudnn_is_acceptable`: the C call returns an
/// integer which is mapped to `bool` (non-zero => true).
pub fn f_cudnn_is_acceptable(&self) -> Result<bool, TchError> {
let return_;
unsafe_torch_err!(return_ = atg_cudnn_is_acceptable(self.c_tensor));
Ok(return_ != 0)
}
/// Fallible binding for `cummax`; returns the (values, indices) pair
/// produced by the C shim.
pub fn f_cummax(&self, dim: i64) -> Result<(Tensor, Tensor), TchError> {
let mut c_tensors = [std::ptr::null_mut(); 2];
unsafe_torch_err!(atg_cummax(c_tensors.as_mut_ptr(), self.c_tensor, dim));
Ok((Tensor { c_tensor: c_tensors[0] }, Tensor { c_tensor: c_tensors[1] }))
}
/// Out-variant of `cummax` with caller-supplied `values`/`indices`.
pub fn f_cummax_out(
&self,
values: &Tensor,
indices: &Tensor,
dim: i64,
) -> Result<(Tensor, Tensor), TchError> {
let mut c_tensors = [std::ptr::null_mut(); 2];
unsafe_torch_err!(atg_cummax_out(
c_tensors.as_mut_ptr(),
values.c_tensor,
indices.c_tensor,
self.c_tensor,
dim
));
Ok((Tensor { c_tensor: c_tensors[0] }, Tensor { c_tensor: c_tensors[1] }))
}
/// Fallible binding for `cummaxmin_backward`; note the C argument order is
/// (grad, self, indices, dim).
pub fn f_cummaxmin_backward(
&self,
grad: &Tensor,
indices: &Tensor,
dim: i64,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_cummaxmin_backward(
c_tensors.as_mut_ptr(),
grad.c_tensor,
self.c_tensor,
indices.c_tensor,
dim
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for `cummin`; returns a (values, indices) pair.
pub fn f_cummin(&self, dim: i64) -> Result<(Tensor, Tensor), TchError> {
let mut c_tensors = [std::ptr::null_mut(); 2];
unsafe_torch_err!(atg_cummin(c_tensors.as_mut_ptr(), self.c_tensor, dim));
Ok((Tensor { c_tensor: c_tensors[0] }, Tensor { c_tensor: c_tensors[1] }))
}
/// Out-variant of `cummin` with caller-supplied `values`/`indices`.
pub fn f_cummin_out(
&self,
values: &Tensor,
indices: &Tensor,
dim: i64,
) -> Result<(Tensor, Tensor), TchError> {
let mut c_tensors = [std::ptr::null_mut(); 2];
unsafe_torch_err!(atg_cummin_out(
c_tensors.as_mut_ptr(),
values.c_tensor,
indices.c_tensor,
self.c_tensor,
dim
));
Ok((Tensor { c_tensor: c_tensors[0] }, Tensor { c_tensor: c_tensors[1] }))
}
/// Fallible binding for `cumprod`. The optional dtype is encoded as its
/// ATen integer code, with `-1` meaning "no dtype specified".
pub fn f_cumprod(&self, dim: i64, dtype: impl Into<Option<Kind>>) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_cumprod(
c_tensors.as_mut_ptr(),
self.c_tensor,
dim,
dtype.into().map_or(-1, |s| s.c_int())
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// In-place variant of `cumprod` (takes `&mut self`, matching the `_`
/// suffix convention of this file).
pub fn f_cumprod_(
&mut self,
dim: i64,
dtype: impl Into<Option<Kind>>,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_cumprod_(
c_tensors.as_mut_ptr(),
self.c_tensor,
dim,
dtype.into().map_or(-1, |s| s.c_int())
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for `cumprod_backward`; C argument order is
/// (grad, self, dim, output).
pub fn f_cumprod_backward(
&self,
grad: &Tensor,
dim: i64,
output: &Tensor,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_cumprod_backward(
c_tensors.as_mut_ptr(),
grad.c_tensor,
self.c_tensor,
dim,
output.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Out-variant of `cumprod`.
pub fn f_cumprod_out(
&self,
out: &Tensor,
dim: i64,
dtype: impl Into<Option<Kind>>,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_cumprod_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
dim,
dtype.into().map_or(-1, |s| s.c_int())
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for `cumsum`; optional dtype encoded as `-1` when
/// absent (same convention as `f_cumprod`).
pub fn f_cumsum(&self, dim: i64, dtype: impl Into<Option<Kind>>) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_cumsum(
c_tensors.as_mut_ptr(),
self.c_tensor,
dim,
dtype.into().map_or(-1, |s| s.c_int())
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// In-place variant of `cumsum`.
pub fn f_cumsum_(
&mut self,
dim: i64,
dtype: impl Into<Option<Kind>>,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_cumsum_(
c_tensors.as_mut_ptr(),
self.c_tensor,
dim,
dtype.into().map_or(-1, |s| s.c_int())
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Out-variant of `cumsum`.
pub fn f_cumsum_out(
&self,
out: &Tensor,
dim: i64,
dtype: impl Into<Option<Kind>>,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_cumsum_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
dim,
dtype.into().map_or(-1, |s| s.c_int())
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for `cumulative_trapezoid` (unit-spacing form).
pub fn f_cumulative_trapezoid(y: &Tensor, dim: i64) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_cumulative_trapezoid(c_tensors.as_mut_ptr(), y.c_tensor, dim));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Variant of `cumulative_trapezoid` with explicit sample coordinates `x`.
pub fn f_cumulative_trapezoid_x(y: &Tensor, x: &Tensor, dim: i64) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_cumulative_trapezoid_x(
c_tensors.as_mut_ptr(),
y.c_tensor,
x.c_tensor,
dim
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for ATen `data`; returns a new tensor handle.
pub fn f_data(&self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_data(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for `deg2rad`.
pub fn f_deg2rad(&self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_deg2rad(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// In-place variant of `deg2rad`.
pub fn f_deg2rad_(&mut self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_deg2rad_(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Out-variant of `deg2rad`.
pub fn f_deg2rad_out(&self, out: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_deg2rad_out(c_tensors.as_mut_ptr(), out.c_tensor, self.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for `dense_dim`; the C call returns the value directly
/// rather than via an out-array.
pub fn f_dense_dim(&self) -> Result<i64, TchError> {
let return_;
unsafe_torch_err!(return_ = atg_dense_dim(self.c_tensor));
Ok(return_)
}
/// Fallible binding for `dequantize`.
pub fn f_dequantize(&self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_dequantize(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Out-variant of `dequantize` (single-tensor overload).
pub fn f_dequantize_self_out(&self, out: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_dequantize_self_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Dequantizes a slice of tensors. The C shim returns a heap-allocated,
/// null-terminated array of tensor handles; this wrapper collects them and
/// frees the array itself with `libc::free` (the handles are now owned by
/// the returned `Tensor`s).
pub fn f_dequantize_tensors<T: Borrow<Tensor>>(tensors: &[T]) -> Result<Vec<Tensor>, TchError> {
let c_tensors = unsafe_torch_err!(atg_dequantize_tensors(
ptr_list(tensors).as_ptr(),
tensors.len() as i32
));
let mut r__ = vec![];
let mut i = 0;
loop {
// Walk the returned array until the null terminator.
let c__ = unsafe { *c_tensors.add(i) };
if c__.is_null() {
break;
}
r__.push(Tensor { c_tensor: c__ });
i += 1;
}
unsafe { libc::free(c_tensors as *mut libc::c_void) }
Ok(r__)
}
/// Out-variant of `dequantize_tensors`; results go into the caller's `out`
/// slice, so nothing is returned on success.
pub fn f_dequantize_tensors_out<T: Borrow<Tensor>>(
out: &[T],
tensors: &[T],
) -> Result<(), TchError> {
unsafe_torch_err!(atg_dequantize_tensors_out(
ptr_list(out).as_ptr(),
out.len() as i32,
ptr_list(tensors).as_ptr(),
tensors.len() as i32
));
Ok(())
}
/// Fallible binding for `det`.
pub fn f_det(&self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_det(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for `detach`.
pub fn f_detach(&self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_detach(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// In-place variant of `detach`.
pub fn f_detach_(&mut self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_detach_(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for `detach_copy`.
pub fn f_detach_copy(&self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_detach_copy(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Out-variant of `detach_copy`.
pub fn f_detach_copy_out(&self, out: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_detach_copy_out(c_tensors.as_mut_ptr(), out.c_tensor, self.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for `diag` with a diagonal offset.
pub fn f_diag(&self, diagonal: i64) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_diag(c_tensors.as_mut_ptr(), self.c_tensor, diagonal));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for `diag_embed` (offset plus the two embedding dims).
pub fn f_diag_embed(&self, offset: i64, dim1: i64, dim2: i64) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_diag_embed(
c_tensors.as_mut_ptr(),
self.c_tensor,
offset,
dim1,
dim2
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Out-variant of `diag_embed`.
pub fn f_diag_embed_out(
&self,
out: &Tensor,
offset: i64,
dim1: i64,
dim2: i64,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_diag_embed_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
offset,
dim1,
dim2
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Out-variant of `diag`.
pub fn f_diag_out(&self, out: &Tensor, diagonal: i64) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_diag_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
diagonal
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for `diagflat`.
pub fn f_diagflat(&self, offset: i64) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_diagflat(c_tensors.as_mut_ptr(), self.c_tensor, offset));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for `diagonal` (offset, dim1, dim2 forwarded as-is).
pub fn f_diagonal(&self, offset: i64, dim1: i64, dim2: i64) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_diagonal(c_tensors.as_mut_ptr(), self.c_tensor, offset, dim1, dim2));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for `diagonal_backward`; `input_sizes` is an `IntList`
/// marshalled as a (pointer, i32 length) pair.
pub fn f_diagonal_backward(
grad_output: &Tensor,
input_sizes: impl IntList,
offset: i64,
dim1: i64,
dim2: i64,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_diagonal_backward(
c_tensors.as_mut_ptr(),
grad_output.c_tensor,
input_sizes.as_ptr(),
input_sizes.len_i32(),
offset,
dim1,
dim2
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Out-variant of `diagonal_backward`.
pub fn f_diagonal_backward_out(
out: &Tensor,
grad_output: &Tensor,
input_sizes: impl IntList,
offset: i64,
dim1: i64,
dim2: i64,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_diagonal_backward_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
grad_output.c_tensor,
input_sizes.as_ptr(),
input_sizes.len_i32(),
offset,
dim1,
dim2
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for `diagonal_copy`.
pub fn f_diagonal_copy(&self, offset: i64, dim1: i64, dim2: i64) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_diagonal_copy(
c_tensors.as_mut_ptr(),
self.c_tensor,
offset,
dim1,
dim2
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Out-variant of `diagonal_copy`.
pub fn f_diagonal_copy_out(
&self,
out: &Tensor,
offset: i64,
dim1: i64,
dim2: i64,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_diagonal_copy_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
offset,
dim1,
dim2
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for `diagonal_scatter` (scatters `src` onto the
/// selected diagonal of `self` — per the ATen op of the same name).
pub fn f_diagonal_scatter(
&self,
src: &Tensor,
offset: i64,
dim1: i64,
dim2: i64,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_diagonal_scatter(
c_tensors.as_mut_ptr(),
self.c_tensor,
src.c_tensor,
offset,
dim1,
dim2
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Out-variant of `diagonal_scatter`.
pub fn f_diagonal_scatter_out(
&self,
out: &Tensor,
src: &Tensor,
offset: i64,
dim1: i64,
dim2: i64,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_diagonal_scatter_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
src.c_tensor,
offset,
dim1,
dim2
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for `diff`; the optional `prepend`/`append` tensors
/// are marshalled as null pointers when `None`.
pub fn f_diff<T: Borrow<Tensor>>(
&self,
n: i64,
dim: i64,
prepend: Option<T>,
append: Option<T>,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_diff(
c_tensors.as_mut_ptr(),
self.c_tensor,
n,
dim,
prepend.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
append.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor)
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Out-variant of `diff`.
pub fn f_diff_out<T: Borrow<Tensor>>(
&self,
out: &Tensor,
n: i64,
dim: i64,
prepend: Option<T>,
append: Option<T>,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_diff_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
n,
dim,
prepend.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
append.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor)
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for `digamma`.
pub fn f_digamma(&self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_digamma(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// In-place variant of `digamma`.
pub fn f_digamma_(&mut self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_digamma_(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Out-variant of `digamma`.
pub fn f_digamma_out(&self, out: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_digamma_out(c_tensors.as_mut_ptr(), out.c_tensor, self.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for `dist` between `self` and `other`.
pub fn f_dist(&self, other: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_dist(c_tensors.as_mut_ptr(), self.c_tensor, other.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Out-variant of `dist`.
pub fn f_dist_out(&self, out: &Tensor, other: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_dist_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
other.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for element-wise tensor `div`.
pub fn f_div(&self, other: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_div(c_tensors.as_mut_ptr(), self.c_tensor, other.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// In-place variant of `div`.
pub fn f_div_(&mut self, other: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_div_(c_tensors.as_mut_ptr(), self.c_tensor, other.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Out-variant of `div`.
pub fn f_div_out(&self, out: &Tensor, other: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_div_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
other.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Out-variant of `div` with a rounding mode. The `&str` is marshalled as
/// a (byte pointer, i32 length) pair — not null-terminated, so the C shim
/// relies on the explicit length.
pub fn f_div_out_mode(
&self,
out: &Tensor,
other: &Tensor,
rounding_mode: &str,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_div_out_mode(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
other.c_tensor,
rounding_mode.as_ptr(),
rounding_mode.len() as i32
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Scalar overload of `div`.
pub fn f_div_scalar<S: Into<Scalar>>(&self, other: S) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_div_scalar(
c_tensors.as_mut_ptr(),
self.c_tensor,
other.into().c_scalar
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// In-place scalar overload of `div`.
pub fn f_div_scalar_<S: Into<Scalar>>(&mut self, other: S) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_div_scalar_(
c_tensors.as_mut_ptr(),
self.c_tensor,
other.into().c_scalar
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Scalar overload of `div` with a rounding mode.
pub fn f_div_scalar_mode<S: Into<Scalar>>(
&self,
other: S,
rounding_mode: &str,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_div_scalar_mode(
c_tensors.as_mut_ptr(),
self.c_tensor,
other.into().c_scalar,
rounding_mode.as_ptr(),
rounding_mode.len() as i32
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// In-place scalar overload of `div` with a rounding mode.
pub fn f_div_scalar_mode_<S: Into<Scalar>>(
&mut self,
other: S,
rounding_mode: &str,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_div_scalar_mode_(
c_tensors.as_mut_ptr(),
self.c_tensor,
other.into().c_scalar,
rounding_mode.as_ptr(),
rounding_mode.len() as i32
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Out-variant of the scalar `div` with a rounding mode.
pub fn f_div_scalar_mode_out<S: Into<Scalar>>(
&self,
out: &Tensor,
other: S,
rounding_mode: &str,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_div_scalar_mode_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
other.into().c_scalar,
rounding_mode.as_ptr(),
rounding_mode.len() as i32
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Out-variant of the scalar `div`.
pub fn f_div_scalar_out<S: Into<Scalar>>(
&self,
out: &Tensor,
other: S,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_div_scalar_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
other.into().c_scalar
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Tensor `div` with a rounding mode.
pub fn f_div_tensor_mode(
&self,
other: &Tensor,
rounding_mode: &str,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_div_tensor_mode(
c_tensors.as_mut_ptr(),
self.c_tensor,
other.c_tensor,
rounding_mode.as_ptr(),
rounding_mode.len() as i32
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// In-place tensor `div` with a rounding mode.
pub fn f_div_tensor_mode_(
&mut self,
other: &Tensor,
rounding_mode: &str,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_div_tensor_mode_(
c_tensors.as_mut_ptr(),
self.c_tensor,
other.c_tensor,
rounding_mode.as_ptr(),
rounding_mode.len() as i32
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for `divide` (ATen alias of `div`).
pub fn f_divide(&self, other: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_divide(c_tensors.as_mut_ptr(), self.c_tensor, other.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// In-place variant of `divide`.
pub fn f_divide_(&mut self, other: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_divide_(c_tensors.as_mut_ptr(), self.c_tensor, other.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Out-variant of `divide`.
pub fn f_divide_out(&self, out: &Tensor, other: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_divide_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
other.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Out-variant of `divide` with a rounding mode; the `&str` is passed as a
/// (byte pointer, i32 length) pair.
pub fn f_divide_out_mode(
&self,
out: &Tensor,
other: &Tensor,
rounding_mode: &str,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_divide_out_mode(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
other.c_tensor,
rounding_mode.as_ptr(),
rounding_mode.len() as i32
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Scalar overload of `divide`.
pub fn f_divide_scalar<S: Into<Scalar>>(&self, other: S) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_divide_scalar(
c_tensors.as_mut_ptr(),
self.c_tensor,
other.into().c_scalar
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// In-place scalar overload of `divide`.
pub fn f_divide_scalar_<S: Into<Scalar>>(&mut self, other: S) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_divide_scalar_(
c_tensors.as_mut_ptr(),
self.c_tensor,
other.into().c_scalar
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Scalar overload of `divide` with a rounding mode.
pub fn f_divide_scalar_mode<S: Into<Scalar>>(
&self,
other: S,
rounding_mode: &str,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_divide_scalar_mode(
c_tensors.as_mut_ptr(),
self.c_tensor,
other.into().c_scalar,
rounding_mode.as_ptr(),
rounding_mode.len() as i32
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// In-place scalar overload of `divide` with a rounding mode.
pub fn f_divide_scalar_mode_<S: Into<Scalar>>(
&mut self,
other: S,
rounding_mode: &str,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_divide_scalar_mode_(
c_tensors.as_mut_ptr(),
self.c_tensor,
other.into().c_scalar,
rounding_mode.as_ptr(),
rounding_mode.len() as i32
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Tensor `divide` with a rounding mode.
pub fn f_divide_tensor_mode(
&self,
other: &Tensor,
rounding_mode: &str,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_divide_tensor_mode(
c_tensors.as_mut_ptr(),
self.c_tensor,
other.c_tensor,
rounding_mode.as_ptr(),
rounding_mode.len() as i32
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// In-place tensor `divide` with a rounding mode.
pub fn f_divide_tensor_mode_(
&mut self,
other: &Tensor,
rounding_mode: &str,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_divide_tensor_mode_(
c_tensors.as_mut_ptr(),
self.c_tensor,
other.c_tensor,
rounding_mode.as_ptr(),
rounding_mode.len() as i32
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
pub fn f_dot(&self, tensor: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_dot(c_tensors.as_mut_ptr(), self.c_tensor, tensor.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
pub fn f_dot_out(&self, out: &Tensor, tensor: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_dot_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
tensor.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Dropout with probability `p`; `train` is encoded as a C int (1/0) for the FFI.
pub fn f_dropout(&self, p: f64, train: bool) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_dropout(
c_tensors.as_mut_ptr(),
self.c_tensor,
p,
if train { 1 } else { 0 }
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// In-place (`&mut self`) dropout, forwarding to `atg_dropout_`.
pub fn f_dropout_(&mut self, p: f64, train: bool) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_dropout_(
c_tensors.as_mut_ptr(),
self.c_tensor,
p,
if train { 1 } else { 0 }
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Splits `self` into `sections` pieces via `atg_dsplit`. The C side returns a
/// null-terminated array of tensor handles, which is walked until the first
/// null pointer and then released with `libc::free` (the array only — the
/// individual handles are now owned by the returned `Tensor` values).
pub fn f_dsplit(&self, sections: i64) -> Result<Vec<Tensor>, TchError> {
let c_tensors = unsafe_torch_err!(atg_dsplit(self.c_tensor, sections));
let mut r__ = vec![];
let mut i = 0;
loop {
let c__ = unsafe { *c_tensors.add(i) };
if c__.is_null() {
break;
}
r__.push(Tensor { c_tensor: c__ });
i += 1;
}
unsafe { libc::free(c_tensors as *mut libc::c_void) }
Ok(r__)
}
/// Splits `self` at the given indices via `atg_dsplit_array`. As with
/// `f_dsplit`, the result is a null-terminated handle array that is copied out
/// and then freed with `libc::free`.
pub fn f_dsplit_array(&self, indices: impl IntList) -> Result<Vec<Tensor>, TchError> {
let c_tensors =
unsafe_torch_err!(atg_dsplit_array(self.c_tensor, indices.as_ptr(), indices.len_i32()));
let mut r__ = vec![];
let mut i = 0;
loop {
let c__ = unsafe { *c_tensors.add(i) };
if c__.is_null() {
break;
}
r__.push(Tensor { c_tensor: c__ });
i += 1;
}
unsafe { libc::free(c_tensors as *mut libc::c_void) }
Ok(r__)
}
/// Stacks a slice of tensors depth-wise via `atg_dstack`. `ptr_list` builds a
/// temporary Vec of raw handles that stays alive for the whole macro call.
pub fn f_dstack<T: Borrow<Tensor>>(tensors: &[T]) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_dstack(
c_tensors.as_mut_ptr(),
ptr_list(tensors).as_ptr(),
tensors.len() as i32
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// `f_dstack` writing into the caller-supplied `out` tensor (`atg_dstack_out`).
pub fn f_dstack_out<T: Borrow<Tensor>>(
out: &Tensor,
tensors: &[T],
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_dstack_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
ptr_list(tensors).as_ptr(),
tensors.len() as i32
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Einstein-summation over `tensors`. The `equation` string and the optional
/// contraction `path` are both forwarded as pointer + length pairs to
/// `atg_einsum`.
pub fn f_einsum<T: Borrow<Tensor>>(
equation: &str,
tensors: &[T],
path: impl IntListOption,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_einsum(
c_tensors.as_mut_ptr(),
equation.as_ptr(),
equation.len() as i32,
ptr_list(tensors).as_ptr(),
tensors.len() as i32,
path.as_ptr(),
path.len_i32()
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// ELU activation; wraps `atg_elu`.
pub fn f_elu(&self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_elu(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// In-place (`&mut self`) ELU activation; wraps `atg_elu_`.
pub fn f_elu_(&mut self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_elu_(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Backward pass for ELU. Note the single generic `S` forces `alpha`, `scale`
/// and `input_scale` to share one scalar type (a constraint of the generated
/// signature). `is_result` is encoded as a C int.
pub fn f_elu_backward<S: Into<Scalar>>(
grad_output: &Tensor,
alpha: S,
scale: S,
input_scale: S,
is_result: bool,
self_or_result: &Tensor,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_elu_backward(
c_tensors.as_mut_ptr(),
grad_output.c_tensor,
alpha.into().c_scalar,
scale.into().c_scalar,
input_scale.into().c_scalar,
if is_result { 1 } else { 0 },
self_or_result.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// ELU backward writing into the caller-supplied `grad_input` tensor.
pub fn f_elu_backward_grad_input<S: Into<Scalar>>(
grad_input: &Tensor,
grad_output: &Tensor,
alpha: S,
scale: S,
input_scale: S,
is_result: bool,
self_or_result: &Tensor,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_elu_backward_grad_input(
c_tensors.as_mut_ptr(),
grad_input.c_tensor,
grad_output.c_tensor,
alpha.into().c_scalar,
scale.into().c_scalar,
input_scale.into().c_scalar,
if is_result { 1 } else { 0 },
self_or_result.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// ELU activation written into the caller-supplied `out` tensor (`atg_elu_out`).
pub fn f_elu_out(&self, out: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_elu_out(c_tensors.as_mut_ptr(), out.c_tensor, self.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Embedding lookup: gathers rows of `weight` selected by `indices`.
/// Boolean flags are encoded as C ints for the FFI.
pub fn f_embedding(
weight: &Tensor,
indices: &Tensor,
padding_idx: i64,
scale_grad_by_freq: bool,
sparse: bool,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_embedding(
c_tensors.as_mut_ptr(),
weight.c_tensor,
indices.c_tensor,
padding_idx,
if scale_grad_by_freq { 1 } else { 0 },
if sparse { 1 } else { 0 }
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Backward pass for the embedding lookup; wraps `atg_embedding_backward`.
pub fn f_embedding_backward(
grad: &Tensor,
indices: &Tensor,
num_weights: i64,
padding_idx: i64,
scale_grad_by_freq: bool,
sparse: bool,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_embedding_backward(
c_tensors.as_mut_ptr(),
grad.c_tensor,
indices.c_tensor,
num_weights,
padding_idx,
if scale_grad_by_freq { 1 } else { 0 },
if sparse { 1 } else { 0 }
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Embedding-bag: the optional `per_sample_weights` tensor is passed as a raw
/// handle or a null pointer when absent. Returns the four tensors produced by
/// `atg_embedding_bag`.
pub fn f_embedding_bag<T: Borrow<Tensor>>(
weight: &Tensor,
indices: &Tensor,
offsets: &Tensor,
scale_grad_by_freq: bool,
mode: i64,
sparse: bool,
per_sample_weights: Option<T>,
include_last_offset: bool,
) -> Result<(Tensor, Tensor, Tensor, Tensor), TchError> {
let mut c_tensors = [std::ptr::null_mut(); 4];
unsafe_torch_err!(atg_embedding_bag(
c_tensors.as_mut_ptr(),
weight.c_tensor,
indices.c_tensor,
offsets.c_tensor,
if scale_grad_by_freq { 1 } else { 0 },
mode,
if sparse { 1 } else { 0 },
per_sample_weights.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
if include_last_offset { 1 } else { 0 }
));
Ok((
Tensor { c_tensor: c_tensors[0] },
Tensor { c_tensor: c_tensors[1] },
Tensor { c_tensor: c_tensors[2] },
Tensor { c_tensor: c_tensors[3] },
))
}
/// Embedding-bag with an optional `padding_idx`. The `Option<i64>` is encoded
/// for C as a (value, is-absent flag) pair: the value defaults to 0 and the
/// trailing `i8` is 1 when the option was `None`.
pub fn f_embedding_bag_padding_idx<T: Borrow<Tensor>>(
weight: &Tensor,
indices: &Tensor,
offsets: &Tensor,
scale_grad_by_freq: bool,
mode: i64,
sparse: bool,
per_sample_weights: Option<T>,
include_last_offset: bool,
padding_idx: impl Into<Option<i64>>,
) -> Result<(Tensor, Tensor, Tensor, Tensor), TchError> {
let padding_idx = padding_idx.into();
let mut c_tensors = [std::ptr::null_mut(); 4];
unsafe_torch_err!(atg_embedding_bag_padding_idx(
c_tensors.as_mut_ptr(),
weight.c_tensor,
indices.c_tensor,
offsets.c_tensor,
if scale_grad_by_freq { 1 } else { 0 },
mode,
if sparse { 1 } else { 0 },
per_sample_weights.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
if include_last_offset { 1 } else { 0 },
padding_idx.unwrap_or(0i64),
padding_idx.is_none() as i8
));
Ok((
Tensor { c_tensor: c_tensors[0] },
Tensor { c_tensor: c_tensors[1] },
Tensor { c_tensor: c_tensors[2] },
Tensor { c_tensor: c_tensors[3] },
))
}
/// Dense gradient for the embedding lookup; wraps `atg_embedding_dense_backward`.
pub fn f_embedding_dense_backward(
grad_output: &Tensor,
indices: &Tensor,
num_weights: i64,
padding_idx: i64,
scale_grad_by_freq: bool,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_embedding_dense_backward(
c_tensors.as_mut_ptr(),
grad_output.c_tensor,
indices.c_tensor,
num_weights,
padding_idx,
if scale_grad_by_freq { 1 } else { 0 }
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Dense embedding gradient written into the caller-supplied `out` tensor.
pub fn f_embedding_dense_backward_out(
out: &Tensor,
grad_output: &Tensor,
indices: &Tensor,
num_weights: i64,
padding_idx: i64,
scale_grad_by_freq: bool,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_embedding_dense_backward_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
grad_output.c_tensor,
indices.c_tensor,
num_weights,
padding_idx,
if scale_grad_by_freq { 1 } else { 0 }
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Embedding lookup written into the caller-supplied `out` tensor.
pub fn f_embedding_out(
out: &Tensor,
weight: &Tensor,
indices: &Tensor,
padding_idx: i64,
scale_grad_by_freq: bool,
sparse: bool,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_embedding_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
weight.c_tensor,
indices.c_tensor,
padding_idx,
if scale_grad_by_freq { 1 } else { 0 },
if sparse { 1 } else { 0 }
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Renormalizes the rows of `self` selected by `indices`; wraps
/// `atg_embedding_renorm`.
pub fn f_embedding_renorm(
&self,
indices: &Tensor,
max_norm: f64,
norm_type: f64,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_embedding_renorm(
c_tensors.as_mut_ptr(),
self.c_tensor,
indices.c_tensor,
max_norm,
norm_type
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// In-place (`&mut self`) embedding renorm, forwarding to
/// `atg_embedding_renorm_`.
pub fn f_embedding_renorm_(
&mut self,
indices: &Tensor,
max_norm: f64,
norm_type: f64,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_embedding_renorm_(
c_tensors.as_mut_ptr(),
self.c_tensor,
indices.c_tensor,
max_norm,
norm_type
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Embedding renorm written into the caller-supplied `out` tensor.
pub fn f_embedding_renorm_out(
&self,
out: &Tensor,
indices: &Tensor,
max_norm: f64,
norm_type: f64,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_embedding_renorm_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
indices.c_tensor,
max_norm,
norm_type
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Sparse gradient for the embedding lookup; wraps `atg_embedding_sparse_backward`.
pub fn f_embedding_sparse_backward(
grad: &Tensor,
indices: &Tensor,
num_weights: i64,
padding_idx: i64,
scale_grad_by_freq: bool,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_embedding_sparse_backward(
c_tensors.as_mut_ptr(),
grad.c_tensor,
indices.c_tensor,
num_weights,
padding_idx,
if scale_grad_by_freq { 1 } else { 0 }
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Creates an uninitialized tensor of the given `size`. The `(Kind, Device)`
/// options pair is lowered to two C ints via `c_int()`.
pub fn f_empty(size: impl IntList, options: (Kind, Device)) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_empty(
c_tensors.as_mut_ptr(),
size.as_ptr(),
size.len_i32(),
options.0.c_int(),
options.1.c_int()
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Uninitialized tensor with the same metadata as `self`; wraps `atg_empty_like`.
pub fn f_empty_like(&self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_empty_like(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// `empty_like` written into the caller-supplied `out` tensor.
pub fn f_empty_like_out(&self, out: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_empty_like_out(c_tensors.as_mut_ptr(), out.c_tensor, self.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// `empty` written into the caller-supplied `out` tensor.
pub fn f_empty_out(out: &Tensor, size: impl IntList) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_empty_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
size.as_ptr(),
size.len_i32()
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Uninitialized tensor whose memory follows the given `physical_layout`
/// permutation; wraps `atg_empty_permuted`.
pub fn f_empty_permuted(
size: impl IntList,
physical_layout: impl IntList,
options: (Kind, Device),
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_empty_permuted(
c_tensors.as_mut_ptr(),
size.as_ptr(),
size.len_i32(),
physical_layout.as_ptr(),
physical_layout.len_i32(),
options.0.c_int(),
options.1.c_int()
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// `empty_permuted` written into the caller-supplied `out` tensor.
pub fn f_empty_permuted_out(
out: &Tensor,
size: impl IntList,
physical_layout: impl IntList,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_empty_permuted_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
size.as_ptr(),
size.len_i32(),
physical_layout.as_ptr(),
physical_layout.len_i32()
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Uninitialized quantized tensor using `qtensor`'s quantization parameters;
/// wraps `atg_empty_quantized`.
pub fn f_empty_quantized(
size: impl IntList,
qtensor: &Tensor,
options: (Kind, Device),
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_empty_quantized(
c_tensors.as_mut_ptr(),
size.as_ptr(),
size.len_i32(),
qtensor.c_tensor,
options.0.c_int(),
options.1.c_int()
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// `empty_quantized` written into the caller-supplied `out` tensor.
pub fn f_empty_quantized_out(
out: &Tensor,
size: impl IntList,
qtensor: &Tensor,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_empty_quantized_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
size.as_ptr(),
size.len_i32(),
qtensor.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Uninitialized tensor with explicit `size` and `stride`; wraps `atg_empty_strided`.
pub fn f_empty_strided(
size: impl IntList,
stride: impl IntList,
options: (Kind, Device),
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_empty_strided(
c_tensors.as_mut_ptr(),
size.as_ptr(),
size.len_i32(),
stride.as_ptr(),
stride.len_i32(),
options.0.c_int(),
options.1.c_int()
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// `empty_strided` written into the caller-supplied `out` tensor.
pub fn f_empty_strided_out(
out: &Tensor,
size: impl IntList,
stride: impl IntList,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_empty_strided_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
size.as_ptr(),
size.len_i32(),
stride.as_ptr(),
stride.len_i32()
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Element-wise equality against a scalar; wraps `atg_eq`.
pub fn f_eq<S: Into<Scalar>>(&self, other: S) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_eq(c_tensors.as_mut_ptr(), self.c_tensor, other.into().c_scalar));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// In-place (`&mut self`) scalar equality; wraps `atg_eq_`.
pub fn f_eq_<S: Into<Scalar>>(&mut self, other: S) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_eq_(c_tensors.as_mut_ptr(), self.c_tensor, other.into().c_scalar));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Scalar equality written into the caller-supplied `out` tensor.
pub fn f_eq_scalar_out<S: Into<Scalar>>(
&self,
out: &Tensor,
other: S,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_eq_scalar_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
other.into().c_scalar
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Element-wise equality against another tensor; wraps `atg_eq_tensor`.
pub fn f_eq_tensor(&self, other: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_eq_tensor(c_tensors.as_mut_ptr(), self.c_tensor, other.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// In-place (`&mut self`) tensor equality; wraps `atg_eq_tensor_`.
pub fn f_eq_tensor_(&mut self, other: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_eq_tensor_(c_tensors.as_mut_ptr(), self.c_tensor, other.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Tensor equality written into the caller-supplied `out` tensor.
pub fn f_eq_tensor_out(&self, out: &Tensor, other: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_eq_tensor_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
other.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Whole-tensor equality: `atg_equal` returns a C int which is mapped to a
/// Rust `bool` (non-zero means equal).
pub fn f_equal(&self, other: &Tensor) -> Result<bool, TchError> {
let return_;
unsafe_torch_err!(return_ = atg_equal(self.c_tensor, other.c_tensor));
Ok(return_ != 0)
}
/// Error function; wraps `atg_erf`.
pub fn f_erf(&self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_erf(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// In-place (`&mut self`) error function; wraps `atg_erf_`.
pub fn f_erf_(&mut self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_erf_(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Error function written into the caller-supplied `out` tensor.
pub fn f_erf_out(&self, out: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_erf_out(c_tensors.as_mut_ptr(), out.c_tensor, self.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Complementary error function; wraps `atg_erfc`.
pub fn f_erfc(&self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_erfc(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// In-place (`&mut self`) complementary error function; wraps `atg_erfc_`.
pub fn f_erfc_(&mut self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_erfc_(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Complementary error function written into the caller-supplied `out` tensor.
pub fn f_erfc_out(&self, out: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_erfc_out(c_tensors.as_mut_ptr(), out.c_tensor, self.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Inverse error function; wraps `atg_erfinv`.
pub fn f_erfinv(&self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_erfinv(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// In-place (`&mut self`) inverse error function; wraps `atg_erfinv_`.
pub fn f_erfinv_(&mut self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_erfinv_(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Inverse error function written into the caller-supplied `out` tensor.
pub fn f_erfinv_out(&self, out: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_erfinv_out(c_tensors.as_mut_ptr(), out.c_tensor, self.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Exponential; wraps `atg_exp`.
pub fn f_exp(&self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_exp(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Base-2 exponential; wraps `atg_exp2`.
pub fn f_exp2(&self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_exp2(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// In-place (`&mut self`) base-2 exponential; wraps `atg_exp2_`.
pub fn f_exp2_(&mut self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_exp2_(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Base-2 exponential written into the caller-supplied `out` tensor.
pub fn f_exp2_out(&self, out: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_exp2_out(c_tensors.as_mut_ptr(), out.c_tensor, self.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// In-place (`&mut self`) exponential; wraps `atg_exp_`.
pub fn f_exp_(&mut self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_exp_(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Exponential written into the caller-supplied `out` tensor.
pub fn f_exp_out(&self, out: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_exp_out(c_tensors.as_mut_ptr(), out.c_tensor, self.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Expands `self` to `size`; `implicit` is encoded as a C int for the FFI.
pub fn f_expand(&self, size: impl IntList, implicit: bool) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_expand(
c_tensors.as_mut_ptr(),
self.c_tensor,
size.as_ptr(),
size.len_i32(),
if implicit { 1 } else { 0 }
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Expands `self` to the size of `other`; wraps `atg_expand_as`.
pub fn f_expand_as(&self, other: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_expand_as(c_tensors.as_mut_ptr(), self.c_tensor, other.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Copying variant of `expand`; wraps `atg_expand_copy`.
pub fn f_expand_copy(&self, size: impl IntList, implicit: bool) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_expand_copy(
c_tensors.as_mut_ptr(),
self.c_tensor,
size.as_ptr(),
size.len_i32(),
if implicit { 1 } else { 0 }
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// `expand_copy` written into the caller-supplied `out` tensor.
pub fn f_expand_copy_out(
&self,
out: &Tensor,
size: impl IntList,
implicit: bool,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_expand_copy_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
size.as_ptr(),
size.len_i32(),
if implicit { 1 } else { 0 }
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// exp(x) - 1; wraps `atg_expm1`.
pub fn f_expm1(&self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_expm1(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// In-place (`&mut self`) exp(x) - 1; wraps `atg_expm1_`.
pub fn f_expm1_(&mut self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_expm1_(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// exp(x) - 1 written into the caller-supplied `out` tensor.
pub fn f_expm1_out(&self, out: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_expm1_out(c_tensors.as_mut_ptr(), out.c_tensor, self.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Exponential-distribution sampling with rate `lambd`; wraps `atg_exponential`.
pub fn f_exponential(&self, lambd: f64) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_exponential(c_tensors.as_mut_ptr(), self.c_tensor, lambd));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// In-place (`&mut self`) exponential sampling; wraps `atg_exponential_`.
pub fn f_exponential_(&mut self, lambd: f64) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_exponential_(c_tensors.as_mut_ptr(), self.c_tensor, lambd));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Exponential sampling written into the caller-supplied `out` tensor.
pub fn f_exponential_out(&self, out: &Tensor, lambd: f64) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_exponential_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
lambd
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// n×n identity matrix; the `(Kind, Device)` pair is lowered to two C ints.
pub fn f_eye(n: i64, options: (Kind, Device)) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_eye(c_tensors.as_mut_ptr(), n, options.0.c_int(), options.1.c_int()));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// n×m identity matrix; wraps `atg_eye_m`.
pub fn f_eye_m(n: i64, m: i64, options: (Kind, Device)) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_eye_m(
c_tensors.as_mut_ptr(),
n,
m,
options.0.c_int(),
options.1.c_int()
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// n×m identity written into the caller-supplied `out` tensor.
pub fn f_eye_m_out(out: &Tensor, n: i64, m: i64) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_eye_m_out(c_tensors.as_mut_ptr(), out.c_tensor, n, m));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// n×n identity written into the caller-supplied `out` tensor.
pub fn f_eye_out(out: &Tensor, n: i64) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_eye_out(c_tensors.as_mut_ptr(), out.c_tensor, n));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Per-channel fake quantization of `self`; wraps
/// `atg_fake_quantize_per_channel_affine`.
pub fn f_fake_quantize_per_channel_affine(
&self,
scale: &Tensor,
zero_point: &Tensor,
axis: i64,
quant_min: i64,
quant_max: i64,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_fake_quantize_per_channel_affine(
c_tensors.as_mut_ptr(),
self.c_tensor,
scale.c_tensor,
zero_point.c_tensor,
axis,
quant_min,
quant_max
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Per-channel fake quantization that also returns a mask tensor (two
/// results from the C side).
pub fn f_fake_quantize_per_channel_affine_cachemask(
&self,
scale: &Tensor,
zero_point: &Tensor,
axis: i64,
quant_min: i64,
quant_max: i64,
) -> Result<(Tensor, Tensor), TchError> {
let mut c_tensors = [std::ptr::null_mut(); 2];
unsafe_torch_err!(atg_fake_quantize_per_channel_affine_cachemask(
c_tensors.as_mut_ptr(),
self.c_tensor,
scale.c_tensor,
zero_point.c_tensor,
axis,
quant_min,
quant_max
));
Ok((Tensor { c_tensor: c_tensors[0] }, Tensor { c_tensor: c_tensors[1] }))
}
/// Backward pass for the per-channel cachemask fake quantization: applies
/// `mask` to `grad`.
pub fn f_fake_quantize_per_channel_affine_cachemask_backward(
grad: &Tensor,
mask: &Tensor,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_fake_quantize_per_channel_affine_cachemask_backward(
c_tensors.as_mut_ptr(),
grad.c_tensor,
mask.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Per-channel cachemask fake quantization writing into the caller-supplied
/// `out0`/`out1` tensors.
pub fn f_fake_quantize_per_channel_affine_cachemask_out(
&self,
out0: &Tensor,
out1: &Tensor,
scale: &Tensor,
zero_point: &Tensor,
axis: i64,
quant_min: i64,
quant_max: i64,
) -> Result<(Tensor, Tensor), TchError> {
let mut c_tensors = [std::ptr::null_mut(); 2];
unsafe_torch_err!(atg_fake_quantize_per_channel_affine_cachemask_out(
c_tensors.as_mut_ptr(),
out0.c_tensor,
out1.c_tensor,
self.c_tensor,
scale.c_tensor,
zero_point.c_tensor,
axis,
quant_min,
quant_max
));
Ok((Tensor { c_tensor: c_tensors[0] }, Tensor { c_tensor: c_tensors[1] }))
}
/// Per-tensor fake quantization with plain `f64`/`i64` parameters.
pub fn f_fake_quantize_per_tensor_affine(
&self,
scale: f64,
zero_point: i64,
quant_min: i64,
quant_max: i64,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_fake_quantize_per_tensor_affine(
c_tensors.as_mut_ptr(),
self.c_tensor,
scale,
zero_point,
quant_min,
quant_max
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Per-tensor fake quantization that also returns a mask tensor.
pub fn f_fake_quantize_per_tensor_affine_cachemask(
&self,
scale: f64,
zero_point: i64,
quant_min: i64,
quant_max: i64,
) -> Result<(Tensor, Tensor), TchError> {
let mut c_tensors = [std::ptr::null_mut(); 2];
unsafe_torch_err!(atg_fake_quantize_per_tensor_affine_cachemask(
c_tensors.as_mut_ptr(),
self.c_tensor,
scale,
zero_point,
quant_min,
quant_max
));
Ok((Tensor { c_tensor: c_tensors[0] }, Tensor { c_tensor: c_tensors[1] }))
}
/// Backward pass for the per-tensor cachemask fake quantization.
pub fn f_fake_quantize_per_tensor_affine_cachemask_backward(
grad: &Tensor,
mask: &Tensor,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_fake_quantize_per_tensor_affine_cachemask_backward(
c_tensors.as_mut_ptr(),
grad.c_tensor,
mask.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Per-tensor cachemask fake quantization writing into the caller-supplied
/// `out0`/`out1` tensors.
pub fn f_fake_quantize_per_tensor_affine_cachemask_out(
&self,
out0: &Tensor,
out1: &Tensor,
scale: f64,
zero_point: i64,
quant_min: i64,
quant_max: i64,
) -> Result<(Tensor, Tensor), TchError> {
let mut c_tensors = [std::ptr::null_mut(); 2];
unsafe_torch_err!(atg_fake_quantize_per_tensor_affine_cachemask_out(
c_tensors.as_mut_ptr(),
out0.c_tensor,
out1.c_tensor,
self.c_tensor,
scale,
zero_point,
quant_min,
quant_max
));
Ok((Tensor { c_tensor: c_tensors[0] }, Tensor { c_tensor: c_tensors[1] }))
}
/// Per-tensor fake quantization where scale and zero point are themselves
/// tensors; wraps `atg_fake_quantize_per_tensor_affine_tensor_qparams`.
pub fn f_fake_quantize_per_tensor_affine_tensor_qparams(
&self,
scale: &Tensor,
zero_point: &Tensor,
quant_min: i64,
quant_max: i64,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_fake_quantize_per_tensor_affine_tensor_qparams(
c_tensors.as_mut_ptr(),
self.c_tensor,
scale.c_tensor,
zero_point.c_tensor,
quant_min,
quant_max
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// FBGEMM fp16-weight linear layer applied to `self`; wraps
/// `atg_fbgemm_linear_fp16_weight`.
pub fn f_fbgemm_linear_fp16_weight(
&self,
packed_weight: &Tensor,
bias: &Tensor,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_fbgemm_linear_fp16_weight(
c_tensors.as_mut_ptr(),
self.c_tensor,
packed_weight.c_tensor,
bias.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// FBGEMM fp16-weight linear with fp32 activation; wraps
/// `atg_fbgemm_linear_fp16_weight_fp32_activation`.
pub fn f_fbgemm_linear_fp16_weight_fp32_activation(
&self,
packed_weight: &Tensor,
bias: &Tensor,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_fbgemm_linear_fp16_weight_fp32_activation(
c_tensors.as_mut_ptr(),
self.c_tensor,
packed_weight.c_tensor,
bias.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// FBGEMM int8-weight linear. As elsewhere in this file, the single generic
/// `S` means `weight_scale` and `weight_zero_point` must share one scalar type.
pub fn f_fbgemm_linear_int8_weight<S: Into<Scalar>>(
&self,
weight: &Tensor,
packed: &Tensor,
col_offsets: &Tensor,
weight_scale: S,
weight_zero_point: S,
bias: &Tensor,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_fbgemm_linear_int8_weight(
c_tensors.as_mut_ptr(),
self.c_tensor,
weight.c_tensor,
packed.c_tensor,
col_offsets.c_tensor,
weight_scale.into().c_scalar,
weight_zero_point.into().c_scalar,
bias.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// FBGEMM int8-weight linear with fp32 activation.
pub fn f_fbgemm_linear_int8_weight_fp32_activation<S: Into<Scalar>>(
&self,
weight: &Tensor,
packed: &Tensor,
col_offsets: &Tensor,
weight_scale: S,
weight_zero_point: S,
bias: &Tensor,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_fbgemm_linear_int8_weight_fp32_activation(
c_tensors.as_mut_ptr(),
self.c_tensor,
weight.c_tensor,
packed.c_tensor,
col_offsets.c_tensor,
weight_scale.into().c_scalar,
weight_zero_point.into().c_scalar,
bias.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Packs `self` into FBGEMM's fp16 GEMM matrix format.
pub fn f_fbgemm_pack_gemm_matrix_fp16(&self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_fbgemm_pack_gemm_matrix_fp16(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Packs `self` into FBGEMM's quantized matrix format.
pub fn f_fbgemm_pack_quantized_matrix(&self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_fbgemm_pack_quantized_matrix(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Variant of the quantized-matrix packing taking explicit `k`/`n` dimensions.
pub fn f_fbgemm_pack_quantized_matrix_kn(&self, k: i64, n: i64) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_fbgemm_pack_quantized_matrix_kn(
c_tensors.as_mut_ptr(),
self.c_tensor,
k,
n
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Feature-wise alpha dropout; `train` is encoded as a C int.
pub fn f_feature_alpha_dropout(&self, p: f64, train: bool) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_feature_alpha_dropout(
c_tensors.as_mut_ptr(),
self.c_tensor,
p,
if train { 1 } else { 0 }
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// In-place (`&mut self`) feature-wise alpha dropout.
pub fn f_feature_alpha_dropout_(&mut self, p: f64, train: bool) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_feature_alpha_dropout_(
c_tensors.as_mut_ptr(),
self.c_tensor,
p,
if train { 1 } else { 0 }
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Feature-wise dropout; `train` is encoded as a C int.
pub fn f_feature_dropout(&self, p: f64, train: bool) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_feature_dropout(
c_tensors.as_mut_ptr(),
self.c_tensor,
p,
if train { 1 } else { 0 }
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// In-place (`&mut self`) feature-wise dropout.
pub fn f_feature_dropout_(&mut self, p: f64, train: bool) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_feature_dropout_(
c_tensors.as_mut_ptr(),
self.c_tensor,
p,
if train { 1 } else { 0 }
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// 1-D FFT over dimension `dim`. The optional length `n` is encoded for C as a
/// (value, is-absent flag) pair; `norm` is forwarded as a UTF-8 pointer plus
/// byte length.
pub fn f_fft_fft(
&self,
n: impl Into<Option<i64>>,
dim: i64,
norm: &str,
) -> Result<Tensor, TchError> {
let n = n.into();
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_fft_fft(
c_tensors.as_mut_ptr(),
self.c_tensor,
n.unwrap_or(0i64),
n.is_none() as i8,
dim,
norm.as_ptr(),
norm.len() as i32
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// 2-D FFT over the given dimensions; optional sizes `s` are passed as an
/// `IntListOption` (pointer + length, possibly null).
pub fn f_fft_fft2(
&self,
s: impl IntListOption,
dim: impl IntList,
norm: &str,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_fft_fft2(
c_tensors.as_mut_ptr(),
self.c_tensor,
s.as_ptr(),
s.len_i32(),
dim.as_ptr(),
dim.len_i32(),
norm.as_ptr(),
norm.len() as i32
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// 2-D FFT written into the caller-supplied `out` tensor.
pub fn f_fft_fft2_out(
&self,
out: &Tensor,
s: impl IntListOption,
dim: impl IntList,
norm: &str,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_fft_fft2_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
s.as_ptr(),
s.len_i32(),
dim.as_ptr(),
dim.len_i32(),
norm.as_ptr(),
norm.len() as i32
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// 1-D FFT written into the caller-supplied `out` tensor.
pub fn f_fft_fft_out(
&self,
out: &Tensor,
n: impl Into<Option<i64>>,
dim: i64,
norm: &str,
) -> Result<Tensor, TchError> {
let n = n.into();
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_fft_fft_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
n.unwrap_or(0i64),
n.is_none() as i8,
dim,
norm.as_ptr(),
norm.len() as i32
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// FFT sample frequencies for signal length `n` and spacing `d`.
pub fn f_fft_fftfreq(n: i64, d: f64, options: (Kind, Device)) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_fft_fftfreq(
c_tensors.as_mut_ptr(),
n,
d,
options.0.c_int(),
options.1.c_int()
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// `fftfreq` written into the caller-supplied `out` tensor.
pub fn f_fft_fftfreq_out(out: &Tensor, n: i64, d: f64) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_fft_fftfreq_out(c_tensors.as_mut_ptr(), out.c_tensor, n, d));
Ok(Tensor { c_tensor: c_tensors[0] })
}
}
/// Fallible binding for C `atg_fft_fftn`; optional int lists `s` and `dim`
/// are lowered to (pointer, length) pairs via `IntListOption`.
pub fn f_fft_fftn(
&self,
s: impl IntListOption,
dim: impl IntListOption,
norm: &str,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_fft_fftn(
c_tensors.as_mut_ptr(),
self.c_tensor,
s.as_ptr(),
s.len_i32(),
dim.as_ptr(),
dim.len_i32(),
norm.as_ptr(),
norm.len() as i32
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for C `atg_fft_fftn_out`; `out.c_tensor` is forwarded as
/// the first tensor argument (generated `_out` variant).
pub fn f_fft_fftn_out(
&self,
out: &Tensor,
s: impl IntListOption,
dim: impl IntListOption,
norm: &str,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_fft_fftn_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
s.as_ptr(),
s.len_i32(),
dim.as_ptr(),
dim.len_i32(),
norm.as_ptr(),
norm.len() as i32
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for C `atg_fft_fftshift`; `dim` is an optional int list
/// lowered to a (pointer, length) pair.
pub fn f_fft_fftshift(&self, dim: impl IntListOption) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_fft_fftshift(
c_tensors.as_mut_ptr(),
self.c_tensor,
dim.as_ptr(),
dim.len_i32()
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for C `atg_fft_hfft`; the optional `n` is encoded as a
/// (value, is-none flag) pair, with `None` sent as `(0, 1)`.
pub fn f_fft_hfft(
&self,
n: impl Into<Option<i64>>,
dim: i64,
norm: &str,
) -> Result<Tensor, TchError> {
let n = n.into();
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_fft_hfft(
c_tensors.as_mut_ptr(),
self.c_tensor,
n.unwrap_or(0i64),
n.is_none() as i8,
dim,
norm.as_ptr(),
norm.len() as i32
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for C `atg_fft_hfft2`; list arguments are lowered to
/// (pointer, length-as-i32) pairs.
pub fn f_fft_hfft2(
&self,
s: impl IntListOption,
dim: impl IntList,
norm: &str,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_fft_hfft2(
c_tensors.as_mut_ptr(),
self.c_tensor,
s.as_ptr(),
s.len_i32(),
dim.as_ptr(),
dim.len_i32(),
norm.as_ptr(),
norm.len() as i32
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for C `atg_fft_hfft2_out`; `out.c_tensor` is forwarded as
/// the first tensor argument (generated `_out` variant).
pub fn f_fft_hfft2_out(
&self,
out: &Tensor,
s: impl IntListOption,
dim: impl IntList,
norm: &str,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_fft_hfft2_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
s.as_ptr(),
s.len_i32(),
dim.as_ptr(),
dim.len_i32(),
norm.as_ptr(),
norm.len() as i32
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for C `atg_fft_hfft_out`; optional `n` is encoded as a
/// (value, is-none flag) pair and `out.c_tensor` leads the tensor arguments.
pub fn f_fft_hfft_out(
&self,
out: &Tensor,
n: impl Into<Option<i64>>,
dim: i64,
norm: &str,
) -> Result<Tensor, TchError> {
let n = n.into();
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_fft_hfft_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
n.unwrap_or(0i64),
n.is_none() as i8,
dim,
norm.as_ptr(),
norm.len() as i32
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for C `atg_fft_hfftn`; optional int lists `s` and `dim`
/// are lowered to (pointer, length) pairs via `IntListOption`.
pub fn f_fft_hfftn(
&self,
s: impl IntListOption,
dim: impl IntListOption,
norm: &str,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_fft_hfftn(
c_tensors.as_mut_ptr(),
self.c_tensor,
s.as_ptr(),
s.len_i32(),
dim.as_ptr(),
dim.len_i32(),
norm.as_ptr(),
norm.len() as i32
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for C `atg_fft_hfftn_out`; `out.c_tensor` is forwarded as
/// the first tensor argument (generated `_out` variant).
pub fn f_fft_hfftn_out(
&self,
out: &Tensor,
s: impl IntListOption,
dim: impl IntListOption,
norm: &str,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_fft_hfftn_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
s.as_ptr(),
s.len_i32(),
dim.as_ptr(),
dim.len_i32(),
norm.as_ptr(),
norm.len() as i32
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for C `atg_fft_ifft`; the optional `n` is encoded as a
/// (value, is-none flag) pair, with `None` sent as `(0, 1)`.
pub fn f_fft_ifft(
&self,
n: impl Into<Option<i64>>,
dim: i64,
norm: &str,
) -> Result<Tensor, TchError> {
let n = n.into();
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_fft_ifft(
c_tensors.as_mut_ptr(),
self.c_tensor,
n.unwrap_or(0i64),
n.is_none() as i8,
dim,
norm.as_ptr(),
norm.len() as i32
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for C `atg_fft_ifft2`; list arguments are lowered to
/// (pointer, length-as-i32) pairs.
pub fn f_fft_ifft2(
&self,
s: impl IntListOption,
dim: impl IntList,
norm: &str,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_fft_ifft2(
c_tensors.as_mut_ptr(),
self.c_tensor,
s.as_ptr(),
s.len_i32(),
dim.as_ptr(),
dim.len_i32(),
norm.as_ptr(),
norm.len() as i32
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for C `atg_fft_ifft2_out`; `out.c_tensor` is forwarded as
/// the first tensor argument (generated `_out` variant).
pub fn f_fft_ifft2_out(
&self,
out: &Tensor,
s: impl IntListOption,
dim: impl IntList,
norm: &str,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_fft_ifft2_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
s.as_ptr(),
s.len_i32(),
dim.as_ptr(),
dim.len_i32(),
norm.as_ptr(),
norm.len() as i32
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for C `atg_fft_ifft_out`; optional `n` is encoded as a
/// (value, is-none flag) pair and `out.c_tensor` leads the tensor arguments.
pub fn f_fft_ifft_out(
&self,
out: &Tensor,
n: impl Into<Option<i64>>,
dim: i64,
norm: &str,
) -> Result<Tensor, TchError> {
let n = n.into();
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_fft_ifft_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
n.unwrap_or(0i64),
n.is_none() as i8,
dim,
norm.as_ptr(),
norm.len() as i32
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for C `atg_fft_ifftn`; optional int lists `s` and `dim`
/// are lowered to (pointer, length) pairs via `IntListOption`.
pub fn f_fft_ifftn(
&self,
s: impl IntListOption,
dim: impl IntListOption,
norm: &str,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_fft_ifftn(
c_tensors.as_mut_ptr(),
self.c_tensor,
s.as_ptr(),
s.len_i32(),
dim.as_ptr(),
dim.len_i32(),
norm.as_ptr(),
norm.len() as i32
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for C `atg_fft_ifftn_out`; `out.c_tensor` is forwarded as
/// the first tensor argument (generated `_out` variant).
pub fn f_fft_ifftn_out(
&self,
out: &Tensor,
s: impl IntListOption,
dim: impl IntListOption,
norm: &str,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_fft_ifftn_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
s.as_ptr(),
s.len_i32(),
dim.as_ptr(),
dim.len_i32(),
norm.as_ptr(),
norm.len() as i32
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for C `atg_fft_ifftshift`; `dim` is an optional int list
/// lowered to a (pointer, length) pair.
pub fn f_fft_ifftshift(&self, dim: impl IntListOption) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_fft_ifftshift(
c_tensors.as_mut_ptr(),
self.c_tensor,
dim.as_ptr(),
dim.len_i32()
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for C `atg_fft_ihfft`; the optional `n` is encoded as a
/// (value, is-none flag) pair, with `None` sent as `(0, 1)`.
pub fn f_fft_ihfft(
&self,
n: impl Into<Option<i64>>,
dim: i64,
norm: &str,
) -> Result<Tensor, TchError> {
let n = n.into();
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_fft_ihfft(
c_tensors.as_mut_ptr(),
self.c_tensor,
n.unwrap_or(0i64),
n.is_none() as i8,
dim,
norm.as_ptr(),
norm.len() as i32
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for C `atg_fft_ihfft2`; list arguments are lowered to
/// (pointer, length-as-i32) pairs.
pub fn f_fft_ihfft2(
&self,
s: impl IntListOption,
dim: impl IntList,
norm: &str,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_fft_ihfft2(
c_tensors.as_mut_ptr(),
self.c_tensor,
s.as_ptr(),
s.len_i32(),
dim.as_ptr(),
dim.len_i32(),
norm.as_ptr(),
norm.len() as i32
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for C `atg_fft_ihfft2_out`; `out.c_tensor` is forwarded
/// as the first tensor argument (generated `_out` variant).
pub fn f_fft_ihfft2_out(
&self,
out: &Tensor,
s: impl IntListOption,
dim: impl IntList,
norm: &str,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_fft_ihfft2_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
s.as_ptr(),
s.len_i32(),
dim.as_ptr(),
dim.len_i32(),
norm.as_ptr(),
norm.len() as i32
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for C `atg_fft_ihfft_out`; optional `n` is encoded as a
/// (value, is-none flag) pair and `out.c_tensor` leads the tensor arguments.
pub fn f_fft_ihfft_out(
&self,
out: &Tensor,
n: impl Into<Option<i64>>,
dim: i64,
norm: &str,
) -> Result<Tensor, TchError> {
let n = n.into();
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_fft_ihfft_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
n.unwrap_or(0i64),
n.is_none() as i8,
dim,
norm.as_ptr(),
norm.len() as i32
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for C `atg_fft_ihfftn`; optional int lists `s` and `dim`
/// are lowered to (pointer, length) pairs via `IntListOption`.
pub fn f_fft_ihfftn(
&self,
s: impl IntListOption,
dim: impl IntListOption,
norm: &str,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_fft_ihfftn(
c_tensors.as_mut_ptr(),
self.c_tensor,
s.as_ptr(),
s.len_i32(),
dim.as_ptr(),
dim.len_i32(),
norm.as_ptr(),
norm.len() as i32
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for C `atg_fft_ihfftn_out`; `out.c_tensor` is forwarded
/// as the first tensor argument (generated `_out` variant).
pub fn f_fft_ihfftn_out(
&self,
out: &Tensor,
s: impl IntListOption,
dim: impl IntListOption,
norm: &str,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_fft_ihfftn_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
s.as_ptr(),
s.len_i32(),
dim.as_ptr(),
dim.len_i32(),
norm.as_ptr(),
norm.len() as i32
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for C `atg_fft_irfft`; the optional `n` is encoded as a
/// (value, is-none flag) pair, with `None` sent as `(0, 1)`.
pub fn f_fft_irfft(
&self,
n: impl Into<Option<i64>>,
dim: i64,
norm: &str,
) -> Result<Tensor, TchError> {
let n = n.into();
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_fft_irfft(
c_tensors.as_mut_ptr(),
self.c_tensor,
n.unwrap_or(0i64),
n.is_none() as i8,
dim,
norm.as_ptr(),
norm.len() as i32
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for C `atg_fft_irfft2`; list arguments are lowered to
/// (pointer, length-as-i32) pairs.
pub fn f_fft_irfft2(
&self,
s: impl IntListOption,
dim: impl IntList,
norm: &str,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_fft_irfft2(
c_tensors.as_mut_ptr(),
self.c_tensor,
s.as_ptr(),
s.len_i32(),
dim.as_ptr(),
dim.len_i32(),
norm.as_ptr(),
norm.len() as i32
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for C `atg_fft_irfft2_out`; `out.c_tensor` is forwarded
/// as the first tensor argument (generated `_out` variant).
pub fn f_fft_irfft2_out(
&self,
out: &Tensor,
s: impl IntListOption,
dim: impl IntList,
norm: &str,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_fft_irfft2_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
s.as_ptr(),
s.len_i32(),
dim.as_ptr(),
dim.len_i32(),
norm.as_ptr(),
norm.len() as i32
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for C `atg_fft_irfft_out`; optional `n` is encoded as a
/// (value, is-none flag) pair and `out.c_tensor` leads the tensor arguments.
pub fn f_fft_irfft_out(
&self,
out: &Tensor,
n: impl Into<Option<i64>>,
dim: i64,
norm: &str,
) -> Result<Tensor, TchError> {
let n = n.into();
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_fft_irfft_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
n.unwrap_or(0i64),
n.is_none() as i8,
dim,
norm.as_ptr(),
norm.len() as i32
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for C `atg_fft_irfftn`; optional int lists `s` and `dim`
/// are lowered to (pointer, length) pairs via `IntListOption`.
pub fn f_fft_irfftn(
&self,
s: impl IntListOption,
dim: impl IntListOption,
norm: &str,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_fft_irfftn(
c_tensors.as_mut_ptr(),
self.c_tensor,
s.as_ptr(),
s.len_i32(),
dim.as_ptr(),
dim.len_i32(),
norm.as_ptr(),
norm.len() as i32
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for C `atg_fft_irfftn_out`; `out.c_tensor` is forwarded
/// as the first tensor argument (generated `_out` variant).
pub fn f_fft_irfftn_out(
&self,
out: &Tensor,
s: impl IntListOption,
dim: impl IntListOption,
norm: &str,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_fft_irfftn_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
s.as_ptr(),
s.len_i32(),
dim.as_ptr(),
dim.len_i32(),
norm.as_ptr(),
norm.len() as i32
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for C `atg_fft_rfft`; the optional `n` is encoded as a
/// (value, is-none flag) pair, with `None` sent as `(0, 1)`.
pub fn f_fft_rfft(
&self,
n: impl Into<Option<i64>>,
dim: i64,
norm: &str,
) -> Result<Tensor, TchError> {
let n = n.into();
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_fft_rfft(
c_tensors.as_mut_ptr(),
self.c_tensor,
n.unwrap_or(0i64),
n.is_none() as i8,
dim,
norm.as_ptr(),
norm.len() as i32
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for C `atg_fft_rfft2`; list arguments are lowered to
/// (pointer, length-as-i32) pairs.
pub fn f_fft_rfft2(
&self,
s: impl IntListOption,
dim: impl IntList,
norm: &str,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_fft_rfft2(
c_tensors.as_mut_ptr(),
self.c_tensor,
s.as_ptr(),
s.len_i32(),
dim.as_ptr(),
dim.len_i32(),
norm.as_ptr(),
norm.len() as i32
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for C `atg_fft_rfft2_out`; `out.c_tensor` is forwarded
/// as the first tensor argument (generated `_out` variant).
pub fn f_fft_rfft2_out(
&self,
out: &Tensor,
s: impl IntListOption,
dim: impl IntList,
norm: &str,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_fft_rfft2_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
s.as_ptr(),
s.len_i32(),
dim.as_ptr(),
dim.len_i32(),
norm.as_ptr(),
norm.len() as i32
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for C `atg_fft_rfft_out`; optional `n` is encoded as a
/// (value, is-none flag) pair and `out.c_tensor` leads the tensor arguments.
pub fn f_fft_rfft_out(
&self,
out: &Tensor,
n: impl Into<Option<i64>>,
dim: i64,
norm: &str,
) -> Result<Tensor, TchError> {
let n = n.into();
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_fft_rfft_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
n.unwrap_or(0i64),
n.is_none() as i8,
dim,
norm.as_ptr(),
norm.len() as i32
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for C `atg_fft_rfftfreq`; `(Kind, Device)` options are
/// lowered to their C integer codes. Associated function (no `self`).
pub fn f_fft_rfftfreq(n: i64, d: f64, options: (Kind, Device)) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_fft_rfftfreq(
c_tensors.as_mut_ptr(),
n,
d,
options.0.c_int(),
options.1.c_int()
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for C `atg_fft_rfftfreq_out`; `out.c_tensor` is forwarded
/// as the first tensor argument. Associated function (no `self`).
pub fn f_fft_rfftfreq_out(out: &Tensor, n: i64, d: f64) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_fft_rfftfreq_out(c_tensors.as_mut_ptr(), out.c_tensor, n, d));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for C `atg_fft_rfftn`; optional int lists `s` and `dim`
/// are lowered to (pointer, length) pairs via `IntListOption`.
pub fn f_fft_rfftn(
&self,
s: impl IntListOption,
dim: impl IntListOption,
norm: &str,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_fft_rfftn(
c_tensors.as_mut_ptr(),
self.c_tensor,
s.as_ptr(),
s.len_i32(),
dim.as_ptr(),
dim.len_i32(),
norm.as_ptr(),
norm.len() as i32
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for C `atg_fft_rfftn_out`; `out.c_tensor` is forwarded
/// as the first tensor argument (generated `_out` variant).
pub fn f_fft_rfftn_out(
&self,
out: &Tensor,
s: impl IntListOption,
dim: impl IntListOption,
norm: &str,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_fft_rfftn_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
s.as_ptr(),
s.len_i32(),
dim.as_ptr(),
dim.len_i32(),
norm.as_ptr(),
norm.len() as i32
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for C `atg_fill`; `value` is converted to a C scalar.
pub fn f_fill<S: Into<Scalar>>(&self, value: S) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_fill(c_tensors.as_mut_ptr(), self.c_tensor, value.into().c_scalar));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for C `atg_fill_` (trailing underscore: generated
/// in-place variant, hence `&mut self`).
pub fn f_fill_<S: Into<Scalar>>(&mut self, value: S) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_fill_(c_tensors.as_mut_ptr(), self.c_tensor, value.into().c_scalar));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for C `atg_fill_diagonal_`; the `wrap` bool is lowered to
/// a 0/1 integer for the C ABI.
pub fn f_fill_diagonal_<S: Into<Scalar>>(
&mut self,
fill_value: S,
wrap: bool,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_fill_diagonal_(
c_tensors.as_mut_ptr(),
self.c_tensor,
fill_value.into().c_scalar,
if wrap { 1 } else { 0 }
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for C `atg_fill_scalar_out`; `out.c_tensor` leads the
/// tensor arguments (generated `_out` variant).
pub fn f_fill_scalar_out<S: Into<Scalar>>(
&self,
out: &Tensor,
value: S,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_fill_scalar_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
value.into().c_scalar
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for C `atg_fill_tensor` (tensor-valued `value` overload).
pub fn f_fill_tensor(&self, value: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_fill_tensor(c_tensors.as_mut_ptr(), self.c_tensor, value.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for C `atg_fill_tensor_` (generated in-place variant).
pub fn f_fill_tensor_(&mut self, value: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_fill_tensor_(c_tensors.as_mut_ptr(), self.c_tensor, value.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for C `atg_fill_tensor_out`; `out.c_tensor` leads the
/// tensor arguments (generated `_out` variant).
pub fn f_fill_tensor_out(&self, out: &Tensor, value: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_fill_tensor_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
value.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for C `atg_fix`.
pub fn f_fix(&self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_fix(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for C `atg_fix_` (generated in-place variant).
pub fn f_fix_(&mut self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_fix_(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for C `atg_fix_out`; `out.c_tensor` leads the arguments.
pub fn f_fix_out(&self, out: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_fix_out(c_tensors.as_mut_ptr(), out.c_tensor, self.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for C `atg_flatten` over the `start_dim..end_dim` range.
pub fn f_flatten(&self, start_dim: i64, end_dim: i64) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_flatten(c_tensors.as_mut_ptr(), self.c_tensor, start_dim, end_dim));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for C `atg_flatten_dense_tensors`; the slice is lowered
/// to a temporary array of raw tensor pointers via `ptr_list`.
pub fn f_flatten_dense_tensors<T: Borrow<Tensor>>(tensors: &[T]) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_flatten_dense_tensors(
c_tensors.as_mut_ptr(),
ptr_list(tensors).as_ptr(),
tensors.len() as i32
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for C `atg_flip`; `dims` is lowered to a (pointer,
/// length) pair.
pub fn f_flip(&self, dims: impl IntList) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_flip(
c_tensors.as_mut_ptr(),
self.c_tensor,
dims.as_ptr(),
dims.len_i32()
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for C `atg_flip_out`; `out.c_tensor` leads the tensor
/// arguments (generated `_out` variant).
pub fn f_flip_out(&self, out: &Tensor, dims: impl IntList) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_flip_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
dims.as_ptr(),
dims.len_i32()
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for C `atg_fliplr`.
pub fn f_fliplr(&self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_fliplr(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for C `atg_flipud`.
pub fn f_flipud(&self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_flipud(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for C `atg_float_power` (tensor exponent overload).
pub fn f_float_power(&self, exponent: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_float_power(
c_tensors.as_mut_ptr(),
self.c_tensor,
exponent.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for C `atg_float_power_` (generated in-place variant,
/// scalar exponent).
pub fn f_float_power_<S: Into<Scalar>>(&mut self, exponent: S) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_float_power_(
c_tensors.as_mut_ptr(),
self.c_tensor,
exponent.into().c_scalar
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for C `atg_float_power_scalar` (scalar base, tensor
/// exponent). Associated function (no `self`).
pub fn f_float_power_scalar<S: Into<Scalar>>(
self_scalar: S,
exponent: &Tensor,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_float_power_scalar(
c_tensors.as_mut_ptr(),
self_scalar.into().c_scalar,
exponent.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for C `atg_float_power_scalar_out`; `out.c_tensor` leads
/// the arguments. Associated function (no `self`).
pub fn f_float_power_scalar_out<S: Into<Scalar>>(
out: &Tensor,
self_scalar: S,
exponent: &Tensor,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_float_power_scalar_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self_scalar.into().c_scalar,
exponent.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for C `atg_float_power_tensor_` (generated in-place
/// variant, tensor exponent).
pub fn f_float_power_tensor_(&mut self, exponent: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_float_power_tensor_(
c_tensors.as_mut_ptr(),
self.c_tensor,
exponent.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for C `atg_float_power_tensor_scalar` (tensor base,
/// scalar exponent).
pub fn f_float_power_tensor_scalar<S: Into<Scalar>>(
&self,
exponent: S,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_float_power_tensor_scalar(
c_tensors.as_mut_ptr(),
self.c_tensor,
exponent.into().c_scalar
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for C `atg_float_power_tensor_scalar_out`; `out.c_tensor`
/// leads the tensor arguments (generated `_out` variant).
pub fn f_float_power_tensor_scalar_out<S: Into<Scalar>>(
&self,
out: &Tensor,
exponent: S,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_float_power_tensor_scalar_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
exponent.into().c_scalar
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for C `atg_float_power_tensor_tensor_out`; `out.c_tensor`
/// leads the tensor arguments (generated `_out` variant).
pub fn f_float_power_tensor_tensor_out(
&self,
out: &Tensor,
exponent: &Tensor,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_float_power_tensor_tensor_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
exponent.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for C `atg_floor`.
pub fn f_floor(&self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_floor(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for C `atg_floor_` (generated in-place variant).
pub fn f_floor_(&mut self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_floor_(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for C `atg_floor_divide` (tensor divisor overload).
pub fn f_floor_divide(&self, other: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_floor_divide(c_tensors.as_mut_ptr(), self.c_tensor, other.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for C `atg_floor_divide_` (generated in-place variant).
pub fn f_floor_divide_(&mut self, other: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_floor_divide_(c_tensors.as_mut_ptr(), self.c_tensor, other.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for C `atg_floor_divide_out`; `out.c_tensor` leads the
/// tensor arguments (generated `_out` variant).
pub fn f_floor_divide_out(&self, out: &Tensor, other: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_floor_divide_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
other.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for C `atg_floor_divide_scalar` (scalar divisor overload).
pub fn f_floor_divide_scalar<S: Into<Scalar>>(&self, other: S) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_floor_divide_scalar(
c_tensors.as_mut_ptr(),
self.c_tensor,
other.into().c_scalar
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for C `atg_floor_divide_scalar_` (generated in-place
/// variant, scalar divisor).
pub fn f_floor_divide_scalar_<S: Into<Scalar>>(
&mut self,
other: S,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_floor_divide_scalar_(
c_tensors.as_mut_ptr(),
self.c_tensor,
other.into().c_scalar
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for C `atg_floor_out`; `out.c_tensor` leads the arguments.
pub fn f_floor_out(&self, out: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_floor_out(c_tensors.as_mut_ptr(), out.c_tensor, self.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for C `atg_fmax`.
pub fn f_fmax(&self, other: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_fmax(c_tensors.as_mut_ptr(), self.c_tensor, other.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for C `atg_fmax_out`; `out.c_tensor` leads the tensor
/// arguments (generated `_out` variant).
pub fn f_fmax_out(&self, out: &Tensor, other: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_fmax_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
other.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for C `atg_fmin`.
pub fn f_fmin(&self, other: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_fmin(c_tensors.as_mut_ptr(), self.c_tensor, other.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for C `atg_fmin_out`; `out.c_tensor` leads the tensor
/// arguments (generated `_out` variant).
pub fn f_fmin_out(&self, out: &Tensor, other: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_fmin_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
other.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for C `atg_fmod` (scalar divisor overload).
pub fn f_fmod<S: Into<Scalar>>(&self, other: S) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_fmod(c_tensors.as_mut_ptr(), self.c_tensor, other.into().c_scalar));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for C `atg_fmod_` (generated in-place variant).
pub fn f_fmod_<S: Into<Scalar>>(&mut self, other: S) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_fmod_(c_tensors.as_mut_ptr(), self.c_tensor, other.into().c_scalar));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for C `atg_fmod_scalar_out`; `out.c_tensor` leads the
/// tensor arguments (generated `_out` variant).
pub fn f_fmod_scalar_out<S: Into<Scalar>>(
&self,
out: &Tensor,
other: S,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_fmod_scalar_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
other.into().c_scalar
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for C `atg_fmod_tensor` (tensor divisor overload).
pub fn f_fmod_tensor(&self, other: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_fmod_tensor(c_tensors.as_mut_ptr(), self.c_tensor, other.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for C `atg_fmod_tensor_` (generated in-place variant).
pub fn f_fmod_tensor_(&mut self, other: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_fmod_tensor_(c_tensors.as_mut_ptr(), self.c_tensor, other.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for C `atg_fmod_tensor_out`; `out.c_tensor` leads the
/// tensor arguments (generated `_out` variant).
pub fn f_fmod_tensor_out(&self, out: &Tensor, other: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_fmod_tensor_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
other.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for C `atg_frac`.
pub fn f_frac(&self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_frac(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for C `atg_frac_` (generated in-place variant).
pub fn f_frac_(&mut self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_frac_(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for C `atg_frac_out`; `out.c_tensor` leads the arguments.
pub fn f_frac_out(&self, out: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_frac_out(c_tensors.as_mut_ptr(), out.c_tensor, self.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for C `atg_fractional_max_pool2d`; the C call fills two
/// output slots which are returned as a tensor pair.
pub fn f_fractional_max_pool2d(
&self,
kernel_size: impl IntList,
output_size: impl IntList,
random_samples: &Tensor,
) -> Result<(Tensor, Tensor), TchError> {
let mut c_tensors = [std::ptr::null_mut(); 2];
unsafe_torch_err!(atg_fractional_max_pool2d(
c_tensors.as_mut_ptr(),
self.c_tensor,
kernel_size.as_ptr(),
kernel_size.len_i32(),
output_size.as_ptr(),
output_size.len_i32(),
random_samples.c_tensor
));
Ok((Tensor { c_tensor: c_tensors[0] }, Tensor { c_tensor: c_tensors[1] }))
}
/// Fallible binding for C `atg_fractional_max_pool2d_backward`; note the C
/// argument order places `grad_output` before `self`.
pub fn f_fractional_max_pool2d_backward(
&self,
grad_output: &Tensor,
kernel_size: impl IntList,
output_size: impl IntList,
indices: &Tensor,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_fractional_max_pool2d_backward(
c_tensors.as_mut_ptr(),
grad_output.c_tensor,
self.c_tensor,
kernel_size.as_ptr(),
kernel_size.len_i32(),
output_size.as_ptr(),
output_size.len_i32(),
indices.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for C `atg_fractional_max_pool2d_backward_grad_input`;
/// `grad_input` and `grad_output` precede `self` in the C argument order.
pub fn f_fractional_max_pool2d_backward_grad_input(
&self,
grad_input: &Tensor,
grad_output: &Tensor,
kernel_size: impl IntList,
output_size: impl IntList,
indices: &Tensor,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_fractional_max_pool2d_backward_grad_input(
c_tensors.as_mut_ptr(),
grad_input.c_tensor,
grad_output.c_tensor,
self.c_tensor,
kernel_size.as_ptr(),
kernel_size.len_i32(),
output_size.as_ptr(),
output_size.len_i32(),
indices.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for C `atg_fractional_max_pool2d_output`; `output` and
/// `indices` precede `self` and two result tensors are returned.
pub fn f_fractional_max_pool2d_output(
&self,
output: &Tensor,
indices: &Tensor,
kernel_size: impl IntList,
output_size: impl IntList,
random_samples: &Tensor,
) -> Result<(Tensor, Tensor), TchError> {
let mut c_tensors = [std::ptr::null_mut(); 2];
unsafe_torch_err!(atg_fractional_max_pool2d_output(
c_tensors.as_mut_ptr(),
output.c_tensor,
indices.c_tensor,
self.c_tensor,
kernel_size.as_ptr(),
kernel_size.len_i32(),
output_size.as_ptr(),
output_size.len_i32(),
random_samples.c_tensor
));
Ok((Tensor { c_tensor: c_tensors[0] }, Tensor { c_tensor: c_tensors[1] }))
}
/// Fallible binding for C `atg_fractional_max_pool3d`; the C call fills two
/// output slots which are returned as a tensor pair.
pub fn f_fractional_max_pool3d(
&self,
kernel_size: impl IntList,
output_size: impl IntList,
random_samples: &Tensor,
) -> Result<(Tensor, Tensor), TchError> {
let mut c_tensors = [std::ptr::null_mut(); 2];
unsafe_torch_err!(atg_fractional_max_pool3d(
c_tensors.as_mut_ptr(),
self.c_tensor,
kernel_size.as_ptr(),
kernel_size.len_i32(),
output_size.as_ptr(),
output_size.len_i32(),
random_samples.c_tensor
));
Ok((Tensor { c_tensor: c_tensors[0] }, Tensor { c_tensor: c_tensors[1] }))
}
/// Fallible binding for C `atg_fractional_max_pool3d_backward`; note the C
/// argument order places `grad_output` before `self`.
pub fn f_fractional_max_pool3d_backward(
&self,
grad_output: &Tensor,
kernel_size: impl IntList,
output_size: impl IntList,
indices: &Tensor,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_fractional_max_pool3d_backward(
c_tensors.as_mut_ptr(),
grad_output.c_tensor,
self.c_tensor,
kernel_size.as_ptr(),
kernel_size.len_i32(),
output_size.as_ptr(),
output_size.len_i32(),
indices.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for C `atg_fractional_max_pool3d_backward_grad_input`;
/// `grad_input` and `grad_output` precede `self` in the C argument order.
pub fn f_fractional_max_pool3d_backward_grad_input(
&self,
grad_input: &Tensor,
grad_output: &Tensor,
kernel_size: impl IntList,
output_size: impl IntList,
indices: &Tensor,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_fractional_max_pool3d_backward_grad_input(
c_tensors.as_mut_ptr(),
grad_input.c_tensor,
grad_output.c_tensor,
self.c_tensor,
kernel_size.as_ptr(),
kernel_size.len_i32(),
output_size.as_ptr(),
output_size.len_i32(),
indices.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for C `atg_fractional_max_pool3d_output`; `output` and
/// `indices` precede `self` and two result tensors are returned.
pub fn f_fractional_max_pool3d_output(
&self,
output: &Tensor,
indices: &Tensor,
kernel_size: impl IntList,
output_size: impl IntList,
random_samples: &Tensor,
) -> Result<(Tensor, Tensor), TchError> {
let mut c_tensors = [std::ptr::null_mut(); 2];
unsafe_torch_err!(atg_fractional_max_pool3d_output(
c_tensors.as_mut_ptr(),
output.c_tensor,
indices.c_tensor,
self.c_tensor,
kernel_size.as_ptr(),
kernel_size.len_i32(),
output_size.as_ptr(),
output_size.len_i32(),
random_samples.c_tensor
));
Ok((Tensor { c_tensor: c_tensors[0] }, Tensor { c_tensor: c_tensors[1] }))
}
/// Fallible binding for C `atg_frexp`; returns the two result tensors.
pub fn f_frexp(&self) -> Result<(Tensor, Tensor), TchError> {
let mut c_tensors = [std::ptr::null_mut(); 2];
unsafe_torch_err!(atg_frexp(c_tensors.as_mut_ptr(), self.c_tensor));
Ok((Tensor { c_tensor: c_tensors[0] }, Tensor { c_tensor: c_tensors[1] }))
}
/// Fallible binding for C `atg_frexp_tensor_out`; `mantissa` and `exponent`
/// precede `self` in the C argument order and two tensors are returned.
pub fn f_frexp_tensor_out(
&self,
mantissa: &Tensor,
exponent: &Tensor,
) -> Result<(Tensor, Tensor), TchError> {
let mut c_tensors = [std::ptr::null_mut(); 2];
unsafe_torch_err!(atg_frexp_tensor_out(
c_tensors.as_mut_ptr(),
mantissa.c_tensor,
exponent.c_tensor,
self.c_tensor
));
Ok((Tensor { c_tensor: c_tensors[0] }, Tensor { c_tensor: c_tensors[1] }))
}
/// Fallible binding for C `atg_frobenius_norm`; the `keepdim` bool is lowered
/// to a 0/1 integer for the C ABI.
pub fn f_frobenius_norm(&self, dim: impl IntList, keepdim: bool) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_frobenius_norm(
c_tensors.as_mut_ptr(),
self.c_tensor,
dim.as_ptr(),
dim.len_i32(),
if keepdim { 1 } else { 0 }
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for C `atg_frobenius_norm_out`; `out.c_tensor` leads the
/// tensor arguments and `keepdim` is lowered to a 0/1 integer.
pub fn f_frobenius_norm_out(
&self,
out: &Tensor,
dim: impl IntList,
keepdim: bool,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_frobenius_norm_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
dim.as_ptr(),
dim.len_i32(),
if keepdim { 1 } else { 0 }
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for C `atg_from_file`; `filename` is passed as
/// (ptr, byte-len), the optional `size` as a (value, is-none flag) pair, and
/// `(Kind, Device)` options as their C integer codes. Associated function.
pub fn f_from_file(
filename: &str,
shared: bool,
size: impl Into<Option<i64>>,
options: (Kind, Device),
) -> Result<Tensor, TchError> {
let size = size.into();
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_from_file(
c_tensors.as_mut_ptr(),
filename.as_ptr(),
filename.len() as i32,
if shared { 1 } else { 0 },
size.unwrap_or(0i64),
size.is_none() as i8,
options.0.c_int(),
options.1.c_int()
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for C `atg_from_file_out`; `out.c_tensor` leads the
/// arguments and optional `size` is encoded as a (value, is-none flag) pair.
pub fn f_from_file_out(
out: &Tensor,
filename: &str,
shared: bool,
size: impl Into<Option<i64>>,
) -> Result<Tensor, TchError> {
let size = size.into();
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_from_file_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
filename.as_ptr(),
filename.len() as i32,
if shared { 1 } else { 0 },
size.unwrap_or(0i64),
size.is_none() as i8
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for C `atg_full`; `size` is lowered to a (pointer,
/// length) pair and `(Kind, Device)` to C integer codes. Associated function.
pub fn f_full<S: Into<Scalar>>(
size: impl IntList,
fill_value: S,
options: (Kind, Device),
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_full(
c_tensors.as_mut_ptr(),
size.as_ptr(),
size.len_i32(),
fill_value.into().c_scalar,
options.0.c_int(),
options.1.c_int()
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for C `atg_full_like`; `fill_value` is converted to a C
/// scalar.
pub fn f_full_like<S: Into<Scalar>>(&self, fill_value: S) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_full_like(
c_tensors.as_mut_ptr(),
self.c_tensor,
fill_value.into().c_scalar
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Variant of `f_full_like` writing into `out` (C `atg_full_like_out`).
pub fn f_full_like_out<S: Into<Scalar>>(
&self,
out: &Tensor,
fill_value: S,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_full_like_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
fill_value.into().c_scalar
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Variant of `f_full` writing into `out` (C `atg_full_out`).
pub fn f_full_out<S: Into<Scalar>>(
out: &Tensor,
size: impl IntList,
fill_value: S,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_full_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
size.as_ptr(),
size.len_i32(),
fill_value.into().c_scalar
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible wrapper for C `atg_fused_moving_avg_obs_fake_quant`; all tensor arguments are
/// forwarded as raw handles and the two trailing bools become 0/1 C flags.
pub fn f_fused_moving_avg_obs_fake_quant(
&self,
observer_on: &Tensor,
fake_quant_on: &Tensor,
running_min: &Tensor,
running_max: &Tensor,
scale: &Tensor,
zero_point: &Tensor,
averaging_const: f64,
quant_min: i64,
quant_max: i64,
ch_axis: i64,
per_row_fake_quant: bool,
symmetric_quant: bool,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_fused_moving_avg_obs_fake_quant(
c_tensors.as_mut_ptr(),
self.c_tensor,
observer_on.c_tensor,
fake_quant_on.c_tensor,
running_min.c_tensor,
running_max.c_tensor,
scale.c_tensor,
zero_point.c_tensor,
averaging_const,
quant_min,
quant_max,
ch_axis,
if per_row_fake_quant { 1 } else { 0 },
if symmetric_quant { 1 } else { 0 }
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible wrapper for C `atg_gather`: gather along `dim` using `index`.
pub fn f_gather(
&self,
dim: i64,
index: &Tensor,
sparse_grad: bool,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_gather(
c_tensors.as_mut_ptr(),
self.c_tensor,
dim,
index.c_tensor,
if sparse_grad { 1 } else { 0 }
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible wrapper for C `atg_gather_backward`; note the C call takes `grad` first, then `self`.
pub fn f_gather_backward(
&self,
grad: &Tensor,
dim: i64,
index: &Tensor,
sparse_grad: bool,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_gather_backward(
c_tensors.as_mut_ptr(),
grad.c_tensor,
self.c_tensor,
dim,
index.c_tensor,
if sparse_grad { 1 } else { 0 }
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Variant of `f_gather` writing into `out` (C `atg_gather_out`).
pub fn f_gather_out(
&self,
out: &Tensor,
dim: i64,
index: &Tensor,
sparse_grad: bool,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_gather_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
dim,
index.c_tensor,
if sparse_grad { 1 } else { 0 }
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible element-wise gcd with `other` via C `atg_gcd`.
pub fn f_gcd(&self, other: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_gcd(c_tensors.as_mut_ptr(), self.c_tensor, other.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// In-place variant of `f_gcd` (C `atg_gcd_`); takes `&mut self` and also returns a result handle.
pub fn f_gcd_(&mut self, other: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_gcd_(c_tensors.as_mut_ptr(), self.c_tensor, other.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Variant of `f_gcd` writing into `out` (C `atg_gcd_out`).
pub fn f_gcd_out(&self, out: &Tensor, other: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_gcd_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
other.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible greater-or-equal comparison against a scalar via C `atg_ge`.
pub fn f_ge<S: Into<Scalar>>(&self, other: S) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_ge(c_tensors.as_mut_ptr(), self.c_tensor, other.into().c_scalar));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// In-place variant of `f_ge` (C `atg_ge_`).
pub fn f_ge_<S: Into<Scalar>>(&mut self, other: S) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_ge_(c_tensors.as_mut_ptr(), self.c_tensor, other.into().c_scalar));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Scalar `ge` writing into `out` (C `atg_ge_scalar_out`).
pub fn f_ge_scalar_out<S: Into<Scalar>>(
&self,
out: &Tensor,
other: S,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_ge_scalar_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
other.into().c_scalar
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Tensor-tensor `ge` comparison via C `atg_ge_tensor`.
pub fn f_ge_tensor(&self, other: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_ge_tensor(c_tensors.as_mut_ptr(), self.c_tensor, other.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// In-place tensor-tensor `ge` (C `atg_ge_tensor_`).
pub fn f_ge_tensor_(&mut self, other: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_ge_tensor_(c_tensors.as_mut_ptr(), self.c_tensor, other.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Tensor-tensor `ge` writing into `out` (C `atg_ge_tensor_out`).
pub fn f_ge_tensor_out(&self, out: &Tensor, other: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_ge_tensor_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
other.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible GELU via C `atg_gelu`; `approximate` is passed as raw pointer + byte length
/// (not NUL-terminated), matching the generated string convention.
pub fn f_gelu(&self, approximate: &str) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_gelu(
c_tensors.as_mut_ptr(),
self.c_tensor,
approximate.as_ptr(),
approximate.len() as i32
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// In-place variant of `f_gelu` (C `atg_gelu_`).
pub fn f_gelu_(&mut self, approximate: &str) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_gelu_(
c_tensors.as_mut_ptr(),
self.c_tensor,
approximate.as_ptr(),
approximate.len() as i32
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// GELU backward pass via C `atg_gelu_backward`; `grad_output` precedes `self` in the C call.
pub fn f_gelu_backward(
&self,
grad_output: &Tensor,
approximate: &str,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_gelu_backward(
c_tensors.as_mut_ptr(),
grad_output.c_tensor,
self.c_tensor,
approximate.as_ptr(),
approximate.len() as i32
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// GELU backward writing into `grad_input` (C `atg_gelu_backward_grad_input`).
pub fn f_gelu_backward_grad_input(
&self,
grad_input: &Tensor,
grad_output: &Tensor,
approximate: &str,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_gelu_backward_grad_input(
c_tensors.as_mut_ptr(),
grad_input.c_tensor,
grad_output.c_tensor,
self.c_tensor,
approximate.as_ptr(),
approximate.len() as i32
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Variant of `f_gelu` writing into `out` (C `atg_gelu_out`).
pub fn f_gelu_out(&self, out: &Tensor, approximate: &str) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_gelu_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
approximate.as_ptr(),
approximate.len() as i32
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible wrapper for C `atg_geometric` with probability parameter `p`.
pub fn f_geometric(&self, p: f64) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_geometric(c_tensors.as_mut_ptr(), self.c_tensor, p));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// In-place variant of `f_geometric` (C `atg_geometric_`).
pub fn f_geometric_(&mut self, p: f64) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_geometric_(c_tensors.as_mut_ptr(), self.c_tensor, p));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Variant of `f_geometric` writing into `out` (C `atg_geometric_out`).
pub fn f_geometric_out(&self, out: &Tensor, p: f64) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_geometric_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
p
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// QR-related factorization via C `atg_geqrf`; the C op fills two output handles, returned as a pair.
pub fn f_geqrf(&self) -> Result<(Tensor, Tensor), TchError> {
let mut c_tensors = [std::ptr::null_mut(); 2];
unsafe_torch_err!(atg_geqrf(c_tensors.as_mut_ptr(), self.c_tensor));
Ok((Tensor { c_tensor: c_tensors[0] }, Tensor { c_tensor: c_tensors[1] }))
}
/// Variant of `f_geqrf` writing into the provided `a` and `tau` tensors (C `atg_geqrf_a`).
pub fn f_geqrf_a(&self, a: &Tensor, tau: &Tensor) -> Result<(Tensor, Tensor), TchError> {
let mut c_tensors = [std::ptr::null_mut(); 2];
unsafe_torch_err!(atg_geqrf_a(
c_tensors.as_mut_ptr(),
a.c_tensor,
tau.c_tensor,
self.c_tensor
));
Ok((Tensor { c_tensor: c_tensors[0] }, Tensor { c_tensor: c_tensors[1] }))
}
/// Outer-product style op via C `atg_ger` with `vec2`.
pub fn f_ger(&self, vec2: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_ger(c_tensors.as_mut_ptr(), self.c_tensor, vec2.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Variant of `f_ger` writing into `out` (C `atg_ger_out`).
pub fn f_ger_out(&self, out: &Tensor, vec2: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_ger_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
vec2.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Gated linear unit along `dim` via C `atg_glu`.
pub fn f_glu(&self, dim: i64) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_glu(c_tensors.as_mut_ptr(), self.c_tensor, dim));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// GLU backward via C `atg_glu_backward`; `grad_output` precedes `self` in the C call.
pub fn f_glu_backward(&self, grad_output: &Tensor, dim: i64) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_glu_backward(
c_tensors.as_mut_ptr(),
grad_output.c_tensor,
self.c_tensor,
dim
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// GLU backward writing into `grad_input` (C `atg_glu_backward_grad_input`).
pub fn f_glu_backward_grad_input(
&self,
grad_input: &Tensor,
grad_output: &Tensor,
dim: i64,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_glu_backward_grad_input(
c_tensors.as_mut_ptr(),
grad_input.c_tensor,
grad_output.c_tensor,
self.c_tensor,
dim
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Associated fn wrapping C `atg_glu_backward_jvp` (JVP of the GLU backward); all tensor
/// arguments forwarded as raw handles in declaration order.
pub fn f_glu_backward_jvp(
grad_x: &Tensor,
grad_glu: &Tensor,
x: &Tensor,
dgrad_glu: &Tensor,
dx: &Tensor,
dim: i64,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_glu_backward_jvp(
c_tensors.as_mut_ptr(),
grad_x.c_tensor,
grad_glu.c_tensor,
x.c_tensor,
dgrad_glu.c_tensor,
dx.c_tensor,
dim
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Variant of `f_glu_backward_jvp` writing into `out` (C `atg_glu_backward_jvp_out`).
pub fn f_glu_backward_jvp_out(
out: &Tensor,
grad_x: &Tensor,
grad_glu: &Tensor,
x: &Tensor,
dgrad_glu: &Tensor,
dx: &Tensor,
dim: i64,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_glu_backward_jvp_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
grad_x.c_tensor,
grad_glu.c_tensor,
x.c_tensor,
dgrad_glu.c_tensor,
dx.c_tensor,
dim
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Associated fn wrapping C `atg_glu_jvp` (JVP of GLU).
pub fn f_glu_jvp(glu: &Tensor, x: &Tensor, dx: &Tensor, dim: i64) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_glu_jvp(
c_tensors.as_mut_ptr(),
glu.c_tensor,
x.c_tensor,
dx.c_tensor,
dim
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Variant of `f_glu_jvp` writing into `out` (C `atg_glu_jvp_out`).
pub fn f_glu_jvp_out(
out: &Tensor,
glu: &Tensor,
x: &Tensor,
dx: &Tensor,
dim: i64,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_glu_jvp_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
glu.c_tensor,
x.c_tensor,
dx.c_tensor,
dim
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Variant of `f_glu` writing into `out` (C `atg_glu_out`).
pub fn f_glu_out(&self, out: &Tensor, dim: i64) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_glu_out(c_tensors.as_mut_ptr(), out.c_tensor, self.c_tensor, dim));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible accessor for this tensor's gradient via C `atg_grad`.
pub fn f_grad(&self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_grad(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Scalar greater-than comparison via C `atg_greater`.
pub fn f_greater<S: Into<Scalar>>(&self, other: S) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_greater(
c_tensors.as_mut_ptr(),
self.c_tensor,
other.into().c_scalar
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// In-place variant of `f_greater` (C `atg_greater_`).
pub fn f_greater_<S: Into<Scalar>>(&mut self, other: S) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_greater_(
c_tensors.as_mut_ptr(),
self.c_tensor,
other.into().c_scalar
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Scalar greater-or-equal comparison via C `atg_greater_equal`.
pub fn f_greater_equal<S: Into<Scalar>>(&self, other: S) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_greater_equal(
c_tensors.as_mut_ptr(),
self.c_tensor,
other.into().c_scalar
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// In-place variant of `f_greater_equal` (C `atg_greater_equal_`).
pub fn f_greater_equal_<S: Into<Scalar>>(&mut self, other: S) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_greater_equal_(
c_tensors.as_mut_ptr(),
self.c_tensor,
other.into().c_scalar
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Scalar `greater_equal` writing into `out` (C `atg_greater_equal_scalar_out`).
pub fn f_greater_equal_scalar_out<S: Into<Scalar>>(
&self,
out: &Tensor,
other: S,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_greater_equal_scalar_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
other.into().c_scalar
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Tensor-tensor `greater_equal` via C `atg_greater_equal_tensor`.
pub fn f_greater_equal_tensor(&self, other: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_greater_equal_tensor(
c_tensors.as_mut_ptr(),
self.c_tensor,
other.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// In-place tensor-tensor `greater_equal` (C `atg_greater_equal_tensor_`).
pub fn f_greater_equal_tensor_(&mut self, other: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_greater_equal_tensor_(
c_tensors.as_mut_ptr(),
self.c_tensor,
other.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Tensor-tensor `greater_equal` writing into `out` (C `atg_greater_equal_tensor_out`).
pub fn f_greater_equal_tensor_out(
&self,
out: &Tensor,
other: &Tensor,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_greater_equal_tensor_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
other.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Scalar `greater` writing into `out` (C `atg_greater_scalar_out`).
pub fn f_greater_scalar_out<S: Into<Scalar>>(
&self,
out: &Tensor,
other: S,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_greater_scalar_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
other.into().c_scalar
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Tensor-tensor `greater` via C `atg_greater_tensor`.
pub fn f_greater_tensor(&self, other: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_greater_tensor(
c_tensors.as_mut_ptr(),
self.c_tensor,
other.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// In-place tensor-tensor `greater` (C `atg_greater_tensor_`).
pub fn f_greater_tensor_(&mut self, other: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_greater_tensor_(
c_tensors.as_mut_ptr(),
self.c_tensor,
other.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Tensor-tensor `greater` writing into `out` (C `atg_greater_tensor_out`).
pub fn f_greater_tensor_out(&self, out: &Tensor, other: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_greater_tensor_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
other.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible wrapper for C `atg_grid_sampler`; mode arguments are raw integer codes defined
/// by the C API, `align_corners` becomes a 0/1 flag.
pub fn f_grid_sampler(
&self,
grid: &Tensor,
interpolation_mode: i64,
padding_mode: i64,
align_corners: bool,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_grid_sampler(
c_tensors.as_mut_ptr(),
self.c_tensor,
grid.c_tensor,
interpolation_mode,
padding_mode,
if align_corners { 1 } else { 0 }
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// 2-D specialization of the grid sampler (C `atg_grid_sampler_2d`); same argument encoding
/// as `f_grid_sampler`.
pub fn f_grid_sampler_2d(
&self,
grid: &Tensor,
interpolation_mode: i64,
padding_mode: i64,
align_corners: bool,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_grid_sampler_2d(
c_tensors.as_mut_ptr(),
self.c_tensor,
grid.c_tensor,
interpolation_mode,
padding_mode,
if align_corners { 1 } else { 0 }
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Variant of `f_grid_sampler_2d` writing into `out` (C `atg_grid_sampler_2d_out`).
pub fn f_grid_sampler_2d_out(
&self,
out: &Tensor,
grid: &Tensor,
interpolation_mode: i64,
padding_mode: i64,
align_corners: bool,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_grid_sampler_2d_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
grid.c_tensor,
interpolation_mode,
padding_mode,
if align_corners { 1 } else { 0 }
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// 3-D specialization of the grid sampler (C `atg_grid_sampler_3d`).
pub fn f_grid_sampler_3d(
&self,
grid: &Tensor,
interpolation_mode: i64,
padding_mode: i64,
align_corners: bool,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_grid_sampler_3d(
c_tensors.as_mut_ptr(),
self.c_tensor,
grid.c_tensor,
interpolation_mode,
padding_mode,
if align_corners { 1 } else { 0 }
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Variant of `f_grid_sampler_3d` writing into `out` (C `atg_grid_sampler_3d_out`).
pub fn f_grid_sampler_3d_out(
&self,
out: &Tensor,
grid: &Tensor,
interpolation_mode: i64,
padding_mode: i64,
align_corners: bool,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_grid_sampler_3d_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
grid.c_tensor,
interpolation_mode,
padding_mode,
if align_corners { 1 } else { 0 }
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible group normalization via C `atg_group_norm`; absent optional `weight`/`bias`
/// tensors are encoded as null pointers on the C side.
pub fn f_group_norm<T: Borrow<Tensor>>(
&self,
num_groups: i64,
weight: Option<T>,
bias: Option<T>,
eps: f64,
cudnn_enabled: bool,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_group_norm(
c_tensors.as_mut_ptr(),
self.c_tensor,
num_groups,
weight.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
bias.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
eps,
if cudnn_enabled { 1 } else { 0 }
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible GRU via C `atg_gru`; `params` is flattened into a raw pointer array with
/// `ptr_list`, and the C op fills two output handles returned as a pair.
pub fn f_gru<T: Borrow<Tensor>>(
&self,
hx: &Tensor,
params: &[T],
has_biases: bool,
num_layers: i64,
dropout: f64,
train: bool,
bidirectional: bool,
batch_first: bool,
) -> Result<(Tensor, Tensor), TchError> {
let mut c_tensors = [std::ptr::null_mut(); 2];
unsafe_torch_err!(atg_gru(
c_tensors.as_mut_ptr(),
self.c_tensor,
hx.c_tensor,
ptr_list(params).as_ptr(),
params.len() as i32,
if has_biases { 1 } else { 0 },
num_layers,
dropout,
if train { 1 } else { 0 },
if bidirectional { 1 } else { 0 },
if batch_first { 1 } else { 0 }
));
Ok((Tensor { c_tensor: c_tensors[0] }, Tensor { c_tensor: c_tensors[1] }))
}
/// Single GRU cell step via C `atg_gru_cell`; missing bias tensors are passed as null pointers.
pub fn f_gru_cell<T: Borrow<Tensor>>(
&self,
hx: &Tensor,
w_ih: &Tensor,
w_hh: &Tensor,
b_ih: Option<T>,
b_hh: Option<T>,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_gru_cell(
c_tensors.as_mut_ptr(),
self.c_tensor,
hx.c_tensor,
w_ih.c_tensor,
w_hh.c_tensor,
b_ih.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
b_hh.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor)
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Packed-sequence GRU variant via C `atg_gru_data` (associated fn; takes `data` and
/// `batch_sizes` instead of `self`); returns a pair of output tensors.
pub fn f_gru_data<T: Borrow<Tensor>>(
data: &Tensor,
batch_sizes: &Tensor,
hx: &Tensor,
params: &[T],
has_biases: bool,
num_layers: i64,
dropout: f64,
train: bool,
bidirectional: bool,
) -> Result<(Tensor, Tensor), TchError> {
let mut c_tensors = [std::ptr::null_mut(); 2];
unsafe_torch_err!(atg_gru_data(
c_tensors.as_mut_ptr(),
data.c_tensor,
batch_sizes.c_tensor,
hx.c_tensor,
ptr_list(params).as_ptr(),
params.len() as i32,
if has_biases { 1 } else { 0 },
num_layers,
dropout,
if train { 1 } else { 0 },
if bidirectional { 1 } else { 0 }
));
Ok((Tensor { c_tensor: c_tensors[0] }, Tensor { c_tensor: c_tensors[1] }))
}
/// Scalar greater-than comparison via C `atg_gt`.
pub fn f_gt<S: Into<Scalar>>(&self, other: S) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_gt(c_tensors.as_mut_ptr(), self.c_tensor, other.into().c_scalar));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// In-place variant of `f_gt` (C `atg_gt_`).
pub fn f_gt_<S: Into<Scalar>>(&mut self, other: S) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_gt_(c_tensors.as_mut_ptr(), self.c_tensor, other.into().c_scalar));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Scalar `gt` writing into `out` (C `atg_gt_scalar_out`).
pub fn f_gt_scalar_out<S: Into<Scalar>>(
&self,
out: &Tensor,
other: S,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_gt_scalar_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
other.into().c_scalar
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Tensor-tensor `gt` via C `atg_gt_tensor`.
pub fn f_gt_tensor(&self, other: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_gt_tensor(c_tensors.as_mut_ptr(), self.c_tensor, other.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// In-place tensor-tensor `gt` (C `atg_gt_tensor_`).
pub fn f_gt_tensor_(&mut self, other: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_gt_tensor_(c_tensors.as_mut_ptr(), self.c_tensor, other.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Tensor-tensor `gt` writing into `out` (C `atg_gt_tensor_out`).
pub fn f_gt_tensor_out(&self, out: &Tensor, other: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_gt_tensor_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
other.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Associated constructor for a Hamming window of `window_length` elements with the given
/// `(Kind, Device)` options (C `atg_hamming_window`).
pub fn f_hamming_window(
window_length: i64,
options: (Kind, Device),
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_hamming_window(
c_tensors.as_mut_ptr(),
window_length,
options.0.c_int(),
options.1.c_int()
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Variant of `f_hamming_window` writing into `out` (C `atg_hamming_window_out`).
pub fn f_hamming_window_out(out: &Tensor, window_length: i64) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_hamming_window_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
window_length
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Hamming window with an explicit `periodic` flag (C `atg_hamming_window_periodic`).
pub fn f_hamming_window_periodic(
window_length: i64,
periodic: bool,
options: (Kind, Device),
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_hamming_window_periodic(
c_tensors.as_mut_ptr(),
window_length,
if periodic { 1 } else { 0 },
options.0.c_int(),
options.1.c_int()
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Hamming window with `periodic` flag and explicit `alpha` coefficient
/// (C `atg_hamming_window_periodic_alpha`).
pub fn f_hamming_window_periodic_alpha(
window_length: i64,
periodic: bool,
alpha: f64,
options: (Kind, Device),
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_hamming_window_periodic_alpha(
c_tensors.as_mut_ptr(),
window_length,
if periodic { 1 } else { 0 },
alpha,
options.0.c_int(),
options.1.c_int()
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Hamming window with `periodic` flag and explicit `alpha`/`beta` coefficients
/// (C `atg_hamming_window_periodic_alpha_beta`).
pub fn f_hamming_window_periodic_alpha_beta(
window_length: i64,
periodic: bool,
alpha: f64,
beta: f64,
options: (Kind, Device),
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_hamming_window_periodic_alpha_beta(
c_tensors.as_mut_ptr(),
window_length,
if periodic { 1 } else { 0 },
alpha,
beta,
options.0.c_int(),
options.1.c_int()
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Variant of `f_hamming_window_periodic_alpha_beta` writing into `out`
/// (C `atg_hamming_window_periodic_alpha_beta_out`).
pub fn f_hamming_window_periodic_alpha_beta_out(
out: &Tensor,
window_length: i64,
periodic: bool,
alpha: f64,
beta: f64,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_hamming_window_periodic_alpha_beta_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
window_length,
if periodic { 1 } else { 0 },
alpha,
beta
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Variant of `f_hamming_window_periodic_alpha` writing into `out`
/// (C `atg_hamming_window_periodic_alpha_out`).
pub fn f_hamming_window_periodic_alpha_out(
out: &Tensor,
window_length: i64,
periodic: bool,
alpha: f64,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_hamming_window_periodic_alpha_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
window_length,
if periodic { 1 } else { 0 },
alpha
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Variant of `f_hamming_window_periodic` writing into `out` (C `atg_hamming_window_periodic_out`).
pub fn f_hamming_window_periodic_out(
out: &Tensor,
window_length: i64,
periodic: bool,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_hamming_window_periodic_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
window_length,
if periodic { 1 } else { 0 }
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Associated constructor for a Hann window (C `atg_hann_window`).
pub fn f_hann_window(window_length: i64, options: (Kind, Device)) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_hann_window(
c_tensors.as_mut_ptr(),
window_length,
options.0.c_int(),
options.1.c_int()
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Variant of `f_hann_window` writing into `out` (C `atg_hann_window_out`).
pub fn f_hann_window_out(out: &Tensor, window_length: i64) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_hann_window_out(c_tensors.as_mut_ptr(), out.c_tensor, window_length));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Hann window with an explicit `periodic` flag (C `atg_hann_window_periodic`).
pub fn f_hann_window_periodic(
window_length: i64,
periodic: bool,
options: (Kind, Device),
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_hann_window_periodic(
c_tensors.as_mut_ptr(),
window_length,
if periodic { 1 } else { 0 },
options.0.c_int(),
options.1.c_int()
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Variant of `f_hann_window_periodic` writing into `out` (C `atg_hann_window_periodic_out`).
pub fn f_hann_window_periodic_out(
out: &Tensor,
window_length: i64,
periodic: bool,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_hann_window_periodic_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
window_length,
if periodic { 1 } else { 0 }
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Hard-shrink activation via C `atg_hardshrink` (no explicit lambda; the C default applies).
pub fn f_hardshrink(&self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_hardshrink(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Hard-shrink backward via C `atg_hardshrink_backward`; `grad_out` precedes `self`.
pub fn f_hardshrink_backward<S: Into<Scalar>>(
&self,
grad_out: &Tensor,
lambd: S,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_hardshrink_backward(
c_tensors.as_mut_ptr(),
grad_out.c_tensor,
self.c_tensor,
lambd.into().c_scalar
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Hard-shrink backward writing into `grad_input` (C `atg_hardshrink_backward_grad_input`).
pub fn f_hardshrink_backward_grad_input<S: Into<Scalar>>(
&self,
grad_input: &Tensor,
grad_out: &Tensor,
lambd: S,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_hardshrink_backward_grad_input(
c_tensors.as_mut_ptr(),
grad_input.c_tensor,
grad_out.c_tensor,
self.c_tensor,
lambd.into().c_scalar
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Variant of `f_hardshrink` writing into `out` (C `atg_hardshrink_out`).
pub fn f_hardshrink_out(&self, out: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_hardshrink_out(c_tensors.as_mut_ptr(), out.c_tensor, self.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Hard-sigmoid activation via C `atg_hardsigmoid`.
pub fn f_hardsigmoid(&self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_hardsigmoid(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// In-place variant of `f_hardsigmoid` (C `atg_hardsigmoid_`).
pub fn f_hardsigmoid_(&mut self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_hardsigmoid_(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Hard-sigmoid backward via C `atg_hardsigmoid_backward`; `grad_output` precedes `self`.
pub fn f_hardsigmoid_backward(&self, grad_output: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_hardsigmoid_backward(
c_tensors.as_mut_ptr(),
grad_output.c_tensor,
self.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Hard-sigmoid backward writing into `grad_input` (C `atg_hardsigmoid_backward_grad_input`).
pub fn f_hardsigmoid_backward_grad_input(
&self,
grad_input: &Tensor,
grad_output: &Tensor,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_hardsigmoid_backward_grad_input(
c_tensors.as_mut_ptr(),
grad_input.c_tensor,
grad_output.c_tensor,
self.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Variant of `f_hardsigmoid` writing into `out` (C `atg_hardsigmoid_out`).
pub fn f_hardsigmoid_out(&self, out: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_hardsigmoid_out(c_tensors.as_mut_ptr(), out.c_tensor, self.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Hard-swish activation via C `atg_hardswish`.
pub fn f_hardswish(&self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_hardswish(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// In-place variant of `f_hardswish` (C `atg_hardswish_`).
pub fn f_hardswish_(&mut self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_hardswish_(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Hard-swish backward via C `atg_hardswish_backward`; `grad_output` precedes `self`.
pub fn f_hardswish_backward(&self, grad_output: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_hardswish_backward(
c_tensors.as_mut_ptr(),
grad_output.c_tensor,
self.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Hard-swish backward writing into `out` (C `atg_hardswish_backward_out`).
pub fn f_hardswish_backward_out(
&self,
out: &Tensor,
grad_output: &Tensor,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_hardswish_backward_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
grad_output.c_tensor,
self.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Variant of `f_hardswish` writing into `out` (C `atg_hardswish_out`).
pub fn f_hardswish_out(&self, out: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_hardswish_out(c_tensors.as_mut_ptr(), out.c_tensor, self.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Hard-tanh activation via C `atg_hardtanh` (no explicit min/max; the C defaults apply).
pub fn f_hardtanh(&self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_hardtanh(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// In-place variant of `f_hardtanh` (C `atg_hardtanh_`).
pub fn f_hardtanh_(&mut self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_hardtanh_(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Hard-tanh backward via C `atg_hardtanh_backward` with explicit scalar clamp bounds.
pub fn f_hardtanh_backward<S: Into<Scalar>>(
&self,
grad_output: &Tensor,
min_val: S,
max_val: S,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_hardtanh_backward(
c_tensors.as_mut_ptr(),
grad_output.c_tensor,
self.c_tensor,
min_val.into().c_scalar,
max_val.into().c_scalar
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Hard-tanh backward writing into `grad_input` (C `atg_hardtanh_backward_grad_input`).
pub fn f_hardtanh_backward_grad_input<S: Into<Scalar>>(
&self,
grad_input: &Tensor,
grad_output: &Tensor,
min_val: S,
max_val: S,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_hardtanh_backward_grad_input(
c_tensors.as_mut_ptr(),
grad_input.c_tensor,
grad_output.c_tensor,
self.c_tensor,
min_val.into().c_scalar,
max_val.into().c_scalar
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Variant of `f_hardtanh` writing into `out` (C `atg_hardtanh_out`).
pub fn f_hardtanh_out(&self, out: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_hardtanh_out(c_tensors.as_mut_ptr(), out.c_tensor, self.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Heaviside step function via C `atg_heaviside` using `values` at the step points.
pub fn f_heaviside(&self, values: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_heaviside(c_tensors.as_mut_ptr(), self.c_tensor, values.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// In-place variant of `f_heaviside` (C `atg_heaviside_`).
pub fn f_heaviside_(&mut self, values: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_heaviside_(c_tensors.as_mut_ptr(), self.c_tensor, values.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Variant of `f_heaviside` writing into `out` (C `atg_heaviside_out`).
pub fn f_heaviside_out(&self, out: &Tensor, values: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_heaviside_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
values.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Hinge embedding loss via C `atg_hinge_embedding_loss`; the `Reduction` enum is lowered
/// to its integer code with `to_int()`.
pub fn f_hinge_embedding_loss(
&self,
target: &Tensor,
margin: f64,
reduction: crate::Reduction,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_hinge_embedding_loss(
c_tensors.as_mut_ptr(),
self.c_tensor,
target.c_tensor,
margin,
reduction.to_int()
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Histogram with `bins` buckets via C `atg_histc`.
pub fn f_histc(&self, bins: i64) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_histc(c_tensors.as_mut_ptr(), self.c_tensor, bins));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Variant of `f_histc` writing into `out` (C `atg_histc_out`).
pub fn f_histc_out(&self, out: &Tensor, bins: i64) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_histc_out(c_tensors.as_mut_ptr(), out.c_tensor, self.c_tensor, bins));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Histogram with tensor-valued bin edges via C `atg_histogram`; a missing `weight` is a
/// null pointer. Returns the (hist, bin_edges) pair filled by the C op.
pub fn f_histogram<T: Borrow<Tensor>>(
&self,
bins: &Tensor,
weight: Option<T>,
density: bool,
) -> Result<(Tensor, Tensor), TchError> {
let mut c_tensors = [std::ptr::null_mut(); 2];
unsafe_torch_err!(atg_histogram(
c_tensors.as_mut_ptr(),
self.c_tensor,
bins.c_tensor,
weight.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
if density { 1 } else { 0 }
));
Ok((Tensor { c_tensor: c_tensors[0] }, Tensor { c_tensor: c_tensors[1] }))
}
/// Histogram with an integer bin count and an explicit `range` (C `atg_histogram_bin_ct`);
/// `range` is passed as pointer + length like other list arguments.
pub fn f_histogram_bin_ct<T: Borrow<Tensor>>(
&self,
bins: i64,
range: impl DoubleList,
weight: Option<T>,
density: bool,
) -> Result<(Tensor, Tensor), TchError> {
let mut c_tensors = [std::ptr::null_mut(); 2];
unsafe_torch_err!(atg_histogram_bin_ct(
c_tensors.as_mut_ptr(),
self.c_tensor,
bins,
range.as_ptr(),
range.len_i32(),
weight.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
if density { 1 } else { 0 }
));
Ok((Tensor { c_tensor: c_tensors[0] }, Tensor { c_tensor: c_tensors[1] }))
}
pub fn f_histogram_bin_ct_out<T: Borrow<Tensor>>(
&self,
hist: &Tensor,
bin_edges: &Tensor,
bins: i64,
range: impl DoubleList,
weight: Option<T>,
density: bool,
) -> Result<(Tensor, Tensor), TchError> {
let mut c_tensors = [std::ptr::null_mut(); 2];
unsafe_torch_err!(atg_histogram_bin_ct_out(
c_tensors.as_mut_ptr(),
hist.c_tensor,
bin_edges.c_tensor,
self.c_tensor,
bins,
range.as_ptr(),
range.len_i32(),
weight.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
if density { 1 } else { 0 }
));
Ok((Tensor { c_tensor: c_tensors[0] }, Tensor { c_tensor: c_tensors[1] }))
}
pub fn f_histogram_bins_tensor_out<T: Borrow<Tensor>>(
&self,
hist: &Tensor,
bin_edges: &Tensor,
bins: &Tensor,
weight: Option<T>,
density: bool,
) -> Result<(Tensor, Tensor), TchError> {
let mut c_tensors = [std::ptr::null_mut(); 2];
unsafe_torch_err!(atg_histogram_bins_tensor_out(
c_tensors.as_mut_ptr(),
hist.c_tensor,
bin_edges.c_tensor,
self.c_tensor,
bins.c_tensor,
weight.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
if density { 1 } else { 0 }
));
Ok((Tensor { c_tensor: c_tensors[0] }, Tensor { c_tensor: c_tensors[1] }))
}
// Operations returning a variable number of tensors (`hsplit`): the C side
// returns a heap-allocated, null-terminated array of tensor pointers. We walk
// it until the null sentinel, take ownership of each tensor, then free the
// array itself with `libc::free` (the tensors it pointed to are now owned by
// the `Tensor` wrappers and must NOT be freed here).
pub fn f_hsplit(&self, sections: i64) -> Result<Vec<Tensor>, TchError> {
let c_tensors = unsafe_torch_err!(atg_hsplit(self.c_tensor, sections));
let mut r__ = vec![];
let mut i = 0;
loop {
let c__ = unsafe { *c_tensors.add(i) };
if c__.is_null() {
// Null pointer marks the end of the returned list.
break;
}
r__.push(Tensor { c_tensor: c__ });
i += 1;
}
unsafe { libc::free(c_tensors as *mut libc::c_void) }
Ok(r__)
}
pub fn f_hsplit_array(&self, indices: impl IntList) -> Result<Vec<Tensor>, TchError> {
let c_tensors =
unsafe_torch_err!(atg_hsplit_array(self.c_tensor, indices.as_ptr(), indices.len_i32()));
let mut r__ = vec![];
let mut i = 0;
loop {
let c__ = unsafe { *c_tensors.add(i) };
if c__.is_null() {
break;
}
r__.push(Tensor { c_tensor: c__ });
i += 1;
}
unsafe { libc::free(c_tensors as *mut libc::c_void) }
Ok(r__)
}
// Associated functions (no `self`): sparse-dense matmul and horizontal stack.
// Tensor-slice arguments are marshalled through `ptr_list` into a temporary
// `Vec` of raw pointers that lives for the duration of the call statement.
pub fn f_hspmm(mat1: &Tensor, mat2: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_hspmm(c_tensors.as_mut_ptr(), mat1.c_tensor, mat2.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
pub fn f_hspmm_out(out: &Tensor, mat1: &Tensor, mat2: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_hspmm_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
mat1.c_tensor,
mat2.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
pub fn f_hstack<T: Borrow<Tensor>>(tensors: &[T]) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_hstack(
c_tensors.as_mut_ptr(),
ptr_list(tensors).as_ptr(),
tensors.len() as i32
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
pub fn f_hstack_out<T: Borrow<Tensor>>(
out: &Tensor,
tensors: &[T],
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_hstack_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
ptr_list(tensors).as_ptr(),
tensors.len() as i32
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
// `huber_loss` family: forward, backward, and their `_out` variants. The
// backward variants take the incoming `grad_output` first, matching the ATen
// argument order of the underlying C functions.
pub fn f_huber_loss(
&self,
target: &Tensor,
reduction: crate::Reduction,
delta: f64,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_huber_loss(
c_tensors.as_mut_ptr(),
self.c_tensor,
target.c_tensor,
reduction.to_int(),
delta
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
pub fn f_huber_loss_backward(
&self,
grad_output: &Tensor,
target: &Tensor,
reduction: crate::Reduction,
delta: f64,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_huber_loss_backward(
c_tensors.as_mut_ptr(),
grad_output.c_tensor,
self.c_tensor,
target.c_tensor,
reduction.to_int(),
delta
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
pub fn f_huber_loss_backward_out(
&self,
grad_input: &Tensor,
grad_output: &Tensor,
target: &Tensor,
reduction: crate::Reduction,
delta: f64,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_huber_loss_backward_out(
c_tensors.as_mut_ptr(),
grad_input.c_tensor,
grad_output.c_tensor,
self.c_tensor,
target.c_tensor,
reduction.to_int(),
delta
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
pub fn f_huber_loss_out(
&self,
out: &Tensor,
target: &Tensor,
reduction: crate::Reduction,
delta: f64,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_huber_loss_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
target.c_tensor,
reduction.to_int(),
delta
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
// Elementwise binary/unary wrappers (`hypot`, `i0`, `igamma`, `igammac`) in
// the standard plain / in-place (`_`) / `_out` triples, followed by the
// `im2col` pair and `imag`.
pub fn f_hypot(&self, other: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_hypot(c_tensors.as_mut_ptr(), self.c_tensor, other.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
pub fn f_hypot_(&mut self, other: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_hypot_(c_tensors.as_mut_ptr(), self.c_tensor, other.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
pub fn f_hypot_out(&self, out: &Tensor, other: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_hypot_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
other.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
pub fn f_i0(&self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_i0(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
pub fn f_i0_(&mut self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_i0_(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
pub fn f_i0_out(&self, out: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_i0_out(c_tensors.as_mut_ptr(), out.c_tensor, self.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
pub fn f_igamma(&self, other: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_igamma(c_tensors.as_mut_ptr(), self.c_tensor, other.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
pub fn f_igamma_(&mut self, other: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_igamma_(c_tensors.as_mut_ptr(), self.c_tensor, other.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
pub fn f_igamma_out(&self, out: &Tensor, other: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_igamma_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
other.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
pub fn f_igammac(&self, other: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_igammac(c_tensors.as_mut_ptr(), self.c_tensor, other.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
pub fn f_igammac_(&mut self, other: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_igammac_(c_tensors.as_mut_ptr(), self.c_tensor, other.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
pub fn f_igammac_out(&self, out: &Tensor, other: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_igammac_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
other.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
// `im2col`: each `IntList` argument is marshalled as a (pointer, length) pair.
pub fn f_im2col(
&self,
kernel_size: impl IntList,
dilation: impl IntList,
padding: impl IntList,
stride: impl IntList,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_im2col(
c_tensors.as_mut_ptr(),
self.c_tensor,
kernel_size.as_ptr(),
kernel_size.len_i32(),
dilation.as_ptr(),
dilation.len_i32(),
padding.as_ptr(),
padding.len_i32(),
stride.as_ptr(),
stride.len_i32()
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
pub fn f_im2col_out(
&self,
out: &Tensor,
kernel_size: impl IntList,
dilation: impl IntList,
padding: impl IntList,
stride: impl IntList,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_im2col_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
kernel_size.as_ptr(),
kernel_size.len_i32(),
dilation.as_ptr(),
dilation.len_i32(),
padding.as_ptr(),
padding.len_i32(),
stride.as_ptr(),
stride.len_i32()
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
pub fn f_imag(&self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_imag(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
// Indexing wrappers. `&[Option<T>]` index lists are marshalled via
// `ptr_list_opt`, which maps `None` entries to null pointers (the C side
// interprets a null as "full slice" for that dimension).
pub fn f_index<T: Borrow<Tensor>>(&self, indices: &[Option<T>]) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_index(
c_tensors.as_mut_ptr(),
self.c_tensor,
ptr_list_opt(indices).as_ptr(),
indices.len() as i32
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
pub fn f_index_add(
&self,
dim: i64,
index: &Tensor,
source: &Tensor,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_index_add(
c_tensors.as_mut_ptr(),
self.c_tensor,
dim,
index.c_tensor,
source.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
pub fn f_index_add_(
&mut self,
dim: i64,
index: &Tensor,
source: &Tensor,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_index_add_(
c_tensors.as_mut_ptr(),
self.c_tensor,
dim,
index.c_tensor,
source.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
pub fn f_index_add_out(
&self,
out: &Tensor,
dim: i64,
index: &Tensor,
source: &Tensor,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_index_add_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
dim,
index.c_tensor,
source.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
pub fn f_index_copy(
&self,
dim: i64,
index: &Tensor,
source: &Tensor,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_index_copy(
c_tensors.as_mut_ptr(),
self.c_tensor,
dim,
index.c_tensor,
source.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
pub fn f_index_copy_(
&mut self,
dim: i64,
index: &Tensor,
source: &Tensor,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_index_copy_(
c_tensors.as_mut_ptr(),
self.c_tensor,
dim,
index.c_tensor,
source.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
pub fn f_index_copy_out(
&self,
out: &Tensor,
dim: i64,
index: &Tensor,
source: &Tensor,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_index_copy_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
dim,
index.c_tensor,
source.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
// `index_fill` variants: `S: Into<Scalar>` values are converted to the
// `Scalar` wrapper and passed by their raw `c_scalar` pointer.
pub fn f_index_fill<S: Into<Scalar>>(
&self,
dim: i64,
index: &Tensor,
value: S,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_index_fill(
c_tensors.as_mut_ptr(),
self.c_tensor,
dim,
index.c_tensor,
value.into().c_scalar
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
pub fn f_index_fill_<S: Into<Scalar>>(
&mut self,
dim: i64,
index: &Tensor,
value: S,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_index_fill_(
c_tensors.as_mut_ptr(),
self.c_tensor,
dim,
index.c_tensor,
value.into().c_scalar
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
pub fn f_index_fill_int_scalar_out<S: Into<Scalar>>(
&self,
out: &Tensor,
dim: i64,
index: &Tensor,
value: S,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_index_fill_int_scalar_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
dim,
index.c_tensor,
value.into().c_scalar
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
pub fn f_index_fill_int_tensor(
&self,
dim: i64,
index: &Tensor,
value: &Tensor,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_index_fill_int_tensor(
c_tensors.as_mut_ptr(),
self.c_tensor,
dim,
index.c_tensor,
value.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
pub fn f_index_fill_int_tensor_(
&mut self,
dim: i64,
index: &Tensor,
value: &Tensor,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_index_fill_int_tensor_(
c_tensors.as_mut_ptr(),
self.c_tensor,
dim,
index.c_tensor,
value.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
pub fn f_index_fill_int_tensor_out(
&self,
out: &Tensor,
dim: i64,
index: &Tensor,
value: &Tensor,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_index_fill_int_tensor_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
dim,
index.c_tensor,
value.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
// `index_put` family (optional index tensors encoded as nulls via
// `ptr_list_opt`) and the `index_reduce` family. For `index_reduce`, the
// `reduce` mode string is passed as a raw (pointer, length) pair — the C side
// receives the byte length explicitly, so no NUL terminator is required.
pub fn f_index_put<T: Borrow<Tensor>>(
&self,
indices: &[Option<T>],
values: &Tensor,
accumulate: bool,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_index_put(
c_tensors.as_mut_ptr(),
self.c_tensor,
ptr_list_opt(indices).as_ptr(),
indices.len() as i32,
values.c_tensor,
if accumulate { 1 } else { 0 }
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
pub fn f_index_put_<T: Borrow<Tensor>>(
&mut self,
indices: &[Option<T>],
values: &Tensor,
accumulate: bool,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_index_put_(
c_tensors.as_mut_ptr(),
self.c_tensor,
ptr_list_opt(indices).as_ptr(),
indices.len() as i32,
values.c_tensor,
if accumulate { 1 } else { 0 }
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
pub fn f_index_put_out<T: Borrow<Tensor>>(
&self,
out: &Tensor,
indices: &[Option<T>],
values: &Tensor,
accumulate: bool,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_index_put_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
ptr_list_opt(indices).as_ptr(),
indices.len() as i32,
values.c_tensor,
if accumulate { 1 } else { 0 }
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
pub fn f_index_reduce(
&self,
dim: i64,
index: &Tensor,
source: &Tensor,
reduce: &str,
include_self: bool,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_index_reduce(
c_tensors.as_mut_ptr(),
self.c_tensor,
dim,
index.c_tensor,
source.c_tensor,
reduce.as_ptr(),
reduce.len() as i32,
if include_self { 1 } else { 0 }
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
pub fn f_index_reduce_(
&mut self,
dim: i64,
index: &Tensor,
source: &Tensor,
reduce: &str,
include_self: bool,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_index_reduce_(
c_tensors.as_mut_ptr(),
self.c_tensor,
dim,
index.c_tensor,
source.c_tensor,
reduce.as_ptr(),
reduce.len() as i32,
if include_self { 1 } else { 0 }
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
pub fn f_index_reduce_out(
&self,
out: &Tensor,
dim: i64,
index: &Tensor,
source: &Tensor,
reduce: &str,
include_self: bool,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_index_reduce_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
dim,
index.c_tensor,
source.c_tensor,
reduce.as_ptr(),
reduce.len() as i32,
if include_self { 1 } else { 0 }
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
// `index_select` family plus a few miscellaneous single-result wrappers
// (`indices`, `indices_copy`, `inner`, and the gelu-backward helper).
pub fn f_index_select(&self, dim: i64, index: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_index_select(
c_tensors.as_mut_ptr(),
self.c_tensor,
dim,
index.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
// Associated function: `self_sizes` is the shape of the original input whose
// gradient is being reconstructed, passed as a (pointer, length) pair.
pub fn f_index_select_backward(
grad: &Tensor,
self_sizes: impl IntList,
dim: i64,
index: &Tensor,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_index_select_backward(
c_tensors.as_mut_ptr(),
grad.c_tensor,
self_sizes.as_ptr(),
self_sizes.len_i32(),
dim,
index.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
pub fn f_index_select_out(
&self,
out: &Tensor,
dim: i64,
index: &Tensor,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_index_select_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
dim,
index.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
pub fn f_index_tensor_out<T: Borrow<Tensor>>(
&self,
out: &Tensor,
indices: &[Option<T>],
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_index_tensor_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
// `None` index entries become null pointers on the C side.
ptr_list_opt(indices).as_ptr(),
indices.len() as i32
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
pub fn f_indices(&self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_indices(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
pub fn f_indices_copy(&self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_indices_copy(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
pub fn f_indices_copy_out(&self, out: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_indices_copy_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
pub fn f_infinitely_differentiable_gelu_backward(
&self,
grad: &Tensor,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_infinitely_differentiable_gelu_backward(
c_tensors.as_mut_ptr(),
grad.c_tensor,
self.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
pub fn f_inner(&self, other: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_inner(c_tensors.as_mut_ptr(), self.c_tensor, other.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
pub fn f_inner_out(&self, out: &Tensor, other: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_inner_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
other.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
// `instance_norm` (four independent optional tensors, each mapped to a null
// pointer when absent), a few single-tensor wrappers, and the scalar `is_*`
// predicates. The predicates call C functions returning a C int; any nonzero
// value is mapped to `true`.
pub fn f_instance_norm<T: Borrow<Tensor>>(
&self,
weight: Option<T>,
bias: Option<T>,
running_mean: Option<T>,
running_var: Option<T>,
use_input_stats: bool,
momentum: f64,
eps: f64,
cudnn_enabled: bool,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_instance_norm(
c_tensors.as_mut_ptr(),
self.c_tensor,
weight.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
bias.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
running_mean.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
running_var.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
if use_input_stats { 1 } else { 0 },
momentum,
eps,
if cudnn_enabled { 1 } else { 0 }
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
pub fn f_int_repr(&self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_int_repr(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
pub fn f_int_repr_out(&self, out: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_int_repr_out(c_tensors.as_mut_ptr(), out.c_tensor, self.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
pub fn f_inverse(&self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_inverse(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
pub fn f_inverse_out(&self, out: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_inverse_out(c_tensors.as_mut_ptr(), out.c_tensor, self.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
// Boolean queries: `return_` is assigned inside the macro so that an error
// raised on the C side short-circuits before the value is read.
pub fn f_is_coalesced(&self) -> Result<bool, TchError> {
let return_;
unsafe_torch_err!(return_ = atg_is_coalesced(self.c_tensor));
Ok(return_ != 0)
}
pub fn f_is_complex(&self) -> Result<bool, TchError> {
let return_;
unsafe_torch_err!(return_ = atg_is_complex(self.c_tensor));
Ok(return_ != 0)
}
pub fn f_is_conj(&self) -> Result<bool, TchError> {
let return_;
unsafe_torch_err!(return_ = atg_is_conj(self.c_tensor));
Ok(return_ != 0)
}
pub fn f_is_distributed(&self) -> Result<bool, TchError> {
let return_;
unsafe_torch_err!(return_ = atg_is_distributed(self.c_tensor));
Ok(return_ != 0)
}
pub fn f_is_floating_point(&self) -> Result<bool, TchError> {
let return_;
unsafe_torch_err!(return_ = atg_is_floating_point(self.c_tensor));
Ok(return_ != 0)
}
pub fn f_is_inference(&self) -> Result<bool, TchError> {
let return_;
unsafe_torch_err!(return_ = atg_is_inference(self.c_tensor));
Ok(return_ != 0)
}
pub fn f_is_leaf(&self) -> Result<bool, TchError> {
let return_;
unsafe_torch_err!(return_ = atg_is_leaf(self.c_tensor));
Ok(return_ != 0)
}
pub fn f_is_neg(&self) -> Result<bool, TchError> {
let return_;
unsafe_torch_err!(return_ = atg_is_neg(self.c_tensor));
Ok(return_ != 0)
}
pub fn f_is_nonzero(&self) -> Result<bool, TchError> {
let return_;
unsafe_torch_err!(return_ = atg_is_nonzero(self.c_tensor));
Ok(return_ != 0)
}
pub fn f_is_pinned(&self, device: Device) -> Result<bool, TchError> {
let return_;
unsafe_torch_err!(return_ = atg_is_pinned(self.c_tensor, device.c_int()));
Ok(return_ != 0)
}
pub fn f_is_same_size(&self, other: &Tensor) -> Result<bool, TchError> {
let return_;
unsafe_torch_err!(return_ = atg_is_same_size(self.c_tensor, other.c_tensor));
Ok(return_ != 0)
}
pub fn f_is_set_to(&self, tensor: &Tensor) -> Result<bool, TchError> {
let return_;
unsafe_torch_err!(return_ = atg_is_set_to(self.c_tensor, tensor.c_tensor));
Ok(return_ != 0)
}
pub fn f_is_signed(&self) -> Result<bool, TchError> {
let return_;
unsafe_torch_err!(return_ = atg_is_signed(self.c_tensor));
Ok(return_ != 0)
}
pub fn f_is_vulkan_available() -> Result<bool, TchError> {
let return_;
unsafe_torch_err!(return_ = atg_is_vulkan_available());
Ok(return_ != 0)
}
// Tensor-valued predicates: `isclose`, `isfinite`, the `isin` overload set
// (tensor/scalar element and test-element combinations, each with an `_out`
// variant), and the `isinf`/`isnan`/`isneginf`/`isposinf`/`isreal` wrappers.
pub fn f_isclose(
&self,
other: &Tensor,
rtol: f64,
atol: f64,
equal_nan: bool,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_isclose(
c_tensors.as_mut_ptr(),
self.c_tensor,
other.c_tensor,
rtol,
atol,
if equal_nan { 1 } else { 0 }
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
pub fn f_isfinite(&self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_isfinite(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
pub fn f_isin(
elements: &Tensor,
test_elements: &Tensor,
assume_unique: bool,
invert: bool,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_isin(
c_tensors.as_mut_ptr(),
elements.c_tensor,
test_elements.c_tensor,
if assume_unique { 1 } else { 0 },
if invert { 1 } else { 0 }
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
pub fn f_isin_scalar_tensor<S: Into<Scalar>>(
element: S,
test_elements: &Tensor,
assume_unique: bool,
invert: bool,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_isin_scalar_tensor(
c_tensors.as_mut_ptr(),
element.into().c_scalar,
test_elements.c_tensor,
if assume_unique { 1 } else { 0 },
if invert { 1 } else { 0 }
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
pub fn f_isin_scalar_tensor_out<S: Into<Scalar>>(
out: &Tensor,
element: S,
test_elements: &Tensor,
assume_unique: bool,
invert: bool,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_isin_scalar_tensor_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
element.into().c_scalar,
test_elements.c_tensor,
if assume_unique { 1 } else { 0 },
if invert { 1 } else { 0 }
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
pub fn f_isin_tensor_scalar<S: Into<Scalar>>(
elements: &Tensor,
test_element: S,
assume_unique: bool,
invert: bool,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_isin_tensor_scalar(
c_tensors.as_mut_ptr(),
elements.c_tensor,
test_element.into().c_scalar,
if assume_unique { 1 } else { 0 },
if invert { 1 } else { 0 }
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
pub fn f_isin_tensor_scalar_out<S: Into<Scalar>>(
out: &Tensor,
elements: &Tensor,
test_element: S,
assume_unique: bool,
invert: bool,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_isin_tensor_scalar_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
elements.c_tensor,
test_element.into().c_scalar,
if assume_unique { 1 } else { 0 },
if invert { 1 } else { 0 }
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
pub fn f_isin_tensor_tensor_out(
out: &Tensor,
elements: &Tensor,
test_elements: &Tensor,
assume_unique: bool,
invert: bool,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_isin_tensor_tensor_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
elements.c_tensor,
test_elements.c_tensor,
if assume_unique { 1 } else { 0 },
if invert { 1 } else { 0 }
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
pub fn f_isinf(&self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_isinf(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
pub fn f_isinf_out(&self, out: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_isinf_out(c_tensors.as_mut_ptr(), out.c_tensor, self.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
pub fn f_isnan(&self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_isnan(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
pub fn f_isnan_out(&self, out: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_isnan_out(c_tensors.as_mut_ptr(), out.c_tensor, self.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
pub fn f_isneginf(&self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_isneginf(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
pub fn f_isneginf_out(&self, out: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_isneginf_out(c_tensors.as_mut_ptr(), out.c_tensor, self.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
pub fn f_isposinf(&self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_isposinf(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
pub fn f_isposinf_out(&self, out: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_isposinf_out(c_tensors.as_mut_ptr(), out.c_tensor, self.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
pub fn f_isreal(&self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_isreal(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
// `istft` plus the `kaiser_window` constructors. Optional `i64` arguments are
// marshalled as a (value, is_none) pair: 0 is only a placeholder when the
// option is absent, and the trailing `as i8` flag tells the C side whether
// the value is meaningful. `(Kind, Device)` option tuples are lowered to
// their integer codes.
pub fn f_istft<T: Borrow<Tensor>>(
&self,
n_fft: i64,
hop_length: impl Into<Option<i64>>,
win_length: impl Into<Option<i64>>,
window: Option<T>,
center: bool,
normalized: bool,
onesided: bool,
length: impl Into<Option<i64>>,
return_complex: bool,
) -> Result<Tensor, TchError> {
let hop_length = hop_length.into();
let win_length = win_length.into();
let length = length.into();
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_istft(
c_tensors.as_mut_ptr(),
self.c_tensor,
n_fft,
hop_length.unwrap_or(0i64),
hop_length.is_none() as i8,
win_length.unwrap_or(0i64),
win_length.is_none() as i8,
window.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
if center { 1 } else { 0 },
if normalized { 1 } else { 0 },
if onesided { 1 } else { 0 },
length.unwrap_or(0i64),
length.is_none() as i8,
if return_complex { 1 } else { 0 }
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
pub fn f_kaiser_window(
window_length: i64,
options: (Kind, Device),
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_kaiser_window(
c_tensors.as_mut_ptr(),
window_length,
options.0.c_int(),
options.1.c_int()
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
pub fn f_kaiser_window_beta(
window_length: i64,
periodic: bool,
beta: f64,
options: (Kind, Device),
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_kaiser_window_beta(
c_tensors.as_mut_ptr(),
window_length,
if periodic { 1 } else { 0 },
beta,
options.0.c_int(),
options.1.c_int()
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
pub fn f_kaiser_window_beta_out(
out: &Tensor,
window_length: i64,
periodic: bool,
beta: f64,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_kaiser_window_beta_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
window_length,
if periodic { 1 } else { 0 },
beta
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
pub fn f_kaiser_window_out(out: &Tensor, window_length: i64) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_kaiser_window_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
window_length
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
pub fn f_kaiser_window_periodic(
window_length: i64,
periodic: bool,
options: (Kind, Device),
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_kaiser_window_periodic(
c_tensors.as_mut_ptr(),
window_length,
if periodic { 1 } else { 0 },
options.0.c_int(),
options.1.c_int()
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
pub fn f_kaiser_window_periodic_out(
out: &Tensor,
window_length: i64,
periodic: bool,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_kaiser_window_periodic_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
window_length,
if periodic { 1 } else { 0 }
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
pub fn f_kl_div(
&self,
target: &Tensor,
reduction: crate::Reduction,
log_target: bool,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_kl_div(
c_tensors.as_mut_ptr(),
self.c_tensor,
target.c_tensor,
reduction.to_int(),
if log_target { 1 } else { 0 }
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
pub fn f_kron(&self, other: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_kron(c_tensors.as_mut_ptr(), self.c_tensor, other.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
pub fn f_kron_out(&self, out: &Tensor, other: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_kron_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
other.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
pub fn f_kthvalue(
&self,
k: i64,
dim: i64,
keepdim: bool,
) -> Result<(Tensor, Tensor), TchError> {
let mut c_tensors = [std::ptr::null_mut(); 2];
unsafe_torch_err!(atg_kthvalue(
c_tensors.as_mut_ptr(),
self.c_tensor,
k,
dim,
if keepdim { 1 } else { 0 }
));
Ok((Tensor { c_tensor: c_tensors[0] }, Tensor { c_tensor: c_tensors[1] }))
}
pub fn f_kthvalue_values(
&self,
values: &Tensor,
indices: &Tensor,
k: i64,
dim: i64,
keepdim: bool,
) -> Result<(Tensor, Tensor), TchError> {
let mut c_tensors = [std::ptr::null_mut(); 2];
unsafe_torch_err!(atg_kthvalue_values(
c_tensors.as_mut_ptr(),
values.c_tensor,
indices.c_tensor,
self.c_tensor,
k,
dim,
if keepdim { 1 } else { 0 }
));
Ok((Tensor { c_tensor: c_tensors[0] }, Tensor { c_tensor: c_tensors[1] }))
}
pub fn f_l1_loss(
&self,
target: &Tensor,
reduction: crate::Reduction,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_l1_loss(
c_tensors.as_mut_ptr(),
self.c_tensor,
target.c_tensor,
reduction.to_int()
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Binding for C `atg_layer_norm`. Optional `weight`/`bias` tensors are
/// encoded as null pointers when `None`.
pub fn f_layer_norm<T: Borrow<Tensor>>(
&self,
normalized_shape: impl IntList,
weight: Option<T>,
bias: Option<T>,
eps: f64,
cudnn_enable: bool,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_layer_norm(
c_tensors.as_mut_ptr(),
self.c_tensor,
normalized_shape.as_ptr(),
normalized_shape.len_i32(),
// None -> null pointer is the convention for optional tensor args.
weight.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
bias.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
eps,
if cudnn_enable { 1 } else { 0 }
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Binding for C `atg_lcm`; returns a new tensor or a `TchError`.
pub fn f_lcm(&self, other: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_lcm(c_tensors.as_mut_ptr(), self.c_tensor, other.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// In-place variant (`atg_lcm_`); takes `&mut self` by convention for
/// in-place C ops and still returns a tensor handle.
pub fn f_lcm_(&mut self, other: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_lcm_(c_tensors.as_mut_ptr(), self.c_tensor, other.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Out-variant of [`f_lcm`], writing into `out`.
pub fn f_lcm_out(&self, out: &Tensor, other: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_lcm_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
other.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Binding for C `atg_ldexp`.
pub fn f_ldexp(&self, other: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_ldexp(c_tensors.as_mut_ptr(), self.c_tensor, other.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// In-place variant (`atg_ldexp_`).
pub fn f_ldexp_(&mut self, other: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_ldexp_(c_tensors.as_mut_ptr(), self.c_tensor, other.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Out-variant of [`f_ldexp`], writing into `out`.
pub fn f_ldexp_out(&self, out: &Tensor, other: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_ldexp_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
other.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Binding for C `atg_le` (scalar comparand; any `Into<Scalar>` accepted).
pub fn f_le<S: Into<Scalar>>(&self, other: S) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_le(c_tensors.as_mut_ptr(), self.c_tensor, other.into().c_scalar));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// In-place scalar variant (`atg_le_`).
pub fn f_le_<S: Into<Scalar>>(&mut self, other: S) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_le_(c_tensors.as_mut_ptr(), self.c_tensor, other.into().c_scalar));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Out-variant of the scalar comparison, writing into `out`.
pub fn f_le_scalar_out<S: Into<Scalar>>(
&self,
out: &Tensor,
other: S,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_le_scalar_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
other.into().c_scalar
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Tensor-comparand variant (`atg_le_tensor`).
pub fn f_le_tensor(&self, other: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_le_tensor(c_tensors.as_mut_ptr(), self.c_tensor, other.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// In-place tensor-comparand variant (`atg_le_tensor_`).
pub fn f_le_tensor_(&mut self, other: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_le_tensor_(c_tensors.as_mut_ptr(), self.c_tensor, other.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Out-variant of the tensor comparison, writing into `out`.
pub fn f_le_tensor_out(&self, out: &Tensor, other: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_le_tensor_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
other.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Binding for C `atg_leaky_relu`; returns a new tensor or a `TchError`.
pub fn f_leaky_relu(&self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_leaky_relu(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// In-place variant (`atg_leaky_relu_`).
pub fn f_leaky_relu_(&mut self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_leaky_relu_(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Backward binding (`atg_leaky_relu_backward`). Note the C argument
/// order: `grad_output` precedes `self`.
pub fn f_leaky_relu_backward<S: Into<Scalar>>(
&self,
grad_output: &Tensor,
negative_slope: S,
self_is_result: bool,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_leaky_relu_backward(
c_tensors.as_mut_ptr(),
grad_output.c_tensor,
self.c_tensor,
negative_slope.into().c_scalar,
if self_is_result { 1 } else { 0 }
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Backward out-variant writing into `grad_input`.
pub fn f_leaky_relu_backward_grad_input<S: Into<Scalar>>(
&self,
grad_input: &Tensor,
grad_output: &Tensor,
negative_slope: S,
self_is_result: bool,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_leaky_relu_backward_grad_input(
c_tensors.as_mut_ptr(),
grad_input.c_tensor,
grad_output.c_tensor,
self.c_tensor,
negative_slope.into().c_scalar,
if self_is_result { 1 } else { 0 }
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Out-variant of [`f_leaky_relu`], writing into `out`.
pub fn f_leaky_relu_out(&self, out: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_leaky_relu_out(c_tensors.as_mut_ptr(), out.c_tensor, self.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Binding for C `atg_lerp` with a scalar interpolation weight.
pub fn f_lerp<S: Into<Scalar>>(&self, end: &Tensor, weight: S) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_lerp(
c_tensors.as_mut_ptr(),
self.c_tensor,
end.c_tensor,
weight.into().c_scalar
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// In-place variant of [`f_lerp`] (`atg_lerp_`).
pub fn f_lerp_<S: Into<Scalar>>(
&mut self,
end: &Tensor,
weight: S,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_lerp_(
c_tensors.as_mut_ptr(),
self.c_tensor,
end.c_tensor,
weight.into().c_scalar
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Out-variant with scalar weight, writing into `out`.
pub fn f_lerp_scalar_out<S: Into<Scalar>>(
&self,
out: &Tensor,
end: &Tensor,
weight: S,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_lerp_scalar_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
end.c_tensor,
weight.into().c_scalar
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Tensor-weight variant (`atg_lerp_tensor`).
pub fn f_lerp_tensor(&self, end: &Tensor, weight: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_lerp_tensor(
c_tensors.as_mut_ptr(),
self.c_tensor,
end.c_tensor,
weight.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// In-place tensor-weight variant (`atg_lerp_tensor_`).
pub fn f_lerp_tensor_(&mut self, end: &Tensor, weight: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_lerp_tensor_(
c_tensors.as_mut_ptr(),
self.c_tensor,
end.c_tensor,
weight.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Out-variant with tensor weight, writing into `out`.
pub fn f_lerp_tensor_out(
&self,
out: &Tensor,
end: &Tensor,
weight: &Tensor,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_lerp_tensor_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
end.c_tensor,
weight.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Binding for C `atg_less` (scalar comparand).
pub fn f_less<S: Into<Scalar>>(&self, other: S) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_less(c_tensors.as_mut_ptr(), self.c_tensor, other.into().c_scalar));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// In-place scalar variant (`atg_less_`).
pub fn f_less_<S: Into<Scalar>>(&mut self, other: S) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_less_(c_tensors.as_mut_ptr(), self.c_tensor, other.into().c_scalar));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Binding for C `atg_less_equal` (scalar comparand).
pub fn f_less_equal<S: Into<Scalar>>(&self, other: S) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_less_equal(
c_tensors.as_mut_ptr(),
self.c_tensor,
other.into().c_scalar
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// In-place scalar variant (`atg_less_equal_`).
pub fn f_less_equal_<S: Into<Scalar>>(&mut self, other: S) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_less_equal_(
c_tensors.as_mut_ptr(),
self.c_tensor,
other.into().c_scalar
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Out-variant of the scalar comparison, writing into `out`.
pub fn f_less_equal_scalar_out<S: Into<Scalar>>(
&self,
out: &Tensor,
other: S,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_less_equal_scalar_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
other.into().c_scalar
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Tensor-comparand variant (`atg_less_equal_tensor`).
pub fn f_less_equal_tensor(&self, other: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_less_equal_tensor(
c_tensors.as_mut_ptr(),
self.c_tensor,
other.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// In-place tensor-comparand variant (`atg_less_equal_tensor_`).
pub fn f_less_equal_tensor_(&mut self, other: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_less_equal_tensor_(
c_tensors.as_mut_ptr(),
self.c_tensor,
other.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Out-variant of the tensor comparison, writing into `out`.
pub fn f_less_equal_tensor_out(
&self,
out: &Tensor,
other: &Tensor,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_less_equal_tensor_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
other.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Out-variant of [`f_less`] (scalar comparand), writing into `out`.
pub fn f_less_scalar_out<S: Into<Scalar>>(
&self,
out: &Tensor,
other: S,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_less_scalar_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
other.into().c_scalar
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Tensor-comparand variant (`atg_less_tensor`).
pub fn f_less_tensor(&self, other: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_less_tensor(c_tensors.as_mut_ptr(), self.c_tensor, other.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// In-place tensor-comparand variant (`atg_less_tensor_`).
pub fn f_less_tensor_(&mut self, other: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_less_tensor_(c_tensors.as_mut_ptr(), self.c_tensor, other.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Out-variant of the tensor comparison, writing into `out`.
pub fn f_less_tensor_out(&self, out: &Tensor, other: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_less_tensor_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
other.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Binding for C `atg_lgamma`.
pub fn f_lgamma(&self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_lgamma(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// In-place variant (`atg_lgamma_`).
pub fn f_lgamma_(&mut self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_lgamma_(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Out-variant of [`f_lgamma`], writing into `out`.
pub fn f_lgamma_out(&self, out: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_lgamma_out(c_tensors.as_mut_ptr(), out.c_tensor, self.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Binding for C `atg_lift`.
pub fn f_lift(&self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_lift(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Binding for C `atg_lift_fresh`.
pub fn f_lift_fresh(&self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_lift_fresh(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Binding for C `atg_lift_fresh_copy`.
pub fn f_lift_fresh_copy(&self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_lift_fresh_copy(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Out-variant of [`f_lift_fresh_copy`], writing into `out`.
pub fn f_lift_fresh_copy_out(&self, out: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_lift_fresh_copy_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Out-variant of [`f_lift`], writing into `out`.
pub fn f_lift_out(&self, out: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_lift_out(c_tensors.as_mut_ptr(), out.c_tensor, self.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Binding for C `atg_linalg_cholesky`; `upper` lowered to a C int (1/0).
pub fn f_linalg_cholesky(&self, upper: bool) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_linalg_cholesky(
c_tensors.as_mut_ptr(),
self.c_tensor,
if upper { 1 } else { 0 }
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Binding for C `atg_linalg_cholesky_ex`; the C op returns two handles
/// (presumably factor + info, per torch.linalg.cholesky_ex — confirm upstream).
pub fn f_linalg_cholesky_ex(
&self,
upper: bool,
check_errors: bool,
) -> Result<(Tensor, Tensor), TchError> {
let mut c_tensors = [std::ptr::null_mut(); 2];
unsafe_torch_err!(atg_linalg_cholesky_ex(
c_tensors.as_mut_ptr(),
self.c_tensor,
if upper { 1 } else { 0 },
if check_errors { 1 } else { 0 }
));
Ok((Tensor { c_tensor: c_tensors[0] }, Tensor { c_tensor: c_tensors[1] }))
}
/// Out-variant of [`f_linalg_cholesky_ex`], writing into `l` and `info`.
pub fn f_linalg_cholesky_ex_l(
&self,
l: &Tensor,
info: &Tensor,
upper: bool,
check_errors: bool,
) -> Result<(Tensor, Tensor), TchError> {
let mut c_tensors = [std::ptr::null_mut(); 2];
unsafe_torch_err!(atg_linalg_cholesky_ex_l(
c_tensors.as_mut_ptr(),
l.c_tensor,
info.c_tensor,
self.c_tensor,
if upper { 1 } else { 0 },
if check_errors { 1 } else { 0 }
));
Ok((Tensor { c_tensor: c_tensors[0] }, Tensor { c_tensor: c_tensors[1] }))
}
/// Out-variant of [`f_linalg_cholesky`], writing into `out`.
pub fn f_linalg_cholesky_out(&self, out: &Tensor, upper: bool) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_linalg_cholesky_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
if upper { 1 } else { 0 }
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Binding for C `atg_linalg_cond` with a scalar order `p`.
pub fn f_linalg_cond<S: Into<Scalar>>(&self, p: S) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_linalg_cond(
c_tensors.as_mut_ptr(),
self.c_tensor,
p.into().c_scalar
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Out-variant of [`f_linalg_cond`], writing into `out`.
pub fn f_linalg_cond_out<S: Into<Scalar>>(
&self,
out: &Tensor,
p: S,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_linalg_cond_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
p.into().c_scalar
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// String-order variant: `p` is passed as (ptr, byte-length) — the
/// generated convention for string arguments in this file.
pub fn f_linalg_cond_p_str(&self, p: &str) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_linalg_cond_p_str(
c_tensors.as_mut_ptr(),
self.c_tensor,
p.as_ptr(),
p.len() as i32
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Out-variant of [`f_linalg_cond_p_str`], writing into `out`.
pub fn f_linalg_cond_p_str_out(&self, out: &Tensor, p: &str) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_linalg_cond_p_str_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
p.as_ptr(),
p.len() as i32
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Binding for C `atg_linalg_cross` along dimension `dim`.
pub fn f_linalg_cross(&self, other: &Tensor, dim: i64) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_linalg_cross(
c_tensors.as_mut_ptr(),
self.c_tensor,
other.c_tensor,
dim
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Out-variant of [`f_linalg_cross`], writing into `out`.
pub fn f_linalg_cross_out(
&self,
out: &Tensor,
other: &Tensor,
dim: i64,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_linalg_cross_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
other.c_tensor,
dim
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Associated-function binding for C `atg_linalg_det` (no `self`; operand is `a`).
pub fn f_linalg_det(a: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_linalg_det(c_tensors.as_mut_ptr(), a.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Out-variant of [`f_linalg_det`], writing into `out`.
pub fn f_linalg_det_out(out: &Tensor, a: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_linalg_det_out(c_tensors.as_mut_ptr(), out.c_tensor, a.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Binding for C `atg_linalg_diagonal` with `offset`/`dim1`/`dim2` forwarded.
pub fn f_linalg_diagonal(
a: &Tensor,
offset: i64,
dim1: i64,
dim2: i64,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_linalg_diagonal(
c_tensors.as_mut_ptr(),
a.c_tensor,
offset,
dim1,
dim2
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Binding for C `atg_linalg_eig`; the C op returns two handles
/// (presumably eigenvalues, eigenvectors — confirm against torch.linalg.eig).
pub fn f_linalg_eig(&self) -> Result<(Tensor, Tensor), TchError> {
let mut c_tensors = [std::ptr::null_mut(); 2];
unsafe_torch_err!(atg_linalg_eig(c_tensors.as_mut_ptr(), self.c_tensor));
Ok((Tensor { c_tensor: c_tensors[0] }, Tensor { c_tensor: c_tensors[1] }))
}
/// Out-variant of [`f_linalg_eig`], writing into `eigenvalues`/`eigenvectors`.
pub fn f_linalg_eig_out(
&self,
eigenvalues: &Tensor,
eigenvectors: &Tensor,
) -> Result<(Tensor, Tensor), TchError> {
let mut c_tensors = [std::ptr::null_mut(); 2];
unsafe_torch_err!(atg_linalg_eig_out(
c_tensors.as_mut_ptr(),
eigenvalues.c_tensor,
eigenvectors.c_tensor,
self.c_tensor
));
Ok((Tensor { c_tensor: c_tensors[0] }, Tensor { c_tensor: c_tensors[1] }))
}
/// Binding for C `atg_linalg_eigh`; `uplo` passed as (ptr, byte-length).
pub fn f_linalg_eigh(&self, uplo: &str) -> Result<(Tensor, Tensor), TchError> {
let mut c_tensors = [std::ptr::null_mut(); 2];
unsafe_torch_err!(atg_linalg_eigh(
c_tensors.as_mut_ptr(),
self.c_tensor,
uplo.as_ptr(),
uplo.len() as i32
));
Ok((Tensor { c_tensor: c_tensors[0] }, Tensor { c_tensor: c_tensors[1] }))
}
/// Out-variant of [`f_linalg_eigh`], writing into `eigvals`/`eigvecs`.
pub fn f_linalg_eigh_eigvals(
&self,
eigvals: &Tensor,
eigvecs: &Tensor,
uplo: &str,
) -> Result<(Tensor, Tensor), TchError> {
let mut c_tensors = [std::ptr::null_mut(); 2];
unsafe_torch_err!(atg_linalg_eigh_eigvals(
c_tensors.as_mut_ptr(),
eigvals.c_tensor,
eigvecs.c_tensor,
self.c_tensor,
uplo.as_ptr(),
uplo.len() as i32
));
Ok((Tensor { c_tensor: c_tensors[0] }, Tensor { c_tensor: c_tensors[1] }))
}
/// Binding for C `atg_linalg_eigvals`.
pub fn f_linalg_eigvals(&self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_linalg_eigvals(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Out-variant of [`f_linalg_eigvals`], writing into `out`.
pub fn f_linalg_eigvals_out(&self, out: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_linalg_eigvals_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Binding for C `atg_linalg_eigvalsh`; `uplo` passed as (ptr, byte-length).
pub fn f_linalg_eigvalsh(&self, uplo: &str) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_linalg_eigvalsh(
c_tensors.as_mut_ptr(),
self.c_tensor,
uplo.as_ptr(),
uplo.len() as i32
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Out-variant of [`f_linalg_eigvalsh`], writing into `out`.
pub fn f_linalg_eigvalsh_out(&self, out: &Tensor, uplo: &str) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_linalg_eigvalsh_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
uplo.as_ptr(),
uplo.len() as i32
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Binding for C `atg_linalg_householder_product`.
pub fn f_linalg_householder_product(&self, tau: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_linalg_householder_product(
c_tensors.as_mut_ptr(),
self.c_tensor,
tau.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Out-variant of [`f_linalg_householder_product`], writing into `out`.
pub fn f_linalg_householder_product_out(
&self,
out: &Tensor,
tau: &Tensor,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_linalg_householder_product_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
tau.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Associated-function binding for C `atg_linalg_inv`.
pub fn f_linalg_inv(a: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_linalg_inv(c_tensors.as_mut_ptr(), a.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Binding for C `atg_linalg_inv_ex`; returns two handles (result + info,
/// presumably — confirm against torch.linalg.inv_ex).
pub fn f_linalg_inv_ex(a: &Tensor, check_errors: bool) -> Result<(Tensor, Tensor), TchError> {
let mut c_tensors = [std::ptr::null_mut(); 2];
unsafe_torch_err!(atg_linalg_inv_ex(
c_tensors.as_mut_ptr(),
a.c_tensor,
if check_errors { 1 } else { 0 }
));
Ok((Tensor { c_tensor: c_tensors[0] }, Tensor { c_tensor: c_tensors[1] }))
}
/// Out-variant of [`f_linalg_inv_ex`], writing into `inverse`/`info`.
pub fn f_linalg_inv_ex_inverse(
inverse: &Tensor,
info: &Tensor,
a: &Tensor,
check_errors: bool,
) -> Result<(Tensor, Tensor), TchError> {
let mut c_tensors = [std::ptr::null_mut(); 2];
unsafe_torch_err!(atg_linalg_inv_ex_inverse(
c_tensors.as_mut_ptr(),
inverse.c_tensor,
info.c_tensor,
a.c_tensor,
if check_errors { 1 } else { 0 }
));
Ok((Tensor { c_tensor: c_tensors[0] }, Tensor { c_tensor: c_tensors[1] }))
}
/// Out-variant of [`f_linalg_inv`], writing into `out`.
pub fn f_linalg_inv_out(out: &Tensor, a: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_linalg_inv_out(c_tensors.as_mut_ptr(), out.c_tensor, a.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Binding for C `atg_linalg_ldl_factor`; two output handles.
pub fn f_linalg_ldl_factor(&self, hermitian: bool) -> Result<(Tensor, Tensor), TchError> {
let mut c_tensors = [std::ptr::null_mut(); 2];
unsafe_torch_err!(atg_linalg_ldl_factor(
c_tensors.as_mut_ptr(),
self.c_tensor,
if hermitian { 1 } else { 0 }
));
Ok((Tensor { c_tensor: c_tensors[0] }, Tensor { c_tensor: c_tensors[1] }))
}
/// Binding for C `atg_linalg_ldl_factor_ex`; three output handles
/// (presumably LD, pivots, info — confirm against torch.linalg.ldl_factor_ex).
pub fn f_linalg_ldl_factor_ex(
&self,
hermitian: bool,
check_errors: bool,
) -> Result<(Tensor, Tensor, Tensor), TchError> {
let mut c_tensors = [std::ptr::null_mut(); 3];
unsafe_torch_err!(atg_linalg_ldl_factor_ex(
c_tensors.as_mut_ptr(),
self.c_tensor,
if hermitian { 1 } else { 0 },
if check_errors { 1 } else { 0 }
));
Ok((
Tensor { c_tensor: c_tensors[0] },
Tensor { c_tensor: c_tensors[1] },
Tensor { c_tensor: c_tensors[2] },
))
}
/// Out-variant of [`f_linalg_ldl_factor_ex`], writing into `ld`/`pivots`/`info`.
pub fn f_linalg_ldl_factor_ex_out(
&self,
ld: &Tensor,
pivots: &Tensor,
info: &Tensor,
hermitian: bool,
check_errors: bool,
) -> Result<(Tensor, Tensor, Tensor), TchError> {
let mut c_tensors = [std::ptr::null_mut(); 3];
unsafe_torch_err!(atg_linalg_ldl_factor_ex_out(
c_tensors.as_mut_ptr(),
ld.c_tensor,
pivots.c_tensor,
info.c_tensor,
self.c_tensor,
if hermitian { 1 } else { 0 },
if check_errors { 1 } else { 0 }
));
Ok((
Tensor { c_tensor: c_tensors[0] },
Tensor { c_tensor: c_tensors[1] },
Tensor { c_tensor: c_tensors[2] },
))
}
/// Out-variant of [`f_linalg_ldl_factor`], writing into `ld`/`pivots`.
pub fn f_linalg_ldl_factor_out(
&self,
ld: &Tensor,
pivots: &Tensor,
hermitian: bool,
) -> Result<(Tensor, Tensor), TchError> {
let mut c_tensors = [std::ptr::null_mut(); 2];
unsafe_torch_err!(atg_linalg_ldl_factor_out(
c_tensors.as_mut_ptr(),
ld.c_tensor,
pivots.c_tensor,
self.c_tensor,
if hermitian { 1 } else { 0 }
));
Ok((Tensor { c_tensor: c_tensors[0] }, Tensor { c_tensor: c_tensors[1] }))
}
/// Associated-function binding for C `atg_linalg_ldl_solve`.
pub fn f_linalg_ldl_solve(
ld: &Tensor,
pivots: &Tensor,
b: &Tensor,
hermitian: bool,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_linalg_ldl_solve(
c_tensors.as_mut_ptr(),
ld.c_tensor,
pivots.c_tensor,
b.c_tensor,
if hermitian { 1 } else { 0 }
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Out-variant of [`f_linalg_ldl_solve`], writing into `out`.
pub fn f_linalg_ldl_solve_out(
out: &Tensor,
ld: &Tensor,
pivots: &Tensor,
b: &Tensor,
hermitian: bool,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_linalg_ldl_solve_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
ld.c_tensor,
pivots.c_tensor,
b.c_tensor,
if hermitian { 1 } else { 0 }
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Binding for C `atg_linalg_lstsq`; four output handles.
/// `Option<f64>` is encoded for C as a (value, is_none) pair: `None`
/// becomes (NaN, 1), `Some(v)` becomes (v, 0).
pub fn f_linalg_lstsq(
&self,
b: &Tensor,
rcond: impl Into<Option<f64>>,
driver: &str,
) -> Result<(Tensor, Tensor, Tensor, Tensor), TchError> {
let rcond = rcond.into();
let mut c_tensors = [std::ptr::null_mut(); 4];
unsafe_torch_err!(atg_linalg_lstsq(
c_tensors.as_mut_ptr(),
self.c_tensor,
b.c_tensor,
rcond.unwrap_or(std::f64::NAN),
rcond.is_none() as i8,
driver.as_ptr(),
driver.len() as i32
));
Ok((
Tensor { c_tensor: c_tensors[0] },
Tensor { c_tensor: c_tensors[1] },
Tensor { c_tensor: c_tensors[2] },
Tensor { c_tensor: c_tensors[3] },
))
}
/// Out-variant of [`f_linalg_lstsq`], writing into the four supplied tensors.
pub fn f_linalg_lstsq_out(
&self,
solution: &Tensor,
residuals: &Tensor,
rank: &Tensor,
singular_values: &Tensor,
b: &Tensor,
rcond: impl Into<Option<f64>>,
driver: &str,
) -> Result<(Tensor, Tensor, Tensor, Tensor), TchError> {
let rcond = rcond.into();
let mut c_tensors = [std::ptr::null_mut(); 4];
unsafe_torch_err!(atg_linalg_lstsq_out(
c_tensors.as_mut_ptr(),
solution.c_tensor,
residuals.c_tensor,
rank.c_tensor,
singular_values.c_tensor,
self.c_tensor,
b.c_tensor,
rcond.unwrap_or(std::f64::NAN),
rcond.is_none() as i8,
driver.as_ptr(),
driver.len() as i32
));
Ok((
Tensor { c_tensor: c_tensors[0] },
Tensor { c_tensor: c_tensors[1] },
Tensor { c_tensor: c_tensors[2] },
Tensor { c_tensor: c_tensors[3] },
))
}
/// Binding for C `atg_linalg_lu`; three output handles (P, L, U per the
/// out-variant's parameter names).
pub fn f_linalg_lu(a: &Tensor, pivot: bool) -> Result<(Tensor, Tensor, Tensor), TchError> {
let mut c_tensors = [std::ptr::null_mut(); 3];
unsafe_torch_err!(atg_linalg_lu(
c_tensors.as_mut_ptr(),
a.c_tensor,
if pivot { 1 } else { 0 }
));
Ok((
Tensor { c_tensor: c_tensors[0] },
Tensor { c_tensor: c_tensors[1] },
Tensor { c_tensor: c_tensors[2] },
))
}
/// Binding for C `atg_linalg_lu_factor`; two output handles.
pub fn f_linalg_lu_factor(a: &Tensor, pivot: bool) -> Result<(Tensor, Tensor), TchError> {
let mut c_tensors = [std::ptr::null_mut(); 2];
unsafe_torch_err!(atg_linalg_lu_factor(
c_tensors.as_mut_ptr(),
a.c_tensor,
if pivot { 1 } else { 0 }
));
Ok((Tensor { c_tensor: c_tensors[0] }, Tensor { c_tensor: c_tensors[1] }))
}
/// Binding for C `atg_linalg_lu_factor_ex`; three output handles
/// (LU, pivots, info per the out-variant's parameter names).
pub fn f_linalg_lu_factor_ex(
a: &Tensor,
pivot: bool,
check_errors: bool,
) -> Result<(Tensor, Tensor, Tensor), TchError> {
let mut c_tensors = [std::ptr::null_mut(); 3];
unsafe_torch_err!(atg_linalg_lu_factor_ex(
c_tensors.as_mut_ptr(),
a.c_tensor,
if pivot { 1 } else { 0 },
if check_errors { 1 } else { 0 }
));
Ok((
Tensor { c_tensor: c_tensors[0] },
Tensor { c_tensor: c_tensors[1] },
Tensor { c_tensor: c_tensors[2] },
))
}
/// Out-variant of [`f_linalg_lu_factor_ex`], writing into `lu`/`pivots`/`info`.
pub fn f_linalg_lu_factor_ex_out(
lu: &Tensor,
pivots: &Tensor,
info: &Tensor,
a: &Tensor,
pivot: bool,
check_errors: bool,
) -> Result<(Tensor, Tensor, Tensor), TchError> {
let mut c_tensors = [std::ptr::null_mut(); 3];
unsafe_torch_err!(atg_linalg_lu_factor_ex_out(
c_tensors.as_mut_ptr(),
lu.c_tensor,
pivots.c_tensor,
info.c_tensor,
a.c_tensor,
if pivot { 1 } else { 0 },
if check_errors { 1 } else { 0 }
));
Ok((
Tensor { c_tensor: c_tensors[0] },
Tensor { c_tensor: c_tensors[1] },
Tensor { c_tensor: c_tensors[2] },
))
}
/// Out-variant of [`f_linalg_lu_factor`], writing into `lu`/`pivots`.
pub fn f_linalg_lu_factor_out(
lu: &Tensor,
pivots: &Tensor,
a: &Tensor,
pivot: bool,
) -> Result<(Tensor, Tensor), TchError> {
let mut c_tensors = [std::ptr::null_mut(); 2];
unsafe_torch_err!(atg_linalg_lu_factor_out(
c_tensors.as_mut_ptr(),
lu.c_tensor,
pivots.c_tensor,
a.c_tensor,
if pivot { 1 } else { 0 }
));
Ok((Tensor { c_tensor: c_tensors[0] }, Tensor { c_tensor: c_tensors[1] }))
}
/// Out-variant of [`f_linalg_lu`], writing into `p`/`l`/`u`.
pub fn f_linalg_lu_out(
p: &Tensor,
l: &Tensor,
u: &Tensor,
a: &Tensor,
pivot: bool,
) -> Result<(Tensor, Tensor, Tensor), TchError> {
let mut c_tensors = [std::ptr::null_mut(); 3];
unsafe_torch_err!(atg_linalg_lu_out(
c_tensors.as_mut_ptr(),
p.c_tensor,
l.c_tensor,
u.c_tensor,
a.c_tensor,
if pivot { 1 } else { 0 }
));
Ok((
Tensor { c_tensor: c_tensors[0] },
Tensor { c_tensor: c_tensors[1] },
Tensor { c_tensor: c_tensors[2] },
))
}
/// Associated-function binding for C `atg_linalg_lu_solve`.
pub fn f_linalg_lu_solve(
lu: &Tensor,
pivots: &Tensor,
b: &Tensor,
left: bool,
adjoint: bool,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_linalg_lu_solve(
c_tensors.as_mut_ptr(),
lu.c_tensor,
pivots.c_tensor,
b.c_tensor,
if left { 1 } else { 0 },
if adjoint { 1 } else { 0 }
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Out-variant of [`f_linalg_lu_solve`], writing into `out`.
pub fn f_linalg_lu_solve_out(
out: &Tensor,
lu: &Tensor,
pivots: &Tensor,
b: &Tensor,
left: bool,
adjoint: bool,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_linalg_lu_solve_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
lu.c_tensor,
pivots.c_tensor,
b.c_tensor,
if left { 1 } else { 0 },
if adjoint { 1 } else { 0 }
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
pub fn f_linalg_matmul(&self, other: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_linalg_matmul(c_tensors.as_mut_ptr(), self.c_tensor, other.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
pub fn f_linalg_matmul_out(&self, out: &Tensor, other: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_linalg_matmul_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
other.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
pub fn f_linalg_matrix_exp(&self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_linalg_matrix_exp(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
pub fn f_linalg_matrix_exp_out(&self, out: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_linalg_matrix_exp_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
pub fn f_linalg_matrix_power(&self, n: i64) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_linalg_matrix_power(c_tensors.as_mut_ptr(), self.c_tensor, n));
Ok(Tensor { c_tensor: c_tensors[0] })
}
pub fn f_linalg_matrix_power_out(&self, out: &Tensor, n: i64) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_linalg_matrix_power_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
n
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
pub fn f_linalg_matrix_rank(&self, tol: f64, hermitian: bool) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_linalg_matrix_rank(
c_tensors.as_mut_ptr(),
self.c_tensor,
tol,
if hermitian { 1 } else { 0 }
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
pub fn f_linalg_matrix_rank_atol_rtol_float(
&self,
atol: impl Into<Option<f64>>,
rtol: impl Into<Option<f64>>,
hermitian: bool,
) -> Result<Tensor, TchError> {
let atol = atol.into();
let rtol = rtol.into();
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_linalg_matrix_rank_atol_rtol_float(
c_tensors.as_mut_ptr(),
self.c_tensor,
atol.unwrap_or(std::f64::NAN),
atol.is_none() as i8,
rtol.unwrap_or(std::f64::NAN),
rtol.is_none() as i8,
if hermitian { 1 } else { 0 }
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
pub fn f_linalg_matrix_rank_atol_rtol_float_out(
&self,
out: &Tensor,
atol: impl Into<Option<f64>>,
rtol: impl Into<Option<f64>>,
hermitian: bool,
) -> Result<Tensor, TchError> {
let atol = atol.into();
let rtol = rtol.into();
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_linalg_matrix_rank_atol_rtol_float_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
atol.unwrap_or(std::f64::NAN),
atol.is_none() as i8,
rtol.unwrap_or(std::f64::NAN),
rtol.is_none() as i8,
if hermitian { 1 } else { 0 }
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
pub fn f_linalg_matrix_rank_atol_rtol_tensor<T: Borrow<Tensor>>(
&self,
atol: Option<T>,
rtol: Option<T>,
hermitian: bool,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_linalg_matrix_rank_atol_rtol_tensor(
c_tensors.as_mut_ptr(),
self.c_tensor,
atol.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
rtol.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
if hermitian { 1 } else { 0 }
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
pub fn f_linalg_matrix_rank_atol_rtol_tensor_out<T: Borrow<Tensor>>(
&self,
out: &Tensor,
atol: Option<T>,
rtol: Option<T>,
hermitian: bool,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_linalg_matrix_rank_atol_rtol_tensor_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
atol.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
rtol.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
if hermitian { 1 } else { 0 }
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for C `atg_linalg_matrix_rank_out` with a fixed float tolerance `tol`,
/// writing the result into `out`. `hermitian` is passed as a 0/1 integer flag.
pub fn f_linalg_matrix_rank_out(
&self,
out: &Tensor,
tol: f64,
hermitian: bool,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_linalg_matrix_rank_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
tol,
if hermitian { 1 } else { 0 }
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for C `atg_linalg_matrix_rank_out_tol_tensor`: tolerance supplied as a
/// tensor, result written into `out`.
pub fn f_linalg_matrix_rank_out_tol_tensor(
&self,
out: &Tensor,
tol: &Tensor,
hermitian: bool,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_linalg_matrix_rank_out_tol_tensor(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
tol.c_tensor,
if hermitian { 1 } else { 0 }
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for C `atg_linalg_matrix_rank_tol_tensor`: tolerance supplied as a tensor.
pub fn f_linalg_matrix_rank_tol_tensor(
&self,
tol: &Tensor,
hermitian: bool,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_linalg_matrix_rank_tol_tensor(
c_tensors.as_mut_ptr(),
self.c_tensor,
tol.c_tensor,
if hermitian { 1 } else { 0 }
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for C `atg_linalg_multi_dot` over a slice of tensors.
/// The slice is marshalled as a raw pointer array plus an `i32` length; the temporary
/// `Vec` from `ptr_list` lives until the end of the macro call, keeping the pointer valid.
pub fn f_linalg_multi_dot<T: Borrow<Tensor>>(tensors: &[T]) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_linalg_multi_dot(
c_tensors.as_mut_ptr(),
ptr_list(tensors).as_ptr(),
tensors.len() as i32
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Out-variant of [`Self::f_linalg_multi_dot`]; binds C `atg_linalg_multi_dot_out` and
/// writes the result into `out`.
pub fn f_linalg_multi_dot_out<T: Borrow<Tensor>>(
out: &Tensor,
tensors: &[T],
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_linalg_multi_dot_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
ptr_list(tensors).as_ptr(),
tensors.len() as i32
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for C `atg_linalg_norm` with a scalar `ord`.
/// `dim` is marshalled as pointer + length via `IntListOption`; an absent `dtype`
/// is encoded as `-1` for the C side.
pub fn f_linalg_norm<S: Into<Scalar>>(
&self,
ord: S,
dim: impl IntListOption,
keepdim: bool,
dtype: impl Into<Option<Kind>>,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_linalg_norm(
c_tensors.as_mut_ptr(),
self.c_tensor,
ord.into().c_scalar,
dim.as_ptr(),
dim.len_i32(),
if keepdim { 1 } else { 0 },
dtype.into().map_or(-1, |s| s.c_int())
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for C `atg_linalg_norm_ord_str` with a string `ord`.
/// The string is passed as a raw byte pointer plus explicit `i32` length
/// (not NUL-terminated); absent `dtype` is encoded as `-1`.
pub fn f_linalg_norm_ord_str(
&self,
ord: &str,
dim: impl IntListOption,
keepdim: bool,
dtype: impl Into<Option<Kind>>,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_linalg_norm_ord_str(
c_tensors.as_mut_ptr(),
self.c_tensor,
ord.as_ptr(),
ord.len() as i32,
dim.as_ptr(),
dim.len_i32(),
if keepdim { 1 } else { 0 },
dtype.into().map_or(-1, |s| s.c_int())
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Out-variant of [`Self::f_linalg_norm_ord_str`]; binds C `atg_linalg_norm_ord_str_out`
/// and writes into `out`.
pub fn f_linalg_norm_ord_str_out(
&self,
out: &Tensor,
ord: &str,
dim: impl IntListOption,
keepdim: bool,
dtype: impl Into<Option<Kind>>,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_linalg_norm_ord_str_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
ord.as_ptr(),
ord.len() as i32,
dim.as_ptr(),
dim.len_i32(),
if keepdim { 1 } else { 0 },
dtype.into().map_or(-1, |s| s.c_int())
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Out-variant of [`Self::f_linalg_norm`]; binds C `atg_linalg_norm_out` and writes into `out`.
pub fn f_linalg_norm_out<S: Into<Scalar>>(
&self,
out: &Tensor,
ord: S,
dim: impl IntListOption,
keepdim: bool,
dtype: impl Into<Option<Kind>>,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_linalg_norm_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
ord.into().c_scalar,
dim.as_ptr(),
dim.len_i32(),
if keepdim { 1 } else { 0 },
dtype.into().map_or(-1, |s| s.c_int())
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for C `atg_linalg_pinv` with a float `rcond` cutoff.
pub fn f_linalg_pinv(&self, rcond: f64, hermitian: bool) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_linalg_pinv(
c_tensors.as_mut_ptr(),
self.c_tensor,
rcond,
if hermitian { 1 } else { 0 }
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for C `atg_linalg_pinv_atol_rtol_float`.
/// Optional `atol`/`rtol` use the (NaN, is-absent) sentinel encoding: `None` is passed
/// as NaN plus an `is_none` flag of 1.
pub fn f_linalg_pinv_atol_rtol_float(
&self,
atol: impl Into<Option<f64>>,
rtol: impl Into<Option<f64>>,
hermitian: bool,
) -> Result<Tensor, TchError> {
let atol = atol.into();
let rtol = rtol.into();
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_linalg_pinv_atol_rtol_float(
c_tensors.as_mut_ptr(),
self.c_tensor,
atol.unwrap_or(std::f64::NAN),
atol.is_none() as i8,
rtol.unwrap_or(std::f64::NAN),
rtol.is_none() as i8,
if hermitian { 1 } else { 0 }
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Out-variant of [`Self::f_linalg_pinv_atol_rtol_float`]; binds C
/// `atg_linalg_pinv_atol_rtol_float_out` and writes into `out`.
pub fn f_linalg_pinv_atol_rtol_float_out(
&self,
out: &Tensor,
atol: impl Into<Option<f64>>,
rtol: impl Into<Option<f64>>,
hermitian: bool,
) -> Result<Tensor, TchError> {
let atol = atol.into();
let rtol = rtol.into();
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_linalg_pinv_atol_rtol_float_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
atol.unwrap_or(std::f64::NAN),
atol.is_none() as i8,
rtol.unwrap_or(std::f64::NAN),
rtol.is_none() as i8,
if hermitian { 1 } else { 0 }
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for C `atg_linalg_pinv_atol_rtol_tensor`; `None` tolerance tensors
/// are passed as null pointers.
pub fn f_linalg_pinv_atol_rtol_tensor<T: Borrow<Tensor>>(
&self,
atol: Option<T>,
rtol: Option<T>,
hermitian: bool,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_linalg_pinv_atol_rtol_tensor(
c_tensors.as_mut_ptr(),
self.c_tensor,
atol.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
rtol.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
if hermitian { 1 } else { 0 }
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Out-variant of [`Self::f_linalg_pinv_atol_rtol_tensor`]; binds C
/// `atg_linalg_pinv_atol_rtol_tensor_out` and writes into `out`.
pub fn f_linalg_pinv_atol_rtol_tensor_out<T: Borrow<Tensor>>(
&self,
out: &Tensor,
atol: Option<T>,
rtol: Option<T>,
hermitian: bool,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_linalg_pinv_atol_rtol_tensor_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
atol.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
rtol.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
if hermitian { 1 } else { 0 }
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Out-variant of [`Self::f_linalg_pinv`]; binds C `atg_linalg_pinv_out` and writes into `out`.
pub fn f_linalg_pinv_out(
&self,
out: &Tensor,
rcond: f64,
hermitian: bool,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_linalg_pinv_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
rcond,
if hermitian { 1 } else { 0 }
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for C `atg_linalg_pinv_out_rcond_tensor`: cutoff supplied as a tensor,
/// result written into `out`.
pub fn f_linalg_pinv_out_rcond_tensor(
&self,
out: &Tensor,
rcond: &Tensor,
hermitian: bool,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_linalg_pinv_out_rcond_tensor(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
rcond.c_tensor,
if hermitian { 1 } else { 0 }
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for C `atg_linalg_pinv_rcond_tensor`: cutoff supplied as a tensor.
pub fn f_linalg_pinv_rcond_tensor(
&self,
rcond: &Tensor,
hermitian: bool,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_linalg_pinv_rcond_tensor(
c_tensors.as_mut_ptr(),
self.c_tensor,
rcond.c_tensor,
if hermitian { 1 } else { 0 }
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for C `atg_linalg_qr`; returns the two output tensors as a tuple.
/// `mode` is passed as raw bytes plus an explicit length (not NUL-terminated).
pub fn f_linalg_qr(a: &Tensor, mode: &str) -> Result<(Tensor, Tensor), TchError> {
let mut c_tensors = [std::ptr::null_mut(); 2];
unsafe_torch_err!(atg_linalg_qr(
c_tensors.as_mut_ptr(),
a.c_tensor,
mode.as_ptr(),
mode.len() as i32
));
Ok((Tensor { c_tensor: c_tensors[0] }, Tensor { c_tensor: c_tensors[1] }))
}
/// Out-variant of [`Self::f_linalg_qr`]; binds C `atg_linalg_qr_out` and writes into `q`/`r`.
pub fn f_linalg_qr_out(
q: &Tensor,
r: &Tensor,
a: &Tensor,
mode: &str,
) -> Result<(Tensor, Tensor), TchError> {
let mut c_tensors = [std::ptr::null_mut(); 2];
unsafe_torch_err!(atg_linalg_qr_out(
c_tensors.as_mut_ptr(),
q.c_tensor,
r.c_tensor,
a.c_tensor,
mode.as_ptr(),
mode.len() as i32
));
Ok((Tensor { c_tensor: c_tensors[0] }, Tensor { c_tensor: c_tensors[1] }))
}
/// Fallible binding for C `atg_linalg_slogdet`; returns the two output tensors as a tuple.
pub fn f_linalg_slogdet(a: &Tensor) -> Result<(Tensor, Tensor), TchError> {
let mut c_tensors = [std::ptr::null_mut(); 2];
unsafe_torch_err!(atg_linalg_slogdet(c_tensors.as_mut_ptr(), a.c_tensor));
Ok((Tensor { c_tensor: c_tensors[0] }, Tensor { c_tensor: c_tensors[1] }))
}
/// Out-variant of [`Self::f_linalg_slogdet`]; binds C `atg_linalg_slogdet_out`,
/// writing into `sign` and `logabsdet`.
pub fn f_linalg_slogdet_out(
sign: &Tensor,
logabsdet: &Tensor,
a: &Tensor,
) -> Result<(Tensor, Tensor), TchError> {
let mut c_tensors = [std::ptr::null_mut(); 2];
unsafe_torch_err!(atg_linalg_slogdet_out(
c_tensors.as_mut_ptr(),
sign.c_tensor,
logabsdet.c_tensor,
a.c_tensor
));
Ok((Tensor { c_tensor: c_tensors[0] }, Tensor { c_tensor: c_tensors[1] }))
}
/// Fallible binding for C `atg_linalg_solve`; `left` is passed as a 0/1 integer flag.
pub fn f_linalg_solve(a: &Tensor, b: &Tensor, left: bool) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_linalg_solve(
c_tensors.as_mut_ptr(),
a.c_tensor,
b.c_tensor,
if left { 1 } else { 0 }
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for C `atg_linalg_solve_ex`; returns the two output tensors as a tuple.
pub fn f_linalg_solve_ex(
a: &Tensor,
b: &Tensor,
left: bool,
check_errors: bool,
) -> Result<(Tensor, Tensor), TchError> {
let mut c_tensors = [std::ptr::null_mut(); 2];
unsafe_torch_err!(atg_linalg_solve_ex(
c_tensors.as_mut_ptr(),
a.c_tensor,
b.c_tensor,
if left { 1 } else { 0 },
if check_errors { 1 } else { 0 }
));
Ok((Tensor { c_tensor: c_tensors[0] }, Tensor { c_tensor: c_tensors[1] }))
}
/// Out-variant of [`Self::f_linalg_solve_ex`]; binds C `atg_linalg_solve_ex_out`,
/// writing into `result` and `info`.
pub fn f_linalg_solve_ex_out(
result: &Tensor,
info: &Tensor,
a: &Tensor,
b: &Tensor,
left: bool,
check_errors: bool,
) -> Result<(Tensor, Tensor), TchError> {
let mut c_tensors = [std::ptr::null_mut(); 2];
unsafe_torch_err!(atg_linalg_solve_ex_out(
c_tensors.as_mut_ptr(),
result.c_tensor,
info.c_tensor,
a.c_tensor,
b.c_tensor,
if left { 1 } else { 0 },
if check_errors { 1 } else { 0 }
));
Ok((Tensor { c_tensor: c_tensors[0] }, Tensor { c_tensor: c_tensors[1] }))
}
/// Out-variant of [`Self::f_linalg_solve`]; binds C `atg_linalg_solve_out` and writes into `out`.
pub fn f_linalg_solve_out(
out: &Tensor,
a: &Tensor,
b: &Tensor,
left: bool,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_linalg_solve_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
a.c_tensor,
b.c_tensor,
if left { 1 } else { 0 }
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for C `atg_linalg_solve_triangular`; the three boolean options are
/// passed as 0/1 integer flags.
pub fn f_linalg_solve_triangular(
&self,
b: &Tensor,
upper: bool,
left: bool,
unitriangular: bool,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_linalg_solve_triangular(
c_tensors.as_mut_ptr(),
self.c_tensor,
b.c_tensor,
if upper { 1 } else { 0 },
if left { 1 } else { 0 },
if unitriangular { 1 } else { 0 }
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Out-variant of [`Self::f_linalg_solve_triangular`]; binds C
/// `atg_linalg_solve_triangular_out` and writes into `out`.
pub fn f_linalg_solve_triangular_out(
&self,
out: &Tensor,
b: &Tensor,
upper: bool,
left: bool,
unitriangular: bool,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_linalg_solve_triangular_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
b.c_tensor,
if upper { 1 } else { 0 },
if left { 1 } else { 0 },
if unitriangular { 1 } else { 0 }
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for C `atg_linalg_svd`; returns three output tensors as a tuple.
/// `driver` is passed as raw bytes plus an explicit length (not NUL-terminated).
pub fn f_linalg_svd(
a: &Tensor,
full_matrices: bool,
driver: &str,
) -> Result<(Tensor, Tensor, Tensor), TchError> {
let mut c_tensors = [std::ptr::null_mut(); 3];
unsafe_torch_err!(atg_linalg_svd(
c_tensors.as_mut_ptr(),
a.c_tensor,
if full_matrices { 1 } else { 0 },
driver.as_ptr(),
driver.len() as i32
));
Ok((
Tensor { c_tensor: c_tensors[0] },
Tensor { c_tensor: c_tensors[1] },
Tensor { c_tensor: c_tensors[2] },
))
}
/// Out-variant of [`Self::f_linalg_svd`]; binds C `atg_linalg_svd_u`, writing into
/// `u`, `s`, and `vh`.
pub fn f_linalg_svd_u(
u: &Tensor,
s: &Tensor,
vh: &Tensor,
a: &Tensor,
full_matrices: bool,
driver: &str,
) -> Result<(Tensor, Tensor, Tensor), TchError> {
let mut c_tensors = [std::ptr::null_mut(); 3];
unsafe_torch_err!(atg_linalg_svd_u(
c_tensors.as_mut_ptr(),
u.c_tensor,
s.c_tensor,
vh.c_tensor,
a.c_tensor,
if full_matrices { 1 } else { 0 },
driver.as_ptr(),
driver.len() as i32
));
Ok((
Tensor { c_tensor: c_tensors[0] },
Tensor { c_tensor: c_tensors[1] },
Tensor { c_tensor: c_tensors[2] },
))
}
/// Fallible binding for C `atg_linalg_svdvals`; `driver` is passed as raw bytes plus length.
pub fn f_linalg_svdvals(a: &Tensor, driver: &str) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_linalg_svdvals(
c_tensors.as_mut_ptr(),
a.c_tensor,
driver.as_ptr(),
driver.len() as i32
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Out-variant of [`Self::f_linalg_svdvals`]; binds C `atg_linalg_svdvals_out`
/// and writes into `out`.
pub fn f_linalg_svdvals_out(
out: &Tensor,
a: &Tensor,
driver: &str,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_linalg_svdvals_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
a.c_tensor,
driver.as_ptr(),
driver.len() as i32
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for C `atg_linalg_tensorinv` with index argument `ind`.
pub fn f_linalg_tensorinv(&self, ind: i64) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_linalg_tensorinv(c_tensors.as_mut_ptr(), self.c_tensor, ind));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Out-variant of [`Self::f_linalg_tensorinv`]; binds C `atg_linalg_tensorinv_out`
/// and writes into `out`.
pub fn f_linalg_tensorinv_out(&self, out: &Tensor, ind: i64) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_linalg_tensorinv_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
ind
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for C `atg_linalg_tensorsolve`; `dims` is marshalled as
/// pointer + length via `IntListOption`.
pub fn f_linalg_tensorsolve(
&self,
other: &Tensor,
dims: impl IntListOption,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_linalg_tensorsolve(
c_tensors.as_mut_ptr(),
self.c_tensor,
other.c_tensor,
dims.as_ptr(),
dims.len_i32()
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Out-variant of [`Self::f_linalg_tensorsolve`]; binds C `atg_linalg_tensorsolve_out`
/// and writes into `out`.
pub fn f_linalg_tensorsolve_out(
&self,
out: &Tensor,
other: &Tensor,
dims: impl IntListOption,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_linalg_tensorsolve_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
other.c_tensor,
dims.as_ptr(),
dims.len_i32()
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for C `atg_linalg_vander`. The optional `n` uses the
/// (value, is-absent) encoding: `None` is sent as `0` with an `is_none` flag of 1.
pub fn f_linalg_vander(x: &Tensor, n: impl Into<Option<i64>>) -> Result<Tensor, TchError> {
let n = n.into();
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_linalg_vander(
c_tensors.as_mut_ptr(),
x.c_tensor,
n.unwrap_or(0i64),
n.is_none() as i8
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for C `atg_linalg_vecdot` over dimension `dim`.
pub fn f_linalg_vecdot(x: &Tensor, y: &Tensor, dim: i64) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_linalg_vecdot(c_tensors.as_mut_ptr(), x.c_tensor, y.c_tensor, dim));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Out-variant of [`Self::f_linalg_vecdot`]; binds C `atg_linalg_vecdot_out`
/// and writes into `out`.
pub fn f_linalg_vecdot_out(
out: &Tensor,
x: &Tensor,
y: &Tensor,
dim: i64,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_linalg_vecdot_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
x.c_tensor,
y.c_tensor,
dim
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for C `atg_linear`; a `None` bias is passed as a null pointer.
pub fn f_linear<T: Borrow<Tensor>>(
&self,
weight: &Tensor,
bias: Option<T>,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_linear(
c_tensors.as_mut_ptr(),
self.c_tensor,
weight.c_tensor,
bias.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor)
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Out-variant of [`Self::f_linear`]; binds C `atg_linear_out` and writes into `out`.
/// A `None` bias is passed as a null pointer.
pub fn f_linear_out<T: Borrow<Tensor>>(
&self,
out: &Tensor,
weight: &Tensor,
bias: Option<T>,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_linear_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
weight.c_tensor,
bias.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor)
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for C `atg_linspace`. The `(Kind, Device)` pair is marshalled as
/// two C integer codes via `c_int()`.
pub fn f_linspace<S: Into<Scalar>>(
start: S,
end: S,
steps: i64,
options: (Kind, Device),
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_linspace(
c_tensors.as_mut_ptr(),
start.into().c_scalar,
end.into().c_scalar,
steps,
options.0.c_int(),
options.1.c_int()
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Out-variant of [`Self::f_linspace`]; binds C `atg_linspace_out` and writes into `out`
/// (no kind/device options — they come from `out`'s existing state on the C side;
/// not verifiable from here).
pub fn f_linspace_out<S: Into<Scalar>>(
out: &Tensor,
start: S,
end: S,
steps: i64,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_linspace_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
start.into().c_scalar,
end.into().c_scalar,
steps
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for C `atg_log`.
pub fn f_log(&self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_log(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for C `atg_log10`.
pub fn f_log10(&self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_log10(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// In-place variant (note `&mut self`) binding C `atg_log10_`; still returns
/// the tensor handle produced by the C call.
pub fn f_log10_(&mut self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_log10_(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Out-variant binding C `atg_log10_out`, writing into `out`.
pub fn f_log10_out(&self, out: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_log10_out(c_tensors.as_mut_ptr(), out.c_tensor, self.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for C `atg_log1p`.
pub fn f_log1p(&self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_log1p(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// In-place variant (note `&mut self`) binding C `atg_log1p_`.
pub fn f_log1p_(&mut self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_log1p_(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Out-variant binding C `atg_log1p_out`, writing into `out`.
pub fn f_log1p_out(&self, out: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_log1p_out(c_tensors.as_mut_ptr(), out.c_tensor, self.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for C `atg_log2`.
pub fn f_log2(&self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_log2(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// In-place variant (note `&mut self`) binding C `atg_log2_`.
pub fn f_log2_(&mut self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_log2_(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Out-variant binding C `atg_log2_out`, writing into `out`.
pub fn f_log2_out(&self, out: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_log2_out(c_tensors.as_mut_ptr(), out.c_tensor, self.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// In-place variant (note `&mut self`) binding C `atg_log_`.
pub fn f_log_(&mut self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_log_(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for C `atg_log_normal` with `mean`/`std` parameters.
pub fn f_log_normal(&self, mean: f64, std: f64) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_log_normal(c_tensors.as_mut_ptr(), self.c_tensor, mean, std));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// In-place variant (note `&mut self`) binding C `atg_log_normal_`.
pub fn f_log_normal_(&mut self, mean: f64, std: f64) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_log_normal_(c_tensors.as_mut_ptr(), self.c_tensor, mean, std));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Out-variant binding C `atg_log_normal_out`, writing into `out`.
pub fn f_log_normal_out(&self, out: &Tensor, mean: f64, std: f64) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_log_normal_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
mean,
std
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Out-variant binding C `atg_log_out`, writing into `out`.
pub fn f_log_out(&self, out: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_log_out(c_tensors.as_mut_ptr(), out.c_tensor, self.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for C `atg_log_sigmoid`.
pub fn f_log_sigmoid(&self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_log_sigmoid(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for C `atg_log_sigmoid_backward`. Note the argument order to the
/// C call: `grad_output` precedes `self`.
pub fn f_log_sigmoid_backward(
&self,
grad_output: &Tensor,
buffer: &Tensor,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_log_sigmoid_backward(
c_tensors.as_mut_ptr(),
grad_output.c_tensor,
self.c_tensor,
buffer.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Grad-input variant of [`Self::f_log_sigmoid_backward`]; binds C
/// `atg_log_sigmoid_backward_grad_input`, writing into `grad_input`.
pub fn f_log_sigmoid_backward_grad_input(
&self,
grad_input: &Tensor,
grad_output: &Tensor,
buffer: &Tensor,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_log_sigmoid_backward_grad_input(
c_tensors.as_mut_ptr(),
grad_input.c_tensor,
grad_output.c_tensor,
self.c_tensor,
buffer.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Out-variant binding C `atg_log_sigmoid_out`, writing into `out`.
pub fn f_log_sigmoid_out(&self, out: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_log_sigmoid_out(c_tensors.as_mut_ptr(), out.c_tensor, self.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for C `atg_log_softmax`; an absent `dtype` is encoded as `-1`.
pub fn f_log_softmax(
&self,
dim: i64,
dtype: impl Into<Option<Kind>>,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_log_softmax(
c_tensors.as_mut_ptr(),
self.c_tensor,
dim,
dtype.into().map_or(-1, |s| s.c_int())
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Out-variant of [`Self::f_log_softmax`]; binds C `atg_log_softmax_int_out` and
/// writes into `out`. Absent `dtype` is encoded as `-1`.
pub fn f_log_softmax_int_out(
&self,
out: &Tensor,
dim: i64,
dtype: impl Into<Option<Kind>>,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_log_softmax_int_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
dim,
dtype.into().map_or(-1, |s| s.c_int())
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for C `atg_logaddexp`.
pub fn f_logaddexp(&self, other: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_logaddexp(c_tensors.as_mut_ptr(), self.c_tensor, other.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for C `atg_logaddexp2`.
pub fn f_logaddexp2(&self, other: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_logaddexp2(c_tensors.as_mut_ptr(), self.c_tensor, other.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Out-variant binding C `atg_logaddexp2_out`, writing into `out`.
pub fn f_logaddexp2_out(&self, out: &Tensor, other: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_logaddexp2_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
other.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Out-variant binding C `atg_logaddexp_out`, writing into `out`.
pub fn f_logaddexp_out(&self, out: &Tensor, other: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_logaddexp_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
other.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for C `atg_logcumsumexp` over dimension `dim`.
pub fn f_logcumsumexp(&self, dim: i64) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_logcumsumexp(c_tensors.as_mut_ptr(), self.c_tensor, dim));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Out-variant binding C `atg_logcumsumexp_out`, writing into `out`.
pub fn f_logcumsumexp_out(&self, out: &Tensor, dim: i64) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_logcumsumexp_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
dim
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for C `atg_logdet`.
pub fn f_logdet(&self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_logdet(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for C `atg_logical_and`.
pub fn f_logical_and(&self, other: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_logical_and(c_tensors.as_mut_ptr(), self.c_tensor, other.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// In-place variant (note `&mut self`) binding C `atg_logical_and_`.
pub fn f_logical_and_(&mut self, other: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_logical_and_(c_tensors.as_mut_ptr(), self.c_tensor, other.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Out-variant binding C `atg_logical_and_out`, writing into `out`.
pub fn f_logical_and_out(&self, out: &Tensor, other: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_logical_and_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
other.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for C `atg_logical_not`.
pub fn f_logical_not(&self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_logical_not(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// In-place variant (note `&mut self`) binding C `atg_logical_not_`.
pub fn f_logical_not_(&mut self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_logical_not_(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Out-variant binding C `atg_logical_not_out`, writing into `out`.
pub fn f_logical_not_out(&self, out: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_logical_not_out(c_tensors.as_mut_ptr(), out.c_tensor, self.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for C `atg_logical_or`.
pub fn f_logical_or(&self, other: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_logical_or(c_tensors.as_mut_ptr(), self.c_tensor, other.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// In-place variant (note `&mut self`) binding C `atg_logical_or_`.
pub fn f_logical_or_(&mut self, other: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_logical_or_(c_tensors.as_mut_ptr(), self.c_tensor, other.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Out-variant binding C `atg_logical_or_out`, writing into `out`.
pub fn f_logical_or_out(&self, out: &Tensor, other: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_logical_or_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
other.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for C `atg_logical_xor`.
pub fn f_logical_xor(&self, other: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_logical_xor(c_tensors.as_mut_ptr(), self.c_tensor, other.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// In-place variant (note `&mut self`) binding C `atg_logical_xor_`.
pub fn f_logical_xor_(&mut self, other: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_logical_xor_(c_tensors.as_mut_ptr(), self.c_tensor, other.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Out-variant binding C `atg_logical_xor_out`, writing into `out`.
pub fn f_logical_xor_out(&self, out: &Tensor, other: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_logical_xor_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
other.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for C `atg_logit`. The optional `eps` uses the (NaN, is-absent)
/// sentinel encoding: `None` is sent as NaN with an `is_none` flag of 1.
pub fn f_logit(&self, eps: impl Into<Option<f64>>) -> Result<Tensor, TchError> {
let eps = eps.into();
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_logit(
c_tensors.as_mut_ptr(),
self.c_tensor,
eps.unwrap_or(std::f64::NAN),
eps.is_none() as i8
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// In-place variant (note `&mut self`) binding C `atg_logit_`; optional `eps` uses the
/// (NaN, is-absent) sentinel encoding.
pub fn f_logit_(&mut self, eps: impl Into<Option<f64>>) -> Result<Tensor, TchError> {
let eps = eps.into();
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_logit_(
c_tensors.as_mut_ptr(),
self.c_tensor,
eps.unwrap_or(std::f64::NAN),
eps.is_none() as i8
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for C `atg_logit_backward`; `grad_output` precedes `self` in the
/// C call, and optional `eps` uses the (NaN, is-absent) sentinel encoding.
pub fn f_logit_backward(
&self,
grad_output: &Tensor,
eps: impl Into<Option<f64>>,
) -> Result<Tensor, TchError> {
let eps = eps.into();
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_logit_backward(
c_tensors.as_mut_ptr(),
grad_output.c_tensor,
self.c_tensor,
eps.unwrap_or(std::f64::NAN),
eps.is_none() as i8
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Grad-input variant of [`Self::f_logit_backward`]; binds C
/// `atg_logit_backward_grad_input`, writing into `grad_input`.
pub fn f_logit_backward_grad_input(
&self,
grad_input: &Tensor,
grad_output: &Tensor,
eps: impl Into<Option<f64>>,
) -> Result<Tensor, TchError> {
let eps = eps.into();
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_logit_backward_grad_input(
c_tensors.as_mut_ptr(),
grad_input.c_tensor,
grad_output.c_tensor,
self.c_tensor,
eps.unwrap_or(std::f64::NAN),
eps.is_none() as i8
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Out-variant of [`Self::f_logit`]; binds C `atg_logit_out` and writes into `out`.
pub fn f_logit_out(
&self,
out: &Tensor,
eps: impl Into<Option<f64>>,
) -> Result<Tensor, TchError> {
let eps = eps.into();
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_logit_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
eps.unwrap_or(std::f64::NAN),
eps.is_none() as i8
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for C `atg_logspace` with explicit `base`; the `(Kind, Device)` pair
/// is marshalled as two C integer codes via `c_int()`.
pub fn f_logspace<S: Into<Scalar>>(
start: S,
end: S,
steps: i64,
base: f64,
options: (Kind, Device),
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_logspace(
c_tensors.as_mut_ptr(),
start.into().c_scalar,
end.into().c_scalar,
steps,
base,
options.0.c_int(),
options.1.c_int()
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Out-variant of [`Self::f_logspace`]; binds C `atg_logspace_out` and writes into `out`.
pub fn f_logspace_out<S: Into<Scalar>>(
out: &Tensor,
start: S,
end: S,
steps: i64,
base: f64,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_logspace_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
start.into().c_scalar,
end.into().c_scalar,
steps,
base
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for C `atg_logsumexp`; `dim` is marshalled as pointer + length
/// via `IntList` (required here, unlike the `IntListOption` used by the norm bindings).
pub fn f_logsumexp(&self, dim: impl IntList, keepdim: bool) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_logsumexp(
c_tensors.as_mut_ptr(),
self.c_tensor,
dim.as_ptr(),
dim.len_i32(),
if keepdim { 1 } else { 0 }
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Out-variant of [`Self::f_logsumexp`]; binds C `atg_logsumexp_out` and writes into `out`.
pub fn f_logsumexp_out(
&self,
out: &Tensor,
dim: impl IntList,
keepdim: bool,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_logsumexp_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
dim.as_ptr(),
dim.len_i32(),
if keepdim { 1 } else { 0 }
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for C `atg_lstm`; returns three output tensors as a tuple.
/// `hx` and `params` slices are marshalled as pointer arrays plus `i32` lengths;
/// the boolean flags are passed as 0/1 integers.
pub fn f_lstm<T: Borrow<Tensor>>(
&self,
hx: &[T],
params: &[T],
has_biases: bool,
num_layers: i64,
dropout: f64,
train: bool,
bidirectional: bool,
batch_first: bool,
) -> Result<(Tensor, Tensor, Tensor), TchError> {
let mut c_tensors = [std::ptr::null_mut(); 3];
unsafe_torch_err!(atg_lstm(
c_tensors.as_mut_ptr(),
self.c_tensor,
ptr_list(hx).as_ptr(),
hx.len() as i32,
ptr_list(params).as_ptr(),
params.len() as i32,
if has_biases { 1 } else { 0 },
num_layers,
dropout,
if train { 1 } else { 0 },
if bidirectional { 1 } else { 0 },
if batch_first { 1 } else { 0 }
));
Ok((
Tensor { c_tensor: c_tensors[0] },
Tensor { c_tensor: c_tensors[1] },
Tensor { c_tensor: c_tensors[2] },
))
}
/// Fallible binding for C `atg_lstm_cell`; returns two output tensors as a tuple.
/// `hx` is marshalled as a pointer array plus length; `None` biases become null pointers.
pub fn f_lstm_cell<T: Borrow<Tensor>>(
&self,
hx: &[T],
w_ih: &Tensor,
w_hh: &Tensor,
b_ih: Option<T>,
b_hh: Option<T>,
) -> Result<(Tensor, Tensor), TchError> {
let mut c_tensors = [std::ptr::null_mut(); 2];
unsafe_torch_err!(atg_lstm_cell(
c_tensors.as_mut_ptr(),
self.c_tensor,
ptr_list(hx).as_ptr(),
hx.len() as i32,
w_ih.c_tensor,
w_hh.c_tensor,
b_ih.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
b_hh.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor)
));
Ok((Tensor { c_tensor: c_tensors[0] }, Tensor { c_tensor: c_tensors[1] }))
}
/// Fallible binding for C `atg_lstm_data` (packed-input form taking `data` and
/// `batch_sizes`); returns three output tensors as a tuple.
pub fn f_lstm_data<T: Borrow<Tensor>>(
data: &Tensor,
batch_sizes: &Tensor,
hx: &[T],
params: &[T],
has_biases: bool,
num_layers: i64,
dropout: f64,
train: bool,
bidirectional: bool,
) -> Result<(Tensor, Tensor, Tensor), TchError> {
let mut c_tensors = [std::ptr::null_mut(); 3];
unsafe_torch_err!(atg_lstm_data(
c_tensors.as_mut_ptr(),
data.c_tensor,
batch_sizes.c_tensor,
ptr_list(hx).as_ptr(),
hx.len() as i32,
ptr_list(params).as_ptr(),
params.len() as i32,
if has_biases { 1 } else { 0 },
num_layers,
dropout,
if train { 1 } else { 0 },
if bidirectional { 1 } else { 0 }
));
Ok((
Tensor { c_tensor: c_tensors[0] },
Tensor { c_tensor: c_tensors[1] },
Tensor { c_tensor: c_tensors[2] },
))
}
/// Fallible binding for C `atg_lstm_mps_backward`. Unlike the other wrappers here it
/// returns no tensors: results are written through the `out0`/`out1`/`out2` arguments
/// on the C side. `None` gradient tensors are passed as null pointers and the tensor
/// slices as pointer arrays plus `i32` lengths.
pub fn f_lstm_mps_backward<T: Borrow<Tensor>>(
&self,
out0: &Tensor,
out1: &[T],
out2: &[T],
grad_y: Option<T>,
grad_hy: Option<T>,
grad_cy: Option<T>,
z_state: &Tensor,
cell_state_fwd: &Tensor,
layersoutputs: &Tensor,
hx: &[T],
params: &[T],
has_biases: bool,
num_layers: i64,
dropout: f64,
train: bool,
bidirectional: bool,
batch_first: bool,
) -> Result<(), TchError> {
unsafe_torch_err!(atg_lstm_mps_backward(
out0.c_tensor,
ptr_list(out1).as_ptr(),
out1.len() as i32,
ptr_list(out2).as_ptr(),
out2.len() as i32,
grad_y.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
grad_hy.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
grad_cy.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
z_state.c_tensor,
cell_state_fwd.c_tensor,
self.c_tensor,
layersoutputs.c_tensor,
ptr_list(hx).as_ptr(),
hx.len() as i32,
ptr_list(params).as_ptr(),
params.len() as i32,
if has_biases { 1 } else { 0 },
num_layers,
dropout,
if train { 1 } else { 0 },
if bidirectional { 1 } else { 0 },
if batch_first { 1 } else { 0 }
));
Ok(())
}
/// Fallible binding for C `atg_lt` against a scalar `other`.
pub fn f_lt<S: Into<Scalar>>(&self, other: S) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_lt(c_tensors.as_mut_ptr(), self.c_tensor, other.into().c_scalar));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for the in-place ATen `lt_` op (scalar overload); also hands
/// back a tensor wrapping the result slot returned by the C call.
pub fn f_lt_<S: Into<Scalar>>(&mut self, other: S) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_lt_(c_tensors.as_mut_ptr(), self.c_tensor, other.into().c_scalar));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for the ATen `lt.Scalar_out` overload, writing into `out`.
pub fn f_lt_scalar_out<S: Into<Scalar>>(
&self,
out: &Tensor,
other: S,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_lt_scalar_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
other.into().c_scalar
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for the ATen `lt.Tensor` overload; returns a new tensor.
pub fn f_lt_tensor(&self, other: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_lt_tensor(c_tensors.as_mut_ptr(), self.c_tensor, other.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for the in-place ATen `lt_.Tensor` overload.
pub fn f_lt_tensor_(&mut self, other: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_lt_tensor_(c_tensors.as_mut_ptr(), self.c_tensor, other.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for the ATen `lt.Tensor_out` overload, writing into `out`.
pub fn f_lt_tensor_out(&self, out: &Tensor, other: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_lt_tensor_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
other.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for the ATen `lu_solve` op; `self` is the right-hand side.
pub fn f_lu_solve(&self, lu_data: &Tensor, lu_pivots: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_lu_solve(
c_tensors.as_mut_ptr(),
self.c_tensor,
lu_data.c_tensor,
lu_pivots.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for the ATen `lu_solve.out` overload, writing into `out`.
pub fn f_lu_solve_out(
&self,
out: &Tensor,
lu_data: &Tensor,
lu_pivots: &Tensor,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_lu_solve_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
lu_data.c_tensor,
lu_pivots.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for the ATen `lu_unpack` op; returns the three unpacked tensors.
pub fn f_lu_unpack(
lu_data: &Tensor,
lu_pivots: &Tensor,
unpack_data: bool,
unpack_pivots: bool,
) -> Result<(Tensor, Tensor, Tensor), TchError> {
let mut c_tensors = [std::ptr::null_mut(); 3];
unsafe_torch_err!(atg_lu_unpack(
c_tensors.as_mut_ptr(),
lu_data.c_tensor,
lu_pivots.c_tensor,
if unpack_data { 1 } else { 0 },
if unpack_pivots { 1 } else { 0 }
));
Ok((
Tensor { c_tensor: c_tensors[0] },
Tensor { c_tensor: c_tensors[1] },
Tensor { c_tensor: c_tensors[2] },
))
}
/// Fallible binding for the ATen `lu_unpack.out` overload, writing into `p`/`l`/`u`.
pub fn f_lu_unpack_out(
p: &Tensor,
l: &Tensor,
u: &Tensor,
lu_data: &Tensor,
lu_pivots: &Tensor,
unpack_data: bool,
unpack_pivots: bool,
) -> Result<(Tensor, Tensor, Tensor), TchError> {
let mut c_tensors = [std::ptr::null_mut(); 3];
unsafe_torch_err!(atg_lu_unpack_out(
c_tensors.as_mut_ptr(),
p.c_tensor,
l.c_tensor,
u.c_tensor,
lu_data.c_tensor,
lu_pivots.c_tensor,
if unpack_data { 1 } else { 0 },
if unpack_pivots { 1 } else { 0 }
));
Ok((
Tensor { c_tensor: c_tensors[0] },
Tensor { c_tensor: c_tensors[1] },
Tensor { c_tensor: c_tensors[2] },
))
}
/// Fallible binding for the ATen `margin_ranking_loss` op; `reduction` is passed
/// to C as its integer encoding.
pub fn f_margin_ranking_loss(
input1: &Tensor,
input2: &Tensor,
target: &Tensor,
margin: f64,
reduction: crate::Reduction,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_margin_ranking_loss(
c_tensors.as_mut_ptr(),
input1.c_tensor,
input2.c_tensor,
target.c_tensor,
margin,
reduction.to_int()
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for the ATen `masked_fill` op (scalar value); returns a new tensor.
pub fn f_masked_fill<S: Into<Scalar>>(
&self,
mask: &Tensor,
value: S,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_masked_fill(
c_tensors.as_mut_ptr(),
self.c_tensor,
mask.c_tensor,
value.into().c_scalar
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for the in-place ATen `masked_fill_` op (scalar value).
pub fn f_masked_fill_<S: Into<Scalar>>(
&mut self,
mask: &Tensor,
value: S,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_masked_fill_(
c_tensors.as_mut_ptr(),
self.c_tensor,
mask.c_tensor,
value.into().c_scalar
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for the ATen `masked_fill.Scalar_out` overload, writing into `out`.
pub fn f_masked_fill_scalar_out<S: Into<Scalar>>(
&self,
out: &Tensor,
mask: &Tensor,
value: S,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_masked_fill_scalar_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
mask.c_tensor,
value.into().c_scalar
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for the ATen `masked_fill.Tensor` overload (tensor fill value).
pub fn f_masked_fill_tensor(&self, mask: &Tensor, value: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_masked_fill_tensor(
c_tensors.as_mut_ptr(),
self.c_tensor,
mask.c_tensor,
value.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for the in-place ATen `masked_fill_.Tensor` overload.
pub fn f_masked_fill_tensor_(
&mut self,
mask: &Tensor,
value: &Tensor,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_masked_fill_tensor_(
c_tensors.as_mut_ptr(),
self.c_tensor,
mask.c_tensor,
value.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for the ATen `masked_fill.Tensor_out` overload, writing into `out`.
pub fn f_masked_fill_tensor_out(
&self,
out: &Tensor,
mask: &Tensor,
value: &Tensor,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_masked_fill_tensor_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
mask.c_tensor,
value.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for the ATen `masked_scatter` op; returns a new tensor.
pub fn f_masked_scatter(&self, mask: &Tensor, source: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_masked_scatter(
c_tensors.as_mut_ptr(),
self.c_tensor,
mask.c_tensor,
source.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for the in-place ATen `masked_scatter_` op.
pub fn f_masked_scatter_(
&mut self,
mask: &Tensor,
source: &Tensor,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_masked_scatter_(
c_tensors.as_mut_ptr(),
self.c_tensor,
mask.c_tensor,
source.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for the ATen `masked_scatter.out` overload, writing into `out`.
pub fn f_masked_scatter_out(
&self,
out: &Tensor,
mask: &Tensor,
source: &Tensor,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_masked_scatter_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
mask.c_tensor,
source.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for the ATen `masked_select` op; returns a new tensor.
pub fn f_masked_select(&self, mask: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_masked_select(c_tensors.as_mut_ptr(), self.c_tensor, mask.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for the ATen `masked_select_backward` op. Note the C call
/// takes `grad` first and `self` (the forward input) second.
pub fn f_masked_select_backward(
&self,
grad: &Tensor,
mask: &Tensor,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_masked_select_backward(
c_tensors.as_mut_ptr(),
grad.c_tensor,
self.c_tensor,
mask.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for the ATen `masked_select.out` overload, writing into `out`.
pub fn f_masked_select_out(&self, out: &Tensor, mask: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_masked_select_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
mask.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for the ATen `matmul` op; returns a new tensor.
pub fn f_matmul(&self, other: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_matmul(c_tensors.as_mut_ptr(), self.c_tensor, other.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for the ATen `matmul.out` overload, writing into `out`.
pub fn f_matmul_out(&self, out: &Tensor, other: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_matmul_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
other.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for the ATen `matrix_exp` op; returns a new tensor.
pub fn f_matrix_exp(&self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_matrix_exp(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for the ATen `matrix_exp_backward` op; `self` is passed first,
/// then the incoming gradient.
pub fn f_matrix_exp_backward(&self, grad: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_matrix_exp_backward(
c_tensors.as_mut_ptr(),
self.c_tensor,
grad.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for the ATen `matrix_H` accessor; returns a new tensor handle.
pub fn f_matrix_h(&self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_matrix_h(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for the ATen `matrix_power` op with exponent `n`.
pub fn f_matrix_power(&self, n: i64) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_matrix_power(c_tensors.as_mut_ptr(), self.c_tensor, n));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for the ATen `matrix_power.out` overload, writing into `out`.
pub fn f_matrix_power_out(&self, out: &Tensor, n: i64) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_matrix_power_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
n
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for the ATen `max` reduction (no dim); returns a new tensor.
pub fn f_max(&self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_max(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for the ATen `max.dim` overload; returns the (values, indices) pair.
pub fn f_max_dim(&self, dim: i64, keepdim: bool) -> Result<(Tensor, Tensor), TchError> {
let mut c_tensors = [std::ptr::null_mut(); 2];
unsafe_torch_err!(atg_max_dim(
c_tensors.as_mut_ptr(),
self.c_tensor,
dim,
if keepdim { 1 } else { 0 }
));
Ok((Tensor { c_tensor: c_tensors[0] }, Tensor { c_tensor: c_tensors[1] }))
}
/// Fallible binding for the ATen `max.dim_max` overload, writing into the
/// provided `max`/`max_values` output tensors.
pub fn f_max_dim_max(
&self,
max: &Tensor,
max_values: &Tensor,
dim: i64,
keepdim: bool,
) -> Result<(Tensor, Tensor), TchError> {
let mut c_tensors = [std::ptr::null_mut(); 2];
unsafe_torch_err!(atg_max_dim_max(
c_tensors.as_mut_ptr(),
max.c_tensor,
max_values.c_tensor,
self.c_tensor,
dim,
if keepdim { 1 } else { 0 }
));
Ok((Tensor { c_tensor: c_tensors[0] }, Tensor { c_tensor: c_tensors[1] }))
}
/// Fallible binding for the ATen `max.other` overload (element-wise, tensor arg).
pub fn f_max_other(&self, other: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_max_other(c_tensors.as_mut_ptr(), self.c_tensor, other.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for the ATen `max.out` overload, writing into `out`.
pub fn f_max_out(&self, out: &Tensor, other: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_max_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
other.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for the ATen `max_pool1d` op. Each `IntList` argument is
/// marshalled as a (pointer, length) pair for the C ABI.
pub fn f_max_pool1d(
&self,
kernel_size: impl IntList,
stride: impl IntList,
padding: impl IntList,
dilation: impl IntList,
ceil_mode: bool,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_max_pool1d(
c_tensors.as_mut_ptr(),
self.c_tensor,
kernel_size.as_ptr(),
kernel_size.len_i32(),
stride.as_ptr(),
stride.len_i32(),
padding.as_ptr(),
padding.len_i32(),
dilation.as_ptr(),
dilation.len_i32(),
if ceil_mode { 1 } else { 0 }
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for the ATen `max_pool1d_with_indices` op; returns the
/// (output, indices) pair.
pub fn f_max_pool1d_with_indices(
&self,
kernel_size: impl IntList,
stride: impl IntList,
padding: impl IntList,
dilation: impl IntList,
ceil_mode: bool,
) -> Result<(Tensor, Tensor), TchError> {
let mut c_tensors = [std::ptr::null_mut(); 2];
unsafe_torch_err!(atg_max_pool1d_with_indices(
c_tensors.as_mut_ptr(),
self.c_tensor,
kernel_size.as_ptr(),
kernel_size.len_i32(),
stride.as_ptr(),
stride.len_i32(),
padding.as_ptr(),
padding.len_i32(),
dilation.as_ptr(),
dilation.len_i32(),
if ceil_mode { 1 } else { 0 }
));
Ok((Tensor { c_tensor: c_tensors[0] }, Tensor { c_tensor: c_tensors[1] }))
}
/// Fallible binding for the ATen `max_pool2d` op.
pub fn f_max_pool2d(
&self,
kernel_size: impl IntList,
stride: impl IntList,
padding: impl IntList,
dilation: impl IntList,
ceil_mode: bool,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_max_pool2d(
c_tensors.as_mut_ptr(),
self.c_tensor,
kernel_size.as_ptr(),
kernel_size.len_i32(),
stride.as_ptr(),
stride.len_i32(),
padding.as_ptr(),
padding.len_i32(),
dilation.as_ptr(),
dilation.len_i32(),
if ceil_mode { 1 } else { 0 }
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for the ATen `max_pool2d_backward` op; `grad_output` is
/// passed before `self` (the forward input) in the C call.
pub fn f_max_pool2d_backward(
&self,
grad_output: &Tensor,
kernel_size: impl IntList,
stride: impl IntList,
padding: impl IntList,
dilation: impl IntList,
ceil_mode: bool,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_max_pool2d_backward(
c_tensors.as_mut_ptr(),
grad_output.c_tensor,
self.c_tensor,
kernel_size.as_ptr(),
kernel_size.len_i32(),
stride.as_ptr(),
stride.len_i32(),
padding.as_ptr(),
padding.len_i32(),
dilation.as_ptr(),
dilation.len_i32(),
if ceil_mode { 1 } else { 0 }
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for the ATen `max_pool2d_backward.out` overload, writing into `out`.
pub fn f_max_pool2d_backward_out(
&self,
out: &Tensor,
grad_output: &Tensor,
kernel_size: impl IntList,
stride: impl IntList,
padding: impl IntList,
dilation: impl IntList,
ceil_mode: bool,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_max_pool2d_backward_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
grad_output.c_tensor,
self.c_tensor,
kernel_size.as_ptr(),
kernel_size.len_i32(),
stride.as_ptr(),
stride.len_i32(),
padding.as_ptr(),
padding.len_i32(),
dilation.as_ptr(),
dilation.len_i32(),
if ceil_mode { 1 } else { 0 }
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for the ATen `max_pool2d_with_indices` op; returns the
/// (output, indices) pair.
pub fn f_max_pool2d_with_indices(
&self,
kernel_size: impl IntList,
stride: impl IntList,
padding: impl IntList,
dilation: impl IntList,
ceil_mode: bool,
) -> Result<(Tensor, Tensor), TchError> {
let mut c_tensors = [std::ptr::null_mut(); 2];
unsafe_torch_err!(atg_max_pool2d_with_indices(
c_tensors.as_mut_ptr(),
self.c_tensor,
kernel_size.as_ptr(),
kernel_size.len_i32(),
stride.as_ptr(),
stride.len_i32(),
padding.as_ptr(),
padding.len_i32(),
dilation.as_ptr(),
dilation.len_i32(),
if ceil_mode { 1 } else { 0 }
));
Ok((Tensor { c_tensor: c_tensors[0] }, Tensor { c_tensor: c_tensors[1] }))
}
/// Fallible binding for the ATen `max_pool2d_with_indices_backward` op; `indices`
/// are the saved forward-pass indices, passed last per the C ABI.
pub fn f_max_pool2d_with_indices_backward(
&self,
grad_output: &Tensor,
kernel_size: impl IntList,
stride: impl IntList,
padding: impl IntList,
dilation: impl IntList,
ceil_mode: bool,
indices: &Tensor,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_max_pool2d_with_indices_backward(
c_tensors.as_mut_ptr(),
grad_output.c_tensor,
self.c_tensor,
kernel_size.as_ptr(),
kernel_size.len_i32(),
stride.as_ptr(),
stride.len_i32(),
padding.as_ptr(),
padding.len_i32(),
dilation.as_ptr(),
dilation.len_i32(),
if ceil_mode { 1 } else { 0 },
indices.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for the ATen `max_pool2d_with_indices_backward.grad_input`
/// overload, writing into `grad_input`.
pub fn f_max_pool2d_with_indices_backward_grad_input(
&self,
grad_input: &Tensor,
grad_output: &Tensor,
kernel_size: impl IntList,
stride: impl IntList,
padding: impl IntList,
dilation: impl IntList,
ceil_mode: bool,
indices: &Tensor,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_max_pool2d_with_indices_backward_grad_input(
c_tensors.as_mut_ptr(),
grad_input.c_tensor,
grad_output.c_tensor,
self.c_tensor,
kernel_size.as_ptr(),
kernel_size.len_i32(),
stride.as_ptr(),
stride.len_i32(),
padding.as_ptr(),
padding.len_i32(),
dilation.as_ptr(),
dilation.len_i32(),
if ceil_mode { 1 } else { 0 },
indices.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for the ATen `max_pool2d_with_indices.out` overload, writing
/// into `out` and `indices`.
pub fn f_max_pool2d_with_indices_out(
&self,
out: &Tensor,
indices: &Tensor,
kernel_size: impl IntList,
stride: impl IntList,
padding: impl IntList,
dilation: impl IntList,
ceil_mode: bool,
) -> Result<(Tensor, Tensor), TchError> {
let mut c_tensors = [std::ptr::null_mut(); 2];
unsafe_torch_err!(atg_max_pool2d_with_indices_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
indices.c_tensor,
self.c_tensor,
kernel_size.as_ptr(),
kernel_size.len_i32(),
stride.as_ptr(),
stride.len_i32(),
padding.as_ptr(),
padding.len_i32(),
dilation.as_ptr(),
dilation.len_i32(),
if ceil_mode { 1 } else { 0 }
));
Ok((Tensor { c_tensor: c_tensors[0] }, Tensor { c_tensor: c_tensors[1] }))
}
/// Fallible binding for the ATen `max_pool3d` op.
pub fn f_max_pool3d(
&self,
kernel_size: impl IntList,
stride: impl IntList,
padding: impl IntList,
dilation: impl IntList,
ceil_mode: bool,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_max_pool3d(
c_tensors.as_mut_ptr(),
self.c_tensor,
kernel_size.as_ptr(),
kernel_size.len_i32(),
stride.as_ptr(),
stride.len_i32(),
padding.as_ptr(),
padding.len_i32(),
dilation.as_ptr(),
dilation.len_i32(),
if ceil_mode { 1 } else { 0 }
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for the ATen `max_pool3d_with_indices` op; returns the
/// (output, indices) pair.
pub fn f_max_pool3d_with_indices(
&self,
kernel_size: impl IntList,
stride: impl IntList,
padding: impl IntList,
dilation: impl IntList,
ceil_mode: bool,
) -> Result<(Tensor, Tensor), TchError> {
let mut c_tensors = [std::ptr::null_mut(); 2];
unsafe_torch_err!(atg_max_pool3d_with_indices(
c_tensors.as_mut_ptr(),
self.c_tensor,
kernel_size.as_ptr(),
kernel_size.len_i32(),
stride.as_ptr(),
stride.len_i32(),
padding.as_ptr(),
padding.len_i32(),
dilation.as_ptr(),
dilation.len_i32(),
if ceil_mode { 1 } else { 0 }
));
Ok((Tensor { c_tensor: c_tensors[0] }, Tensor { c_tensor: c_tensors[1] }))
}
/// Fallible binding for the ATen `max_pool3d_with_indices_backward` op.
pub fn f_max_pool3d_with_indices_backward(
&self,
grad_output: &Tensor,
kernel_size: impl IntList,
stride: impl IntList,
padding: impl IntList,
dilation: impl IntList,
ceil_mode: bool,
indices: &Tensor,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_max_pool3d_with_indices_backward(
c_tensors.as_mut_ptr(),
grad_output.c_tensor,
self.c_tensor,
kernel_size.as_ptr(),
kernel_size.len_i32(),
stride.as_ptr(),
stride.len_i32(),
padding.as_ptr(),
padding.len_i32(),
dilation.as_ptr(),
dilation.len_i32(),
if ceil_mode { 1 } else { 0 },
indices.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for the ATen `max_pool3d_with_indices_backward.grad_input`
/// overload, writing into `grad_input`.
pub fn f_max_pool3d_with_indices_backward_grad_input(
&self,
grad_input: &Tensor,
grad_output: &Tensor,
kernel_size: impl IntList,
stride: impl IntList,
padding: impl IntList,
dilation: impl IntList,
ceil_mode: bool,
indices: &Tensor,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_max_pool3d_with_indices_backward_grad_input(
c_tensors.as_mut_ptr(),
grad_input.c_tensor,
grad_output.c_tensor,
self.c_tensor,
kernel_size.as_ptr(),
kernel_size.len_i32(),
stride.as_ptr(),
stride.len_i32(),
padding.as_ptr(),
padding.len_i32(),
dilation.as_ptr(),
dilation.len_i32(),
if ceil_mode { 1 } else { 0 },
indices.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for the ATen `max_pool3d_with_indices.out` overload, writing
/// into `out` and `indices`.
pub fn f_max_pool3d_with_indices_out(
&self,
out: &Tensor,
indices: &Tensor,
kernel_size: impl IntList,
stride: impl IntList,
padding: impl IntList,
dilation: impl IntList,
ceil_mode: bool,
) -> Result<(Tensor, Tensor), TchError> {
let mut c_tensors = [std::ptr::null_mut(); 2];
unsafe_torch_err!(atg_max_pool3d_with_indices_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
indices.c_tensor,
self.c_tensor,
kernel_size.as_ptr(),
kernel_size.len_i32(),
stride.as_ptr(),
stride.len_i32(),
padding.as_ptr(),
padding.len_i32(),
dilation.as_ptr(),
dilation.len_i32(),
if ceil_mode { 1 } else { 0 }
));
Ok((Tensor { c_tensor: c_tensors[0] }, Tensor { c_tensor: c_tensors[1] }))
}
/// Fallible binding for the ATen `max.unary_out` overload, writing into `out`.
pub fn f_max_unary_out(&self, out: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_max_unary_out(c_tensors.as_mut_ptr(), out.c_tensor, self.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for the ATen `max_unpool2d` op, using the forward-pass `indices`.
pub fn f_max_unpool2d(
&self,
indices: &Tensor,
output_size: impl IntList,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_max_unpool2d(
c_tensors.as_mut_ptr(),
self.c_tensor,
indices.c_tensor,
output_size.as_ptr(),
output_size.len_i32()
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for the ATen `max_unpool2d.out` overload, writing into `out`.
pub fn f_max_unpool2d_out(
&self,
out: &Tensor,
indices: &Tensor,
output_size: impl IntList,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_max_unpool2d_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
indices.c_tensor,
output_size.as_ptr(),
output_size.len_i32()
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for the ATen `max_unpool3d` op.
pub fn f_max_unpool3d(
&self,
indices: &Tensor,
output_size: impl IntList,
stride: impl IntList,
padding: impl IntList,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_max_unpool3d(
c_tensors.as_mut_ptr(),
self.c_tensor,
indices.c_tensor,
output_size.as_ptr(),
output_size.len_i32(),
stride.as_ptr(),
stride.len_i32(),
padding.as_ptr(),
padding.len_i32()
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for the ATen `max_unpool3d.out` overload, writing into `out`.
pub fn f_max_unpool3d_out(
&self,
out: &Tensor,
indices: &Tensor,
output_size: impl IntList,
stride: impl IntList,
padding: impl IntList,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_max_unpool3d_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
indices.c_tensor,
output_size.as_ptr(),
output_size.len_i32(),
stride.as_ptr(),
stride.len_i32(),
padding.as_ptr(),
padding.len_i32()
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for the ATen `maximum` op (element-wise max of two tensors).
pub fn f_maximum(&self, other: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_maximum(c_tensors.as_mut_ptr(), self.c_tensor, other.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for the ATen `maximum.out` overload, writing into `out`.
pub fn f_maximum_out(&self, out: &Tensor, other: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_maximum_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
other.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for the ATen `mean` reduction; `dtype` of `None` is encoded
/// as -1 for the C side (meaning "keep the default").
pub fn f_mean(&self, dtype: impl Into<Option<Kind>>) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_mean(
c_tensors.as_mut_ptr(),
self.c_tensor,
dtype.into().map_or(-1, |s| s.c_int())
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for the ATen `mean.dim` overload; `dim` is an optional int
/// list, and a `None` dtype is encoded as -1.
pub fn f_mean_dim(
&self,
dim: impl IntListOption,
keepdim: bool,
dtype: impl Into<Option<Kind>>,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_mean_dim(
c_tensors.as_mut_ptr(),
self.c_tensor,
dim.as_ptr(),
dim.len_i32(),
if keepdim { 1 } else { 0 },
dtype.into().map_or(-1, |s| s.c_int())
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for the ATen `mean.out` overload, writing into `out`.
pub fn f_mean_out(
&self,
out: &Tensor,
dim: impl IntListOption,
keepdim: bool,
dtype: impl Into<Option<Kind>>,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_mean_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
dim.as_ptr(),
dim.len_i32(),
if keepdim { 1 } else { 0 },
dtype.into().map_or(-1, |s| s.c_int())
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for the ATen `median` reduction (no dim).
pub fn f_median(&self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_median(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for the ATen `median.dim` overload; returns (values, indices).
pub fn f_median_dim(&self, dim: i64, keepdim: bool) -> Result<(Tensor, Tensor), TchError> {
let mut c_tensors = [std::ptr::null_mut(); 2];
unsafe_torch_err!(atg_median_dim(
c_tensors.as_mut_ptr(),
self.c_tensor,
dim,
if keepdim { 1 } else { 0 }
));
Ok((Tensor { c_tensor: c_tensors[0] }, Tensor { c_tensor: c_tensors[1] }))
}
/// Fallible binding for the ATen `median.dim_values` overload, writing into the
/// provided `values`/`indices` output tensors.
pub fn f_median_dim_values(
&self,
values: &Tensor,
indices: &Tensor,
dim: i64,
keepdim: bool,
) -> Result<(Tensor, Tensor), TchError> {
let mut c_tensors = [std::ptr::null_mut(); 2];
unsafe_torch_err!(atg_median_dim_values(
c_tensors.as_mut_ptr(),
values.c_tensor,
indices.c_tensor,
self.c_tensor,
dim,
if keepdim { 1 } else { 0 }
));
Ok((Tensor { c_tensor: c_tensors[0] }, Tensor { c_tensor: c_tensors[1] }))
}
/// Fallible binding for the ATen `median.out` overload, writing into `out`.
pub fn f_median_out(&self, out: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_median_out(c_tensors.as_mut_ptr(), out.c_tensor, self.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for the ATen `meshgrid` op. The C call returns a
/// null-terminated, malloc'd array of tensor handles: we take ownership of each
/// handle, then free the array itself with `libc::free`.
pub fn f_meshgrid<T: Borrow<Tensor>>(tensors: &[T]) -> Result<Vec<Tensor>, TchError> {
let c_tensors =
unsafe_torch_err!(atg_meshgrid(ptr_list(tensors).as_ptr(), tensors.len() as i32));
let mut r__ = vec![];
let mut i = 0;
// Walk the array until the null terminator.
loop {
let c__ = unsafe { *c_tensors.add(i) };
if c__.is_null() {
break;
}
r__.push(Tensor { c_tensor: c__ });
i += 1;
}
// Free the container array (the tensors themselves are now owned by `r__`).
unsafe { libc::free(c_tensors as *mut libc::c_void) }
Ok(r__)
}
/// Fallible binding for the ATen `meshgrid.indexing` overload; `indexing` is
/// passed as a (pointer, byte-length) pair. Same null-terminated-array ownership
/// handling as `f_meshgrid`.
pub fn f_meshgrid_indexing<T: Borrow<Tensor>>(
tensors: &[T],
indexing: &str,
) -> Result<Vec<Tensor>, TchError> {
let c_tensors = unsafe_torch_err!(atg_meshgrid_indexing(
ptr_list(tensors).as_ptr(),
tensors.len() as i32,
indexing.as_ptr(),
indexing.len() as i32
));
let mut r__ = vec![];
let mut i = 0;
loop {
let c__ = unsafe { *c_tensors.add(i) };
if c__.is_null() {
break;
}
r__.push(Tensor { c_tensor: c__ });
i += 1;
}
unsafe { libc::free(c_tensors as *mut libc::c_void) }
Ok(r__)
}
/// Fallible binding for the ATen `mH` accessor; returns a new tensor handle.
pub fn f_mh(&self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_mh(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for the ATen `min` reduction (no dim).
pub fn f_min(&self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_min(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for the ATen `min.dim` overload; returns (values, indices).
pub fn f_min_dim(&self, dim: i64, keepdim: bool) -> Result<(Tensor, Tensor), TchError> {
let mut c_tensors = [std::ptr::null_mut(); 2];
unsafe_torch_err!(atg_min_dim(
c_tensors.as_mut_ptr(),
self.c_tensor,
dim,
if keepdim { 1 } else { 0 }
));
Ok((Tensor { c_tensor: c_tensors[0] }, Tensor { c_tensor: c_tensors[1] }))
}
/// Fallible binding for the ATen `min.dim_min` overload, writing into the
/// provided `min`/`min_indices` output tensors.
pub fn f_min_dim_min(
&self,
min: &Tensor,
min_indices: &Tensor,
dim: i64,
keepdim: bool,
) -> Result<(Tensor, Tensor), TchError> {
let mut c_tensors = [std::ptr::null_mut(); 2];
unsafe_torch_err!(atg_min_dim_min(
c_tensors.as_mut_ptr(),
min.c_tensor,
min_indices.c_tensor,
self.c_tensor,
dim,
if keepdim { 1 } else { 0 }
));
Ok((Tensor { c_tensor: c_tensors[0] }, Tensor { c_tensor: c_tensors[1] }))
}
/// Fallible binding for the ATen `min.other` overload (element-wise, tensor arg).
pub fn f_min_other(&self, other: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_min_other(c_tensors.as_mut_ptr(), self.c_tensor, other.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for the ATen `min.out` overload, writing into `out`.
pub fn f_min_out(&self, out: &Tensor, other: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_min_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
other.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for the ATen `min.unary_out` overload, writing into `out`.
pub fn f_min_unary_out(&self, out: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_min_unary_out(c_tensors.as_mut_ptr(), out.c_tensor, self.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for the ATen `minimum` op (element-wise min of two tensors).
pub fn f_minimum(&self, other: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_minimum(c_tensors.as_mut_ptr(), self.c_tensor, other.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for the ATen `minimum.out` overload, writing into `out`.
pub fn f_minimum_out(&self, out: &Tensor, other: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_minimum_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
other.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for the MIOpen (ROCm) `miopen_batch_norm` op. Optional tensors
/// become null pointers on the C side; returns the three output tensors.
pub fn f_miopen_batch_norm<T: Borrow<Tensor>>(
&self,
weight: &Tensor,
bias: Option<T>,
running_mean: Option<T>,
running_var: Option<T>,
training: bool,
exponential_average_factor: f64,
epsilon: f64,
) -> Result<(Tensor, Tensor, Tensor), TchError> {
let mut c_tensors = [std::ptr::null_mut(); 3];
unsafe_torch_err!(atg_miopen_batch_norm(
c_tensors.as_mut_ptr(),
self.c_tensor,
weight.c_tensor,
bias.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
running_mean.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
running_var.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
if training { 1 } else { 0 },
exponential_average_factor,
epsilon
));
Ok((
Tensor { c_tensor: c_tensors[0] },
Tensor { c_tensor: c_tensors[1] },
Tensor { c_tensor: c_tensors[2] },
))
}
/// Fallible binding for the MIOpen `miopen_batch_norm_backward` op; returns the
/// three gradient tensors.
pub fn f_miopen_batch_norm_backward<T: Borrow<Tensor>>(
&self,
grad_output: &Tensor,
weight: &Tensor,
running_mean: Option<T>,
running_var: Option<T>,
save_mean: Option<T>,
save_var: Option<T>,
epsilon: f64,
) -> Result<(Tensor, Tensor, Tensor), TchError> {
let mut c_tensors = [std::ptr::null_mut(); 3];
unsafe_torch_err!(atg_miopen_batch_norm_backward(
c_tensors.as_mut_ptr(),
self.c_tensor,
grad_output.c_tensor,
weight.c_tensor,
running_mean.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
running_var.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
save_mean.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
save_var.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
epsilon
));
Ok((
Tensor { c_tensor: c_tensors[0] },
Tensor { c_tensor: c_tensors[1] },
Tensor { c_tensor: c_tensors[2] },
))
}
/// Fallible binding for the MIOpen `miopen_batch_norm_backward.out` overload,
/// writing into `out0`/`out1`/`out2`.
pub fn f_miopen_batch_norm_backward_out<T: Borrow<Tensor>>(
&self,
out0: &Tensor,
out1: &Tensor,
out2: &Tensor,
grad_output: &Tensor,
weight: &Tensor,
running_mean: Option<T>,
running_var: Option<T>,
save_mean: Option<T>,
save_var: Option<T>,
epsilon: f64,
) -> Result<(Tensor, Tensor, Tensor), TchError> {
let mut c_tensors = [std::ptr::null_mut(); 3];
unsafe_torch_err!(atg_miopen_batch_norm_backward_out(
c_tensors.as_mut_ptr(),
out0.c_tensor,
out1.c_tensor,
out2.c_tensor,
self.c_tensor,
grad_output.c_tensor,
weight.c_tensor,
running_mean.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
running_var.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
save_mean.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
save_var.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
epsilon
));
Ok((
Tensor { c_tensor: c_tensors[0] },
Tensor { c_tensor: c_tensors[1] },
Tensor { c_tensor: c_tensors[2] },
))
}
/// Fallible binding for the MIOpen `miopen_batch_norm.out` overload, writing into
/// `out0`/`out1`/`out2`.
pub fn f_miopen_batch_norm_out<T: Borrow<Tensor>>(
&self,
out0: &Tensor,
out1: &Tensor,
out2: &Tensor,
weight: &Tensor,
bias: Option<T>,
running_mean: Option<T>,
running_var: Option<T>,
training: bool,
exponential_average_factor: f64,
epsilon: f64,
) -> Result<(Tensor, Tensor, Tensor), TchError> {
let mut c_tensors = [std::ptr::null_mut(); 3];
unsafe_torch_err!(atg_miopen_batch_norm_out(
c_tensors.as_mut_ptr(),
out0.c_tensor,
out1.c_tensor,
out2.c_tensor,
self.c_tensor,
weight.c_tensor,
bias.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
running_mean.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
running_var.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
if training { 1 } else { 0 },
exponential_average_factor,
epsilon
));
Ok((
Tensor { c_tensor: c_tensors[0] },
Tensor { c_tensor: c_tensors[1] },
Tensor { c_tensor: c_tensors[2] },
))
}
/// Fallible binding for the MIOpen `miopen_convolution` op. Note the generated
/// argument order here is padding, stride, dilation.
pub fn f_miopen_convolution<T: Borrow<Tensor>>(
&self,
weight: &Tensor,
bias: Option<T>,
padding: impl IntList,
stride: impl IntList,
dilation: impl IntList,
groups: i64,
benchmark: bool,
deterministic: bool,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_miopen_convolution(
c_tensors.as_mut_ptr(),
self.c_tensor,
weight.c_tensor,
bias.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
padding.as_ptr(),
padding.len_i32(),
stride.as_ptr(),
stride.len_i32(),
dilation.as_ptr(),
dilation.len_i32(),
groups,
if benchmark { 1 } else { 0 },
if deterministic { 1 } else { 0 }
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for the MIOpen `miopen_convolution_add_relu` fused op. Note
/// this variant's argument order is stride, padding, dilation (unlike
/// `f_miopen_convolution`, which takes padding first).
pub fn f_miopen_convolution_add_relu<T: Borrow<Tensor>, S: Into<Scalar>>(
&self,
weight: &Tensor,
z: &Tensor,
alpha: S,
bias: Option<T>,
stride: impl IntList,
padding: impl IntList,
dilation: impl IntList,
groups: i64,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_miopen_convolution_add_relu(
c_tensors.as_mut_ptr(),
self.c_tensor,
weight.c_tensor,
z.c_tensor,
alpha.into().c_scalar,
bias.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
stride.as_ptr(),
stride.len_i32(),
padding.as_ptr(),
padding.len_i32(),
dilation.as_ptr(),
dilation.len_i32(),
groups
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for the MIOpen `miopen_convolution.out` overload, writing into `out`.
pub fn f_miopen_convolution_out<T: Borrow<Tensor>>(
&self,
out: &Tensor,
weight: &Tensor,
bias: Option<T>,
padding: impl IntList,
stride: impl IntList,
dilation: impl IntList,
groups: i64,
benchmark: bool,
deterministic: bool,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_miopen_convolution_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
weight.c_tensor,
bias.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
padding.as_ptr(),
padding.len_i32(),
stride.as_ptr(),
stride.len_i32(),
dilation.as_ptr(),
dilation.len_i32(),
groups,
if benchmark { 1 } else { 0 },
if deterministic { 1 } else { 0 }
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for `miopen_convolution_relu`; `stride` precedes `padding` in the C argument order.
pub fn f_miopen_convolution_relu<T: Borrow<Tensor>>(
&self,
weight: &Tensor,
bias: Option<T>,
stride: impl IntList,
padding: impl IntList,
dilation: impl IntList,
groups: i64,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_miopen_convolution_relu(
c_tensors.as_mut_ptr(),
self.c_tensor,
weight.c_tensor,
bias.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
stride.as_ptr(),
stride.len_i32(),
padding.as_ptr(),
padding.len_i32(),
dilation.as_ptr(),
dilation.len_i32(),
groups
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for `miopen_convolution_transpose`; takes an extra `output_padding` list after `padding`.
pub fn f_miopen_convolution_transpose<T: Borrow<Tensor>>(
&self,
weight: &Tensor,
bias: Option<T>,
padding: impl IntList,
output_padding: impl IntList,
stride: impl IntList,
dilation: impl IntList,
groups: i64,
benchmark: bool,
deterministic: bool,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_miopen_convolution_transpose(
c_tensors.as_mut_ptr(),
self.c_tensor,
weight.c_tensor,
bias.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
padding.as_ptr(),
padding.len_i32(),
output_padding.as_ptr(),
output_padding.len_i32(),
stride.as_ptr(),
stride.len_i32(),
dilation.as_ptr(),
dilation.len_i32(),
groups,
if benchmark { 1 } else { 0 },
if deterministic { 1 } else { 0 }
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// `out`-variant of [`Tensor::f_miopen_convolution_transpose`]; result is written through `out`.
pub fn f_miopen_convolution_transpose_out<T: Borrow<Tensor>>(
&self,
out: &Tensor,
weight: &Tensor,
bias: Option<T>,
padding: impl IntList,
output_padding: impl IntList,
stride: impl IntList,
dilation: impl IntList,
groups: i64,
benchmark: bool,
deterministic: bool,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_miopen_convolution_transpose_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
weight.c_tensor,
bias.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
padding.as_ptr(),
padding.len_i32(),
output_padding.as_ptr(),
output_padding.len_i32(),
stride.as_ptr(),
stride.len_i32(),
dilation.as_ptr(),
dilation.len_i32(),
groups,
if benchmark { 1 } else { 0 },
if deterministic { 1 } else { 0 }
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for `miopen_depthwise_convolution`; bools are lowered to 0/1 for the C ABI.
pub fn f_miopen_depthwise_convolution<T: Borrow<Tensor>>(
&self,
weight: &Tensor,
bias: Option<T>,
padding: impl IntList,
stride: impl IntList,
dilation: impl IntList,
groups: i64,
benchmark: bool,
deterministic: bool,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_miopen_depthwise_convolution(
c_tensors.as_mut_ptr(),
self.c_tensor,
weight.c_tensor,
bias.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
padding.as_ptr(),
padding.len_i32(),
stride.as_ptr(),
stride.len_i32(),
dilation.as_ptr(),
dilation.len_i32(),
groups,
if benchmark { 1 } else { 0 },
if deterministic { 1 } else { 0 }
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// `out`-variant of [`Tensor::f_miopen_depthwise_convolution`]; result is written through `out`.
pub fn f_miopen_depthwise_convolution_out<T: Borrow<Tensor>>(
&self,
out: &Tensor,
weight: &Tensor,
bias: Option<T>,
padding: impl IntList,
stride: impl IntList,
dilation: impl IntList,
groups: i64,
benchmark: bool,
deterministic: bool,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_miopen_depthwise_convolution_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
weight.c_tensor,
bias.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
padding.as_ptr(),
padding.len_i32(),
stride.as_ptr(),
stride.len_i32(),
dilation.as_ptr(),
dilation.len_i32(),
groups,
if benchmark { 1 } else { 0 },
if deterministic { 1 } else { 0 }
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for `miopen_rnn`; `cx`/`dropout_state` of `None` map to null pointers, and the
/// flat `weight` slice is marshalled as a C pointer array. Returns the op's five output tensors.
pub fn f_miopen_rnn<T: Borrow<Tensor>>(
&self,
weight: &[T],
weight_stride0: i64,
hx: &Tensor,
cx: Option<T>,
mode: i64,
hidden_size: i64,
num_layers: i64,
batch_first: bool,
dropout: f64,
train: bool,
bidirectional: bool,
batch_sizes: impl IntList,
dropout_state: Option<T>,
) -> Result<(Tensor, Tensor, Tensor, Tensor, Tensor), TchError> {
let mut c_tensors = [std::ptr::null_mut(); 5];
unsafe_torch_err!(atg_miopen_rnn(
c_tensors.as_mut_ptr(),
self.c_tensor,
ptr_list(weight).as_ptr(),
weight.len() as i32,
weight_stride0,
hx.c_tensor,
cx.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
mode,
hidden_size,
num_layers,
if batch_first { 1 } else { 0 },
dropout,
if train { 1 } else { 0 },
if bidirectional { 1 } else { 0 },
batch_sizes.as_ptr(),
batch_sizes.len_i32(),
dropout_state.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor)
));
Ok((
Tensor { c_tensor: c_tensors[0] },
Tensor { c_tensor: c_tensors[1] },
Tensor { c_tensor: c_tensors[2] },
Tensor { c_tensor: c_tensors[3] },
Tensor { c_tensor: c_tensors[4] },
))
}
/// `out`-variant of [`Tensor::f_miopen_rnn`]: the five results are written through `out0`..`out4`.
pub fn f_miopen_rnn_out<T: Borrow<Tensor>>(
&self,
out0: &Tensor,
out1: &Tensor,
out2: &Tensor,
out3: &Tensor,
out4: &Tensor,
weight: &[T],
weight_stride0: i64,
hx: &Tensor,
cx: Option<T>,
mode: i64,
hidden_size: i64,
num_layers: i64,
batch_first: bool,
dropout: f64,
train: bool,
bidirectional: bool,
batch_sizes: impl IntList,
dropout_state: Option<T>,
) -> Result<(Tensor, Tensor, Tensor, Tensor, Tensor), TchError> {
let mut c_tensors = [std::ptr::null_mut(); 5];
unsafe_torch_err!(atg_miopen_rnn_out(
c_tensors.as_mut_ptr(),
out0.c_tensor,
out1.c_tensor,
out2.c_tensor,
out3.c_tensor,
out4.c_tensor,
self.c_tensor,
ptr_list(weight).as_ptr(),
weight.len() as i32,
weight_stride0,
hx.c_tensor,
cx.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
mode,
hidden_size,
num_layers,
if batch_first { 1 } else { 0 },
dropout,
if train { 1 } else { 0 },
if bidirectional { 1 } else { 0 },
batch_sizes.as_ptr(),
batch_sizes.len_i32(),
dropout_state.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor)
));
Ok((
Tensor { c_tensor: c_tensors[0] },
Tensor { c_tensor: c_tensors[1] },
Tensor { c_tensor: c_tensors[2] },
Tensor { c_tensor: c_tensors[3] },
Tensor { c_tensor: c_tensors[4] },
))
}
/// Fallible binding for the `mish` activation op; returns a new tensor.
pub fn f_mish(&self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_mish(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// In-place variant of [`Tensor::f_mish`] (trailing underscore = ATen in-place convention).
pub fn f_mish_(&mut self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_mish_(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for `mish_backward`; `grad_output` precedes `self` in the C argument order.
pub fn f_mish_backward(&self, grad_output: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_mish_backward(
c_tensors.as_mut_ptr(),
grad_output.c_tensor,
self.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// `out`-variant of [`Tensor::f_mish`]; result is written through `out`.
pub fn f_mish_out(&self, out: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_mish_out(c_tensors.as_mut_ptr(), out.c_tensor, self.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for `mkldnn_adaptive_avg_pool2d`; `output_size` is passed as (ptr, len).
pub fn f_mkldnn_adaptive_avg_pool2d(
&self,
output_size: impl IntList,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_mkldnn_adaptive_avg_pool2d(
c_tensors.as_mut_ptr(),
self.c_tensor,
output_size.as_ptr(),
output_size.len_i32()
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for `mkldnn_adaptive_avg_pool2d_backward`; `grad_output` precedes `self`.
pub fn f_mkldnn_adaptive_avg_pool2d_backward(
&self,
grad_output: &Tensor,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_mkldnn_adaptive_avg_pool2d_backward(
c_tensors.as_mut_ptr(),
grad_output.c_tensor,
self.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// `out`-variant of [`Tensor::f_mkldnn_adaptive_avg_pool2d_backward`].
pub fn f_mkldnn_adaptive_avg_pool2d_backward_out(
&self,
out: &Tensor,
grad_output: &Tensor,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_mkldnn_adaptive_avg_pool2d_backward_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
grad_output.c_tensor,
self.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// `out`-variant of [`Tensor::f_mkldnn_adaptive_avg_pool2d`].
pub fn f_mkldnn_adaptive_avg_pool2d_out(
&self,
out: &Tensor,
output_size: impl IntList,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_mkldnn_adaptive_avg_pool2d_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
output_size.as_ptr(),
output_size.len_i32()
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for `mkldnn_convolution`; `bias: None` becomes a null pointer.
pub fn f_mkldnn_convolution<T: Borrow<Tensor>>(
&self,
weight: &Tensor,
bias: Option<T>,
padding: impl IntList,
stride: impl IntList,
dilation: impl IntList,
groups: i64,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_mkldnn_convolution(
c_tensors.as_mut_ptr(),
self.c_tensor,
weight.c_tensor,
bias.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
padding.as_ptr(),
padding.len_i32(),
stride.as_ptr(),
stride.len_i32(),
dilation.as_ptr(),
dilation.len_i32(),
groups
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// `out`-variant of [`Tensor::f_mkldnn_convolution`]; result is written through `out`.
pub fn f_mkldnn_convolution_out<T: Borrow<Tensor>>(
&self,
out: &Tensor,
weight: &Tensor,
bias: Option<T>,
padding: impl IntList,
stride: impl IntList,
dilation: impl IntList,
groups: i64,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_mkldnn_convolution_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
weight.c_tensor,
bias.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
padding.as_ptr(),
padding.len_i32(),
stride.as_ptr(),
stride.len_i32(),
dilation.as_ptr(),
dilation.len_i32(),
groups
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for `mkldnn_linear`; `bias: None` becomes a null pointer.
pub fn f_mkldnn_linear<T: Borrow<Tensor>>(
&self,
weight: &Tensor,
bias: Option<T>,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_mkldnn_linear(
c_tensors.as_mut_ptr(),
self.c_tensor,
weight.c_tensor,
bias.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor)
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Associated-function binding (no `self`) for `mkldnn_linear_backward_input`.
pub fn f_mkldnn_linear_backward_input(
input_size: impl IntList,
grad_output: &Tensor,
weight: &Tensor,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_mkldnn_linear_backward_input(
c_tensors.as_mut_ptr(),
input_size.as_ptr(),
input_size.len_i32(),
grad_output.c_tensor,
weight.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// `out`-variant of [`Tensor::f_mkldnn_linear_backward_input`] (also an associated function).
pub fn f_mkldnn_linear_backward_input_out(
out: &Tensor,
input_size: impl IntList,
grad_output: &Tensor,
weight: &Tensor,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_mkldnn_linear_backward_input_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
input_size.as_ptr(),
input_size.len_i32(),
grad_output.c_tensor,
weight.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for `mkldnn_linear_backward_weights`; returns the op's two output tensors.
pub fn f_mkldnn_linear_backward_weights(
&self,
grad_output: &Tensor,
weight: &Tensor,
bias_defined: bool,
) -> Result<(Tensor, Tensor), TchError> {
let mut c_tensors = [std::ptr::null_mut(); 2];
unsafe_torch_err!(atg_mkldnn_linear_backward_weights(
c_tensors.as_mut_ptr(),
grad_output.c_tensor,
self.c_tensor,
weight.c_tensor,
if bias_defined { 1 } else { 0 }
));
Ok((Tensor { c_tensor: c_tensors[0] }, Tensor { c_tensor: c_tensors[1] }))
}
/// `out`-variant of [`Tensor::f_mkldnn_linear_backward_weights`]; results go through `out0`/`out1`.
pub fn f_mkldnn_linear_backward_weights_out(
&self,
out0: &Tensor,
out1: &Tensor,
grad_output: &Tensor,
weight: &Tensor,
bias_defined: bool,
) -> Result<(Tensor, Tensor), TchError> {
let mut c_tensors = [std::ptr::null_mut(); 2];
unsafe_torch_err!(atg_mkldnn_linear_backward_weights_out(
c_tensors.as_mut_ptr(),
out0.c_tensor,
out1.c_tensor,
grad_output.c_tensor,
self.c_tensor,
weight.c_tensor,
if bias_defined { 1 } else { 0 }
));
Ok((Tensor { c_tensor: c_tensors[0] }, Tensor { c_tensor: c_tensors[1] }))
}
/// `out`-variant of [`Tensor::f_mkldnn_linear`]; result is written through `out`.
pub fn f_mkldnn_linear_out<T: Borrow<Tensor>>(
&self,
out: &Tensor,
weight: &Tensor,
bias: Option<T>,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_mkldnn_linear_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
weight.c_tensor,
bias.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor)
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for `mkldnn_max_pool2d`; each int list goes over as (ptr, len).
pub fn f_mkldnn_max_pool2d(
&self,
kernel_size: impl IntList,
stride: impl IntList,
padding: impl IntList,
dilation: impl IntList,
ceil_mode: bool,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_mkldnn_max_pool2d(
c_tensors.as_mut_ptr(),
self.c_tensor,
kernel_size.as_ptr(),
kernel_size.len_i32(),
stride.as_ptr(),
stride.len_i32(),
padding.as_ptr(),
padding.len_i32(),
dilation.as_ptr(),
dilation.len_i32(),
if ceil_mode { 1 } else { 0 }
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for `mkldnn_max_pool2d_backward`; C order is grad_output, output, then `self`.
pub fn f_mkldnn_max_pool2d_backward(
&self,
grad_output: &Tensor,
output: &Tensor,
kernel_size: impl IntList,
stride: impl IntList,
padding: impl IntList,
dilation: impl IntList,
ceil_mode: bool,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_mkldnn_max_pool2d_backward(
c_tensors.as_mut_ptr(),
grad_output.c_tensor,
output.c_tensor,
self.c_tensor,
kernel_size.as_ptr(),
kernel_size.len_i32(),
stride.as_ptr(),
stride.len_i32(),
padding.as_ptr(),
padding.len_i32(),
dilation.as_ptr(),
dilation.len_i32(),
if ceil_mode { 1 } else { 0 }
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// `out`-variant of [`Tensor::f_mkldnn_max_pool2d_backward`]; result is written through `out`.
pub fn f_mkldnn_max_pool2d_backward_out(
&self,
out: &Tensor,
grad_output: &Tensor,
output: &Tensor,
kernel_size: impl IntList,
stride: impl IntList,
padding: impl IntList,
dilation: impl IntList,
ceil_mode: bool,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_mkldnn_max_pool2d_backward_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
grad_output.c_tensor,
output.c_tensor,
self.c_tensor,
kernel_size.as_ptr(),
kernel_size.len_i32(),
stride.as_ptr(),
stride.len_i32(),
padding.as_ptr(),
padding.len_i32(),
dilation.as_ptr(),
dilation.len_i32(),
if ceil_mode { 1 } else { 0 }
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// `out`-variant of [`Tensor::f_mkldnn_max_pool2d`]; result is written through `out`.
pub fn f_mkldnn_max_pool2d_out(
&self,
out: &Tensor,
kernel_size: impl IntList,
stride: impl IntList,
padding: impl IntList,
dilation: impl IntList,
ceil_mode: bool,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_mkldnn_max_pool2d_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
kernel_size.as_ptr(),
kernel_size.len_i32(),
stride.as_ptr(),
stride.len_i32(),
padding.as_ptr(),
padding.len_i32(),
dilation.as_ptr(),
dilation.len_i32(),
if ceil_mode { 1 } else { 0 }
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for `mkldnn_max_pool3d`; mirrors the 2-D variant's argument marshalling.
pub fn f_mkldnn_max_pool3d(
&self,
kernel_size: impl IntList,
stride: impl IntList,
padding: impl IntList,
dilation: impl IntList,
ceil_mode: bool,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_mkldnn_max_pool3d(
c_tensors.as_mut_ptr(),
self.c_tensor,
kernel_size.as_ptr(),
kernel_size.len_i32(),
stride.as_ptr(),
stride.len_i32(),
padding.as_ptr(),
padding.len_i32(),
dilation.as_ptr(),
dilation.len_i32(),
if ceil_mode { 1 } else { 0 }
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for `mkldnn_max_pool3d_backward`; C order is grad_output, output, then `self`.
pub fn f_mkldnn_max_pool3d_backward(
&self,
grad_output: &Tensor,
output: &Tensor,
kernel_size: impl IntList,
stride: impl IntList,
padding: impl IntList,
dilation: impl IntList,
ceil_mode: bool,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_mkldnn_max_pool3d_backward(
c_tensors.as_mut_ptr(),
grad_output.c_tensor,
output.c_tensor,
self.c_tensor,
kernel_size.as_ptr(),
kernel_size.len_i32(),
stride.as_ptr(),
stride.len_i32(),
padding.as_ptr(),
padding.len_i32(),
dilation.as_ptr(),
dilation.len_i32(),
if ceil_mode { 1 } else { 0 }
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// `out`-variant of [`Tensor::f_mkldnn_max_pool3d_backward`]; result is written through `out`.
pub fn f_mkldnn_max_pool3d_backward_out(
&self,
out: &Tensor,
grad_output: &Tensor,
output: &Tensor,
kernel_size: impl IntList,
stride: impl IntList,
padding: impl IntList,
dilation: impl IntList,
ceil_mode: bool,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_mkldnn_max_pool3d_backward_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
grad_output.c_tensor,
output.c_tensor,
self.c_tensor,
kernel_size.as_ptr(),
kernel_size.len_i32(),
stride.as_ptr(),
stride.len_i32(),
padding.as_ptr(),
padding.len_i32(),
dilation.as_ptr(),
dilation.len_i32(),
if ceil_mode { 1 } else { 0 }
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// `out`-variant of [`Tensor::f_mkldnn_max_pool3d`]; result is written through `out`.
pub fn f_mkldnn_max_pool3d_out(
&self,
out: &Tensor,
kernel_size: impl IntList,
stride: impl IntList,
padding: impl IntList,
dilation: impl IntList,
ceil_mode: bool,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_mkldnn_max_pool3d_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
kernel_size.as_ptr(),
kernel_size.len_i32(),
stride.as_ptr(),
stride.len_i32(),
padding.as_ptr(),
padding.len_i32(),
dilation.as_ptr(),
dilation.len_i32(),
if ceil_mode { 1 } else { 0 }
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for `mkldnn_reorder_conv2d_weight`; `input_size` is an optional int list
/// (its `IntListOption` impl supplies the null/len encoding expected by the C side).
pub fn f_mkldnn_reorder_conv2d_weight(
&self,
padding: impl IntList,
stride: impl IntList,
dilation: impl IntList,
groups: i64,
input_size: impl IntListOption,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_mkldnn_reorder_conv2d_weight(
c_tensors.as_mut_ptr(),
self.c_tensor,
padding.as_ptr(),
padding.len_i32(),
stride.as_ptr(),
stride.len_i32(),
dilation.as_ptr(),
dilation.len_i32(),
groups,
input_size.as_ptr(),
input_size.len_i32()
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// `out`-variant of [`Tensor::f_mkldnn_reorder_conv2d_weight`]; result is written through `out`.
pub fn f_mkldnn_reorder_conv2d_weight_out(
&self,
out: &Tensor,
padding: impl IntList,
stride: impl IntList,
dilation: impl IntList,
groups: i64,
input_size: impl IntListOption,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_mkldnn_reorder_conv2d_weight_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
padding.as_ptr(),
padding.len_i32(),
stride.as_ptr(),
stride.len_i32(),
dilation.as_ptr(),
dilation.len_i32(),
groups,
input_size.as_ptr(),
input_size.len_i32()
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for `mkldnn_reorder_conv3d_weight` (no `input_size` in this op's signature).
pub fn f_mkldnn_reorder_conv3d_weight(
&self,
padding: impl IntList,
stride: impl IntList,
dilation: impl IntList,
groups: i64,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_mkldnn_reorder_conv3d_weight(
c_tensors.as_mut_ptr(),
self.c_tensor,
padding.as_ptr(),
padding.len_i32(),
stride.as_ptr(),
stride.len_i32(),
dilation.as_ptr(),
dilation.len_i32(),
groups
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// `out`-variant of [`Tensor::f_mkldnn_reorder_conv3d_weight`]; result is written through `out`.
pub fn f_mkldnn_reorder_conv3d_weight_out(
&self,
out: &Tensor,
padding: impl IntList,
stride: impl IntList,
dilation: impl IntList,
groups: i64,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_mkldnn_reorder_conv3d_weight_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
padding.as_ptr(),
padding.len_i32(),
stride.as_ptr(),
stride.len_i32(),
dilation.as_ptr(),
dilation.len_i32(),
groups
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for `mkldnn_rnn_layer`; returns the op's four output tensors.
pub fn f_mkldnn_rnn_layer(
&self,
weight0: &Tensor,
weight1: &Tensor,
weight2: &Tensor,
weight3: &Tensor,
hx_: &Tensor,
cx_: &Tensor,
reverse: bool,
batch_sizes: impl IntList,
mode: i64,
hidden_size: i64,
num_layers: i64,
has_biases: bool,
bidirectional: bool,
batch_first: bool,
train: bool,
) -> Result<(Tensor, Tensor, Tensor, Tensor), TchError> {
let mut c_tensors = [std::ptr::null_mut(); 4];
unsafe_torch_err!(atg_mkldnn_rnn_layer(
c_tensors.as_mut_ptr(),
self.c_tensor,
weight0.c_tensor,
weight1.c_tensor,
weight2.c_tensor,
weight3.c_tensor,
hx_.c_tensor,
cx_.c_tensor,
if reverse { 1 } else { 0 },
batch_sizes.as_ptr(),
batch_sizes.len_i32(),
mode,
hidden_size,
num_layers,
if has_biases { 1 } else { 0 },
if bidirectional { 1 } else { 0 },
if batch_first { 1 } else { 0 },
if train { 1 } else { 0 }
));
Ok((
Tensor { c_tensor: c_tensors[0] },
Tensor { c_tensor: c_tensors[1] },
Tensor { c_tensor: c_tensors[2] },
Tensor { c_tensor: c_tensors[3] },
))
}
/// Fallible binding for `mkldnn_rnn_layer_backward`; the three optional gradients map to null
/// pointers when `None`. Returns the op's seven output tensors.
pub fn f_mkldnn_rnn_layer_backward<T: Borrow<Tensor>>(
&self,
weight1: &Tensor,
weight2: &Tensor,
weight3: &Tensor,
weight4: &Tensor,
hx_: &Tensor,
cx_tmp: &Tensor,
output: &Tensor,
hy_: &Tensor,
cy_: &Tensor,
grad_output: Option<T>,
grad_hy: Option<T>,
grad_cy: Option<T>,
reverse: bool,
mode: i64,
hidden_size: i64,
num_layers: i64,
has_biases: bool,
train: bool,
bidirectional: bool,
batch_sizes: impl IntList,
batch_first: bool,
workspace: &Tensor,
) -> Result<(Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, Tensor), TchError> {
let mut c_tensors = [std::ptr::null_mut(); 7];
unsafe_torch_err!(atg_mkldnn_rnn_layer_backward(
c_tensors.as_mut_ptr(),
self.c_tensor,
weight1.c_tensor,
weight2.c_tensor,
weight3.c_tensor,
weight4.c_tensor,
hx_.c_tensor,
cx_tmp.c_tensor,
output.c_tensor,
hy_.c_tensor,
cy_.c_tensor,
grad_output.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
grad_hy.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
grad_cy.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
if reverse { 1 } else { 0 },
mode,
hidden_size,
num_layers,
if has_biases { 1 } else { 0 },
if train { 1 } else { 0 },
if bidirectional { 1 } else { 0 },
batch_sizes.as_ptr(),
batch_sizes.len_i32(),
if batch_first { 1 } else { 0 },
workspace.c_tensor
));
Ok((
Tensor { c_tensor: c_tensors[0] },
Tensor { c_tensor: c_tensors[1] },
Tensor { c_tensor: c_tensors[2] },
Tensor { c_tensor: c_tensors[3] },
Tensor { c_tensor: c_tensors[4] },
Tensor { c_tensor: c_tensors[5] },
Tensor { c_tensor: c_tensors[6] },
))
}
/// `out`-variant of [`Tensor::f_mkldnn_rnn_layer_backward`]; the seven results are written
/// through `out0`..`out6`.
pub fn f_mkldnn_rnn_layer_backward_out<T: Borrow<Tensor>>(
&self,
out0: &Tensor,
out1: &Tensor,
out2: &Tensor,
out3: &Tensor,
out4: &Tensor,
out5: &Tensor,
out6: &Tensor,
weight1: &Tensor,
weight2: &Tensor,
weight3: &Tensor,
weight4: &Tensor,
hx_: &Tensor,
cx_tmp: &Tensor,
output: &Tensor,
hy_: &Tensor,
cy_: &Tensor,
grad_output: Option<T>,
grad_hy: Option<T>,
grad_cy: Option<T>,
reverse: bool,
mode: i64,
hidden_size: i64,
num_layers: i64,
has_biases: bool,
train: bool,
bidirectional: bool,
batch_sizes: impl IntList,
batch_first: bool,
workspace: &Tensor,
) -> Result<(Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, Tensor), TchError> {
let mut c_tensors = [std::ptr::null_mut(); 7];
unsafe_torch_err!(atg_mkldnn_rnn_layer_backward_out(
c_tensors.as_mut_ptr(),
out0.c_tensor,
out1.c_tensor,
out2.c_tensor,
out3.c_tensor,
out4.c_tensor,
out5.c_tensor,
out6.c_tensor,
self.c_tensor,
weight1.c_tensor,
weight2.c_tensor,
weight3.c_tensor,
weight4.c_tensor,
hx_.c_tensor,
cx_tmp.c_tensor,
output.c_tensor,
hy_.c_tensor,
cy_.c_tensor,
grad_output.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
grad_hy.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
grad_cy.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
if reverse { 1 } else { 0 },
mode,
hidden_size,
num_layers,
if has_biases { 1 } else { 0 },
if train { 1 } else { 0 },
if bidirectional { 1 } else { 0 },
batch_sizes.as_ptr(),
batch_sizes.len_i32(),
if batch_first { 1 } else { 0 },
workspace.c_tensor
));
Ok((
Tensor { c_tensor: c_tensors[0] },
Tensor { c_tensor: c_tensors[1] },
Tensor { c_tensor: c_tensors[2] },
Tensor { c_tensor: c_tensors[3] },
Tensor { c_tensor: c_tensors[4] },
Tensor { c_tensor: c_tensors[5] },
Tensor { c_tensor: c_tensors[6] },
))
}
/// `out`-variant of [`Tensor::f_mkldnn_rnn_layer`]; the four results are written through `out0`..`out3`.
pub fn f_mkldnn_rnn_layer_out(
&self,
out0: &Tensor,
out1: &Tensor,
out2: &Tensor,
out3: &Tensor,
weight0: &Tensor,
weight1: &Tensor,
weight2: &Tensor,
weight3: &Tensor,
hx_: &Tensor,
cx_: &Tensor,
reverse: bool,
batch_sizes: impl IntList,
mode: i64,
hidden_size: i64,
num_layers: i64,
has_biases: bool,
bidirectional: bool,
batch_first: bool,
train: bool,
) -> Result<(Tensor, Tensor, Tensor, Tensor), TchError> {
let mut c_tensors = [std::ptr::null_mut(); 4];
unsafe_torch_err!(atg_mkldnn_rnn_layer_out(
c_tensors.as_mut_ptr(),
out0.c_tensor,
out1.c_tensor,
out2.c_tensor,
out3.c_tensor,
self.c_tensor,
weight0.c_tensor,
weight1.c_tensor,
weight2.c_tensor,
weight3.c_tensor,
hx_.c_tensor,
cx_.c_tensor,
if reverse { 1 } else { 0 },
batch_sizes.as_ptr(),
batch_sizes.len_i32(),
mode,
hidden_size,
num_layers,
if has_biases { 1 } else { 0 },
if bidirectional { 1 } else { 0 },
if batch_first { 1 } else { 0 },
if train { 1 } else { 0 }
));
Ok((
Tensor { c_tensor: c_tensors[0] },
Tensor { c_tensor: c_tensors[1] },
Tensor { c_tensor: c_tensors[2] },
Tensor { c_tensor: c_tensors[3] },
))
}
/// Fallible binding for the `mm` (matrix-multiply) ATen op.
pub fn f_mm(&self, mat2: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_mm(c_tensors.as_mut_ptr(), self.c_tensor, mat2.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// `out`-variant of [`Tensor::f_mm`]; result is written through `out`.
pub fn f_mm_out(&self, out: &Tensor, mat2: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_mm_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
mat2.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for the `mode` ATen op; returns its two output tensors.
pub fn f_mode(&self, dim: i64, keepdim: bool) -> Result<(Tensor, Tensor), TchError> {
let mut c_tensors = [std::ptr::null_mut(); 2];
unsafe_torch_err!(atg_mode(
c_tensors.as_mut_ptr(),
self.c_tensor,
dim,
if keepdim { 1 } else { 0 }
));
Ok((Tensor { c_tensor: c_tensors[0] }, Tensor { c_tensor: c_tensors[1] }))
}
/// `out`-variant of [`Tensor::f_mode`]; results are written through `values`/`indices`.
pub fn f_mode_values(
&self,
values: &Tensor,
indices: &Tensor,
dim: i64,
keepdim: bool,
) -> Result<(Tensor, Tensor), TchError> {
let mut c_tensors = [std::ptr::null_mut(); 2];
unsafe_torch_err!(atg_mode_values(
c_tensors.as_mut_ptr(),
values.c_tensor,
indices.c_tensor,
self.c_tensor,
dim,
if keepdim { 1 } else { 0 }
));
Ok((Tensor { c_tensor: c_tensors[0] }, Tensor { c_tensor: c_tensors[1] }))
}
/// Fallible binding for `moveaxis` with int-list `source`/`destination` arguments.
pub fn f_moveaxis(
&self,
source: impl IntList,
destination: impl IntList,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_moveaxis(
c_tensors.as_mut_ptr(),
self.c_tensor,
source.as_ptr(),
source.len_i32(),
destination.as_ptr(),
destination.len_i32()
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Scalar-index overload of [`Tensor::f_moveaxis`] (single `source`/`destination` dims).
pub fn f_moveaxis_int(&self, source: i64, destination: i64) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_moveaxis_int(
c_tensors.as_mut_ptr(),
self.c_tensor,
source,
destination
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for `movedim` with int-list `source`/`destination` arguments.
pub fn f_movedim(
&self,
source: impl IntList,
destination: impl IntList,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_movedim(
c_tensors.as_mut_ptr(),
self.c_tensor,
source.as_ptr(),
source.len_i32(),
destination.as_ptr(),
destination.len_i32()
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Scalar-index overload of [`Tensor::f_movedim`] (single `source`/`destination` dims).
pub fn f_movedim_int(&self, source: i64, destination: i64) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_movedim_int(
c_tensors.as_mut_ptr(),
self.c_tensor,
source,
destination
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for `mse_loss`; `reduction` is lowered to its ATen integer code.
pub fn f_mse_loss(
&self,
target: &Tensor,
reduction: crate::Reduction,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_mse_loss(
c_tensors.as_mut_ptr(),
self.c_tensor,
target.c_tensor,
reduction.to_int()
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for `mse_loss_backward`; `grad_output` precedes `self` in the C call.
pub fn f_mse_loss_backward(
&self,
grad_output: &Tensor,
target: &Tensor,
reduction: crate::Reduction,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_mse_loss_backward(
c_tensors.as_mut_ptr(),
grad_output.c_tensor,
self.c_tensor,
target.c_tensor,
reduction.to_int()
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// `grad_input`-output variant of [`Tensor::f_mse_loss_backward`].
pub fn f_mse_loss_backward_grad_input(
&self,
grad_input: &Tensor,
grad_output: &Tensor,
target: &Tensor,
reduction: crate::Reduction,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_mse_loss_backward_grad_input(
c_tensors.as_mut_ptr(),
grad_input.c_tensor,
grad_output.c_tensor,
self.c_tensor,
target.c_tensor,
reduction.to_int()
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// `out`-variant of [`Tensor::f_mse_loss`]; result is written through `out`.
pub fn f_mse_loss_out(
&self,
out: &Tensor,
target: &Tensor,
reduction: crate::Reduction,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_mse_loss_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
target.c_tensor,
reduction.to_int()
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for the `msort` ATen op.
pub fn f_msort(&self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_msort(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// `out`-variant of [`Tensor::f_msort`]; result is written through `out`.
pub fn f_msort_out(&self, out: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_msort_out(c_tensors.as_mut_ptr(), out.c_tensor, self.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for the `mT` ATen accessor (generated snake-case name `mt`).
pub fn f_mt(&self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_mt(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for the tensor-tensor `mul` op.
pub fn f_mul(&self, other: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_mul(c_tensors.as_mut_ptr(), self.c_tensor, other.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// In-place variant of [`Tensor::f_mul`].
pub fn f_mul_(&mut self, other: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_mul_(c_tensors.as_mut_ptr(), self.c_tensor, other.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// `out`-variant of [`Tensor::f_mul`]; result is written through `out`.
pub fn f_mul_out(&self, out: &Tensor, other: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_mul_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
other.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Scalar overload of [`Tensor::f_mul`]; `other` is converted through [`Scalar`].
pub fn f_mul_scalar<S: Into<Scalar>>(&self, other: S) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_mul_scalar(
c_tensors.as_mut_ptr(),
self.c_tensor,
other.into().c_scalar
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// In-place variant of [`Tensor::f_mul_scalar`].
pub fn f_mul_scalar_<S: Into<Scalar>>(&mut self, other: S) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_mul_scalar_(
c_tensors.as_mut_ptr(),
self.c_tensor,
other.into().c_scalar
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// `out`-variant of [`Tensor::f_mul_scalar`]; result is written through `out`.
pub fn f_mul_scalar_out<S: Into<Scalar>>(
&self,
out: &Tensor,
other: S,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_mul_scalar_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
other.into().c_scalar
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for `multi_margin_loss_backward`; `weight: None` becomes a null pointer.
pub fn f_multi_margin_loss_backward<T: Borrow<Tensor>, S: Into<Scalar>>(
&self,
grad_output: &Tensor,
target: &Tensor,
p: S,
margin: S,
weight: Option<T>,
reduction: crate::Reduction,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_multi_margin_loss_backward(
c_tensors.as_mut_ptr(),
grad_output.c_tensor,
self.c_tensor,
target.c_tensor,
p.into().c_scalar,
margin.into().c_scalar,
weight.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
reduction.to_int()
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// `grad_input`-output variant of [`Tensor::f_multi_margin_loss_backward`].
pub fn f_multi_margin_loss_backward_grad_input<T: Borrow<Tensor>, S: Into<Scalar>>(
&self,
grad_input: &Tensor,
grad_output: &Tensor,
target: &Tensor,
p: S,
margin: S,
weight: Option<T>,
reduction: crate::Reduction,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_multi_margin_loss_backward_grad_input(
c_tensors.as_mut_ptr(),
grad_input.c_tensor,
grad_output.c_tensor,
self.c_tensor,
target.c_tensor,
p.into().c_scalar,
margin.into().c_scalar,
weight.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
reduction.to_int()
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for `multilabel_margin_loss`.
pub fn f_multilabel_margin_loss(
&self,
target: &Tensor,
reduction: crate::Reduction,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_multilabel_margin_loss(
c_tensors.as_mut_ptr(),
self.c_tensor,
target.c_tensor,
reduction.to_int()
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
pub fn f_multilabel_margin_loss_backward(
&self,
grad_output: &Tensor,
target: &Tensor,
reduction: crate::Reduction,
is_target: &Tensor,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_multilabel_margin_loss_backward(
c_tensors.as_mut_ptr(),
grad_output.c_tensor,
self.c_tensor,
target.c_tensor,
reduction.to_int(),
is_target.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
pub fn f_multilabel_margin_loss_backward_grad_input(
&self,
grad_input: &Tensor,
grad_output: &Tensor,
target: &Tensor,
reduction: crate::Reduction,
is_target: &Tensor,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_multilabel_margin_loss_backward_grad_input(
c_tensors.as_mut_ptr(),
grad_input.c_tensor,
grad_output.c_tensor,
self.c_tensor,
target.c_tensor,
reduction.to_int(),
is_target.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
pub fn f_multilabel_margin_loss_out(
&self,
out: &Tensor,
target: &Tensor,
reduction: crate::Reduction,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_multilabel_margin_loss_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
target.c_tensor,
reduction.to_int()
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
// Generated fallible bindings. Rust `bool` arguments are lowered to the C
// calling convention as 0/1 integers.
/// Binding for the C function `atg_multinomial`.
pub fn f_multinomial(&self, num_samples: i64, replacement: bool) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_multinomial(
c_tensors.as_mut_ptr(),
self.c_tensor,
num_samples,
if replacement { 1 } else { 0 }
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Binding for the C function `atg_multinomial_out`, writing into `out`.
pub fn f_multinomial_out(
&self,
out: &Tensor,
num_samples: i64,
replacement: bool,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_multinomial_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
num_samples,
if replacement { 1 } else { 0 }
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Binding for the C function `atg_multiply`.
pub fn f_multiply(&self, other: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_multiply(c_tensors.as_mut_ptr(), self.c_tensor, other.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Binding for the C function `atg_multiply_` (takes `&mut self`).
pub fn f_multiply_(&mut self, other: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_multiply_(c_tensors.as_mut_ptr(), self.c_tensor, other.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Binding for the C function `atg_multiply_out`, writing into `out`.
pub fn f_multiply_out(&self, out: &Tensor, other: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_multiply_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
other.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Binding for the C function `atg_multiply_scalar`.
pub fn f_multiply_scalar<S: Into<Scalar>>(&self, other: S) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_multiply_scalar(
c_tensors.as_mut_ptr(),
self.c_tensor,
other.into().c_scalar
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Binding for the C function `atg_multiply_scalar_` (takes `&mut self`).
pub fn f_multiply_scalar_<S: Into<Scalar>>(&mut self, other: S) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_multiply_scalar_(
c_tensors.as_mut_ptr(),
self.c_tensor,
other.into().c_scalar
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Binding for the C function `atg_mv` (matrix-vector form takes `vec`).
pub fn f_mv(&self, vec: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_mv(c_tensors.as_mut_ptr(), self.c_tensor, vec.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Binding for the C function `atg_mv_out`, writing into `out`.
pub fn f_mv_out(&self, out: &Tensor, vec: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_mv_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
vec.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Binding for the C function `atg_mvlgamma`.
pub fn f_mvlgamma(&self, p: i64) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_mvlgamma(c_tensors.as_mut_ptr(), self.c_tensor, p));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Binding for the C function `atg_mvlgamma_` (takes `&mut self`).
pub fn f_mvlgamma_(&mut self, p: i64) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_mvlgamma_(c_tensors.as_mut_ptr(), self.c_tensor, p));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Binding for the C function `atg_mvlgamma_out`, writing into `out`.
pub fn f_mvlgamma_out(&self, out: &Tensor, p: i64) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_mvlgamma_out(c_tensors.as_mut_ptr(), out.c_tensor, self.c_tensor, p));
Ok(Tensor { c_tensor: c_tensors[0] })
}
// Generated fallible bindings for the `nan_to_num` family. `Option<f64>`
// arguments are flattened for the C ABI into a (value, is_none flag) pair:
// the value slot carries a placeholder NaN when the option is `None`, and
// the `as i8` flag tells the shim whether the value is meaningful.
/// Binding for the C function `atg_nan_to_num`.
pub fn f_nan_to_num(
&self,
nan: impl Into<Option<f64>>,
posinf: impl Into<Option<f64>>,
neginf: impl Into<Option<f64>>,
) -> Result<Tensor, TchError> {
let nan = nan.into();
let posinf = posinf.into();
let neginf = neginf.into();
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_nan_to_num(
c_tensors.as_mut_ptr(),
self.c_tensor,
// Placeholder value; the following flag marks it as absent.
nan.unwrap_or(std::f64::NAN),
nan.is_none() as i8,
posinf.unwrap_or(std::f64::NAN),
posinf.is_none() as i8,
neginf.unwrap_or(std::f64::NAN),
neginf.is_none() as i8
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Binding for the C function `atg_nan_to_num_` (takes `&mut self`).
pub fn f_nan_to_num_(
&mut self,
nan: impl Into<Option<f64>>,
posinf: impl Into<Option<f64>>,
neginf: impl Into<Option<f64>>,
) -> Result<Tensor, TchError> {
let nan = nan.into();
let posinf = posinf.into();
let neginf = neginf.into();
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_nan_to_num_(
c_tensors.as_mut_ptr(),
self.c_tensor,
nan.unwrap_or(std::f64::NAN),
nan.is_none() as i8,
posinf.unwrap_or(std::f64::NAN),
posinf.is_none() as i8,
neginf.unwrap_or(std::f64::NAN),
neginf.is_none() as i8
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Binding for the C function `atg_nan_to_num_out`, writing into `out`.
pub fn f_nan_to_num_out(
&self,
out: &Tensor,
nan: impl Into<Option<f64>>,
posinf: impl Into<Option<f64>>,
neginf: impl Into<Option<f64>>,
) -> Result<Tensor, TchError> {
let nan = nan.into();
let posinf = posinf.into();
let neginf = neginf.into();
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_nan_to_num_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
nan.unwrap_or(std::f64::NAN),
nan.is_none() as i8,
posinf.unwrap_or(std::f64::NAN),
posinf.is_none() as i8,
neginf.unwrap_or(std::f64::NAN),
neginf.is_none() as i8
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
// Generated fallible bindings. `dim: impl IntListOption` is lowered as a
// pointer + length pair; `Option<Kind>` dtypes use `-1` as the "unset"
// sentinel expected by the C shim.
/// Binding for the C function `atg_nanmean`.
pub fn f_nanmean(
&self,
dim: impl IntListOption,
keepdim: bool,
dtype: impl Into<Option<Kind>>,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_nanmean(
c_tensors.as_mut_ptr(),
self.c_tensor,
dim.as_ptr(),
dim.len_i32(),
if keepdim { 1 } else { 0 },
// -1 encodes "no dtype specified".
dtype.into().map_or(-1, |s| s.c_int())
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Binding for the C function `atg_nanmean_out`, writing into `out`.
pub fn f_nanmean_out(
&self,
out: &Tensor,
dim: impl IntListOption,
keepdim: bool,
dtype: impl Into<Option<Kind>>,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_nanmean_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
dim.as_ptr(),
dim.len_i32(),
if keepdim { 1 } else { 0 },
dtype.into().map_or(-1, |s| s.c_int())
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Binding for the C function `atg_nanmedian`.
pub fn f_nanmedian(&self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_nanmedian(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Binding for the C function `atg_nanmedian_dim`; returns two tensors
/// (from the two-slot out array).
pub fn f_nanmedian_dim(&self, dim: i64, keepdim: bool) -> Result<(Tensor, Tensor), TchError> {
let mut c_tensors = [std::ptr::null_mut(); 2];
unsafe_torch_err!(atg_nanmedian_dim(
c_tensors.as_mut_ptr(),
self.c_tensor,
dim,
if keepdim { 1 } else { 0 }
));
Ok((Tensor { c_tensor: c_tensors[0] }, Tensor { c_tensor: c_tensors[1] }))
}
/// Binding for the C function `atg_nanmedian_dim_values`, writing into
/// `values` and `indices`.
pub fn f_nanmedian_dim_values(
&self,
values: &Tensor,
indices: &Tensor,
dim: i64,
keepdim: bool,
) -> Result<(Tensor, Tensor), TchError> {
let mut c_tensors = [std::ptr::null_mut(); 2];
unsafe_torch_err!(atg_nanmedian_dim_values(
c_tensors.as_mut_ptr(),
values.c_tensor,
indices.c_tensor,
self.c_tensor,
dim,
if keepdim { 1 } else { 0 }
));
Ok((Tensor { c_tensor: c_tensors[0] }, Tensor { c_tensor: c_tensors[1] }))
}
/// Binding for the C function `atg_nanmedian_out`, writing into `out`.
pub fn f_nanmedian_out(&self, out: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_nanmedian_out(c_tensors.as_mut_ptr(), out.c_tensor, self.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
// Generated fallible bindings for the `nanquantile` family. `Option<i64>`
// dims use the (value, is_none flag) encoding; the `interpolation` string is
// passed to C as a raw byte pointer plus length (not NUL-terminated).
/// Binding for the C function `atg_nanquantile`.
pub fn f_nanquantile(
&self,
q: &Tensor,
dim: impl Into<Option<i64>>,
keepdim: bool,
interpolation: &str,
) -> Result<Tensor, TchError> {
let dim = dim.into();
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_nanquantile(
c_tensors.as_mut_ptr(),
self.c_tensor,
q.c_tensor,
// Placeholder value; the next flag marks the option as absent.
dim.unwrap_or(0i64),
dim.is_none() as i8,
if keepdim { 1 } else { 0 },
interpolation.as_ptr(),
interpolation.len() as i32
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Binding for the C function `atg_nanquantile_out`, writing into `out`.
pub fn f_nanquantile_out(
&self,
out: &Tensor,
q: &Tensor,
dim: impl Into<Option<i64>>,
keepdim: bool,
interpolation: &str,
) -> Result<Tensor, TchError> {
let dim = dim.into();
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_nanquantile_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
q.c_tensor,
dim.unwrap_or(0i64),
dim.is_none() as i8,
if keepdim { 1 } else { 0 },
interpolation.as_ptr(),
interpolation.len() as i32
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Binding for the C function `atg_nanquantile_scalar` (`q` as `f64`).
pub fn f_nanquantile_scalar(
&self,
q: f64,
dim: impl Into<Option<i64>>,
keepdim: bool,
interpolation: &str,
) -> Result<Tensor, TchError> {
let dim = dim.into();
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_nanquantile_scalar(
c_tensors.as_mut_ptr(),
self.c_tensor,
q,
dim.unwrap_or(0i64),
dim.is_none() as i8,
if keepdim { 1 } else { 0 },
interpolation.as_ptr(),
interpolation.len() as i32
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Binding for the C function `atg_nanquantile_scalar_out`, writing into `out`.
pub fn f_nanquantile_scalar_out(
&self,
out: &Tensor,
q: f64,
dim: impl Into<Option<i64>>,
keepdim: bool,
interpolation: &str,
) -> Result<Tensor, TchError> {
let dim = dim.into();
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_nanquantile_scalar_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
q,
dim.unwrap_or(0i64),
dim.is_none() as i8,
if keepdim { 1 } else { 0 },
interpolation.as_ptr(),
interpolation.len() as i32
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
// Generated fallible bindings for `nansum` and the `narrow` family.
/// Binding for the C function `atg_nansum`.
pub fn f_nansum(
&self,
dim: impl IntListOption,
keepdim: bool,
dtype: impl Into<Option<Kind>>,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_nansum(
c_tensors.as_mut_ptr(),
self.c_tensor,
dim.as_ptr(),
dim.len_i32(),
if keepdim { 1 } else { 0 },
// -1 encodes "no dtype specified".
dtype.into().map_or(-1, |s| s.c_int())
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Binding for the C function `atg_nansum_out`, writing into `out`.
pub fn f_nansum_out(
&self,
out: &Tensor,
dim: impl IntListOption,
keepdim: bool,
dtype: impl Into<Option<Kind>>,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_nansum_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
dim.as_ptr(),
dim.len_i32(),
if keepdim { 1 } else { 0 },
dtype.into().map_or(-1, |s| s.c_int())
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Binding for the C function `atg_narrow`.
pub fn f_narrow(&self, dim: i64, start: i64, length: i64) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_narrow(c_tensors.as_mut_ptr(), self.c_tensor, dim, start, length));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Binding for the C function `atg_narrow_copy`.
pub fn f_narrow_copy(&self, dim: i64, start: i64, length: i64) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_narrow_copy(
c_tensors.as_mut_ptr(),
self.c_tensor,
dim,
start,
length
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Binding for the C function `atg_narrow_copy_out`, writing into `out`.
pub fn f_narrow_copy_out(
&self,
out: &Tensor,
dim: i64,
start: i64,
length: i64,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_narrow_copy_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
dim,
start,
length
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Binding for the C function `atg_narrow_tensor` (`start` given as a tensor).
pub fn f_narrow_tensor(
&self,
dim: i64,
start: &Tensor,
length: i64,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_narrow_tensor(
c_tensors.as_mut_ptr(),
self.c_tensor,
dim,
start.c_tensor,
length
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
// Generated fallible bindings for batch-norm / dropout natives. Optional
// tensors are passed as null pointers when `None`; multi-output ops size the
// `c_tensors` out array accordingly and wrap each returned handle.
/// Binding for the C function `atg_native_batch_norm`; returns three tensors.
pub fn f_native_batch_norm<T: Borrow<Tensor>>(
&self,
weight: Option<T>,
bias: Option<T>,
running_mean: Option<T>,
running_var: Option<T>,
training: bool,
momentum: f64,
eps: f64,
) -> Result<(Tensor, Tensor, Tensor), TchError> {
let mut c_tensors = [std::ptr::null_mut(); 3];
unsafe_torch_err!(atg_native_batch_norm(
c_tensors.as_mut_ptr(),
self.c_tensor,
weight.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
bias.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
running_mean.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
running_var.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
if training { 1 } else { 0 },
momentum,
eps
));
Ok((
Tensor { c_tensor: c_tensors[0] },
Tensor { c_tensor: c_tensors[1] },
Tensor { c_tensor: c_tensors[2] },
))
}
/// Binding for the C function `atg_native_batch_norm_out`, writing into
/// `out`, `save_mean` and `save_invstd`.
pub fn f_native_batch_norm_out<T: Borrow<Tensor>>(
&self,
out: &Tensor,
save_mean: &Tensor,
save_invstd: &Tensor,
weight: Option<T>,
bias: Option<T>,
running_mean: Option<T>,
running_var: Option<T>,
training: bool,
momentum: f64,
eps: f64,
) -> Result<(Tensor, Tensor, Tensor), TchError> {
let mut c_tensors = [std::ptr::null_mut(); 3];
unsafe_torch_err!(atg_native_batch_norm_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
save_mean.c_tensor,
save_invstd.c_tensor,
self.c_tensor,
weight.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
bias.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
running_mean.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
running_var.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
if training { 1 } else { 0 },
momentum,
eps
));
Ok((
Tensor { c_tensor: c_tensors[0] },
Tensor { c_tensor: c_tensors[1] },
Tensor { c_tensor: c_tensors[2] },
))
}
/// Binding for the C function `atg_native_channel_shuffle`.
pub fn f_native_channel_shuffle(&self, groups: i64) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_native_channel_shuffle(
c_tensors.as_mut_ptr(),
self.c_tensor,
groups
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Binding for the C function `atg_native_dropout`; returns two tensors.
pub fn f_native_dropout(&self, p: f64, train: bool) -> Result<(Tensor, Tensor), TchError> {
let mut c_tensors = [std::ptr::null_mut(); 2];
unsafe_torch_err!(atg_native_dropout(
c_tensors.as_mut_ptr(),
self.c_tensor,
p,
if train { 1 } else { 0 }
));
Ok((Tensor { c_tensor: c_tensors[0] }, Tensor { c_tensor: c_tensors[1] }))
}
/// Binding for the C function `atg_native_dropout_backward` (associated
/// function, no `self`).
pub fn f_native_dropout_backward(
grad_output: &Tensor,
mask: &Tensor,
scale: f64,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_native_dropout_backward(
c_tensors.as_mut_ptr(),
grad_output.c_tensor,
mask.c_tensor,
scale
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Binding for the C function `atg_native_dropout_backward_out` (associated
/// function, no `self`), writing into `out`.
pub fn f_native_dropout_backward_out(
out: &Tensor,
grad_output: &Tensor,
mask: &Tensor,
scale: f64,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_native_dropout_backward_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
grad_output.c_tensor,
mask.c_tensor,
scale
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Binding for the C function `atg_native_dropout_out`, writing into `out0`
/// and `out1`.
pub fn f_native_dropout_out(
&self,
out0: &Tensor,
out1: &Tensor,
p: f64,
train: bool,
) -> Result<(Tensor, Tensor), TchError> {
let mut c_tensors = [std::ptr::null_mut(); 2];
unsafe_torch_err!(atg_native_dropout_out(
c_tensors.as_mut_ptr(),
out0.c_tensor,
out1.c_tensor,
self.c_tensor,
p,
if train { 1 } else { 0 }
));
Ok((Tensor { c_tensor: c_tensors[0] }, Tensor { c_tensor: c_tensors[1] }))
}
// Generated fallible bindings for group-norm / layer-norm natives. Each
// returns three tensors via a three-slot out array.
/// Binding for the C function `atg_native_group_norm`.
pub fn f_native_group_norm<T: Borrow<Tensor>>(
&self,
weight: Option<T>,
bias: Option<T>,
n: i64,
c: i64,
hxw: i64,
group: i64,
eps: f64,
) -> Result<(Tensor, Tensor, Tensor), TchError> {
let mut c_tensors = [std::ptr::null_mut(); 3];
unsafe_torch_err!(atg_native_group_norm(
c_tensors.as_mut_ptr(),
self.c_tensor,
weight.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
bias.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
n,
c,
hxw,
group,
eps
));
Ok((
Tensor { c_tensor: c_tensors[0] },
Tensor { c_tensor: c_tensors[1] },
Tensor { c_tensor: c_tensors[2] },
))
}
/// Binding for the C function `atg_native_group_norm_out`, writing into
/// `out0`..`out2`.
pub fn f_native_group_norm_out<T: Borrow<Tensor>>(
&self,
out0: &Tensor,
out1: &Tensor,
out2: &Tensor,
weight: Option<T>,
bias: Option<T>,
n: i64,
c: i64,
hxw: i64,
group: i64,
eps: f64,
) -> Result<(Tensor, Tensor, Tensor), TchError> {
let mut c_tensors = [std::ptr::null_mut(); 3];
unsafe_torch_err!(atg_native_group_norm_out(
c_tensors.as_mut_ptr(),
out0.c_tensor,
out1.c_tensor,
out2.c_tensor,
self.c_tensor,
weight.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
bias.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
n,
c,
hxw,
group,
eps
));
Ok((
Tensor { c_tensor: c_tensors[0] },
Tensor { c_tensor: c_tensors[1] },
Tensor { c_tensor: c_tensors[2] },
))
}
/// Binding for the C function `atg_native_layer_norm`.
pub fn f_native_layer_norm<T: Borrow<Tensor>>(
&self,
normalized_shape: impl IntList,
weight: Option<T>,
bias: Option<T>,
eps: f64,
) -> Result<(Tensor, Tensor, Tensor), TchError> {
let mut c_tensors = [std::ptr::null_mut(); 3];
unsafe_torch_err!(atg_native_layer_norm(
c_tensors.as_mut_ptr(),
self.c_tensor,
normalized_shape.as_ptr(),
normalized_shape.len_i32(),
weight.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
bias.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
eps
));
Ok((
Tensor { c_tensor: c_tensors[0] },
Tensor { c_tensor: c_tensors[1] },
Tensor { c_tensor: c_tensors[2] },
))
}
/// Binding for the C function `atg_native_layer_norm_out`, writing into
/// `out0`..`out2`.
pub fn f_native_layer_norm_out<T: Borrow<Tensor>>(
&self,
out0: &Tensor,
out1: &Tensor,
out2: &Tensor,
normalized_shape: impl IntList,
weight: Option<T>,
bias: Option<T>,
eps: f64,
) -> Result<(Tensor, Tensor, Tensor), TchError> {
let mut c_tensors = [std::ptr::null_mut(); 3];
unsafe_torch_err!(atg_native_layer_norm_out(
c_tensors.as_mut_ptr(),
out0.c_tensor,
out1.c_tensor,
out2.c_tensor,
self.c_tensor,
normalized_shape.as_ptr(),
normalized_shape.len_i32(),
weight.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
bias.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
eps
));
Ok((
Tensor { c_tensor: c_tensors[0] },
Tensor { c_tensor: c_tensors[1] },
Tensor { c_tensor: c_tensors[2] },
))
}
// Generated fallible bindings for the `native_norm` family.
/// Binding for the C function `atg_native_norm`.
pub fn f_native_norm(&self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_native_norm(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Binding for the C function `atg_native_norm_out`, writing into `out`.
pub fn f_native_norm_out(&self, out: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_native_norm_out(c_tensors.as_mut_ptr(), out.c_tensor, self.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Binding for the C function `atg_native_norm_scalaropt_dim_dtype`.
pub fn f_native_norm_scalaropt_dim_dtype<S: Into<Scalar>>(
&self,
p: S,
dim: impl IntList,
keepdim: bool,
dtype: impl Into<Option<Kind>>,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_native_norm_scalaropt_dim_dtype(
c_tensors.as_mut_ptr(),
self.c_tensor,
p.into().c_scalar,
dim.as_ptr(),
dim.len_i32(),
if keepdim { 1 } else { 0 },
// -1 encodes "no dtype specified".
dtype.into().map_or(-1, |s| s.c_int())
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Binding for the C function `atg_native_norm_scalaropt_dim_dtype_out`,
/// writing into `out`.
pub fn f_native_norm_scalaropt_dim_dtype_out<S: Into<Scalar>>(
&self,
out: &Tensor,
p: S,
dim: impl IntList,
keepdim: bool,
dtype: impl Into<Option<Kind>>,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_native_norm_scalaropt_dim_dtype_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
p.into().c_scalar,
dim.as_ptr(),
dim.len_i32(),
if keepdim { 1 } else { 0 },
dtype.into().map_or(-1, |s| s.c_int())
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
// Generated fallible bindings for `ne` (not-equal) and `neg`/`negative`.
/// Binding for the C function `atg_ne` (scalar comparand).
pub fn f_ne<S: Into<Scalar>>(&self, other: S) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_ne(c_tensors.as_mut_ptr(), self.c_tensor, other.into().c_scalar));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Binding for the C function `atg_ne_` (takes `&mut self`).
pub fn f_ne_<S: Into<Scalar>>(&mut self, other: S) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_ne_(c_tensors.as_mut_ptr(), self.c_tensor, other.into().c_scalar));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Binding for the C function `atg_ne_scalar_out`, writing into `out`.
pub fn f_ne_scalar_out<S: Into<Scalar>>(
&self,
out: &Tensor,
other: S,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_ne_scalar_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
other.into().c_scalar
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Binding for the C function `atg_ne_tensor` (tensor comparand).
pub fn f_ne_tensor(&self, other: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_ne_tensor(c_tensors.as_mut_ptr(), self.c_tensor, other.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Binding for the C function `atg_ne_tensor_` (takes `&mut self`).
pub fn f_ne_tensor_(&mut self, other: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_ne_tensor_(c_tensors.as_mut_ptr(), self.c_tensor, other.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Binding for the C function `atg_ne_tensor_out`, writing into `out`.
pub fn f_ne_tensor_out(&self, out: &Tensor, other: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_ne_tensor_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
other.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Binding for the C function `atg_neg`.
pub fn f_neg(&self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_neg(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Binding for the C function `atg_neg_` (takes `&mut self`).
pub fn f_neg_(&mut self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_neg_(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Binding for the C function `atg_neg_out`, writing into `out`.
pub fn f_neg_out(&self, out: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_neg_out(c_tensors.as_mut_ptr(), out.c_tensor, self.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Binding for the C function `atg_negative`.
pub fn f_negative(&self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_negative(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Binding for the C function `atg_negative_` (takes `&mut self`).
pub fn f_negative_(&mut self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_negative_(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Binding for the C function `atg_negative_out`, writing into `out`.
pub fn f_negative_out(&self, out: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_negative_out(c_tensors.as_mut_ptr(), out.c_tensor, self.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
// Generated fallible bindings. `(Kind, Device)` option pairs are lowered to
// the C API as two integer codes.
/// Binding for the C function `atg_nested_to_padded_tensor`.
pub fn f_nested_to_padded_tensor(
&self,
padding: f64,
output_size: impl IntListOption,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_nested_to_padded_tensor(
c_tensors.as_mut_ptr(),
self.c_tensor,
padding,
output_size.as_ptr(),
output_size.len_i32()
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Binding for the C function `atg_new_empty`.
pub fn f_new_empty(
&self,
size: impl IntList,
options: (Kind, Device),
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_new_empty(
c_tensors.as_mut_ptr(),
self.c_tensor,
size.as_ptr(),
size.len_i32(),
// (kind, device) pair lowered as two integer codes.
options.0.c_int(),
options.1.c_int()
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Binding for the C function `atg_new_empty_out`, writing into `out`.
pub fn f_new_empty_out(&self, out: &Tensor, size: impl IntList) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_new_empty_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
size.as_ptr(),
size.len_i32()
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Binding for the C function `atg_new_empty_strided`.
pub fn f_new_empty_strided(
&self,
size: impl IntList,
stride: impl IntList,
options: (Kind, Device),
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_new_empty_strided(
c_tensors.as_mut_ptr(),
self.c_tensor,
size.as_ptr(),
size.len_i32(),
stride.as_ptr(),
stride.len_i32(),
options.0.c_int(),
options.1.c_int()
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Binding for the C function `atg_new_empty_strided_out`, writing into `out`.
pub fn f_new_empty_strided_out(
&self,
out: &Tensor,
size: impl IntList,
stride: impl IntList,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_new_empty_strided_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
size.as_ptr(),
size.len_i32(),
stride.as_ptr(),
stride.len_i32()
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
// Generated fallible bindings for the `new_*` tensor constructors.
/// Binding for the C function `atg_new_full`.
pub fn f_new_full<S: Into<Scalar>>(
&self,
size: impl IntList,
fill_value: S,
options: (Kind, Device),
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_new_full(
c_tensors.as_mut_ptr(),
self.c_tensor,
size.as_ptr(),
size.len_i32(),
fill_value.into().c_scalar,
options.0.c_int(),
options.1.c_int()
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Binding for the C function `atg_new_full_out`, writing into `out`.
pub fn f_new_full_out<S: Into<Scalar>>(
&self,
out: &Tensor,
size: impl IntList,
fill_value: S,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_new_full_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
size.as_ptr(),
size.len_i32(),
fill_value.into().c_scalar
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Binding for the C function `atg_new_ones`.
pub fn f_new_ones(
&self,
size: impl IntList,
options: (Kind, Device),
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_new_ones(
c_tensors.as_mut_ptr(),
self.c_tensor,
size.as_ptr(),
size.len_i32(),
options.0.c_int(),
options.1.c_int()
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Binding for the C function `atg_new_ones_out`, writing into `out`.
pub fn f_new_ones_out(&self, out: &Tensor, size: impl IntList) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_new_ones_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
size.as_ptr(),
size.len_i32()
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Binding for the C function `atg_new_zeros`.
pub fn f_new_zeros(
&self,
size: impl IntList,
options: (Kind, Device),
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_new_zeros(
c_tensors.as_mut_ptr(),
self.c_tensor,
size.as_ptr(),
size.len_i32(),
options.0.c_int(),
options.1.c_int()
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Binding for the C function `atg_new_zeros_out`, writing into `out`.
pub fn f_new_zeros_out(&self, out: &Tensor, size: impl IntList) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_new_zeros_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
size.as_ptr(),
size.len_i32()
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
// Generated fallible bindings for the `nextafter` family.
/// Binding for the C function `atg_nextafter`.
pub fn f_nextafter(&self, other: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_nextafter(c_tensors.as_mut_ptr(), self.c_tensor, other.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Binding for the C function `atg_nextafter_` (takes `&mut self`).
pub fn f_nextafter_(&mut self, other: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_nextafter_(c_tensors.as_mut_ptr(), self.c_tensor, other.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Binding for the C function `atg_nextafter_out`, writing into `out`.
pub fn f_nextafter_out(&self, out: &Tensor, other: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_nextafter_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
other.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
// Generated fallible bindings for the NLL-loss family. The optional class
// `weight` tensor is passed to C as a null pointer when `None`.
/// Binding for the C function `atg_nll_loss`.
pub fn f_nll_loss<T: Borrow<Tensor>>(
&self,
target: &Tensor,
weight: Option<T>,
reduction: crate::Reduction,
ignore_index: i64,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_nll_loss(
c_tensors.as_mut_ptr(),
self.c_tensor,
target.c_tensor,
weight.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
reduction.to_int(),
ignore_index
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Binding for the C function `atg_nll_loss2d`.
pub fn f_nll_loss2d<T: Borrow<Tensor>>(
&self,
target: &Tensor,
weight: Option<T>,
reduction: crate::Reduction,
ignore_index: i64,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_nll_loss2d(
c_tensors.as_mut_ptr(),
self.c_tensor,
target.c_tensor,
weight.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
reduction.to_int(),
ignore_index
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Binding for the C function `atg_nll_loss2d_backward`.
pub fn f_nll_loss2d_backward<T: Borrow<Tensor>>(
&self,
grad_output: &Tensor,
target: &Tensor,
weight: Option<T>,
reduction: crate::Reduction,
ignore_index: i64,
total_weight: &Tensor,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_nll_loss2d_backward(
c_tensors.as_mut_ptr(),
grad_output.c_tensor,
self.c_tensor,
target.c_tensor,
weight.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
reduction.to_int(),
ignore_index,
total_weight.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Binding for the C function `atg_nll_loss2d_backward_grad_input`, writing
/// into `grad_input`.
pub fn f_nll_loss2d_backward_grad_input<T: Borrow<Tensor>>(
&self,
grad_input: &Tensor,
grad_output: &Tensor,
target: &Tensor,
weight: Option<T>,
reduction: crate::Reduction,
ignore_index: i64,
total_weight: &Tensor,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_nll_loss2d_backward_grad_input(
c_tensors.as_mut_ptr(),
grad_input.c_tensor,
grad_output.c_tensor,
self.c_tensor,
target.c_tensor,
weight.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
reduction.to_int(),
ignore_index,
total_weight.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Binding for the C function `atg_nll_loss2d_out`, writing into `out`.
pub fn f_nll_loss2d_out<T: Borrow<Tensor>>(
&self,
out: &Tensor,
target: &Tensor,
weight: Option<T>,
reduction: crate::Reduction,
ignore_index: i64,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_nll_loss2d_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
target.c_tensor,
weight.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
reduction.to_int(),
ignore_index
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Binding for the C function `atg_nll_loss_backward`.
pub fn f_nll_loss_backward<T: Borrow<Tensor>>(
&self,
grad_output: &Tensor,
target: &Tensor,
weight: Option<T>,
reduction: crate::Reduction,
ignore_index: i64,
total_weight: &Tensor,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_nll_loss_backward(
c_tensors.as_mut_ptr(),
grad_output.c_tensor,
self.c_tensor,
target.c_tensor,
weight.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
reduction.to_int(),
ignore_index,
total_weight.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Binding for the C function `atg_nll_loss_backward_grad_input`, writing
/// into `grad_input`.
pub fn f_nll_loss_backward_grad_input<T: Borrow<Tensor>>(
&self,
grad_input: &Tensor,
grad_output: &Tensor,
target: &Tensor,
weight: Option<T>,
reduction: crate::Reduction,
ignore_index: i64,
total_weight: &Tensor,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_nll_loss_backward_grad_input(
c_tensors.as_mut_ptr(),
grad_input.c_tensor,
grad_output.c_tensor,
self.c_tensor,
target.c_tensor,
weight.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
reduction.to_int(),
ignore_index,
total_weight.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Binding for the C function `atg_nll_loss_nd`.
pub fn f_nll_loss_nd<T: Borrow<Tensor>>(
&self,
target: &Tensor,
weight: Option<T>,
reduction: crate::Reduction,
ignore_index: i64,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_nll_loss_nd(
c_tensors.as_mut_ptr(),
self.c_tensor,
target.c_tensor,
weight.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
reduction.to_int(),
ignore_index
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Binding for the C function `atg_nll_loss_out`, writing into `out`.
pub fn f_nll_loss_out<T: Borrow<Tensor>>(
&self,
out: &Tensor,
target: &Tensor,
weight: Option<T>,
reduction: crate::Reduction,
ignore_index: i64,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_nll_loss_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
target.c_tensor,
weight.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
reduction.to_int(),
ignore_index
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for `atg_nonzero` over `self`.
pub fn f_nonzero(&self) -> Result<Tensor, TchError> {
    // Single output slot the C side fills in on success.
    let mut out__ = [std::ptr::null_mut(); 1];
    unsafe_torch_err!(atg_nonzero(out__.as_mut_ptr(), self.c_tensor));
    let result = Tensor { c_tensor: out__[0] };
    Ok(result)
}
/// Fallible binding for `atg_nonzero_numpy`, which returns a C-allocated,
/// null-terminated array of tensor pointers rather than filling a fixed slot.
pub fn f_nonzero_numpy(&self) -> Result<Vec<Tensor>, TchError> {
let c_tensors = unsafe_torch_err!(atg_nonzero_numpy(self.c_tensor));
let mut r__ = vec![];
let mut i = 0;
// Walk the array until the null sentinel, taking ownership of each pointer.
loop {
let c__ = unsafe { *c_tensors.add(i) };
if c__.is_null() {
break;
}
r__.push(Tensor { c_tensor: c__ });
i += 1;
}
// The array itself (not the tensors) was malloc'd on the C side; release it.
unsafe { libc::free(c_tensors as *mut libc::c_void) }
Ok(r__)
}
/// Fallible binding for `atg_nonzero_out`, writing into `out`.
pub fn f_nonzero_out(&self, out: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_nonzero_out(c_tensors.as_mut_ptr(), out.c_tensor, self.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for `atg_nonzero_static` with fixed `size` and `fill_value`.
pub fn f_nonzero_static(&self, size: i64, fill_value: i64) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_nonzero_static(
c_tensors.as_mut_ptr(),
self.c_tensor,
size,
fill_value
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for `atg_nonzero_static_out`, writing into `out`.
pub fn f_nonzero_static_out(
&self,
out: &Tensor,
size: i64,
fill_value: i64,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_nonzero_static_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
size,
fill_value
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
// Norm-family bindings. Common calling conventions visible below: `bool`
// flags are lowered to 0/1 ints, `IntList` arguments are passed as
// pointer + i32 length, and `Kind` is passed via `c_int()`.
/// Fallible binding for `atg_norm` (no extra parameters).
pub fn f_norm(&self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_norm(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for `atg_norm_dtype_out`, writing into `out`.
pub fn f_norm_dtype_out<S: Into<Scalar>>(
&self,
out: &Tensor,
p: S,
dim: impl IntList,
keepdim: bool,
dtype: Kind,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_norm_dtype_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
p.into().c_scalar,
dim.as_ptr(),
dim.len_i32(),
if keepdim { 1 } else { 0 },
dtype.c_int()
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for `atg_norm_except_dim` (associated fn: `v` is the input).
pub fn f_norm_except_dim(v: &Tensor, pow: i64, dim: i64) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_norm_except_dim(c_tensors.as_mut_ptr(), v.c_tensor, pow, dim));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for `atg_norm_out`, writing into `out`.
pub fn f_norm_out<S: Into<Scalar>>(
&self,
out: &Tensor,
p: S,
dim: impl IntList,
keepdim: bool,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_norm_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
p.into().c_scalar,
dim.as_ptr(),
dim.len_i32(),
if keepdim { 1 } else { 0 }
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for `atg_norm_scalar_out`, writing into `out`.
pub fn f_norm_scalar_out(&self, out: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_norm_scalar_out(c_tensors.as_mut_ptr(), out.c_tensor, self.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for `atg_norm_scalaropt_dim`.
pub fn f_norm_scalaropt_dim<S: Into<Scalar>>(
&self,
p: S,
dim: impl IntList,
keepdim: bool,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_norm_scalaropt_dim(
c_tensors.as_mut_ptr(),
self.c_tensor,
p.into().c_scalar,
dim.as_ptr(),
dim.len_i32(),
if keepdim { 1 } else { 0 }
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for `atg_norm_scalaropt_dim_dtype`.
pub fn f_norm_scalaropt_dim_dtype<S: Into<Scalar>>(
&self,
p: S,
dim: impl IntList,
keepdim: bool,
dtype: Kind,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_norm_scalaropt_dim_dtype(
c_tensors.as_mut_ptr(),
self.c_tensor,
p.into().c_scalar,
dim.as_ptr(),
dim.len_i32(),
if keepdim { 1 } else { 0 },
dtype.c_int()
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for `atg_norm_scalaropt_dtype`.
pub fn f_norm_scalaropt_dtype<S: Into<Scalar>>(
&self,
p: S,
dtype: Kind,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_norm_scalaropt_dtype(
c_tensors.as_mut_ptr(),
self.c_tensor,
p.into().c_scalar,
dtype.c_int()
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for `atg_norm_scalaropt_dtype_out`, writing into `out`.
pub fn f_norm_scalaropt_dtype_out<S: Into<Scalar>>(
&self,
out: &Tensor,
p: S,
dtype: Kind,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_norm_scalaropt_dtype_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
p.into().c_scalar,
dtype.c_int()
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for `atg_normal_` (in-place variant, hence `&mut self`).
pub fn f_normal_(&mut self, mean: f64, std: f64) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_normal_(c_tensors.as_mut_ptr(), self.c_tensor, mean, std));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for `atg_normal_functional`.
pub fn f_normal_functional(&self, mean: f64, std: f64) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_normal_functional(c_tensors.as_mut_ptr(), self.c_tensor, mean, std));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for `atg_not_equal` (scalar right-hand side).
pub fn f_not_equal<S: Into<Scalar>>(&self, other: S) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_not_equal(
c_tensors.as_mut_ptr(),
self.c_tensor,
other.into().c_scalar
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for `atg_not_equal_` (in-place, scalar rhs).
pub fn f_not_equal_<S: Into<Scalar>>(&mut self, other: S) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_not_equal_(
c_tensors.as_mut_ptr(),
self.c_tensor,
other.into().c_scalar
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for `atg_not_equal_scalar_out`, writing into `out`.
pub fn f_not_equal_scalar_out<S: Into<Scalar>>(
&self,
out: &Tensor,
other: S,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_not_equal_scalar_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
other.into().c_scalar
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for `atg_not_equal_tensor` (tensor rhs).
pub fn f_not_equal_tensor(&self, other: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_not_equal_tensor(
c_tensors.as_mut_ptr(),
self.c_tensor,
other.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for `atg_not_equal_tensor_` (in-place, tensor rhs).
pub fn f_not_equal_tensor_(&mut self, other: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_not_equal_tensor_(
c_tensors.as_mut_ptr(),
self.c_tensor,
other.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for `atg_not_equal_tensor_out`, writing into `out`.
pub fn f_not_equal_tensor_out(&self, out: &Tensor, other: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_not_equal_tensor_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
other.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for `atg_nuclear_norm`; `keepdim` lowered to 0/1.
pub fn f_nuclear_norm(&self, keepdim: bool) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_nuclear_norm(
c_tensors.as_mut_ptr(),
self.c_tensor,
if keepdim { 1 } else { 0 }
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for `atg_nuclear_norm_dim`.
pub fn f_nuclear_norm_dim(&self, dim: impl IntList, keepdim: bool) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_nuclear_norm_dim(
c_tensors.as_mut_ptr(),
self.c_tensor,
dim.as_ptr(),
dim.len_i32(),
if keepdim { 1 } else { 0 }
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for `atg_nuclear_norm_dim_out`, writing into `out`.
pub fn f_nuclear_norm_dim_out(
&self,
out: &Tensor,
dim: impl IntList,
keepdim: bool,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_nuclear_norm_dim_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
dim.as_ptr(),
dim.len_i32(),
if keepdim { 1 } else { 0 }
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for `atg_nuclear_norm_out`, writing into `out`.
pub fn f_nuclear_norm_out(&self, out: &Tensor, keepdim: bool) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_nuclear_norm_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
if keepdim { 1 } else { 0 }
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for `atg_numpy_t`.
pub fn f_numpy_t(&self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_numpy_t(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for `atg_one_hot`.
pub fn f_one_hot(&self, num_classes: i64) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_one_hot(c_tensors.as_mut_ptr(), self.c_tensor, num_classes));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for `atg_ones`: builds a tensor of shape `size` with the
/// `(Kind, Device)` pair given in `options`.
pub fn f_ones(size: impl IntList, options: (Kind, Device)) -> Result<Tensor, TchError> {
    let (kind, device) = options;
    let mut out__ = [std::ptr::null_mut(); 1];
    unsafe_torch_err!(atg_ones(
        out__.as_mut_ptr(),
        size.as_ptr(),
        size.len_i32(),
        kind.c_int(),
        device.c_int()
    ));
    Ok(Tensor { c_tensor: out__[0] })
}
/// Fallible binding for `atg_ones_like`.
pub fn f_ones_like(&self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_ones_like(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for `atg_ones_like_out`, writing into `out`.
pub fn f_ones_like_out(&self, out: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_ones_like_out(c_tensors.as_mut_ptr(), out.c_tensor, self.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for `atg_ones_out` (associated fn), writing into `out`.
pub fn f_ones_out(out: &Tensor, size: impl IntList) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_ones_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
size.as_ptr(),
size.len_i32()
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for `atg_orgqr`.
pub fn f_orgqr(&self, input2: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_orgqr(c_tensors.as_mut_ptr(), self.c_tensor, input2.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for `atg_orgqr_out`, writing into `out`.
pub fn f_orgqr_out(&self, out: &Tensor, input2: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_orgqr_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
input2.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for `atg_ormqr`; bool flags lowered to 0/1.
pub fn f_ormqr(
&self,
input2: &Tensor,
input3: &Tensor,
left: bool,
transpose: bool,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_ormqr(
c_tensors.as_mut_ptr(),
self.c_tensor,
input2.c_tensor,
input3.c_tensor,
if left { 1 } else { 0 },
if transpose { 1 } else { 0 }
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for `atg_ormqr_out`, writing into `out`.
pub fn f_ormqr_out(
&self,
out: &Tensor,
input2: &Tensor,
input3: &Tensor,
left: bool,
transpose: bool,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_ormqr_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
input2.c_tensor,
input3.c_tensor,
if left { 1 } else { 0 },
if transpose { 1 } else { 0 }
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for `atg_outer`.
pub fn f_outer(&self, vec2: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_outer(c_tensors.as_mut_ptr(), self.c_tensor, vec2.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for `atg_outer_out`, writing into `out`.
pub fn f_outer_out(&self, out: &Tensor, vec2: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_outer_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
vec2.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for `atg_output_nr`; forwards the `i64` the C call produces.
pub fn f_output_nr(&self) -> Result<i64, TchError> {
    let ret;
    unsafe_torch_err!(ret = atg_output_nr(self.c_tensor));
    Ok(ret)
}
/// Fallible binding for `atg_pad`.
///
/// `value` is optional on the Rust side; the C ABI has no option type, so a
/// `None` is encoded as a (NAN, is_none=1) sentinel pair and `Some(v)` as
/// (v, 0). `mode` is passed as a pointer + byte-length pair.
pub fn f_pad(
&self,
pad: impl IntList,
mode: &str,
value: impl Into<Option<f64>>,
) -> Result<Tensor, TchError> {
let value = value.into();
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_pad(
c_tensors.as_mut_ptr(),
self.c_tensor,
pad.as_ptr(),
pad.len_i32(),
mode.as_ptr(),
mode.len() as i32,
// f64::NAN associated constant: the std::f64::NAN module path is
// soft-deprecated. The value is only a placeholder when `value` is None.
value.unwrap_or(f64::NAN),
value.is_none() as i8
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for `atg_pad_sequence`; the tensor slice is lowered to a
/// temporary Vec of raw pointers via `ptr_list`, passed as ptr + i32 length.
pub fn f_pad_sequence<T: Borrow<Tensor>>(
sequences: &[T],
batch_first: bool,
padding_value: f64,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_pad_sequence(
c_tensors.as_mut_ptr(),
ptr_list(sequences).as_ptr(),
sequences.len() as i32,
if batch_first { 1 } else { 0 },
padding_value
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for `atg_pairwise_distance` (associated fn over `x1`, `x2`).
pub fn f_pairwise_distance(
x1: &Tensor,
x2: &Tensor,
p: f64,
eps: f64,
keepdim: bool,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_pairwise_distance(
c_tensors.as_mut_ptr(),
x1.c_tensor,
x2.c_tensor,
p,
eps,
if keepdim { 1 } else { 0 }
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for `atg_pdist` with norm degree `p`.
pub fn f_pdist(&self, p: f64) -> Result<Tensor, TchError> {
    let mut out__ = [std::ptr::null_mut(); 1];
    unsafe_torch_err!(atg_pdist(out__.as_mut_ptr(), self.c_tensor, p));
    let result = Tensor { c_tensor: out__[0] };
    Ok(result)
}
/// Fallible binding for `atg_permute`; `dims` passed as ptr + i32 length.
pub fn f_permute(&self, dims: impl IntList) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_permute(
c_tensors.as_mut_ptr(),
self.c_tensor,
dims.as_ptr(),
dims.len_i32()
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for `atg_permute_copy`.
pub fn f_permute_copy(&self, dims: impl IntList) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_permute_copy(
c_tensors.as_mut_ptr(),
self.c_tensor,
dims.as_ptr(),
dims.len_i32()
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for `atg_permute_copy_out`, writing into `out`.
pub fn f_permute_copy_out(&self, out: &Tensor, dims: impl IntList) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_permute_copy_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
dims.as_ptr(),
dims.len_i32()
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for `atg_pin_memory`; `Device` lowered via `c_int()`.
pub fn f_pin_memory(&self, device: Device) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_pin_memory(c_tensors.as_mut_ptr(), self.c_tensor, device.c_int()));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for `atg_pinverse`.
pub fn f_pinverse(&self, rcond: f64) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_pinverse(c_tensors.as_mut_ptr(), self.c_tensor, rcond));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for `atg_pixel_shuffle`.
pub fn f_pixel_shuffle(&self, upscale_factor: i64) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_pixel_shuffle(c_tensors.as_mut_ptr(), self.c_tensor, upscale_factor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for `atg_pixel_shuffle_out`, writing into `out`.
pub fn f_pixel_shuffle_out(
&self,
out: &Tensor,
upscale_factor: i64,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_pixel_shuffle_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
upscale_factor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for `atg_pixel_unshuffle`.
pub fn f_pixel_unshuffle(&self, downscale_factor: i64) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_pixel_unshuffle(
c_tensors.as_mut_ptr(),
self.c_tensor,
downscale_factor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for `atg_pixel_unshuffle_out`, writing into `out`.
pub fn f_pixel_unshuffle_out(
&self,
out: &Tensor,
downscale_factor: i64,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_pixel_unshuffle_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
downscale_factor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for `atg_poisson`.
pub fn f_poisson(&self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_poisson(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for `atg_poisson_nll_loss`; bool flags lowered to 0/1.
pub fn f_poisson_nll_loss(
&self,
target: &Tensor,
log_input: bool,
full: bool,
eps: f64,
reduction: crate::Reduction,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_poisson_nll_loss(
c_tensors.as_mut_ptr(),
self.c_tensor,
target.c_tensor,
if log_input { 1 } else { 0 },
if full { 1 } else { 0 },
eps,
reduction.to_int()
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for `atg_poisson_out`, writing into `out`.
pub fn f_poisson_out(&self, out: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_poisson_out(c_tensors.as_mut_ptr(), out.c_tensor, self.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for `atg_polar` (associated fn over `abs`, `angle`).
pub fn f_polar(abs: &Tensor, angle: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_polar(c_tensors.as_mut_ptr(), abs.c_tensor, angle.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for `atg_polar_out`, writing into `out`.
pub fn f_polar_out(out: &Tensor, abs: &Tensor, angle: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_polar_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
abs.c_tensor,
angle.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for `atg_polygamma`. Note the C API takes `n` before the
/// tensor here, unlike the in-place variant below.
pub fn f_polygamma(&self, n: i64) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_polygamma(c_tensors.as_mut_ptr(), n, self.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for `atg_polygamma_` (in-place; tensor before `n`).
pub fn f_polygamma_(&mut self, n: i64) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_polygamma_(c_tensors.as_mut_ptr(), self.c_tensor, n));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for `atg_polygamma_out`, writing into `out` (`n` before tensor).
pub fn f_polygamma_out(&self, out: &Tensor, n: i64) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_polygamma_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
n,
self.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for `atg_positive`.
pub fn f_positive(&self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_positive(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for `atg_pow` (tensor exponent).
pub fn f_pow(&self, exponent: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_pow(c_tensors.as_mut_ptr(), self.c_tensor, exponent.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for `atg_pow_` (in-place, scalar exponent).
pub fn f_pow_<S: Into<Scalar>>(&mut self, exponent: S) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_pow_(
c_tensors.as_mut_ptr(),
self.c_tensor,
exponent.into().c_scalar
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for `atg_pow_scalar` (scalar base, tensor exponent).
pub fn f_pow_scalar<S: Into<Scalar>>(
self_scalar: S,
exponent: &Tensor,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_pow_scalar(
c_tensors.as_mut_ptr(),
self_scalar.into().c_scalar,
exponent.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for `atg_pow_scalar_out`, writing into `out`.
pub fn f_pow_scalar_out<S: Into<Scalar>>(
out: &Tensor,
self_scalar: S,
exponent: &Tensor,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_pow_scalar_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self_scalar.into().c_scalar,
exponent.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for `atg_pow_tensor_` (in-place, tensor exponent).
pub fn f_pow_tensor_(&mut self, exponent: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_pow_tensor_(
c_tensors.as_mut_ptr(),
self.c_tensor,
exponent.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for `atg_pow_tensor_scalar` (tensor base, scalar exponent).
pub fn f_pow_tensor_scalar<S: Into<Scalar>>(&self, exponent: S) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_pow_tensor_scalar(
c_tensors.as_mut_ptr(),
self.c_tensor,
exponent.into().c_scalar
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for `atg_pow_tensor_scalar_out`, writing into `out`.
pub fn f_pow_tensor_scalar_out<S: Into<Scalar>>(
&self,
out: &Tensor,
exponent: S,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_pow_tensor_scalar_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
exponent.into().c_scalar
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for `atg_pow_tensor_tensor_out`, writing into `out`.
pub fn f_pow_tensor_tensor_out(
&self,
out: &Tensor,
exponent: &Tensor,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_pow_tensor_tensor_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
exponent.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for `atg_prelu`.
pub fn f_prelu(&self, weight: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_prelu(c_tensors.as_mut_ptr(), self.c_tensor, weight.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for `atg_prod`; an absent `dtype` is encoded as -1.
pub fn f_prod(&self, dtype: impl Into<Option<Kind>>) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_prod(
c_tensors.as_mut_ptr(),
self.c_tensor,
dtype.into().map_or(-1, |s| s.c_int())
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for `atg_prod_dim_int`.
pub fn f_prod_dim_int(
&self,
dim: i64,
keepdim: bool,
dtype: impl Into<Option<Kind>>,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_prod_dim_int(
c_tensors.as_mut_ptr(),
self.c_tensor,
dim,
if keepdim { 1 } else { 0 },
dtype.into().map_or(-1, |s| s.c_int())
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for `atg_prod_int_out`, writing into `out`.
pub fn f_prod_int_out(
&self,
out: &Tensor,
dim: i64,
keepdim: bool,
dtype: impl Into<Option<Kind>>,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_prod_int_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
dim,
if keepdim { 1 } else { 0 },
dtype.into().map_or(-1, |s| s.c_int())
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for `atg_prod_out`, writing into `out`.
pub fn f_prod_out(
&self,
out: &Tensor,
dtype: impl Into<Option<Kind>>,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_prod_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
dtype.into().map_or(-1, |s| s.c_int())
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for `atg_put`.
pub fn f_put(
&self,
index: &Tensor,
source: &Tensor,
accumulate: bool,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_put(
c_tensors.as_mut_ptr(),
self.c_tensor,
index.c_tensor,
source.c_tensor,
if accumulate { 1 } else { 0 }
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for `atg_put_` (in-place).
pub fn f_put_(
&mut self,
index: &Tensor,
source: &Tensor,
accumulate: bool,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_put_(
c_tensors.as_mut_ptr(),
self.c_tensor,
index.c_tensor,
source.c_tensor,
if accumulate { 1 } else { 0 }
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for `atg_put_out`, writing into `out`.
pub fn f_put_out(
&self,
out: &Tensor,
index: &Tensor,
source: &Tensor,
accumulate: bool,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_put_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
index.c_tensor,
source.c_tensor,
if accumulate { 1 } else { 0 }
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for `atg_q_per_channel_axis`; returns the raw i64.
pub fn f_q_per_channel_axis(&self) -> Result<i64, TchError> {
let return_;
unsafe_torch_err!(return_ = atg_q_per_channel_axis(self.c_tensor));
Ok(return_)
}
/// Fallible binding for `atg_q_per_channel_scales`.
pub fn f_q_per_channel_scales(&self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_q_per_channel_scales(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for `atg_q_per_channel_scales_out`, writing into `out`.
pub fn f_q_per_channel_scales_out(&self, out: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_q_per_channel_scales_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for `atg_q_per_channel_zero_points`.
pub fn f_q_per_channel_zero_points(&self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_q_per_channel_zero_points(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for `atg_q_per_channel_zero_points_out`, writing into `out`.
pub fn f_q_per_channel_zero_points_out(&self, out: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_q_per_channel_zero_points_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for `atg_q_scale`; returns the raw f64.
pub fn f_q_scale(&self) -> Result<f64, TchError> {
let return_;
unsafe_torch_err!(return_ = atg_q_scale(self.c_tensor));
Ok(return_)
}
/// Fallible binding for `atg_q_zero_point`; returns the raw i64.
pub fn f_q_zero_point(&self) -> Result<i64, TchError> {
let return_;
unsafe_torch_err!(return_ = atg_q_zero_point(self.c_tensor));
Ok(return_)
}
/// Fallible binding for `atg_qr`; the C call fills two output slots (Q and R).
pub fn f_qr(&self, some: bool) -> Result<(Tensor, Tensor), TchError> {
    let mut out__ = [std::ptr::null_mut(); 2];
    unsafe_torch_err!(atg_qr(out__.as_mut_ptr(), self.c_tensor, if some { 1 } else { 0 }));
    let q = Tensor { c_tensor: out__[0] };
    let r = Tensor { c_tensor: out__[1] };
    Ok((q, r))
}
/// Fallible binding for `atg_qr_q`, writing into the provided `q` and `r` tensors.
pub fn f_qr_q(&self, q: &Tensor, r: &Tensor, some: bool) -> Result<(Tensor, Tensor), TchError> {
let mut c_tensors = [std::ptr::null_mut(); 2];
unsafe_torch_err!(atg_qr_q(
c_tensors.as_mut_ptr(),
q.c_tensor,
r.c_tensor,
self.c_tensor,
if some { 1 } else { 0 }
));
Ok((Tensor { c_tensor: c_tensors[0] }, Tensor { c_tensor: c_tensors[1] }))
}
// Quantile bindings: the optional `dim` crosses the C ABI as a (value,
// is_none) sentinel pair, and `interpolation` as a ptr + byte-length pair.
/// Fallible binding for `atg_quantile` (tensor `q`).
pub fn f_quantile(
&self,
q: &Tensor,
dim: impl Into<Option<i64>>,
keepdim: bool,
interpolation: &str,
) -> Result<Tensor, TchError> {
let dim = dim.into();
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_quantile(
c_tensors.as_mut_ptr(),
self.c_tensor,
q.c_tensor,
dim.unwrap_or(0i64),
dim.is_none() as i8,
if keepdim { 1 } else { 0 },
interpolation.as_ptr(),
interpolation.len() as i32
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for `atg_quantile_out`, writing into `out`.
pub fn f_quantile_out(
&self,
out: &Tensor,
q: &Tensor,
dim: impl Into<Option<i64>>,
keepdim: bool,
interpolation: &str,
) -> Result<Tensor, TchError> {
let dim = dim.into();
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_quantile_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
q.c_tensor,
dim.unwrap_or(0i64),
dim.is_none() as i8,
if keepdim { 1 } else { 0 },
interpolation.as_ptr(),
interpolation.len() as i32
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for `atg_quantile_scalar` (f64 `q`).
pub fn f_quantile_scalar(
&self,
q: f64,
dim: impl Into<Option<i64>>,
keepdim: bool,
interpolation: &str,
) -> Result<Tensor, TchError> {
let dim = dim.into();
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_quantile_scalar(
c_tensors.as_mut_ptr(),
self.c_tensor,
q,
dim.unwrap_or(0i64),
dim.is_none() as i8,
if keepdim { 1 } else { 0 },
interpolation.as_ptr(),
interpolation.len() as i32
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for `atg_quantile_scalar_out`, writing into `out`.
pub fn f_quantile_scalar_out(
&self,
out: &Tensor,
q: f64,
dim: impl Into<Option<i64>>,
keepdim: bool,
interpolation: &str,
) -> Result<Tensor, TchError> {
let dim = dim.into();
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_quantile_scalar_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
q,
dim.unwrap_or(0i64),
dim.is_none() as i8,
if keepdim { 1 } else { 0 },
interpolation.as_ptr(),
interpolation.len() as i32
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for `atg_quantize_per_channel`.
pub fn f_quantize_per_channel(
&self,
scales: &Tensor,
zero_points: &Tensor,
axis: i64,
dtype: Kind,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_quantize_per_channel(
c_tensors.as_mut_ptr(),
self.c_tensor,
scales.c_tensor,
zero_points.c_tensor,
axis,
dtype.c_int()
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for `atg_quantize_per_channel_out`, writing into `out`.
pub fn f_quantize_per_channel_out(
&self,
out: &Tensor,
scales: &Tensor,
zero_points: &Tensor,
axis: i64,
dtype: Kind,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_quantize_per_channel_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
scales.c_tensor,
zero_points.c_tensor,
axis,
dtype.c_int()
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for `atg_quantize_per_tensor`.
pub fn f_quantize_per_tensor(
&self,
scale: f64,
zero_point: i64,
dtype: Kind,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_quantize_per_tensor(
c_tensors.as_mut_ptr(),
self.c_tensor,
scale,
zero_point,
dtype.c_int()
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for `atg_quantize_per_tensor_dynamic`.
pub fn f_quantize_per_tensor_dynamic(
&self,
dtype: Kind,
reduce_range: bool,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_quantize_per_tensor_dynamic(
c_tensors.as_mut_ptr(),
self.c_tensor,
dtype.c_int(),
if reduce_range { 1 } else { 0 }
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for `atg_quantize_per_tensor_dynamic_out`, writing into `out`.
pub fn f_quantize_per_tensor_dynamic_out(
&self,
out: &Tensor,
dtype: Kind,
reduce_range: bool,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_quantize_per_tensor_dynamic_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
dtype.c_int(),
if reduce_range { 1 } else { 0 }
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for `atg_quantize_per_tensor_out`, writing into `out`.
pub fn f_quantize_per_tensor_out(
&self,
out: &Tensor,
scale: f64,
zero_point: i64,
dtype: Kind,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_quantize_per_tensor_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
scale,
zero_point,
dtype.c_int()
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for `atg_quantize_per_tensor_tensor_qparams`.
pub fn f_quantize_per_tensor_tensor_qparams(
&self,
scale: &Tensor,
zero_point: &Tensor,
dtype: Kind,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_quantize_per_tensor_tensor_qparams(
c_tensors.as_mut_ptr(),
self.c_tensor,
scale.c_tensor,
zero_point.c_tensor,
dtype.c_int()
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for `atg_quantize_per_tensor_tensor_qparams_out`, writing into `out`.
pub fn f_quantize_per_tensor_tensor_qparams_out(
&self,
out: &Tensor,
scale: &Tensor,
zero_point: &Tensor,
dtype: Kind,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_quantize_per_tensor_tensor_qparams_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
scale.c_tensor,
zero_point.c_tensor,
dtype.c_int()
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for `atg_quantize_per_tensor_tensors`: returns a
/// C-allocated, null-terminated pointer array collected into a Vec, then freed.
pub fn f_quantize_per_tensor_tensors<T: Borrow<Tensor>>(
tensors: &[T],
scales: &Tensor,
zero_points: &Tensor,
dtype: Kind,
) -> Result<Vec<Tensor>, TchError> {
let c_tensors = unsafe_torch_err!(atg_quantize_per_tensor_tensors(
ptr_list(tensors).as_ptr(),
tensors.len() as i32,
scales.c_tensor,
zero_points.c_tensor,
dtype.c_int()
));
let mut r__ = vec![];
let mut i = 0;
loop {
let c__ = unsafe { *c_tensors.add(i) };
if c__.is_null() {
break;
}
r__.push(Tensor { c_tensor: c__ });
i += 1;
}
// Free the pointer array itself (the tensors are now owned by `r__`).
unsafe { libc::free(c_tensors as *mut libc::c_void) }
Ok(r__)
}
/// Fallible binding for `atg_quantize_per_tensor_tensors_out`: writes into the
/// caller-provided `out` slice, hence the `()` success value.
pub fn f_quantize_per_tensor_tensors_out<T: Borrow<Tensor>>(
out: &[T],
tensors: &[T],
scales: &Tensor,
zero_points: &Tensor,
dtype: Kind,
) -> Result<(), TchError> {
unsafe_torch_err!(atg_quantize_per_tensor_tensors_out(
ptr_list(out).as_ptr(),
out.len() as i32,
ptr_list(tensors).as_ptr(),
tensors.len() as i32,
scales.c_tensor,
zero_points.c_tensor,
dtype.c_int()
));
Ok(())
}
pub fn f_quantized_batch_norm<T: Borrow<Tensor>>(
&self,
weight: Option<T>,
bias: Option<T>,
mean: &Tensor,
var: &Tensor,
eps: f64,
output_scale: f64,
output_zero_point: i64,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_quantized_batch_norm(
c_tensors.as_mut_ptr(),
self.c_tensor,
weight.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
bias.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
mean.c_tensor,
var.c_tensor,
eps,
output_scale,
output_zero_point
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
pub fn f_quantized_batch_norm_out<T: Borrow<Tensor>>(
&self,
out: &Tensor,
weight: Option<T>,
bias: Option<T>,
mean: &Tensor,
var: &Tensor,
eps: f64,
output_scale: f64,
output_zero_point: i64,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_quantized_batch_norm_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
weight.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
bias.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
mean.c_tensor,
var.c_tensor,
eps,
output_scale,
output_zero_point
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
pub fn f_quantized_gru_cell<S: Into<Scalar>>(
&self,
hx: &Tensor,
w_ih: &Tensor,
w_hh: &Tensor,
b_ih: &Tensor,
b_hh: &Tensor,
packed_ih: &Tensor,
packed_hh: &Tensor,
col_offsets_ih: &Tensor,
col_offsets_hh: &Tensor,
scale_ih: S,
scale_hh: S,
zero_point_ih: S,
zero_point_hh: S,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_quantized_gru_cell(
c_tensors.as_mut_ptr(),
self.c_tensor,
hx.c_tensor,
w_ih.c_tensor,
w_hh.c_tensor,
b_ih.c_tensor,
b_hh.c_tensor,
packed_ih.c_tensor,
packed_hh.c_tensor,
col_offsets_ih.c_tensor,
col_offsets_hh.c_tensor,
scale_ih.into().c_scalar,
scale_hh.into().c_scalar,
zero_point_ih.into().c_scalar,
zero_point_hh.into().c_scalar
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
pub fn f_quantized_lstm_cell<T: Borrow<Tensor>, S: Into<Scalar>>(
&self,
hx: &[T],
w_ih: &Tensor,
w_hh: &Tensor,
b_ih: &Tensor,
b_hh: &Tensor,
packed_ih: &Tensor,
packed_hh: &Tensor,
col_offsets_ih: &Tensor,
col_offsets_hh: &Tensor,
scale_ih: S,
scale_hh: S,
zero_point_ih: S,
zero_point_hh: S,
) -> Result<(Tensor, Tensor), TchError> {
let mut c_tensors = [std::ptr::null_mut(); 2];
unsafe_torch_err!(atg_quantized_lstm_cell(
c_tensors.as_mut_ptr(),
self.c_tensor,
ptr_list(hx).as_ptr(),
hx.len() as i32,
w_ih.c_tensor,
w_hh.c_tensor,
b_ih.c_tensor,
b_hh.c_tensor,
packed_ih.c_tensor,
packed_hh.c_tensor,
col_offsets_ih.c_tensor,
col_offsets_hh.c_tensor,
scale_ih.into().c_scalar,
scale_hh.into().c_scalar,
zero_point_ih.into().c_scalar,
zero_point_hh.into().c_scalar
));
Ok((Tensor { c_tensor: c_tensors[0] }, Tensor { c_tensor: c_tensors[1] }))
}
/// Fallible binding for `atg_quantized_max_pool1d`. Each `IntList` argument
/// is passed to C as a pointer/length pair; `ceil_mode` is encoded as 0/1.
pub fn f_quantized_max_pool1d(
&self,
kernel_size: impl IntList,
stride: impl IntList,
padding: impl IntList,
dilation: impl IntList,
ceil_mode: bool,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_quantized_max_pool1d(
c_tensors.as_mut_ptr(),
self.c_tensor,
kernel_size.as_ptr(),
kernel_size.len_i32(),
stride.as_ptr(),
stride.len_i32(),
padding.as_ptr(),
padding.len_i32(),
dilation.as_ptr(),
dilation.len_i32(),
if ceil_mode { 1 } else { 0 }
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// `out`-parameter variant of [`Self::f_quantized_max_pool1d`]; writes into
/// `out` via `atg_quantized_max_pool1d_out`.
pub fn f_quantized_max_pool1d_out(
&self,
out: &Tensor,
kernel_size: impl IntList,
stride: impl IntList,
padding: impl IntList,
dilation: impl IntList,
ceil_mode: bool,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_quantized_max_pool1d_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
kernel_size.as_ptr(),
kernel_size.len_i32(),
stride.as_ptr(),
stride.len_i32(),
padding.as_ptr(),
padding.len_i32(),
dilation.as_ptr(),
dilation.len_i32(),
if ceil_mode { 1 } else { 0 }
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for `atg_quantized_max_pool2d`; same argument encoding as
/// the 1-d variant above.
pub fn f_quantized_max_pool2d(
&self,
kernel_size: impl IntList,
stride: impl IntList,
padding: impl IntList,
dilation: impl IntList,
ceil_mode: bool,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_quantized_max_pool2d(
c_tensors.as_mut_ptr(),
self.c_tensor,
kernel_size.as_ptr(),
kernel_size.len_i32(),
stride.as_ptr(),
stride.len_i32(),
padding.as_ptr(),
padding.len_i32(),
dilation.as_ptr(),
dilation.len_i32(),
if ceil_mode { 1 } else { 0 }
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// `out`-parameter variant of [`Self::f_quantized_max_pool2d`]; writes into
/// `out` via `atg_quantized_max_pool2d_out`.
pub fn f_quantized_max_pool2d_out(
&self,
out: &Tensor,
kernel_size: impl IntList,
stride: impl IntList,
padding: impl IntList,
dilation: impl IntList,
ceil_mode: bool,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_quantized_max_pool2d_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
kernel_size.as_ptr(),
kernel_size.len_i32(),
stride.as_ptr(),
stride.len_i32(),
padding.as_ptr(),
padding.len_i32(),
dilation.as_ptr(),
dilation.len_i32(),
if ceil_mode { 1 } else { 0 }
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for `atg_quantized_max_pool3d`; same argument encoding as
/// the 1-d variant above.
pub fn f_quantized_max_pool3d(
&self,
kernel_size: impl IntList,
stride: impl IntList,
padding: impl IntList,
dilation: impl IntList,
ceil_mode: bool,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_quantized_max_pool3d(
c_tensors.as_mut_ptr(),
self.c_tensor,
kernel_size.as_ptr(),
kernel_size.len_i32(),
stride.as_ptr(),
stride.len_i32(),
padding.as_ptr(),
padding.len_i32(),
dilation.as_ptr(),
dilation.len_i32(),
if ceil_mode { 1 } else { 0 }
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// `out`-parameter variant of [`Self::f_quantized_max_pool3d`]; writes into
/// `out` via `atg_quantized_max_pool3d_out`.
pub fn f_quantized_max_pool3d_out(
&self,
out: &Tensor,
kernel_size: impl IntList,
stride: impl IntList,
padding: impl IntList,
dilation: impl IntList,
ceil_mode: bool,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_quantized_max_pool3d_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
kernel_size.as_ptr(),
kernel_size.len_i32(),
stride.as_ptr(),
stride.len_i32(),
padding.as_ptr(),
padding.len_i32(),
dilation.as_ptr(),
dilation.len_i32(),
if ceil_mode { 1 } else { 0 }
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for `atg_quantized_rnn_relu_cell`: one quantized RNN cell
/// step with ReLU nonlinearity. Scalar arguments are converted through
/// `Into<Scalar>` and passed as raw `c_scalar` pointers.
pub fn f_quantized_rnn_relu_cell<S: Into<Scalar>>(
&self,
hx: &Tensor,
w_ih: &Tensor,
w_hh: &Tensor,
b_ih: &Tensor,
b_hh: &Tensor,
packed_ih: &Tensor,
packed_hh: &Tensor,
col_offsets_ih: &Tensor,
col_offsets_hh: &Tensor,
scale_ih: S,
scale_hh: S,
zero_point_ih: S,
zero_point_hh: S,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_quantized_rnn_relu_cell(
c_tensors.as_mut_ptr(),
self.c_tensor,
hx.c_tensor,
w_ih.c_tensor,
w_hh.c_tensor,
b_ih.c_tensor,
b_hh.c_tensor,
packed_ih.c_tensor,
packed_hh.c_tensor,
col_offsets_ih.c_tensor,
col_offsets_hh.c_tensor,
scale_ih.into().c_scalar,
scale_hh.into().c_scalar,
zero_point_ih.into().c_scalar,
zero_point_hh.into().c_scalar
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for `atg_quantized_rnn_tanh_cell`: same shape as the
/// ReLU variant above, with tanh nonlinearity on the C++ side.
pub fn f_quantized_rnn_tanh_cell<S: Into<Scalar>>(
&self,
hx: &Tensor,
w_ih: &Tensor,
w_hh: &Tensor,
b_ih: &Tensor,
b_hh: &Tensor,
packed_ih: &Tensor,
packed_hh: &Tensor,
col_offsets_ih: &Tensor,
col_offsets_hh: &Tensor,
scale_ih: S,
scale_hh: S,
zero_point_ih: S,
zero_point_hh: S,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_quantized_rnn_tanh_cell(
c_tensors.as_mut_ptr(),
self.c_tensor,
hx.c_tensor,
w_ih.c_tensor,
w_hh.c_tensor,
b_ih.c_tensor,
b_hh.c_tensor,
packed_ih.c_tensor,
packed_hh.c_tensor,
col_offsets_ih.c_tensor,
col_offsets_hh.c_tensor,
scale_ih.into().c_scalar,
scale_hh.into().c_scalar,
zero_point_ih.into().c_scalar,
zero_point_hh.into().c_scalar
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for `atg_rad2deg`; returns a new tensor.
pub fn f_rad2deg(&self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_rad2deg(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// In-place variant (`atg_rad2deg_`); takes `&mut self` following the
/// file-wide convention for trailing-underscore ops.
pub fn f_rad2deg_(&mut self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_rad2deg_(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// `out`-parameter variant (`atg_rad2deg_out`); writes into `out`.
pub fn f_rad2deg_out(&self, out: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_rad2deg_out(c_tensors.as_mut_ptr(), out.c_tensor, self.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for `atg_rand`: creates a tensor of the given `size` with
/// the `(Kind, Device)` pair encoded as two C ints.
pub fn f_rand(size: impl IntList, options: (Kind, Device)) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_rand(
c_tensors.as_mut_ptr(),
size.as_ptr(),
size.len_i32(),
options.0.c_int(),
options.1.c_int()
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for `atg_rand_like`; result follows `self`'s metadata on
/// the C++ side.
pub fn f_rand_like(&self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_rand_like(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// `out`-parameter variant (`atg_rand_like_out`); writes into `out`.
pub fn f_rand_like_out(&self, out: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_rand_like_out(c_tensors.as_mut_ptr(), out.c_tensor, self.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// `out`-parameter variant of [`Tensor::f_rand`] (`atg_rand_out`).
pub fn f_rand_out(out: &Tensor, size: impl IntList) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_rand_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
size.as_ptr(),
size.len_i32()
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for `atg_randint` with an upper bound only.
pub fn f_randint(
high: i64,
size: impl IntList,
options: (Kind, Device),
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_randint(
c_tensors.as_mut_ptr(),
high,
size.as_ptr(),
size.len_i32(),
options.0.c_int(),
options.1.c_int()
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for `atg_randint_like` with an upper bound only.
pub fn f_randint_like(&self, high: i64) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_randint_like(c_tensors.as_mut_ptr(), self.c_tensor, high));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for `atg_randint_like_low_dtype` with an explicit
/// `[low, high)` range.
pub fn f_randint_like_low_dtype(&self, low: i64, high: i64) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_randint_like_low_dtype(
c_tensors.as_mut_ptr(),
self.c_tensor,
low,
high
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// `out`-parameter variant (`atg_randint_like_low_dtype_out`); writes into
/// `out`.
pub fn f_randint_like_low_dtype_out(
&self,
out: &Tensor,
low: i64,
high: i64,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_randint_like_low_dtype_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
low,
high
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// `out`-parameter variant (`atg_randint_like_out`); writes into `out`.
pub fn f_randint_like_out(&self, out: &Tensor, high: i64) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_randint_like_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
high
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for `atg_randint_low` with an explicit `low` bound.
pub fn f_randint_low(
low: i64,
high: i64,
size: impl IntList,
options: (Kind, Device),
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_randint_low(
c_tensors.as_mut_ptr(),
low,
high,
size.as_ptr(),
size.len_i32(),
options.0.c_int(),
options.1.c_int()
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// `out`-parameter variant of [`Tensor::f_randint_low`]
/// (`atg_randint_low_out`).
pub fn f_randint_low_out(
out: &Tensor,
low: i64,
high: i64,
size: impl IntList,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_randint_low_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
low,
high,
size.as_ptr(),
size.len_i32()
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// `out`-parameter variant of [`Tensor::f_randint`] (`atg_randint_out`).
pub fn f_randint_out(out: &Tensor, high: i64, size: impl IntList) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_randint_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
high,
size.as_ptr(),
size.len_i32()
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for `atg_randn`: creation op taking `size` and
/// `(Kind, Device)` options.
pub fn f_randn(size: impl IntList, options: (Kind, Device)) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_randn(
c_tensors.as_mut_ptr(),
size.as_ptr(),
size.len_i32(),
options.0.c_int(),
options.1.c_int()
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for `atg_randn_like`.
pub fn f_randn_like(&self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_randn_like(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// `out`-parameter variant (`atg_randn_like_out`); writes into `out`.
pub fn f_randn_like_out(&self, out: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_randn_like_out(c_tensors.as_mut_ptr(), out.c_tensor, self.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// `out`-parameter variant of [`Tensor::f_randn`] (`atg_randn_out`).
pub fn f_randn_out(out: &Tensor, size: impl IntList) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_randn_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
size.as_ptr(),
size.len_i32()
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for `atg_random`.
pub fn f_random(&self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_random(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// In-place variant (`atg_random_`); takes `&mut self`.
pub fn f_random_(&mut self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_random_(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for `atg_random_from`. The optional `to` bound is
/// flattened for the C ABI into a `(value, is_none)` pair: `None` becomes
/// `(0, 1)`, `Some(v)` becomes `(v, 0)`.
pub fn f_random_from(&self, from: i64, to: impl Into<Option<i64>>) -> Result<Tensor, TchError> {
let to = to.into();
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_random_from(
c_tensors.as_mut_ptr(),
self.c_tensor,
from,
to.unwrap_or(0i64),
to.is_none() as i8
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// In-place variant of [`Tensor::f_random_from`] (`atg_random_from_`); same
/// `(value, is_none)` encoding for the optional `to` bound.
pub fn f_random_from_(
&mut self,
from: i64,
to: impl Into<Option<i64>>,
) -> Result<Tensor, TchError> {
let to = to.into();
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_random_from_(
c_tensors.as_mut_ptr(),
self.c_tensor,
from,
to.unwrap_or(0i64),
to.is_none() as i8
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// `out`-parameter variant of [`Tensor::f_random_from`]
/// (`atg_random_from_out`); writes into `out`.
pub fn f_random_from_out(
&self,
out: &Tensor,
from: i64,
to: impl Into<Option<i64>>,
) -> Result<Tensor, TchError> {
let to = to.into();
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_random_from_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
from,
to.unwrap_or(0i64),
to.is_none() as i8
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// `out`-parameter variant (`atg_random_out`); writes into `out`.
pub fn f_random_out(&self, out: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_random_out(c_tensors.as_mut_ptr(), out.c_tensor, self.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for `atg_random_to` with a mandatory `to` bound.
pub fn f_random_to(&self, to: i64) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_random_to(c_tensors.as_mut_ptr(), self.c_tensor, to));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// In-place variant (`atg_random_to_`); takes `&mut self`.
pub fn f_random_to_(&mut self, to: i64) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_random_to_(c_tensors.as_mut_ptr(), self.c_tensor, to));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// `out`-parameter variant (`atg_random_to_out`); writes into `out`.
pub fn f_random_to_out(&self, out: &Tensor, to: i64) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_random_to_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
to
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for `atg_randperm` with `(Kind, Device)` options.
pub fn f_randperm(n: i64, options: (Kind, Device)) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_randperm(
c_tensors.as_mut_ptr(),
n,
options.0.c_int(),
options.1.c_int()
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// `out`-parameter variant of [`Tensor::f_randperm`] (`atg_randperm_out`).
pub fn f_randperm_out(out: &Tensor, n: i64) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_randperm_out(c_tensors.as_mut_ptr(), out.c_tensor, n));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for `atg_range`: creation op taking scalar `start`/`end`
/// bounds and `(Kind, Device)` options.
pub fn f_range<S: Into<Scalar>>(
start: S,
end: S,
options: (Kind, Device),
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_range(
c_tensors.as_mut_ptr(),
start.into().c_scalar,
end.into().c_scalar,
options.0.c_int(),
options.1.c_int()
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// `out`-parameter variant of [`Tensor::f_range`] (`atg_range_out`).
pub fn f_range_out<S: Into<Scalar>>(
out: &Tensor,
start: S,
end: S,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_range_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
start.into().c_scalar,
end.into().c_scalar
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Second out-overload generated for range (`atg_range_out_`); same Rust
/// signature as [`Tensor::f_range_out`] but routed to a distinct C symbol.
pub fn f_range_out_<S: Into<Scalar>>(
out: &Tensor,
start: S,
end: S,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_range_out_(
c_tensors.as_mut_ptr(),
out.c_tensor,
start.into().c_scalar,
end.into().c_scalar
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for `atg_range_step` with `(Kind, Device)` options.
pub fn f_range_step<S: Into<Scalar>>(
start: S,
end: S,
options: (Kind, Device),
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_range_step(
c_tensors.as_mut_ptr(),
start.into().c_scalar,
end.into().c_scalar,
options.0.c_int(),
options.1.c_int()
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for `atg_ravel`.
pub fn f_ravel(&self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_ravel(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for `atg_real`.
pub fn f_real(&self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_real(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for `atg_reciprocal`.
pub fn f_reciprocal(&self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_reciprocal(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// In-place variant (`atg_reciprocal_`); takes `&mut self`.
pub fn f_reciprocal_(&mut self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_reciprocal_(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// `out`-parameter variant (`atg_reciprocal_out`); writes into `out`.
pub fn f_reciprocal_out(&self, out: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_reciprocal_out(c_tensors.as_mut_ptr(), out.c_tensor, self.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for `atg_reflection_pad1d`; `padding` is passed as a
/// pointer/length pair.
pub fn f_reflection_pad1d(&self, padding: impl IntList) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_reflection_pad1d(
c_tensors.as_mut_ptr(),
self.c_tensor,
padding.as_ptr(),
padding.len_i32()
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Backward pass binding (`atg_reflection_pad1d_backward`). Note the C
/// argument order: `grad_output` comes before `self`.
pub fn f_reflection_pad1d_backward(
&self,
grad_output: &Tensor,
padding: impl IntList,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_reflection_pad1d_backward(
c_tensors.as_mut_ptr(),
grad_output.c_tensor,
self.c_tensor,
padding.as_ptr(),
padding.len_i32()
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Backward pass with explicit `grad_input` destination
/// (`atg_reflection_pad1d_backward_grad_input`).
pub fn f_reflection_pad1d_backward_grad_input(
&self,
grad_input: &Tensor,
grad_output: &Tensor,
padding: impl IntList,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_reflection_pad1d_backward_grad_input(
c_tensors.as_mut_ptr(),
grad_input.c_tensor,
grad_output.c_tensor,
self.c_tensor,
padding.as_ptr(),
padding.len_i32()
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// `out`-parameter variant (`atg_reflection_pad1d_out`); writes into `out`.
pub fn f_reflection_pad1d_out(
&self,
out: &Tensor,
padding: impl IntList,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_reflection_pad1d_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
padding.as_ptr(),
padding.len_i32()
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for `atg_reflection_pad2d`.
pub fn f_reflection_pad2d(&self, padding: impl IntList) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_reflection_pad2d(
c_tensors.as_mut_ptr(),
self.c_tensor,
padding.as_ptr(),
padding.len_i32()
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Backward pass binding (`atg_reflection_pad2d_backward`); `grad_output`
/// precedes `self` in the C argument order.
pub fn f_reflection_pad2d_backward(
&self,
grad_output: &Tensor,
padding: impl IntList,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_reflection_pad2d_backward(
c_tensors.as_mut_ptr(),
grad_output.c_tensor,
self.c_tensor,
padding.as_ptr(),
padding.len_i32()
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Backward pass with explicit `grad_input` destination
/// (`atg_reflection_pad2d_backward_grad_input`).
pub fn f_reflection_pad2d_backward_grad_input(
&self,
grad_input: &Tensor,
grad_output: &Tensor,
padding: impl IntList,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_reflection_pad2d_backward_grad_input(
c_tensors.as_mut_ptr(),
grad_input.c_tensor,
grad_output.c_tensor,
self.c_tensor,
padding.as_ptr(),
padding.len_i32()
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// `out`-parameter variant (`atg_reflection_pad2d_out`); writes into `out`.
pub fn f_reflection_pad2d_out(
&self,
out: &Tensor,
padding: impl IntList,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_reflection_pad2d_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
padding.as_ptr(),
padding.len_i32()
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for `atg_reflection_pad3d`.
pub fn f_reflection_pad3d(&self, padding: impl IntList) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_reflection_pad3d(
c_tensors.as_mut_ptr(),
self.c_tensor,
padding.as_ptr(),
padding.len_i32()
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Backward pass binding (`atg_reflection_pad3d_backward`); `grad_output`
/// precedes `self` in the C argument order.
pub fn f_reflection_pad3d_backward(
&self,
grad_output: &Tensor,
padding: impl IntList,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_reflection_pad3d_backward(
c_tensors.as_mut_ptr(),
grad_output.c_tensor,
self.c_tensor,
padding.as_ptr(),
padding.len_i32()
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Backward pass with explicit `grad_input` destination
/// (`atg_reflection_pad3d_backward_grad_input`).
pub fn f_reflection_pad3d_backward_grad_input(
&self,
grad_input: &Tensor,
grad_output: &Tensor,
padding: impl IntList,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_reflection_pad3d_backward_grad_input(
c_tensors.as_mut_ptr(),
grad_input.c_tensor,
grad_output.c_tensor,
self.c_tensor,
padding.as_ptr(),
padding.len_i32()
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// `out`-parameter variant (`atg_reflection_pad3d_out`); writes into `out`.
pub fn f_reflection_pad3d_out(
&self,
out: &Tensor,
padding: impl IntList,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_reflection_pad3d_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
padding.as_ptr(),
padding.len_i32()
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for `atg_relu`.
pub fn f_relu(&self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_relu(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for `atg_relu6`.
pub fn f_relu6(&self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_relu6(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// In-place variant (`atg_relu6_`); takes `&mut self`.
pub fn f_relu6_(&mut self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_relu6_(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// In-place variant (`atg_relu_`); takes `&mut self`.
pub fn f_relu_(&mut self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_relu_(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// `out`-parameter variant (`atg_relu_out`); writes into `out`.
pub fn f_relu_out(&self, out: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_relu_out(c_tensors.as_mut_ptr(), out.c_tensor, self.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for `atg_remainder` with a scalar right operand.
pub fn f_remainder<S: Into<Scalar>>(&self, other: S) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_remainder(
c_tensors.as_mut_ptr(),
self.c_tensor,
other.into().c_scalar
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// In-place scalar variant (`atg_remainder_`); takes `&mut self`.
pub fn f_remainder_<S: Into<Scalar>>(&mut self, other: S) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_remainder_(
c_tensors.as_mut_ptr(),
self.c_tensor,
other.into().c_scalar
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// `out`-parameter scalar variant (`atg_remainder_scalar_out`); writes into
/// `out`.
pub fn f_remainder_scalar_out<S: Into<Scalar>>(
&self,
out: &Tensor,
other: S,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_remainder_scalar_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
other.into().c_scalar
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Associated-function overload (`atg_remainder_scalar_tensor`) where the
/// left operand is a scalar and the right operand a tensor.
pub fn f_remainder_scalar_tensor<S: Into<Scalar>>(
self_scalar: S,
other: &Tensor,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_remainder_scalar_tensor(
c_tensors.as_mut_ptr(),
self_scalar.into().c_scalar,
other.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// `out`-parameter variant of [`Tensor::f_remainder_scalar_tensor`]
/// (`atg_remainder_scalar_tensor_out`).
pub fn f_remainder_scalar_tensor_out<S: Into<Scalar>>(
out: &Tensor,
self_scalar: S,
other: &Tensor,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_remainder_scalar_tensor_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self_scalar.into().c_scalar,
other.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Tensor-tensor overload (`atg_remainder_tensor`).
pub fn f_remainder_tensor(&self, other: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_remainder_tensor(
c_tensors.as_mut_ptr(),
self.c_tensor,
other.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// In-place tensor-tensor variant (`atg_remainder_tensor_`); takes
/// `&mut self`.
pub fn f_remainder_tensor_(&mut self, other: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_remainder_tensor_(
c_tensors.as_mut_ptr(),
self.c_tensor,
other.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// `out`-parameter tensor-tensor variant (`atg_remainder_tensor_out`).
pub fn f_remainder_tensor_out(&self, out: &Tensor, other: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_remainder_tensor_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
other.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for `atg_renorm` with scalar `p`/`maxnorm` and an `i64`
/// `dim`.
pub fn f_renorm<S: Into<Scalar>>(
&self,
p: S,
dim: i64,
maxnorm: S,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_renorm(
c_tensors.as_mut_ptr(),
self.c_tensor,
p.into().c_scalar,
dim,
maxnorm.into().c_scalar
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// In-place variant (`atg_renorm_`); takes `&mut self`.
pub fn f_renorm_<S: Into<Scalar>>(
&mut self,
p: S,
dim: i64,
maxnorm: S,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_renorm_(
c_tensors.as_mut_ptr(),
self.c_tensor,
p.into().c_scalar,
dim,
maxnorm.into().c_scalar
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// `out`-parameter variant (`atg_renorm_out`); writes into `out`.
pub fn f_renorm_out<S: Into<Scalar>>(
&self,
out: &Tensor,
p: S,
dim: i64,
maxnorm: S,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_renorm_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
p.into().c_scalar,
dim,
maxnorm.into().c_scalar
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for `atg_repeat`; `repeats` is passed as a
/// pointer/length pair.
pub fn f_repeat(&self, repeats: impl IntList) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_repeat(
c_tensors.as_mut_ptr(),
self.c_tensor,
repeats.as_ptr(),
repeats.len_i32()
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Associated-function binding for `atg_repeat_interleave` taking a repeats
/// tensor. The optional `output_size` is flattened to a `(value, is_none)`
/// pair for the C ABI (`None` -> `(0, 1)`).
pub fn f_repeat_interleave(
repeats: &Tensor,
output_size: impl Into<Option<i64>>,
) -> Result<Tensor, TchError> {
let output_size = output_size.into();
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_repeat_interleave(
c_tensors.as_mut_ptr(),
repeats.c_tensor,
output_size.unwrap_or(0i64),
output_size.is_none() as i8
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Method overload (`atg_repeat_interleave_self_int`) with an integer repeat
/// count; optional `dim`/`output_size` use the `(value, is_none)` encoding.
pub fn f_repeat_interleave_self_int(
&self,
repeats: i64,
dim: impl Into<Option<i64>>,
output_size: impl Into<Option<i64>>,
) -> Result<Tensor, TchError> {
let dim = dim.into();
let output_size = output_size.into();
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_repeat_interleave_self_int(
c_tensors.as_mut_ptr(),
self.c_tensor,
repeats,
dim.unwrap_or(0i64),
dim.is_none() as i8,
output_size.unwrap_or(0i64),
output_size.is_none() as i8
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Method overload (`atg_repeat_interleave_self_tensor`) with a tensor of
/// repeat counts; optional `dim`/`output_size` use the `(value, is_none)`
/// encoding.
pub fn f_repeat_interleave_self_tensor(
&self,
repeats: &Tensor,
dim: impl Into<Option<i64>>,
output_size: impl Into<Option<i64>>,
) -> Result<Tensor, TchError> {
let dim = dim.into();
let output_size = output_size.into();
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_repeat_interleave_self_tensor(
c_tensors.as_mut_ptr(),
self.c_tensor,
repeats.c_tensor,
dim.unwrap_or(0i64),
dim.is_none() as i8,
output_size.unwrap_or(0i64),
output_size.is_none() as i8
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// `out`-parameter variant of [`Tensor::f_repeat_interleave`]
/// (`atg_repeat_interleave_tensor_out`).
pub fn f_repeat_interleave_tensor_out(
out: &Tensor,
repeats: &Tensor,
output_size: impl Into<Option<i64>>,
) -> Result<Tensor, TchError> {
let output_size = output_size.into();
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_repeat_interleave_tensor_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
repeats.c_tensor,
output_size.unwrap_or(0i64),
output_size.is_none() as i8
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// `out`-parameter variant of [`Tensor::f_repeat`] (`atg_repeat_out`).
pub fn f_repeat_out(&self, out: &Tensor, repeats: impl IntList) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_repeat_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
repeats.as_ptr(),
repeats.len_i32()
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for `atg_replication_pad1d`.
pub fn f_replication_pad1d(&self, padding: impl IntList) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_replication_pad1d(
c_tensors.as_mut_ptr(),
self.c_tensor,
padding.as_ptr(),
padding.len_i32()
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Backward pass binding (`atg_replication_pad1d_backward`); `grad_output`
/// precedes `self` in the C argument order.
pub fn f_replication_pad1d_backward(
&self,
grad_output: &Tensor,
padding: impl IntList,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_replication_pad1d_backward(
c_tensors.as_mut_ptr(),
grad_output.c_tensor,
self.c_tensor,
padding.as_ptr(),
padding.len_i32()
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Backward pass with explicit `grad_input` destination
/// (`atg_replication_pad1d_backward_grad_input`).
pub fn f_replication_pad1d_backward_grad_input(
&self,
grad_input: &Tensor,
grad_output: &Tensor,
padding: impl IntList,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_replication_pad1d_backward_grad_input(
c_tensors.as_mut_ptr(),
grad_input.c_tensor,
grad_output.c_tensor,
self.c_tensor,
padding.as_ptr(),
padding.len_i32()
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// `out`-parameter variant (`atg_replication_pad1d_out`); writes into `out`.
pub fn f_replication_pad1d_out(
&self,
out: &Tensor,
padding: impl IntList,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_replication_pad1d_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
padding.as_ptr(),
padding.len_i32()
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for `atg_replication_pad2d`.
pub fn f_replication_pad2d(&self, padding: impl IntList) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_replication_pad2d(
c_tensors.as_mut_ptr(),
self.c_tensor,
padding.as_ptr(),
padding.len_i32()
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Backward pass binding (`atg_replication_pad2d_backward`); `grad_output`
/// precedes `self` in the C argument order.
pub fn f_replication_pad2d_backward(
&self,
grad_output: &Tensor,
padding: impl IntList,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_replication_pad2d_backward(
c_tensors.as_mut_ptr(),
grad_output.c_tensor,
self.c_tensor,
padding.as_ptr(),
padding.len_i32()
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Backward pass with explicit `grad_input` destination
/// (`atg_replication_pad2d_backward_grad_input`).
pub fn f_replication_pad2d_backward_grad_input(
&self,
grad_input: &Tensor,
grad_output: &Tensor,
padding: impl IntList,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_replication_pad2d_backward_grad_input(
c_tensors.as_mut_ptr(),
grad_input.c_tensor,
grad_output.c_tensor,
self.c_tensor,
padding.as_ptr(),
padding.len_i32()
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// `out`-parameter variant (`atg_replication_pad2d_out`); writes into `out`.
pub fn f_replication_pad2d_out(
&self,
out: &Tensor,
padding: impl IntList,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_replication_pad2d_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
padding.as_ptr(),
padding.len_i32()
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for `atg_replication_pad3d`.
pub fn f_replication_pad3d(&self, padding: impl IntList) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_replication_pad3d(
c_tensors.as_mut_ptr(),
self.c_tensor,
padding.as_ptr(),
padding.len_i32()
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Backward pass binding (`atg_replication_pad3d_backward`); `grad_output`
/// precedes `self` in the C argument order.
pub fn f_replication_pad3d_backward(
&self,
grad_output: &Tensor,
padding: impl IntList,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_replication_pad3d_backward(
c_tensors.as_mut_ptr(),
grad_output.c_tensor,
self.c_tensor,
padding.as_ptr(),
padding.len_i32()
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Backward pass with explicit `grad_input` destination
/// (`atg_replication_pad3d_backward_grad_input`).
pub fn f_replication_pad3d_backward_grad_input(
&self,
grad_input: &Tensor,
grad_output: &Tensor,
padding: impl IntList,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_replication_pad3d_backward_grad_input(
c_tensors.as_mut_ptr(),
grad_input.c_tensor,
grad_output.c_tensor,
self.c_tensor,
padding.as_ptr(),
padding.len_i32()
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// `out`-parameter variant (`atg_replication_pad3d_out`); writes into `out`.
pub fn f_replication_pad3d_out(
&self,
out: &Tensor,
padding: impl IntList,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_replication_pad3d_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
padding.as_ptr(),
padding.len_i32()
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible in-place wrapper for `atg_requires_grad_`; the Rust `bool` is
/// lowered to the 0/1 int the C API expects. Returns the tensor or a `TchError`.
pub fn f_requires_grad_(&mut self, requires_grad: bool) -> Result<Tensor, TchError> {
    let mut c_tensors = [std::ptr::null_mut(); 1];
    unsafe_torch_err!(atg_requires_grad_(
        c_tensors.as_mut_ptr(),
        self.c_tensor,
        if requires_grad { 1 } else { 0 }
    ));
    Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible wrapper for the C op `atg_reshape`; `shape` is passed as a raw
/// pointer plus i32 length. Returns the reshaped tensor or a `TchError`.
pub fn f_reshape(&self, shape: impl IntList) -> Result<Tensor, TchError> {
    let mut c_tensors = [std::ptr::null_mut(); 1];
    unsafe_torch_err!(atg_reshape(
        c_tensors.as_mut_ptr(),
        self.c_tensor,
        shape.as_ptr(),
        shape.len_i32()
    ));
    Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible wrapper for the C op `atg_reshape_as`. Returns the result or a `TchError`.
pub fn f_reshape_as(&self, other: &Tensor) -> Result<Tensor, TchError> {
    let mut c_tensors = [std::ptr::null_mut(); 1];
    unsafe_torch_err!(atg_reshape_as(c_tensors.as_mut_ptr(), self.c_tensor, other.c_tensor));
    Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible wrapper for the C op `atg_resize`; `size` is passed as a raw
/// pointer plus i32 length. Returns the resulting tensor or a `TchError`.
pub fn f_resize(&self, size: impl IntList) -> Result<Tensor, TchError> {
    let mut c_tensors = [std::ptr::null_mut(); 1];
    unsafe_torch_err!(atg_resize(
        c_tensors.as_mut_ptr(),
        self.c_tensor,
        size.as_ptr(),
        size.len_i32()
    ));
    Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible in-place wrapper for the C op `atg_resize_`. Returns the tensor
/// or a `TchError`.
pub fn f_resize_(&mut self, size: impl IntList) -> Result<Tensor, TchError> {
    let mut c_tensors = [std::ptr::null_mut(); 1];
    unsafe_torch_err!(atg_resize_(
        c_tensors.as_mut_ptr(),
        self.c_tensor,
        size.as_ptr(),
        size.len_i32()
    ));
    Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible wrapper for the C op `atg_resize_as`. Returns the result or a `TchError`.
pub fn f_resize_as(&self, the_template: &Tensor) -> Result<Tensor, TchError> {
    let mut c_tensors = [std::ptr::null_mut(); 1];
    unsafe_torch_err!(atg_resize_as(
        c_tensors.as_mut_ptr(),
        self.c_tensor,
        the_template.c_tensor
    ));
    Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible in-place wrapper for the C op `atg_resize_as_`. Returns the tensor
/// or a `TchError`.
pub fn f_resize_as_(&mut self, the_template: &Tensor) -> Result<Tensor, TchError> {
    let mut c_tensors = [std::ptr::null_mut(); 1];
    unsafe_torch_err!(atg_resize_as_(
        c_tensors.as_mut_ptr(),
        self.c_tensor,
        the_template.c_tensor
    ));
    Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible out-variant wrapper for the C op `atg_resize_as_out`: the result
/// is written into `out`. Returns it or a `TchError`.
pub fn f_resize_as_out(&self, out: &Tensor, the_template: &Tensor) -> Result<Tensor, TchError> {
    let mut c_tensors = [std::ptr::null_mut(); 1];
    unsafe_torch_err!(atg_resize_as_out(
        c_tensors.as_mut_ptr(),
        out.c_tensor,
        self.c_tensor,
        the_template.c_tensor
    ));
    Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible wrapper for the C op `atg_resize_as_sparse`. Returns the result
/// or a `TchError`.
pub fn f_resize_as_sparse(&self, the_template: &Tensor) -> Result<Tensor, TchError> {
    let mut c_tensors = [std::ptr::null_mut(); 1];
    unsafe_torch_err!(atg_resize_as_sparse(
        c_tensors.as_mut_ptr(),
        self.c_tensor,
        the_template.c_tensor
    ));
    Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible in-place wrapper for the C op `atg_resize_as_sparse_`. Returns
/// the tensor or a `TchError`.
pub fn f_resize_as_sparse_(&mut self, the_template: &Tensor) -> Result<Tensor, TchError> {
    let mut c_tensors = [std::ptr::null_mut(); 1];
    unsafe_torch_err!(atg_resize_as_sparse_(
        c_tensors.as_mut_ptr(),
        self.c_tensor,
        the_template.c_tensor
    ));
    Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible out-variant wrapper for the C op `atg_resize_as_sparse_out`: the
/// result is written into `out`. Returns it or a `TchError`.
pub fn f_resize_as_sparse_out(
    &self,
    out: &Tensor,
    the_template: &Tensor,
) -> Result<Tensor, TchError> {
    let mut c_tensors = [std::ptr::null_mut(); 1];
    unsafe_torch_err!(atg_resize_as_sparse_out(
        c_tensors.as_mut_ptr(),
        out.c_tensor,
        self.c_tensor,
        the_template.c_tensor
    ));
    Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible out-variant wrapper for the C op `atg_resize_out`: the result is
/// written into `out`. Returns it or a `TchError`.
pub fn f_resize_out(&self, out: &Tensor, size: impl IntList) -> Result<Tensor, TchError> {
    let mut c_tensors = [std::ptr::null_mut(); 1];
    unsafe_torch_err!(atg_resize_out(
        c_tensors.as_mut_ptr(),
        out.c_tensor,
        self.c_tensor,
        size.as_ptr(),
        size.len_i32()
    ));
    Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible wrapper for the C op `atg_resolve_conj`. Returns the result or a `TchError`.
pub fn f_resolve_conj(&self) -> Result<Tensor, TchError> {
    let mut c_tensors = [std::ptr::null_mut(); 1];
    unsafe_torch_err!(atg_resolve_conj(c_tensors.as_mut_ptr(), self.c_tensor));
    Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible wrapper for the C op `atg_resolve_neg`. Returns the result or a `TchError`.
pub fn f_resolve_neg(&self) -> Result<Tensor, TchError> {
    let mut c_tensors = [std::ptr::null_mut(); 1];
    unsafe_torch_err!(atg_resolve_neg(c_tensors.as_mut_ptr(), self.c_tensor));
    Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible wrapper for `atg_retains_grad`, which returns a C int; any
/// non-zero value is mapped to `true`. Returns the flag or a `TchError`.
pub fn f_retains_grad(&self) -> Result<bool, TchError> {
    let return_;
    unsafe_torch_err!(return_ = atg_retains_grad(self.c_tensor));
    Ok(return_ != 0)
}
/// Fallible wrapper for the C op `atg_rnn_relu`. The `params` slice is lowered
/// to a raw pointer array via `ptr_list`, and all `bool` flags to 0/1 ints.
/// Returns the two output tensors or a `TchError`.
pub fn f_rnn_relu<T: Borrow<Tensor>>(
    &self,
    hx: &Tensor,
    params: &[T],
    has_biases: bool,
    num_layers: i64,
    dropout: f64,
    train: bool,
    bidirectional: bool,
    batch_first: bool,
) -> Result<(Tensor, Tensor), TchError> {
    let mut c_tensors = [std::ptr::null_mut(); 2];
    unsafe_torch_err!(atg_rnn_relu(
        c_tensors.as_mut_ptr(),
        self.c_tensor,
        hx.c_tensor,
        ptr_list(params).as_ptr(),
        params.len() as i32,
        if has_biases { 1 } else { 0 },
        num_layers,
        dropout,
        if train { 1 } else { 0 },
        if bidirectional { 1 } else { 0 },
        if batch_first { 1 } else { 0 }
    ));
    Ok((Tensor { c_tensor: c_tensors[0] }, Tensor { c_tensor: c_tensors[1] }))
}
/// Fallible wrapper for the C op `atg_rnn_relu_cell`. Optional bias tensors
/// are lowered to null pointers when `None`. Returns the result or a `TchError`.
pub fn f_rnn_relu_cell<T: Borrow<Tensor>>(
    &self,
    hx: &Tensor,
    w_ih: &Tensor,
    w_hh: &Tensor,
    b_ih: Option<T>,
    b_hh: Option<T>,
) -> Result<Tensor, TchError> {
    let mut c_tensors = [std::ptr::null_mut(); 1];
    unsafe_torch_err!(atg_rnn_relu_cell(
        c_tensors.as_mut_ptr(),
        self.c_tensor,
        hx.c_tensor,
        w_ih.c_tensor,
        w_hh.c_tensor,
        b_ih.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
        b_hh.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor)
    ));
    Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible associated-function wrapper for the C op `atg_rnn_relu_data`
/// (packed-sequence variant taking `data`/`batch_sizes` instead of `self`).
/// Returns the two output tensors or a `TchError`.
pub fn f_rnn_relu_data<T: Borrow<Tensor>>(
    data: &Tensor,
    batch_sizes: &Tensor,
    hx: &Tensor,
    params: &[T],
    has_biases: bool,
    num_layers: i64,
    dropout: f64,
    train: bool,
    bidirectional: bool,
) -> Result<(Tensor, Tensor), TchError> {
    let mut c_tensors = [std::ptr::null_mut(); 2];
    unsafe_torch_err!(atg_rnn_relu_data(
        c_tensors.as_mut_ptr(),
        data.c_tensor,
        batch_sizes.c_tensor,
        hx.c_tensor,
        ptr_list(params).as_ptr(),
        params.len() as i32,
        if has_biases { 1 } else { 0 },
        num_layers,
        dropout,
        if train { 1 } else { 0 },
        if bidirectional { 1 } else { 0 }
    ));
    Ok((Tensor { c_tensor: c_tensors[0] }, Tensor { c_tensor: c_tensors[1] }))
}
/// Fallible wrapper for the C op `atg_rnn_tanh`; same lowering as `f_rnn_relu`
/// (pointer list for `params`, 0/1 ints for flags). Returns two tensors or a `TchError`.
pub fn f_rnn_tanh<T: Borrow<Tensor>>(
    &self,
    hx: &Tensor,
    params: &[T],
    has_biases: bool,
    num_layers: i64,
    dropout: f64,
    train: bool,
    bidirectional: bool,
    batch_first: bool,
) -> Result<(Tensor, Tensor), TchError> {
    let mut c_tensors = [std::ptr::null_mut(); 2];
    unsafe_torch_err!(atg_rnn_tanh(
        c_tensors.as_mut_ptr(),
        self.c_tensor,
        hx.c_tensor,
        ptr_list(params).as_ptr(),
        params.len() as i32,
        if has_biases { 1 } else { 0 },
        num_layers,
        dropout,
        if train { 1 } else { 0 },
        if bidirectional { 1 } else { 0 },
        if batch_first { 1 } else { 0 }
    ));
    Ok((Tensor { c_tensor: c_tensors[0] }, Tensor { c_tensor: c_tensors[1] }))
}
/// Fallible wrapper for the C op `atg_rnn_tanh_cell`. Optional bias tensors
/// are lowered to null pointers when `None`. Returns the result or a `TchError`.
pub fn f_rnn_tanh_cell<T: Borrow<Tensor>>(
    &self,
    hx: &Tensor,
    w_ih: &Tensor,
    w_hh: &Tensor,
    b_ih: Option<T>,
    b_hh: Option<T>,
) -> Result<Tensor, TchError> {
    let mut c_tensors = [std::ptr::null_mut(); 1];
    unsafe_torch_err!(atg_rnn_tanh_cell(
        c_tensors.as_mut_ptr(),
        self.c_tensor,
        hx.c_tensor,
        w_ih.c_tensor,
        w_hh.c_tensor,
        b_ih.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
        b_hh.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor)
    ));
    Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible associated-function wrapper for the C op `atg_rnn_tanh_data`
/// (packed-sequence variant). Returns the two output tensors or a `TchError`.
pub fn f_rnn_tanh_data<T: Borrow<Tensor>>(
    data: &Tensor,
    batch_sizes: &Tensor,
    hx: &Tensor,
    params: &[T],
    has_biases: bool,
    num_layers: i64,
    dropout: f64,
    train: bool,
    bidirectional: bool,
) -> Result<(Tensor, Tensor), TchError> {
    let mut c_tensors = [std::ptr::null_mut(); 2];
    unsafe_torch_err!(atg_rnn_tanh_data(
        c_tensors.as_mut_ptr(),
        data.c_tensor,
        batch_sizes.c_tensor,
        hx.c_tensor,
        ptr_list(params).as_ptr(),
        params.len() as i32,
        if has_biases { 1 } else { 0 },
        num_layers,
        dropout,
        if train { 1 } else { 0 },
        if bidirectional { 1 } else { 0 }
    ));
    Ok((Tensor { c_tensor: c_tensors[0] }, Tensor { c_tensor: c_tensors[1] }))
}
/// Fallible wrapper for the C op `atg_roll`; both int lists are passed as
/// pointer + i32 length pairs. Returns the rolled tensor or a `TchError`.
pub fn f_roll(&self, shifts: impl IntList, dims: impl IntList) -> Result<Tensor, TchError> {
    let mut c_tensors = [std::ptr::null_mut(); 1];
    unsafe_torch_err!(atg_roll(
        c_tensors.as_mut_ptr(),
        self.c_tensor,
        shifts.as_ptr(),
        shifts.len_i32(),
        dims.as_ptr(),
        dims.len_i32()
    ));
    Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible out-variant wrapper for the C op `atg_roll_out`: the result is
/// written into `out`. Returns it or a `TchError`.
pub fn f_roll_out(
    &self,
    out: &Tensor,
    shifts: impl IntList,
    dims: impl IntList,
) -> Result<Tensor, TchError> {
    let mut c_tensors = [std::ptr::null_mut(); 1];
    unsafe_torch_err!(atg_roll_out(
        c_tensors.as_mut_ptr(),
        out.c_tensor,
        self.c_tensor,
        shifts.as_ptr(),
        shifts.len_i32(),
        dims.as_ptr(),
        dims.len_i32()
    ));
    Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible wrapper for the C op `atg_rot90`. Returns the result or a `TchError`.
pub fn f_rot90(&self, k: i64, dims: impl IntList) -> Result<Tensor, TchError> {
    let mut c_tensors = [std::ptr::null_mut(); 1];
    unsafe_torch_err!(atg_rot90(
        c_tensors.as_mut_ptr(),
        self.c_tensor,
        k,
        dims.as_ptr(),
        dims.len_i32()
    ));
    Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible out-variant wrapper for the C op `atg_rot90_out`: the result is
/// written into `out`. Returns it or a `TchError`.
pub fn f_rot90_out(
    &self,
    out: &Tensor,
    k: i64,
    dims: impl IntList,
) -> Result<Tensor, TchError> {
    let mut c_tensors = [std::ptr::null_mut(); 1];
    unsafe_torch_err!(atg_rot90_out(
        c_tensors.as_mut_ptr(),
        out.c_tensor,
        self.c_tensor,
        k,
        dims.as_ptr(),
        dims.len_i32()
    ));
    Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible wrapper for the C op `atg_round`. Returns the result or a `TchError`.
pub fn f_round(&self) -> Result<Tensor, TchError> {
    let mut c_tensors = [std::ptr::null_mut(); 1];
    unsafe_torch_err!(atg_round(c_tensors.as_mut_ptr(), self.c_tensor));
    Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible in-place wrapper for the C op `atg_round_`. Returns the tensor or a `TchError`.
pub fn f_round_(&mut self) -> Result<Tensor, TchError> {
    let mut c_tensors = [std::ptr::null_mut(); 1];
    unsafe_torch_err!(atg_round_(c_tensors.as_mut_ptr(), self.c_tensor));
    Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible wrapper for the C op `atg_round_decimals`. Returns the result or a `TchError`.
pub fn f_round_decimals(&self, decimals: i64) -> Result<Tensor, TchError> {
    let mut c_tensors = [std::ptr::null_mut(); 1];
    unsafe_torch_err!(atg_round_decimals(c_tensors.as_mut_ptr(), self.c_tensor, decimals));
    Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible in-place wrapper for the C op `atg_round_decimals_`. Returns the
/// tensor or a `TchError`.
pub fn f_round_decimals_(&mut self, decimals: i64) -> Result<Tensor, TchError> {
    let mut c_tensors = [std::ptr::null_mut(); 1];
    unsafe_torch_err!(atg_round_decimals_(c_tensors.as_mut_ptr(), self.c_tensor, decimals));
    Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible out-variant wrapper for the C op `atg_round_decimals_out`: the
/// result is written into `out`. Returns it or a `TchError`.
pub fn f_round_decimals_out(&self, out: &Tensor, decimals: i64) -> Result<Tensor, TchError> {
    let mut c_tensors = [std::ptr::null_mut(); 1];
    unsafe_torch_err!(atg_round_decimals_out(
        c_tensors.as_mut_ptr(),
        out.c_tensor,
        self.c_tensor,
        decimals
    ));
    Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible out-variant wrapper for the C op `atg_round_out`. Returns the
/// result or a `TchError`.
pub fn f_round_out(&self, out: &Tensor) -> Result<Tensor, TchError> {
    let mut c_tensors = [std::ptr::null_mut(); 1];
    unsafe_torch_err!(atg_round_out(c_tensors.as_mut_ptr(), out.c_tensor, self.c_tensor));
    Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible wrapper for the C op `atg_row_indices`. Returns the result or a `TchError`.
pub fn f_row_indices(&self) -> Result<Tensor, TchError> {
    let mut c_tensors = [std::ptr::null_mut(); 1];
    unsafe_torch_err!(atg_row_indices(c_tensors.as_mut_ptr(), self.c_tensor));
    Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible wrapper for the C op `atg_row_indices_copy`. Returns the result or a `TchError`.
pub fn f_row_indices_copy(&self) -> Result<Tensor, TchError> {
    let mut c_tensors = [std::ptr::null_mut(); 1];
    unsafe_torch_err!(atg_row_indices_copy(c_tensors.as_mut_ptr(), self.c_tensor));
    Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible out-variant wrapper for the C op `atg_row_indices_copy_out`: the
/// result is written into `out`. Returns it or a `TchError`.
pub fn f_row_indices_copy_out(&self, out: &Tensor) -> Result<Tensor, TchError> {
    let mut c_tensors = [std::ptr::null_mut(); 1];
    unsafe_torch_err!(atg_row_indices_copy_out(
        c_tensors.as_mut_ptr(),
        out.c_tensor,
        self.c_tensor
    ));
    Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible associated-function wrapper for the C op `atg_row_stack`; the
/// tensor slice is lowered via `ptr_list`. Returns the result or a `TchError`.
pub fn f_row_stack<T: Borrow<Tensor>>(tensors: &[T]) -> Result<Tensor, TchError> {
    let mut c_tensors = [std::ptr::null_mut(); 1];
    unsafe_torch_err!(atg_row_stack(
        c_tensors.as_mut_ptr(),
        ptr_list(tensors).as_ptr(),
        tensors.len() as i32
    ));
    Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible out-variant wrapper for the C op `atg_row_stack_out`: the result
/// is written into `out`. Returns it or a `TchError`.
pub fn f_row_stack_out<T: Borrow<Tensor>>(
    out: &Tensor,
    tensors: &[T],
) -> Result<Tensor, TchError> {
    let mut c_tensors = [std::ptr::null_mut(); 1];
    unsafe_torch_err!(atg_row_stack_out(
        c_tensors.as_mut_ptr(),
        out.c_tensor,
        ptr_list(tensors).as_ptr(),
        tensors.len() as i32
    ));
    Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible wrapper for the C op `atg_rrelu`; `training` is lowered to a 0/1
/// int. Returns the result or a `TchError`.
pub fn f_rrelu(&self, training: bool) -> Result<Tensor, TchError> {
    let mut c_tensors = [std::ptr::null_mut(); 1];
    unsafe_torch_err!(atg_rrelu(
        c_tensors.as_mut_ptr(),
        self.c_tensor,
        if training { 1 } else { 0 }
    ));
    Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible in-place wrapper for the C op `atg_rrelu_`. Returns the tensor or
/// a `TchError`.
pub fn f_rrelu_(&mut self, training: bool) -> Result<Tensor, TchError> {
    let mut c_tensors = [std::ptr::null_mut(); 1];
    unsafe_torch_err!(atg_rrelu_(
        c_tensors.as_mut_ptr(),
        self.c_tensor,
        if training { 1 } else { 0 }
    ));
    Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible wrapper for the C op `atg_rrelu_with_noise`. Returns the result
/// or a `TchError`.
pub fn f_rrelu_with_noise(&self, noise: &Tensor, training: bool) -> Result<Tensor, TchError> {
    let mut c_tensors = [std::ptr::null_mut(); 1];
    unsafe_torch_err!(atg_rrelu_with_noise(
        c_tensors.as_mut_ptr(),
        self.c_tensor,
        noise.c_tensor,
        if training { 1 } else { 0 }
    ));
    Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible in-place wrapper for the C op `atg_rrelu_with_noise_`. Returns
/// the tensor or a `TchError`.
pub fn f_rrelu_with_noise_(
    &mut self,
    noise: &Tensor,
    training: bool,
) -> Result<Tensor, TchError> {
    let mut c_tensors = [std::ptr::null_mut(); 1];
    unsafe_torch_err!(atg_rrelu_with_noise_(
        c_tensors.as_mut_ptr(),
        self.c_tensor,
        noise.c_tensor,
        if training { 1 } else { 0 }
    ));
    Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible wrapper for the C op `atg_rrelu_with_noise_backward`; `lower` and
/// `upper` are converted to C scalars. Note `grad_output` precedes `self` in
/// the C call. Returns the gradient tensor or a `TchError`.
pub fn f_rrelu_with_noise_backward<S: Into<Scalar>>(
    &self,
    grad_output: &Tensor,
    noise: &Tensor,
    lower: S,
    upper: S,
    training: bool,
    self_is_result: bool,
) -> Result<Tensor, TchError> {
    let mut c_tensors = [std::ptr::null_mut(); 1];
    unsafe_torch_err!(atg_rrelu_with_noise_backward(
        c_tensors.as_mut_ptr(),
        grad_output.c_tensor,
        self.c_tensor,
        noise.c_tensor,
        lower.into().c_scalar,
        upper.into().c_scalar,
        if training { 1 } else { 0 },
        if self_is_result { 1 } else { 0 }
    ));
    Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible out-variant wrapper for the C op `atg_rrelu_with_noise_backward_out`:
/// the result is written into `out`. Returns it or a `TchError`.
pub fn f_rrelu_with_noise_backward_out<S: Into<Scalar>>(
    &self,
    out: &Tensor,
    grad_output: &Tensor,
    noise: &Tensor,
    lower: S,
    upper: S,
    training: bool,
    self_is_result: bool,
) -> Result<Tensor, TchError> {
    let mut c_tensors = [std::ptr::null_mut(); 1];
    unsafe_torch_err!(atg_rrelu_with_noise_backward_out(
        c_tensors.as_mut_ptr(),
        out.c_tensor,
        grad_output.c_tensor,
        self.c_tensor,
        noise.c_tensor,
        lower.into().c_scalar,
        upper.into().c_scalar,
        if training { 1 } else { 0 },
        if self_is_result { 1 } else { 0 }
    ));
    Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible out-variant wrapper for the C op `atg_rrelu_with_noise_out`: the
/// result is written into `out`. Returns it or a `TchError`.
pub fn f_rrelu_with_noise_out(
    &self,
    out: &Tensor,
    noise: &Tensor,
    training: bool,
) -> Result<Tensor, TchError> {
    let mut c_tensors = [std::ptr::null_mut(); 1];
    unsafe_torch_err!(atg_rrelu_with_noise_out(
        c_tensors.as_mut_ptr(),
        out.c_tensor,
        self.c_tensor,
        noise.c_tensor,
        if training { 1 } else { 0 }
    ));
    Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible wrapper for the C op `atg_rsqrt`. Returns the result or a `TchError`.
pub fn f_rsqrt(&self) -> Result<Tensor, TchError> {
    let mut c_tensors = [std::ptr::null_mut(); 1];
    unsafe_torch_err!(atg_rsqrt(c_tensors.as_mut_ptr(), self.c_tensor));
    Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible in-place wrapper for the C op `atg_rsqrt_`. Returns the tensor or a `TchError`.
pub fn f_rsqrt_(&mut self) -> Result<Tensor, TchError> {
    let mut c_tensors = [std::ptr::null_mut(); 1];
    unsafe_torch_err!(atg_rsqrt_(c_tensors.as_mut_ptr(), self.c_tensor));
    Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible out-variant wrapper for the C op `atg_rsqrt_out`. Returns the
/// result or a `TchError`.
pub fn f_rsqrt_out(&self, out: &Tensor) -> Result<Tensor, TchError> {
    let mut c_tensors = [std::ptr::null_mut(); 1];
    unsafe_torch_err!(atg_rsqrt_out(c_tensors.as_mut_ptr(), out.c_tensor, self.c_tensor));
    Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible wrapper for the C op `atg_rsub`. Returns the result or a `TchError`.
pub fn f_rsub(&self, other: &Tensor) -> Result<Tensor, TchError> {
    let mut c_tensors = [std::ptr::null_mut(); 1];
    unsafe_torch_err!(atg_rsub(c_tensors.as_mut_ptr(), self.c_tensor, other.c_tensor));
    Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible wrapper for the scalar variant `atg_rsub_scalar`; `other` is
/// converted to a C scalar. Returns the result or a `TchError`.
pub fn f_rsub_scalar<S: Into<Scalar>>(&self, other: S) -> Result<Tensor, TchError> {
    let mut c_tensors = [std::ptr::null_mut(); 1];
    unsafe_torch_err!(atg_rsub_scalar(
        c_tensors.as_mut_ptr(),
        self.c_tensor,
        other.into().c_scalar
    ));
    Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible out-variant wrapper for `atg_rsub_scalar_out`: the result is
/// written into `out`. Returns it or a `TchError`.
pub fn f_rsub_scalar_out<S: Into<Scalar>>(
    &self,
    out: &Tensor,
    other: S,
) -> Result<Tensor, TchError> {
    let mut c_tensors = [std::ptr::null_mut(); 1];
    unsafe_torch_err!(atg_rsub_scalar_out(
        c_tensors.as_mut_ptr(),
        out.c_tensor,
        self.c_tensor,
        other.into().c_scalar
    ));
    Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible out-variant wrapper for the C op `atg_rsub_tensor_out`: the
/// result is written into `out`. Returns it or a `TchError`.
pub fn f_rsub_tensor_out(&self, out: &Tensor, other: &Tensor) -> Result<Tensor, TchError> {
    let mut c_tensors = [std::ptr::null_mut(); 1];
    unsafe_torch_err!(atg_rsub_tensor_out(
        c_tensors.as_mut_ptr(),
        out.c_tensor,
        self.c_tensor,
        other.c_tensor
    ));
    Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible associated-function wrapper for `atg_scalar_tensor`; the
/// `(Kind, Device)` options pair is lowered to two C ints. Returns the new
/// tensor or a `TchError`.
pub fn f_scalar_tensor<S: Into<Scalar>>(
    s: S,
    options: (Kind, Device),
) -> Result<Tensor, TchError> {
    let mut c_tensors = [std::ptr::null_mut(); 1];
    unsafe_torch_err!(atg_scalar_tensor(
        c_tensors.as_mut_ptr(),
        s.into().c_scalar,
        options.0.c_int(),
        options.1.c_int()
    ));
    Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible out-variant wrapper for `atg_scalar_tensor_out`: the result is
/// written into `out`. Returns it or a `TchError`.
pub fn f_scalar_tensor_out<S: Into<Scalar>>(out: &Tensor, s: S) -> Result<Tensor, TchError> {
    let mut c_tensors = [std::ptr::null_mut(); 1];
    unsafe_torch_err!(atg_scalar_tensor_out(
        c_tensors.as_mut_ptr(),
        out.c_tensor,
        s.into().c_scalar
    ));
    Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible wrapper for the C op `atg_scaled_dot_product_attention`. The
/// optional `scale` is lowered to the (value, is-null) pair the C ABI uses:
/// NAN as the sentinel payload plus an i8 null flag, so the C side can tell
/// "no scale" apart from any real value.
pub fn f_scaled_dot_product_attention<T: Borrow<Tensor>>(
    query: &Tensor,
    key: &Tensor,
    value: &Tensor,
    attn_mask: Option<T>,
    dropout_p: f64,
    is_causal: bool,
    scale: impl Into<Option<f64>>,
) -> Result<Tensor, TchError> {
    let scale = scale.into();
    let mut c_tensors = [std::ptr::null_mut(); 1];
    unsafe_torch_err!(atg_scaled_dot_product_attention(
        c_tensors.as_mut_ptr(),
        query.c_tensor,
        key.c_tensor,
        value.c_tensor,
        attn_mask.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
        dropout_p,
        if is_causal { 1 } else { 0 },
        scale.unwrap_or(std::f64::NAN),
        scale.is_none() as i8
    ));
    Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible wrapper for the C op `atg_scatter`. Returns the result or a `TchError`.
pub fn f_scatter(&self, dim: i64, index: &Tensor, src: &Tensor) -> Result<Tensor, TchError> {
    let mut c_tensors = [std::ptr::null_mut(); 1];
    unsafe_torch_err!(atg_scatter(
        c_tensors.as_mut_ptr(),
        self.c_tensor,
        dim,
        index.c_tensor,
        src.c_tensor
    ));
    Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible in-place wrapper for the C op `atg_scatter_`. Returns the tensor
/// or a `TchError`.
pub fn f_scatter_(
    &mut self,
    dim: i64,
    index: &Tensor,
    src: &Tensor,
) -> Result<Tensor, TchError> {
    let mut c_tensors = [std::ptr::null_mut(); 1];
    unsafe_torch_err!(atg_scatter_(
        c_tensors.as_mut_ptr(),
        self.c_tensor,
        dim,
        index.c_tensor,
        src.c_tensor
    ));
    Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible wrapper for the C op `atg_scatter_add`. Returns the result or a `TchError`.
pub fn f_scatter_add(
    &self,
    dim: i64,
    index: &Tensor,
    src: &Tensor,
) -> Result<Tensor, TchError> {
    let mut c_tensors = [std::ptr::null_mut(); 1];
    unsafe_torch_err!(atg_scatter_add(
        c_tensors.as_mut_ptr(),
        self.c_tensor,
        dim,
        index.c_tensor,
        src.c_tensor
    ));
    Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible in-place wrapper for the C op `atg_scatter_add_`. Returns the
/// tensor or a `TchError`.
pub fn f_scatter_add_(
    &mut self,
    dim: i64,
    index: &Tensor,
    src: &Tensor,
) -> Result<Tensor, TchError> {
    let mut c_tensors = [std::ptr::null_mut(); 1];
    unsafe_torch_err!(atg_scatter_add_(
        c_tensors.as_mut_ptr(),
        self.c_tensor,
        dim,
        index.c_tensor,
        src.c_tensor
    ));
    Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible out-variant wrapper for the C op `atg_scatter_add_out`: the
/// result is written into `out`. Returns it or a `TchError`.
pub fn f_scatter_add_out(
    &self,
    out: &Tensor,
    dim: i64,
    index: &Tensor,
    src: &Tensor,
) -> Result<Tensor, TchError> {
    let mut c_tensors = [std::ptr::null_mut(); 1];
    unsafe_torch_err!(atg_scatter_add_out(
        c_tensors.as_mut_ptr(),
        out.c_tensor,
        self.c_tensor,
        dim,
        index.c_tensor,
        src.c_tensor
    ));
    Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible wrapper for the C op `atg_scatter_reduce`; the `reduce` string is
/// passed non-NUL-terminated as a pointer + i32 length pair. Returns the
/// result or a `TchError`.
pub fn f_scatter_reduce(
    &self,
    dim: i64,
    index: &Tensor,
    src: &Tensor,
    reduce: &str,
) -> Result<Tensor, TchError> {
    let mut c_tensors = [std::ptr::null_mut(); 1];
    unsafe_torch_err!(atg_scatter_reduce(
        c_tensors.as_mut_ptr(),
        self.c_tensor,
        dim,
        index.c_tensor,
        src.c_tensor,
        reduce.as_ptr(),
        reduce.len() as i32
    ));
    Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible in-place wrapper for the C op `atg_scatter_reduce_`. Returns the
/// tensor or a `TchError`.
pub fn f_scatter_reduce_(
    &mut self,
    dim: i64,
    index: &Tensor,
    src: &Tensor,
    reduce: &str,
) -> Result<Tensor, TchError> {
    let mut c_tensors = [std::ptr::null_mut(); 1];
    unsafe_torch_err!(atg_scatter_reduce_(
        c_tensors.as_mut_ptr(),
        self.c_tensor,
        dim,
        index.c_tensor,
        src.c_tensor,
        reduce.as_ptr(),
        reduce.len() as i32
    ));
    Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible out-variant wrapper for the C op `atg_scatter_reduce_out`: the
/// result is written into `out`. Returns it or a `TchError`.
pub fn f_scatter_reduce_out(
    &self,
    out: &Tensor,
    dim: i64,
    index: &Tensor,
    src: &Tensor,
    reduce: &str,
) -> Result<Tensor, TchError> {
    let mut c_tensors = [std::ptr::null_mut(); 1];
    unsafe_torch_err!(atg_scatter_reduce_out(
        c_tensors.as_mut_ptr(),
        out.c_tensor,
        self.c_tensor,
        dim,
        index.c_tensor,
        src.c_tensor,
        reduce.as_ptr(),
        reduce.len() as i32
    ));
    Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible out-variant wrapper for the C op `atg_scatter_src_out`: the
/// result is written into `out`. Returns it or a `TchError`.
pub fn f_scatter_src_out(
    &self,
    out: &Tensor,
    dim: i64,
    index: &Tensor,
    src: &Tensor,
) -> Result<Tensor, TchError> {
    let mut c_tensors = [std::ptr::null_mut(); 1];
    unsafe_torch_err!(atg_scatter_src_out(
        c_tensors.as_mut_ptr(),
        out.c_tensor,
        self.c_tensor,
        dim,
        index.c_tensor,
        src.c_tensor
    ));
    Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible wrapper for the scalar variant `atg_scatter_value`; `value` is
/// converted to a C scalar. Returns the result or a `TchError`.
pub fn f_scatter_value<S: Into<Scalar>>(
    &self,
    dim: i64,
    index: &Tensor,
    value: S,
) -> Result<Tensor, TchError> {
    let mut c_tensors = [std::ptr::null_mut(); 1];
    unsafe_torch_err!(atg_scatter_value(
        c_tensors.as_mut_ptr(),
        self.c_tensor,
        dim,
        index.c_tensor,
        value.into().c_scalar
    ));
    Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible in-place wrapper for the scalar variant `atg_scatter_value_`.
/// Returns the tensor or a `TchError`.
pub fn f_scatter_value_<S: Into<Scalar>>(
    &mut self,
    dim: i64,
    index: &Tensor,
    value: S,
) -> Result<Tensor, TchError> {
    let mut c_tensors = [std::ptr::null_mut(); 1];
    unsafe_torch_err!(atg_scatter_value_(
        c_tensors.as_mut_ptr(),
        self.c_tensor,
        dim,
        index.c_tensor,
        value.into().c_scalar
    ));
    Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible out-variant wrapper for `atg_scatter_value_out`: the result is
/// written into `out`. Returns it or a `TchError`.
pub fn f_scatter_value_out<S: Into<Scalar>>(
    &self,
    out: &Tensor,
    dim: i64,
    index: &Tensor,
    value: S,
) -> Result<Tensor, TchError> {
    let mut c_tensors = [std::ptr::null_mut(); 1];
    unsafe_torch_err!(atg_scatter_value_out(
        c_tensors.as_mut_ptr(),
        out.c_tensor,
        self.c_tensor,
        dim,
        index.c_tensor,
        value.into().c_scalar
    ));
    Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible wrapper for the C op `atg_scatter_value_reduce`; `value` becomes
/// a C scalar and `reduce` a pointer + i32 length pair. Returns the result or
/// a `TchError`.
pub fn f_scatter_value_reduce<S: Into<Scalar>>(
    &self,
    dim: i64,
    index: &Tensor,
    value: S,
    reduce: &str,
) -> Result<Tensor, TchError> {
    let mut c_tensors = [std::ptr::null_mut(); 1];
    unsafe_torch_err!(atg_scatter_value_reduce(
        c_tensors.as_mut_ptr(),
        self.c_tensor,
        dim,
        index.c_tensor,
        value.into().c_scalar,
        reduce.as_ptr(),
        reduce.len() as i32
    ));
    Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible in-place wrapper for the C op `atg_scatter_value_reduce_`.
/// Returns the tensor or a `TchError`.
pub fn f_scatter_value_reduce_<S: Into<Scalar>>(
    &mut self,
    dim: i64,
    index: &Tensor,
    value: S,
    reduce: &str,
) -> Result<Tensor, TchError> {
    let mut c_tensors = [std::ptr::null_mut(); 1];
    unsafe_torch_err!(atg_scatter_value_reduce_(
        c_tensors.as_mut_ptr(),
        self.c_tensor,
        dim,
        index.c_tensor,
        value.into().c_scalar,
        reduce.as_ptr(),
        reduce.len() as i32
    ));
    Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible out-variant wrapper for `atg_scatter_value_reduce_out`: the
/// result is written into `out`. Returns it or a `TchError`.
pub fn f_scatter_value_reduce_out<S: Into<Scalar>>(
    &self,
    out: &Tensor,
    dim: i64,
    index: &Tensor,
    value: S,
    reduce: &str,
) -> Result<Tensor, TchError> {
    let mut c_tensors = [std::ptr::null_mut(); 1];
    unsafe_torch_err!(atg_scatter_value_reduce_out(
        c_tensors.as_mut_ptr(),
        out.c_tensor,
        self.c_tensor,
        dim,
        index.c_tensor,
        value.into().c_scalar,
        reduce.as_ptr(),
        reduce.len() as i32
    ));
    Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible wrapper for the C op `atg_searchsorted`; note the C call takes
/// `sorted_sequence` before `self`, and an absent `sorter` is lowered to a
/// null pointer. Returns the result or a `TchError`.
pub fn f_searchsorted<T: Borrow<Tensor>>(
    &self,
    sorted_sequence: &Tensor,
    out_int32: bool,
    right: bool,
    side: &str,
    sorter: Option<T>,
) -> Result<Tensor, TchError> {
    let mut c_tensors = [std::ptr::null_mut(); 1];
    unsafe_torch_err!(atg_searchsorted(
        c_tensors.as_mut_ptr(),
        sorted_sequence.c_tensor,
        self.c_tensor,
        if out_int32 { 1 } else { 0 },
        if right { 1 } else { 0 },
        side.as_ptr(),
        side.len() as i32,
        sorter.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor)
    ));
    Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible associated-function wrapper for `atg_searchsorted_scalar`
/// (scalar-query variant). Returns the result or a `TchError`.
pub fn f_searchsorted_scalar<T: Borrow<Tensor>, S: Into<Scalar>>(
    sorted_sequence: &Tensor,
    self_scalar: S,
    out_int32: bool,
    right: bool,
    side: &str,
    sorter: Option<T>,
) -> Result<Tensor, TchError> {
    let mut c_tensors = [std::ptr::null_mut(); 1];
    unsafe_torch_err!(atg_searchsorted_scalar(
        c_tensors.as_mut_ptr(),
        sorted_sequence.c_tensor,
        self_scalar.into().c_scalar,
        if out_int32 { 1 } else { 0 },
        if right { 1 } else { 0 },
        side.as_ptr(),
        side.len() as i32,
        sorter.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor)
    ));
    Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible out-variant wrapper for `atg_searchsorted_scalar_out`: the result
/// is written into `out`. Returns it or a `TchError`.
pub fn f_searchsorted_scalar_out<T: Borrow<Tensor>, S: Into<Scalar>>(
    out: &Tensor,
    sorted_sequence: &Tensor,
    self_scalar: S,
    out_int32: bool,
    right: bool,
    side: &str,
    sorter: Option<T>,
) -> Result<Tensor, TchError> {
    let mut c_tensors = [std::ptr::null_mut(); 1];
    unsafe_torch_err!(atg_searchsorted_scalar_out(
        c_tensors.as_mut_ptr(),
        out.c_tensor,
        sorted_sequence.c_tensor,
        self_scalar.into().c_scalar,
        if out_int32 { 1 } else { 0 },
        if right { 1 } else { 0 },
        side.as_ptr(),
        side.len() as i32,
        sorter.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor)
    ));
    Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible out-variant wrapper for `atg_searchsorted_tensor_out`: the result
/// is written into `out`. Returns it or a `TchError`.
pub fn f_searchsorted_tensor_out<T: Borrow<Tensor>>(
    &self,
    out: &Tensor,
    sorted_sequence: &Tensor,
    out_int32: bool,
    right: bool,
    side: &str,
    sorter: Option<T>,
) -> Result<Tensor, TchError> {
    let mut c_tensors = [std::ptr::null_mut(); 1];
    unsafe_torch_err!(atg_searchsorted_tensor_out(
        c_tensors.as_mut_ptr(),
        out.c_tensor,
        sorted_sequence.c_tensor,
        self.c_tensor,
        if out_int32 { 1 } else { 0 },
        if right { 1 } else { 0 },
        side.as_ptr(),
        side.len() as i32,
        sorter.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor)
    ));
    Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible associated-function wrapper for `atg_segment_reduce`. Optional
/// tensors are lowered to null pointers when `None`; `initial` is converted
/// to a C scalar. Returns the result or a `TchError`.
pub fn f_segment_reduce<T: Borrow<Tensor>, S: Into<Scalar>>(
    data: &Tensor,
    reduce: &str,
    lengths: Option<T>,
    indices: Option<T>,
    offsets: Option<T>,
    axis: i64,
    unsafe_: bool,
    initial: S,
) -> Result<Tensor, TchError> {
    let mut c_tensors = [std::ptr::null_mut(); 1];
    unsafe_torch_err!(atg_segment_reduce(
        c_tensors.as_mut_ptr(),
        data.c_tensor,
        reduce.as_ptr(),
        reduce.len() as i32,
        lengths.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
        indices.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
        offsets.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
        axis,
        if unsafe_ { 1 } else { 0 },
        initial.into().c_scalar
    ));
    Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible out-variant wrapper for `atg_segment_reduce_out`: the result is
/// written into `out`. Returns it or a `TchError`.
pub fn f_segment_reduce_out<T: Borrow<Tensor>, S: Into<Scalar>>(
    out: &Tensor,
    data: &Tensor,
    reduce: &str,
    lengths: Option<T>,
    indices: Option<T>,
    offsets: Option<T>,
    axis: i64,
    unsafe_: bool,
    initial: S,
) -> Result<Tensor, TchError> {
    let mut c_tensors = [std::ptr::null_mut(); 1];
    unsafe_torch_err!(atg_segment_reduce_out(
        c_tensors.as_mut_ptr(),
        out.c_tensor,
        data.c_tensor,
        reduce.as_ptr(),
        reduce.len() as i32,
        lengths.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
        indices.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
        offsets.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
        axis,
        if unsafe_ { 1 } else { 0 },
        initial.into().c_scalar
    ));
    Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible wrapper for the C op `atg_select`. Returns the result or a `TchError`.
pub fn f_select(&self, dim: i64, index: i64) -> Result<Tensor, TchError> {
    let mut c_tensors = [std::ptr::null_mut(); 1];
    unsafe_torch_err!(atg_select(c_tensors.as_mut_ptr(), self.c_tensor, dim, index));
    Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible associated-function wrapper for the C op `atg_select_backward`.
/// Returns the gradient tensor or a `TchError`.
pub fn f_select_backward(
    grad_output: &Tensor,
    input_sizes: impl IntList,
    dim: i64,
    index: i64,
) -> Result<Tensor, TchError> {
    let mut c_tensors = [std::ptr::null_mut(); 1];
    unsafe_torch_err!(atg_select_backward(
        c_tensors.as_mut_ptr(),
        grad_output.c_tensor,
        input_sizes.as_ptr(),
        input_sizes.len_i32(),
        dim,
        index
    ));
    Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible out-variant wrapper for `atg_select_backward_out`: the result is
/// written into `out`. Returns it or a `TchError`.
pub fn f_select_backward_out(
    out: &Tensor,
    grad_output: &Tensor,
    input_sizes: impl IntList,
    dim: i64,
    index: i64,
) -> Result<Tensor, TchError> {
    let mut c_tensors = [std::ptr::null_mut(); 1];
    unsafe_torch_err!(atg_select_backward_out(
        c_tensors.as_mut_ptr(),
        out.c_tensor,
        grad_output.c_tensor,
        input_sizes.as_ptr(),
        input_sizes.len_i32(),
        dim,
        index
    ));
    Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible wrapper for the C op `atg_select_copy`. Returns the result or a `TchError`.
pub fn f_select_copy(&self, dim: i64, index: i64) -> Result<Tensor, TchError> {
    let mut c_tensors = [std::ptr::null_mut(); 1];
    unsafe_torch_err!(atg_select_copy(c_tensors.as_mut_ptr(), self.c_tensor, dim, index));
    Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible out-variant wrapper for `atg_select_copy_int_out`: the result is
/// written into `out`. Returns it or a `TchError`.
pub fn f_select_copy_int_out(
    &self,
    out: &Tensor,
    dim: i64,
    index: i64,
) -> Result<Tensor, TchError> {
    let mut c_tensors = [std::ptr::null_mut(); 1];
    unsafe_torch_err!(atg_select_copy_int_out(
        c_tensors.as_mut_ptr(),
        out.c_tensor,
        self.c_tensor,
        dim,
        index
    ));
    Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible wrapper for the C op `atg_select_scatter`. Returns the result or
/// a `TchError`.
pub fn f_select_scatter(&self, src: &Tensor, dim: i64, index: i64) -> Result<Tensor, TchError> {
    let mut c_tensors = [std::ptr::null_mut(); 1];
    unsafe_torch_err!(atg_select_scatter(
        c_tensors.as_mut_ptr(),
        self.c_tensor,
        src.c_tensor,
        dim,
        index
    ));
    Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible out-variant wrapper for `atg_select_scatter_out`: the result is
/// written into `out`. Returns it or a `TchError`.
pub fn f_select_scatter_out(
    &self,
    out: &Tensor,
    src: &Tensor,
    dim: i64,
    index: i64,
) -> Result<Tensor, TchError> {
    let mut c_tensors = [std::ptr::null_mut(); 1];
    unsafe_torch_err!(atg_select_scatter_out(
        c_tensors.as_mut_ptr(),
        out.c_tensor,
        self.c_tensor,
        src.c_tensor,
        dim,
        index
    ));
    Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible wrapper for the C op `atg_selu`. Returns the result or a `TchError`.
pub fn f_selu(&self) -> Result<Tensor, TchError> {
    let mut c_tensors = [std::ptr::null_mut(); 1];
    unsafe_torch_err!(atg_selu(c_tensors.as_mut_ptr(), self.c_tensor));
    Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible in-place wrapper for the C op `atg_selu_`. Returns the tensor or a `TchError`.
pub fn f_selu_(&mut self) -> Result<Tensor, TchError> {
    let mut c_tensors = [std::ptr::null_mut(); 1];
    unsafe_torch_err!(atg_selu_(c_tensors.as_mut_ptr(), self.c_tensor));
    Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible wrapper for the C op `atg_set`. Returns the result or a `TchError`.
pub fn f_set(&self) -> Result<Tensor, TchError> {
    let mut c_tensors = [std::ptr::null_mut(); 1];
    unsafe_torch_err!(atg_set(c_tensors.as_mut_ptr(), self.c_tensor));
    Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible in-place wrapper for the C op `atg_set_`. Returns the tensor or a `TchError`.
pub fn f_set_(&mut self) -> Result<Tensor, TchError> {
    let mut c_tensors = [std::ptr::null_mut(); 1];
    unsafe_torch_err!(atg_set_(c_tensors.as_mut_ptr(), self.c_tensor));
    Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible wrapper for `atg_set_data`; unlike the other bindings here it
/// produces no output tensor, so it returns `()` on success or a `TchError`.
pub fn f_set_data(&mut self, new_data: &Tensor) -> Result<(), TchError> {
    unsafe_torch_err!(atg_set_data(self.c_tensor, new_data.c_tensor));
    Ok(())
}
/// Fallible out-variant wrapper for the C op `atg_set_out`. Returns the
/// result or a `TchError`.
pub fn f_set_out(&self, out: &Tensor) -> Result<Tensor, TchError> {
    let mut c_tensors = [std::ptr::null_mut(); 1];
    unsafe_torch_err!(atg_set_out(c_tensors.as_mut_ptr(), out.c_tensor, self.c_tensor));
    Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible wrapper for the C op `atg_set_requires_grad`; the `bool` flag is
/// lowered to a 0/1 int. Returns the result or a `TchError`.
pub fn f_set_requires_grad(&self, r: bool) -> Result<Tensor, TchError> {
    let mut c_tensors = [std::ptr::null_mut(); 1];
    unsafe_torch_err!(atg_set_requires_grad(
        c_tensors.as_mut_ptr(),
        self.c_tensor,
        if r { 1 } else { 0 }
    ));
    Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible wrapper for the C op `atg_set_source_tensor`. Returns the result
/// or a `TchError`.
pub fn f_set_source_tensor(&self, source: &Tensor) -> Result<Tensor, TchError> {
    let mut c_tensors = [std::ptr::null_mut(); 1];
    unsafe_torch_err!(atg_set_source_tensor(
        c_tensors.as_mut_ptr(),
        self.c_tensor,
        source.c_tensor
    ));
    Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible in-place wrapper for the C op `atg_set_source_tensor_`. Returns
/// the tensor or a `TchError`.
pub fn f_set_source_tensor_(&mut self, source: &Tensor) -> Result<Tensor, TchError> {
    let mut c_tensors = [std::ptr::null_mut(); 1];
    unsafe_torch_err!(atg_set_source_tensor_(
        c_tensors.as_mut_ptr(),
        self.c_tensor,
        source.c_tensor
    ));
    Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible out-variant wrapper for `atg_set_source_tensor_out`: the result
/// is written into `out`. Returns it or a `TchError`.
pub fn f_set_source_tensor_out(
    &self,
    out: &Tensor,
    source: &Tensor,
) -> Result<Tensor, TchError> {
    let mut c_tensors = [std::ptr::null_mut(); 1];
    unsafe_torch_err!(atg_set_source_tensor_out(
        c_tensors.as_mut_ptr(),
        out.c_tensor,
        self.c_tensor,
        source.c_tensor
    ));
    Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible in-place wrapper for `atg_set_source_tensor_storage_offset_`;
/// both int lists are passed as pointer + i32 length pairs. Returns the
/// tensor or a `TchError`.
pub fn f_set_source_tensor_storage_offset_(
    &mut self,
    source: &Tensor,
    storage_offset: i64,
    size: impl IntList,
    stride: impl IntList,
) -> Result<Tensor, TchError> {
    let mut c_tensors = [std::ptr::null_mut(); 1];
    unsafe_torch_err!(atg_set_source_tensor_storage_offset_(
        c_tensors.as_mut_ptr(),
        self.c_tensor,
        source.c_tensor,
        storage_offset,
        size.as_ptr(),
        size.len_i32(),
        stride.as_ptr(),
        stride.len_i32()
    ));
    Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible wrapper for the C op `atg_sgn`. Returns the result or a `TchError`.
pub fn f_sgn(&self) -> Result<Tensor, TchError> {
    let mut c_tensors = [std::ptr::null_mut(); 1];
    unsafe_torch_err!(atg_sgn(c_tensors.as_mut_ptr(), self.c_tensor));
    Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible in-place wrapper for the C op `atg_sgn_`. Returns the tensor or a `TchError`.
pub fn f_sgn_(&mut self) -> Result<Tensor, TchError> {
    let mut c_tensors = [std::ptr::null_mut(); 1];
    unsafe_torch_err!(atg_sgn_(c_tensors.as_mut_ptr(), self.c_tensor));
    Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible out-variant wrapper for the C op `atg_sgn_out`. Returns the
/// result or a `TchError`.
pub fn f_sgn_out(&self, out: &Tensor) -> Result<Tensor, TchError> {
    let mut c_tensors = [std::ptr::null_mut(); 1];
    unsafe_torch_err!(atg_sgn_out(c_tensors.as_mut_ptr(), out.c_tensor, self.c_tensor));
    Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible wrapper for the C op `atg_sigmoid`. Returns the result or a `TchError`.
pub fn f_sigmoid(&self) -> Result<Tensor, TchError> {
    let mut c_tensors = [std::ptr::null_mut(); 1];
    unsafe_torch_err!(atg_sigmoid(c_tensors.as_mut_ptr(), self.c_tensor));
    Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible in-place wrapper for the C op `atg_sigmoid_`. Returns the tensor
/// or a `TchError`.
pub fn f_sigmoid_(&mut self) -> Result<Tensor, TchError> {
    let mut c_tensors = [std::ptr::null_mut(); 1];
    unsafe_torch_err!(atg_sigmoid_(c_tensors.as_mut_ptr(), self.c_tensor));
    Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible associated-function wrapper for the C op `atg_sigmoid_backward`.
/// Returns the gradient tensor or a `TchError`.
pub fn f_sigmoid_backward(grad_output: &Tensor, output: &Tensor) -> Result<Tensor, TchError> {
    let mut c_tensors = [std::ptr::null_mut(); 1];
    unsafe_torch_err!(atg_sigmoid_backward(
        c_tensors.as_mut_ptr(),
        grad_output.c_tensor,
        output.c_tensor
    ));
    Ok(Tensor { c_tensor: c_tensors[0] })
}
pub fn f_sigmoid_backward_grad_input(
grad_input: &Tensor,
grad_output: &Tensor,
output: &Tensor,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_sigmoid_backward_grad_input(
c_tensors.as_mut_ptr(),
grad_input.c_tensor,
grad_output.c_tensor,
output.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
pub fn f_sigmoid_out(&self, out: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_sigmoid_out(c_tensors.as_mut_ptr(), out.c_tensor, self.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
pub fn f_sign(&self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_sign(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
pub fn f_sign_(&mut self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_sign_(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
pub fn f_sign_out(&self, out: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_sign_out(c_tensors.as_mut_ptr(), out.c_tensor, self.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
pub fn f_signbit(&self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_signbit(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
pub fn f_signbit_out(&self, out: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_signbit_out(c_tensors.as_mut_ptr(), out.c_tensor, self.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
// Unary element-wise wrappers: silu / sin / sinc / sinh plus their in-place
// (`_`) and out-parameter (`_out`) variants. Same generated pattern as above.
/// Fallible binding for `atg_silu`.
pub fn f_silu(&self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_silu(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for the in-place `atg_silu_`.
pub fn f_silu_(&mut self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_silu_(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for `atg_silu_backward`; `self` is the forward input,
/// `grad_output` the upstream gradient.
pub fn f_silu_backward(&self, grad_output: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_silu_backward(
c_tensors.as_mut_ptr(),
grad_output.c_tensor,
self.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for `atg_silu_backward_grad_input`; result is written
/// into `grad_input`.
pub fn f_silu_backward_grad_input(
&self,
grad_input: &Tensor,
grad_output: &Tensor,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_silu_backward_grad_input(
c_tensors.as_mut_ptr(),
grad_input.c_tensor,
grad_output.c_tensor,
self.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for `atg_silu_out`; result goes into `out`.
pub fn f_silu_out(&self, out: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_silu_out(c_tensors.as_mut_ptr(), out.c_tensor, self.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for `atg_sin`.
pub fn f_sin(&self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_sin(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for the in-place `atg_sin_`.
pub fn f_sin_(&mut self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_sin_(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for `atg_sin_out`; result goes into `out`.
pub fn f_sin_out(&self, out: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_sin_out(c_tensors.as_mut_ptr(), out.c_tensor, self.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for `atg_sinc`.
pub fn f_sinc(&self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_sinc(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for the in-place `atg_sinc_`.
pub fn f_sinc_(&mut self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_sinc_(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for `atg_sinc_out`; result goes into `out`.
pub fn f_sinc_out(&self, out: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_sinc_out(c_tensors.as_mut_ptr(), out.c_tensor, self.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for `atg_sinh`.
pub fn f_sinh(&self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_sinh(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for the in-place `atg_sinh_`.
pub fn f_sinh_(&mut self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_sinh_(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for `atg_sinh_out`; result goes into `out`.
pub fn f_sinh_out(&self, out: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_sinh_out(c_tensors.as_mut_ptr(), out.c_tensor, self.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
// Slice family. Optional `start`/`end` bounds cross the FFI boundary as a
// (value, is-none flag) pair: the i64 defaults to 0 when absent and the i8
// flag tells the C side whether the optional was set.
/// Fallible binding for `atg_slice`; `start`/`end` are optional bounds
/// along `dim`, advanced by `step`.
pub fn f_slice(
&self,
dim: i64,
start: impl Into<Option<i64>>,
end: impl Into<Option<i64>>,
step: i64,
) -> Result<Tensor, TchError> {
let start = start.into();
let end = end.into();
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_slice(
c_tensors.as_mut_ptr(),
self.c_tensor,
dim,
start.unwrap_or(0i64),
// 1 signals "start was None" to the C side; the 0 above is then ignored.
start.is_none() as i8,
end.unwrap_or(0i64),
end.is_none() as i8,
step
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for `atg_slice_backward`; associated function taking
/// the upstream gradient and the original input sizes.
pub fn f_slice_backward(
grad_output: &Tensor,
input_sizes: impl IntList,
dim: i64,
start: i64,
end: i64,
step: i64,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_slice_backward(
c_tensors.as_mut_ptr(),
grad_output.c_tensor,
input_sizes.as_ptr(),
input_sizes.len_i32(),
dim,
start,
end,
step
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for `atg_slice_backward_out`; result goes into `out`.
pub fn f_slice_backward_out(
out: &Tensor,
grad_output: &Tensor,
input_sizes: impl IntList,
dim: i64,
start: i64,
end: i64,
step: i64,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_slice_backward_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
grad_output.c_tensor,
input_sizes.as_ptr(),
input_sizes.len_i32(),
dim,
start,
end,
step
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for `atg_slice_copy`; same optional-bound encoding as
/// `f_slice`.
pub fn f_slice_copy(
&self,
dim: i64,
start: impl Into<Option<i64>>,
end: impl Into<Option<i64>>,
step: i64,
) -> Result<Tensor, TchError> {
let start = start.into();
let end = end.into();
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_slice_copy(
c_tensors.as_mut_ptr(),
self.c_tensor,
dim,
start.unwrap_or(0i64),
start.is_none() as i8,
end.unwrap_or(0i64),
end.is_none() as i8,
step
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for `atg_slice_copy_tensor_out`; result goes into `out`.
pub fn f_slice_copy_tensor_out(
&self,
out: &Tensor,
dim: i64,
start: impl Into<Option<i64>>,
end: impl Into<Option<i64>>,
step: i64,
) -> Result<Tensor, TchError> {
let start = start.into();
let end = end.into();
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_slice_copy_tensor_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
dim,
start.unwrap_or(0i64),
start.is_none() as i8,
end.unwrap_or(0i64),
end.is_none() as i8,
step
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for `atg_slice_scatter`; scatters `src` into the slice
/// of `self` described by `dim`/`start`/`end`/`step`.
pub fn f_slice_scatter(
&self,
src: &Tensor,
dim: i64,
start: impl Into<Option<i64>>,
end: impl Into<Option<i64>>,
step: i64,
) -> Result<Tensor, TchError> {
let start = start.into();
let end = end.into();
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_slice_scatter(
c_tensors.as_mut_ptr(),
self.c_tensor,
src.c_tensor,
dim,
start.unwrap_or(0i64),
start.is_none() as i8,
end.unwrap_or(0i64),
end.is_none() as i8,
step
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for `atg_slice_scatter_out`; result goes into `out`.
pub fn f_slice_scatter_out(
&self,
out: &Tensor,
src: &Tensor,
dim: i64,
start: impl Into<Option<i64>>,
end: impl Into<Option<i64>>,
step: i64,
) -> Result<Tensor, TchError> {
let start = start.into();
let end = end.into();
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_slice_scatter_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
src.c_tensor,
dim,
start.unwrap_or(0i64),
start.is_none() as i8,
end.unwrap_or(0i64),
end.is_none() as i8,
step
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for `atg_slogdet`; the C call fills a two-slot array,
/// returned here as a (sign, logabsdet) tensor pair.
pub fn f_slogdet(&self) -> Result<(Tensor, Tensor), TchError> {
let mut c_tensors = [std::ptr::null_mut(); 2];
unsafe_torch_err!(atg_slogdet(c_tensors.as_mut_ptr(), self.c_tensor));
Ok((Tensor { c_tensor: c_tensors[0] }, Tensor { c_tensor: c_tensors[1] }))
}
/// Fallible binding for `atg_slogdet_out`; results are written into the
/// caller-provided `sign` and `logabsdet` tensors.
pub fn f_slogdet_out(
&self,
sign: &Tensor,
logabsdet: &Tensor,
) -> Result<(Tensor, Tensor), TchError> {
let mut c_tensors = [std::ptr::null_mut(); 2];
unsafe_torch_err!(atg_slogdet_out(
c_tensors.as_mut_ptr(),
sign.c_tensor,
logabsdet.c_tensor,
self.c_tensor
));
Ok((Tensor { c_tensor: c_tensors[0] }, Tensor { c_tensor: c_tensors[1] }))
}
// slow_conv* family. The optional `bias` tensor crosses the FFI boundary as a
// raw handle, with `None` encoded as a null pointer; all int lists are passed
// as (pointer, length) pairs.
/// Fallible binding for `atg_slow_conv3d`.
pub fn f_slow_conv3d<T: Borrow<Tensor>>(
&self,
weight: &Tensor,
kernel_size: impl IntList,
bias: Option<T>,
stride: impl IntList,
padding: impl IntList,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_slow_conv3d(
c_tensors.as_mut_ptr(),
self.c_tensor,
weight.c_tensor,
kernel_size.as_ptr(),
kernel_size.len_i32(),
// None bias -> null pointer on the C side.
bias.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
stride.as_ptr(),
stride.len_i32(),
padding.as_ptr(),
padding.len_i32()
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for `atg_slow_conv3d_out`; result goes into `out`.
pub fn f_slow_conv3d_out<T: Borrow<Tensor>>(
&self,
out: &Tensor,
weight: &Tensor,
kernel_size: impl IntList,
bias: Option<T>,
stride: impl IntList,
padding: impl IntList,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_slow_conv3d_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
weight.c_tensor,
kernel_size.as_ptr(),
kernel_size.len_i32(),
bias.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
stride.as_ptr(),
stride.len_i32(),
padding.as_ptr(),
padding.len_i32()
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for `atg_slow_conv_dilated2d`.
pub fn f_slow_conv_dilated2d<T: Borrow<Tensor>>(
&self,
weight: &Tensor,
kernel_size: impl IntList,
bias: Option<T>,
stride: impl IntList,
padding: impl IntList,
dilation: impl IntList,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_slow_conv_dilated2d(
c_tensors.as_mut_ptr(),
self.c_tensor,
weight.c_tensor,
kernel_size.as_ptr(),
kernel_size.len_i32(),
bias.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
stride.as_ptr(),
stride.len_i32(),
padding.as_ptr(),
padding.len_i32(),
dilation.as_ptr(),
dilation.len_i32()
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for `atg_slow_conv_dilated2d_out`; result goes into `out`.
pub fn f_slow_conv_dilated2d_out<T: Borrow<Tensor>>(
&self,
out: &Tensor,
weight: &Tensor,
kernel_size: impl IntList,
bias: Option<T>,
stride: impl IntList,
padding: impl IntList,
dilation: impl IntList,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_slow_conv_dilated2d_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
weight.c_tensor,
kernel_size.as_ptr(),
kernel_size.len_i32(),
bias.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
stride.as_ptr(),
stride.len_i32(),
padding.as_ptr(),
padding.len_i32(),
dilation.as_ptr(),
dilation.len_i32()
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for `atg_slow_conv_dilated3d`.
pub fn f_slow_conv_dilated3d<T: Borrow<Tensor>>(
&self,
weight: &Tensor,
kernel_size: impl IntList,
bias: Option<T>,
stride: impl IntList,
padding: impl IntList,
dilation: impl IntList,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_slow_conv_dilated3d(
c_tensors.as_mut_ptr(),
self.c_tensor,
weight.c_tensor,
kernel_size.as_ptr(),
kernel_size.len_i32(),
bias.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
stride.as_ptr(),
stride.len_i32(),
padding.as_ptr(),
padding.len_i32(),
dilation.as_ptr(),
dilation.len_i32()
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for `atg_slow_conv_dilated3d_out`; result goes into `out`.
pub fn f_slow_conv_dilated3d_out<T: Borrow<Tensor>>(
&self,
out: &Tensor,
weight: &Tensor,
kernel_size: impl IntList,
bias: Option<T>,
stride: impl IntList,
padding: impl IntList,
dilation: impl IntList,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_slow_conv_dilated3d_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
weight.c_tensor,
kernel_size.as_ptr(),
kernel_size.len_i32(),
bias.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
stride.as_ptr(),
stride.len_i32(),
padding.as_ptr(),
padding.len_i32(),
dilation.as_ptr(),
dilation.len_i32()
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for `atg_slow_conv_transpose2d`; also forwards
/// `output_padding` in addition to the dilated-conv argument set.
pub fn f_slow_conv_transpose2d<T: Borrow<Tensor>>(
&self,
weight: &Tensor,
kernel_size: impl IntList,
bias: Option<T>,
stride: impl IntList,
padding: impl IntList,
output_padding: impl IntList,
dilation: impl IntList,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_slow_conv_transpose2d(
c_tensors.as_mut_ptr(),
self.c_tensor,
weight.c_tensor,
kernel_size.as_ptr(),
kernel_size.len_i32(),
bias.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
stride.as_ptr(),
stride.len_i32(),
padding.as_ptr(),
padding.len_i32(),
output_padding.as_ptr(),
output_padding.len_i32(),
dilation.as_ptr(),
dilation.len_i32()
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for `atg_slow_conv_transpose2d_out`; result goes into `out`.
pub fn f_slow_conv_transpose2d_out<T: Borrow<Tensor>>(
&self,
out: &Tensor,
weight: &Tensor,
kernel_size: impl IntList,
bias: Option<T>,
stride: impl IntList,
padding: impl IntList,
output_padding: impl IntList,
dilation: impl IntList,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_slow_conv_transpose2d_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
weight.c_tensor,
kernel_size.as_ptr(),
kernel_size.len_i32(),
bias.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
stride.as_ptr(),
stride.len_i32(),
padding.as_ptr(),
padding.len_i32(),
output_padding.as_ptr(),
output_padding.len_i32(),
dilation.as_ptr(),
dilation.len_i32()
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for `atg_slow_conv_transpose3d`.
pub fn f_slow_conv_transpose3d<T: Borrow<Tensor>>(
&self,
weight: &Tensor,
kernel_size: impl IntList,
bias: Option<T>,
stride: impl IntList,
padding: impl IntList,
output_padding: impl IntList,
dilation: impl IntList,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_slow_conv_transpose3d(
c_tensors.as_mut_ptr(),
self.c_tensor,
weight.c_tensor,
kernel_size.as_ptr(),
kernel_size.len_i32(),
bias.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
stride.as_ptr(),
stride.len_i32(),
padding.as_ptr(),
padding.len_i32(),
output_padding.as_ptr(),
output_padding.len_i32(),
dilation.as_ptr(),
dilation.len_i32()
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for `atg_slow_conv_transpose3d_out`; result goes into `out`.
pub fn f_slow_conv_transpose3d_out<T: Borrow<Tensor>>(
&self,
out: &Tensor,
weight: &Tensor,
kernel_size: impl IntList,
bias: Option<T>,
stride: impl IntList,
padding: impl IntList,
output_padding: impl IntList,
dilation: impl IntList,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_slow_conv_transpose3d_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
weight.c_tensor,
kernel_size.as_ptr(),
kernel_size.len_i32(),
bias.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
stride.as_ptr(),
stride.len_i32(),
padding.as_ptr(),
padding.len_i32(),
output_padding.as_ptr(),
output_padding.len_i32(),
dilation.as_ptr(),
dilation.len_i32()
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for `atg_smm` (sparse matrix product with `mat2`).
pub fn f_smm(&self, mat2: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_smm(c_tensors.as_mut_ptr(), self.c_tensor, mat2.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
// Loss wrappers: `reduction` is lowered to its integer encoding via
// `Reduction::to_int()` before crossing the FFI boundary.
/// Fallible binding for `atg_smooth_l1_loss`.
pub fn f_smooth_l1_loss(
&self,
target: &Tensor,
reduction: crate::Reduction,
beta: f64,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_smooth_l1_loss(
c_tensors.as_mut_ptr(),
self.c_tensor,
target.c_tensor,
reduction.to_int(),
beta
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for `atg_smooth_l1_loss_backward`.
pub fn f_smooth_l1_loss_backward(
&self,
grad_output: &Tensor,
target: &Tensor,
reduction: crate::Reduction,
beta: f64,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_smooth_l1_loss_backward(
c_tensors.as_mut_ptr(),
grad_output.c_tensor,
self.c_tensor,
target.c_tensor,
reduction.to_int(),
beta
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for `atg_smooth_l1_loss_backward_grad_input`; result
/// is written into `grad_input`.
pub fn f_smooth_l1_loss_backward_grad_input(
&self,
grad_input: &Tensor,
grad_output: &Tensor,
target: &Tensor,
reduction: crate::Reduction,
beta: f64,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_smooth_l1_loss_backward_grad_input(
c_tensors.as_mut_ptr(),
grad_input.c_tensor,
grad_output.c_tensor,
self.c_tensor,
target.c_tensor,
reduction.to_int(),
beta
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for `atg_smooth_l1_loss_out`; result goes into `out`.
pub fn f_smooth_l1_loss_out(
&self,
out: &Tensor,
target: &Tensor,
reduction: crate::Reduction,
beta: f64,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_smooth_l1_loss_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
target.c_tensor,
reduction.to_int(),
beta
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for `atg_soft_margin_loss`.
pub fn f_soft_margin_loss(
&self,
target: &Tensor,
reduction: crate::Reduction,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_soft_margin_loss(
c_tensors.as_mut_ptr(),
self.c_tensor,
target.c_tensor,
reduction.to_int()
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for `atg_soft_margin_loss_backward`.
pub fn f_soft_margin_loss_backward(
&self,
grad_output: &Tensor,
target: &Tensor,
reduction: crate::Reduction,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_soft_margin_loss_backward(
c_tensors.as_mut_ptr(),
grad_output.c_tensor,
self.c_tensor,
target.c_tensor,
reduction.to_int()
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for `atg_soft_margin_loss_backward_grad_input`; result
/// is written into `grad_input`.
pub fn f_soft_margin_loss_backward_grad_input(
&self,
grad_input: &Tensor,
grad_output: &Tensor,
target: &Tensor,
reduction: crate::Reduction,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_soft_margin_loss_backward_grad_input(
c_tensors.as_mut_ptr(),
grad_input.c_tensor,
grad_output.c_tensor,
self.c_tensor,
target.c_tensor,
reduction.to_int()
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for `atg_soft_margin_loss_out`; result goes into `out`.
pub fn f_soft_margin_loss_out(
&self,
out: &Tensor,
target: &Tensor,
reduction: crate::Reduction,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_soft_margin_loss_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
target.c_tensor,
reduction.to_int()
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for `atg_softmax`; an absent `dtype` is encoded as -1
/// on the C side, otherwise the kind's integer code is passed.
pub fn f_softmax(&self, dim: i64, dtype: impl Into<Option<Kind>>) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_softmax(
c_tensors.as_mut_ptr(),
self.c_tensor,
dim,
dtype.into().map_or(-1, |s| s.c_int())
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for `atg_softmax_int_out`; result goes into `out`.
pub fn f_softmax_int_out(
&self,
out: &Tensor,
dim: i64,
dtype: impl Into<Option<Kind>>,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_softmax_int_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
dim,
dtype.into().map_or(-1, |s| s.c_int())
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for `atg_softplus`.
pub fn f_softplus(&self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_softplus(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for `atg_softplus_backward`; `beta`/`threshold` are
/// converted to C scalar handles.
pub fn f_softplus_backward<S: Into<Scalar>>(
&self,
grad_output: &Tensor,
beta: S,
threshold: S,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_softplus_backward(
c_tensors.as_mut_ptr(),
grad_output.c_tensor,
self.c_tensor,
beta.into().c_scalar,
threshold.into().c_scalar
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for `atg_softplus_backward_grad_input`; result is
/// written into `grad_input`.
pub fn f_softplus_backward_grad_input<S: Into<Scalar>>(
&self,
grad_input: &Tensor,
grad_output: &Tensor,
beta: S,
threshold: S,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_softplus_backward_grad_input(
c_tensors.as_mut_ptr(),
grad_input.c_tensor,
grad_output.c_tensor,
self.c_tensor,
beta.into().c_scalar,
threshold.into().c_scalar
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for `atg_softplus_out`; result goes into `out`.
pub fn f_softplus_out(&self, out: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_softplus_out(c_tensors.as_mut_ptr(), out.c_tensor, self.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for `atg_softshrink`.
pub fn f_softshrink(&self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_softshrink(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for `atg_softshrink_backward`.
pub fn f_softshrink_backward<S: Into<Scalar>>(
&self,
grad_output: &Tensor,
lambd: S,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_softshrink_backward(
c_tensors.as_mut_ptr(),
grad_output.c_tensor,
self.c_tensor,
lambd.into().c_scalar
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for `atg_softshrink_backward_grad_input`; result is
/// written into `grad_input`.
pub fn f_softshrink_backward_grad_input<S: Into<Scalar>>(
&self,
grad_input: &Tensor,
grad_output: &Tensor,
lambd: S,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_softshrink_backward_grad_input(
c_tensors.as_mut_ptr(),
grad_input.c_tensor,
grad_output.c_tensor,
self.c_tensor,
lambd.into().c_scalar
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for `atg_softshrink_out`; result goes into `out`.
pub fn f_softshrink_out(&self, out: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_softshrink_out(c_tensors.as_mut_ptr(), out.c_tensor, self.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
// Sort family: booleans are lowered to 0/1 integers for the C ABI, and each
// call fills a two-slot array returned as a (values, indices) tensor pair.
/// Fallible binding for `atg_sort`.
pub fn f_sort(&self, dim: i64, descending: bool) -> Result<(Tensor, Tensor), TchError> {
let mut c_tensors = [std::ptr::null_mut(); 2];
unsafe_torch_err!(atg_sort(
c_tensors.as_mut_ptr(),
self.c_tensor,
dim,
if descending { 1 } else { 0 }
));
Ok((Tensor { c_tensor: c_tensors[0] }, Tensor { c_tensor: c_tensors[1] }))
}
/// Fallible binding for `atg_sort_stable`; `stable` requests a stable sort.
pub fn f_sort_stable(
&self,
stable: bool,
dim: i64,
descending: bool,
) -> Result<(Tensor, Tensor), TchError> {
let mut c_tensors = [std::ptr::null_mut(); 2];
unsafe_torch_err!(atg_sort_stable(
c_tensors.as_mut_ptr(),
self.c_tensor,
if stable { 1 } else { 0 },
dim,
if descending { 1 } else { 0 }
));
Ok((Tensor { c_tensor: c_tensors[0] }, Tensor { c_tensor: c_tensors[1] }))
}
/// Fallible binding for `atg_sort_values`; results are written into the
/// caller-provided `values` and `indices` tensors.
pub fn f_sort_values(
&self,
values: &Tensor,
indices: &Tensor,
dim: i64,
descending: bool,
) -> Result<(Tensor, Tensor), TchError> {
let mut c_tensors = [std::ptr::null_mut(); 2];
unsafe_torch_err!(atg_sort_values(
c_tensors.as_mut_ptr(),
values.c_tensor,
indices.c_tensor,
self.c_tensor,
dim,
if descending { 1 } else { 0 }
));
Ok((Tensor { c_tensor: c_tensors[0] }, Tensor { c_tensor: c_tensors[1] }))
}
/// Fallible binding for `atg_sort_values_stable`; out-variant of
/// `f_sort_stable`.
pub fn f_sort_values_stable(
&self,
values: &Tensor,
indices: &Tensor,
stable: bool,
dim: i64,
descending: bool,
) -> Result<(Tensor, Tensor), TchError> {
let mut c_tensors = [std::ptr::null_mut(); 2];
unsafe_torch_err!(atg_sort_values_stable(
c_tensors.as_mut_ptr(),
values.c_tensor,
indices.c_tensor,
self.c_tensor,
if stable { 1 } else { 0 },
dim,
if descending { 1 } else { 0 }
));
Ok((Tensor { c_tensor: c_tensors[0] }, Tensor { c_tensor: c_tensors[1] }))
}
// Sparse-tensor constructors (associated functions). `options` is a
// (Kind, Device) pair lowered to two integer codes via `c_int()`; the
// `*_size` variants additionally pass an explicit shape as a (ptr, len) pair.
/// Fallible binding for `atg_sparse_bsc_tensor`.
pub fn f_sparse_bsc_tensor(
ccol_indices: &Tensor,
row_indices: &Tensor,
values: &Tensor,
options: (Kind, Device),
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_sparse_bsc_tensor(
c_tensors.as_mut_ptr(),
ccol_indices.c_tensor,
row_indices.c_tensor,
values.c_tensor,
options.0.c_int(),
options.1.c_int()
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for `atg_sparse_bsc_tensor_ccol_row_value_size`
/// (explicit-size variant).
pub fn f_sparse_bsc_tensor_ccol_row_value_size(
ccol_indices: &Tensor,
row_indices: &Tensor,
values: &Tensor,
size: impl IntList,
options: (Kind, Device),
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_sparse_bsc_tensor_ccol_row_value_size(
c_tensors.as_mut_ptr(),
ccol_indices.c_tensor,
row_indices.c_tensor,
values.c_tensor,
size.as_ptr(),
size.len_i32(),
options.0.c_int(),
options.1.c_int()
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for `atg_sparse_bsr_tensor`.
pub fn f_sparse_bsr_tensor(
crow_indices: &Tensor,
col_indices: &Tensor,
values: &Tensor,
options: (Kind, Device),
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_sparse_bsr_tensor(
c_tensors.as_mut_ptr(),
crow_indices.c_tensor,
col_indices.c_tensor,
values.c_tensor,
options.0.c_int(),
options.1.c_int()
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for `atg_sparse_bsr_tensor_crow_col_value_size`
/// (explicit-size variant).
pub fn f_sparse_bsr_tensor_crow_col_value_size(
crow_indices: &Tensor,
col_indices: &Tensor,
values: &Tensor,
size: impl IntList,
options: (Kind, Device),
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_sparse_bsr_tensor_crow_col_value_size(
c_tensors.as_mut_ptr(),
crow_indices.c_tensor,
col_indices.c_tensor,
values.c_tensor,
size.as_ptr(),
size.len_i32(),
options.0.c_int(),
options.1.c_int()
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for `atg_sparse_compressed_tensor`.
pub fn f_sparse_compressed_tensor(
compressed_indices: &Tensor,
plain_indices: &Tensor,
values: &Tensor,
options: (Kind, Device),
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_sparse_compressed_tensor(
c_tensors.as_mut_ptr(),
compressed_indices.c_tensor,
plain_indices.c_tensor,
values.c_tensor,
options.0.c_int(),
options.1.c_int()
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for `atg_sparse_compressed_tensor_comp_plain_value_size`
/// (explicit-size variant).
pub fn f_sparse_compressed_tensor_comp_plain_value_size(
compressed_indices: &Tensor,
plain_indices: &Tensor,
values: &Tensor,
size: impl IntList,
options: (Kind, Device),
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_sparse_compressed_tensor_comp_plain_value_size(
c_tensors.as_mut_ptr(),
compressed_indices.c_tensor,
plain_indices.c_tensor,
values.c_tensor,
size.as_ptr(),
size.len_i32(),
options.0.c_int(),
options.1.c_int()
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for `atg_sparse_coo_tensor` (size-only constructor).
pub fn f_sparse_coo_tensor(
size: impl IntList,
options: (Kind, Device),
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_sparse_coo_tensor(
c_tensors.as_mut_ptr(),
size.as_ptr(),
size.len_i32(),
options.0.c_int(),
options.1.c_int()
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for `atg_sparse_coo_tensor_indices`; `is_coalesced`
/// is lowered to 0/1 for the C ABI.
pub fn f_sparse_coo_tensor_indices(
indices: &Tensor,
values: &Tensor,
options: (Kind, Device),
is_coalesced: bool,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_sparse_coo_tensor_indices(
c_tensors.as_mut_ptr(),
indices.c_tensor,
values.c_tensor,
options.0.c_int(),
options.1.c_int(),
if is_coalesced { 1 } else { 0 }
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for `atg_sparse_coo_tensor_indices_size`
/// (explicit-size variant).
pub fn f_sparse_coo_tensor_indices_size(
indices: &Tensor,
values: &Tensor,
size: impl IntList,
options: (Kind, Device),
is_coalesced: bool,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_sparse_coo_tensor_indices_size(
c_tensors.as_mut_ptr(),
indices.c_tensor,
values.c_tensor,
size.as_ptr(),
size.len_i32(),
options.0.c_int(),
options.1.c_int(),
if is_coalesced { 1 } else { 0 }
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for `atg_sparse_coo_tensor_size_out`; result goes
/// into `out`.
pub fn f_sparse_coo_tensor_size_out(
out: &Tensor,
size: impl IntList,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_sparse_coo_tensor_size_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
size.as_ptr(),
size.len_i32()
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for `atg_sparse_csc_tensor`.
pub fn f_sparse_csc_tensor(
ccol_indices: &Tensor,
row_indices: &Tensor,
values: &Tensor,
options: (Kind, Device),
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_sparse_csc_tensor(
c_tensors.as_mut_ptr(),
ccol_indices.c_tensor,
row_indices.c_tensor,
values.c_tensor,
options.0.c_int(),
options.1.c_int()
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for `atg_sparse_csc_tensor_ccol_row_value_size`
/// (explicit-size variant).
pub fn f_sparse_csc_tensor_ccol_row_value_size(
ccol_indices: &Tensor,
row_indices: &Tensor,
values: &Tensor,
size: impl IntList,
options: (Kind, Device),
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_sparse_csc_tensor_ccol_row_value_size(
c_tensors.as_mut_ptr(),
ccol_indices.c_tensor,
row_indices.c_tensor,
values.c_tensor,
size.as_ptr(),
size.len_i32(),
options.0.c_int(),
options.1.c_int()
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for `atg_sparse_csr_tensor`.
pub fn f_sparse_csr_tensor(
crow_indices: &Tensor,
col_indices: &Tensor,
values: &Tensor,
options: (Kind, Device),
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_sparse_csr_tensor(
c_tensors.as_mut_ptr(),
crow_indices.c_tensor,
col_indices.c_tensor,
values.c_tensor,
options.0.c_int(),
options.1.c_int()
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for `atg_sparse_csr_tensor_crow_col_value_size`
/// (explicit-size variant).
pub fn f_sparse_csr_tensor_crow_col_value_size(
crow_indices: &Tensor,
col_indices: &Tensor,
values: &Tensor,
size: impl IntList,
options: (Kind, Device),
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_sparse_csr_tensor_crow_col_value_size(
c_tensors.as_mut_ptr(),
crow_indices.c_tensor,
col_indices.c_tensor,
values.c_tensor,
size.as_ptr(),
size.len_i32(),
options.0.c_int(),
options.1.c_int()
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
pub fn f_sparse_dim(&self) -> Result<i64, TchError> {
let return_;
unsafe_torch_err!(return_ = atg_sparse_dim(self.c_tensor));
Ok(return_)
}
pub fn f_sparse_mask(&self, mask: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_sparse_mask(c_tensors.as_mut_ptr(), self.c_tensor, mask.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
pub fn f_sparse_mask_out(&self, out: &Tensor, mask: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_sparse_mask_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
mask.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
// --- sparse_resize family ----------------------------------------------------
// Thin wrappers over the `atg_sparse_resize*` C bindings. Each passes the
// `size` list as a raw pointer + i32 length, plus the sparse/dense dimension
// counts, and wraps the single returned tensor pointer. The trailing `_`
// variants take `&mut self`, mirroring libtorch's in-place naming convention.

/// Calls `atg_sparse_resize` with `size`, `sparse_dim` and `dense_dim`.
pub fn f_sparse_resize(
&self,
size: impl IntList,
sparse_dim: i64,
dense_dim: i64,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_sparse_resize(
c_tensors.as_mut_ptr(),
self.c_tensor,
size.as_ptr(),
size.len_i32(),
sparse_dim,
dense_dim
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// In-place naming variant (`&mut self`); forwards to `atg_sparse_resize_`.
pub fn f_sparse_resize_(
&mut self,
size: impl IntList,
sparse_dim: i64,
dense_dim: i64,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_sparse_resize_(
c_tensors.as_mut_ptr(),
self.c_tensor,
size.as_ptr(),
size.len_i32(),
sparse_dim,
dense_dim
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Forwards to `atg_sparse_resize_and_clear` with the same argument layout.
pub fn f_sparse_resize_and_clear(
&self,
size: impl IntList,
sparse_dim: i64,
dense_dim: i64,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_sparse_resize_and_clear(
c_tensors.as_mut_ptr(),
self.c_tensor,
size.as_ptr(),
size.len_i32(),
sparse_dim,
dense_dim
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// In-place naming variant (`&mut self`); forwards to `atg_sparse_resize_and_clear_`.
pub fn f_sparse_resize_and_clear_(
&mut self,
size: impl IntList,
sparse_dim: i64,
dense_dim: i64,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_sparse_resize_and_clear_(
c_tensors.as_mut_ptr(),
self.c_tensor,
size.as_ptr(),
size.len_i32(),
sparse_dim,
dense_dim
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Out-variant: `out` is passed to C ahead of `self`.
pub fn f_sparse_resize_and_clear_out(
&self,
out: &Tensor,
size: impl IntList,
sparse_dim: i64,
dense_dim: i64,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_sparse_resize_and_clear_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
size.as_ptr(),
size.len_i32(),
sparse_dim,
dense_dim
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Out-variant of `f_sparse_resize`; forwards to `atg_sparse_resize_out`.
pub fn f_sparse_resize_out(
&self,
out: &Tensor,
size: impl IntList,
sparse_dim: i64,
dense_dim: i64,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_sparse_resize_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
size.as_ptr(),
size.len_i32(),
sparse_dim,
dense_dim
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Forwards `self`, `mat1` and `mat2` to `atg_sparse_sampled_addmm` and wraps
/// the returned tensor pointer; libtorch errors become `TchError`.
pub fn f_sparse_sampled_addmm(&self, mat1: &Tensor, mat2: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_sparse_sampled_addmm(
c_tensors.as_mut_ptr(),
self.c_tensor,
mat1.c_tensor,
mat2.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Out-variant: `out` is passed to the C call ahead of `self`.
pub fn f_sparse_sampled_addmm_out(
&self,
out: &Tensor,
mat1: &Tensor,
mat2: &Tensor,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_sparse_sampled_addmm_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
mat1.c_tensor,
mat2.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
// --- special: Airy / Bessel first-kind wrappers ------------------------------
// One-to-one wrappers over `atg_special_*`. The Airy functions are associated
// functions taking an explicit `x` tensor; the Bessel j0/j1/y0/y1 functions
// are methods on `self`. `_out` variants pass `out` first to the C call.

/// Forwards `x` to `atg_special_airy_ai` (associated function, no `self`).
pub fn f_special_airy_ai(x: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_special_airy_ai(c_tensors.as_mut_ptr(), x.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Out-variant of `f_special_airy_ai`.
pub fn f_special_airy_ai_out(out: &Tensor, x: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_special_airy_ai_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
x.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Forwards `self` to `atg_special_bessel_j0`.
pub fn f_special_bessel_j0(&self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_special_bessel_j0(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Out-variant of `f_special_bessel_j0`.
pub fn f_special_bessel_j0_out(&self, out: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_special_bessel_j0_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Forwards `self` to `atg_special_bessel_j1`.
pub fn f_special_bessel_j1(&self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_special_bessel_j1(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Out-variant of `f_special_bessel_j1`.
pub fn f_special_bessel_j1_out(&self, out: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_special_bessel_j1_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Forwards `self` to `atg_special_bessel_y0`.
pub fn f_special_bessel_y0(&self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_special_bessel_y0(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Out-variant of `f_special_bessel_y0`.
pub fn f_special_bessel_y0_out(&self, out: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_special_bessel_y0_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Forwards `self` to `atg_special_bessel_y1`.
pub fn f_special_bessel_y1(&self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_special_bessel_y1(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Out-variant of `f_special_bessel_y1`.
pub fn f_special_bessel_y1_out(&self, out: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_special_bessel_y1_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
// --- special: Chebyshev polynomial T variants --------------------------------
// Associated functions (no `self`). `n_scalar`/`x_scalar` variants accept a
// `Scalar`-convertible value for the corresponding argument; the C-side
// argument order (x then n, with `out` first when present) is fixed by the
// binding signature.

/// Tensor/tensor form; forwards to `atg_special_chebyshev_polynomial_t`.
pub fn f_special_chebyshev_polynomial_t(x: &Tensor, n: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_special_chebyshev_polynomial_t(
c_tensors.as_mut_ptr(),
x.c_tensor,
n.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// `n` as scalar.
pub fn f_special_chebyshev_polynomial_t_n_scalar<S: Into<Scalar>>(
x: &Tensor,
n: S,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_special_chebyshev_polynomial_t_n_scalar(
c_tensors.as_mut_ptr(),
x.c_tensor,
n.into().c_scalar
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// `n` as scalar, out-variant.
pub fn f_special_chebyshev_polynomial_t_n_scalar_out<S: Into<Scalar>>(
out: &Tensor,
x: &Tensor,
n: S,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_special_chebyshev_polynomial_t_n_scalar_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
x.c_tensor,
n.into().c_scalar
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Tensor/tensor out-variant.
pub fn f_special_chebyshev_polynomial_t_out(
out: &Tensor,
x: &Tensor,
n: &Tensor,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_special_chebyshev_polynomial_t_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
x.c_tensor,
n.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// `x` as scalar.
pub fn f_special_chebyshev_polynomial_t_x_scalar<S: Into<Scalar>>(
x: S,
n: &Tensor,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_special_chebyshev_polynomial_t_x_scalar(
c_tensors.as_mut_ptr(),
x.into().c_scalar,
n.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// `x` as scalar, out-variant.
pub fn f_special_chebyshev_polynomial_t_x_scalar_out<S: Into<Scalar>>(
out: &Tensor,
x: S,
n: &Tensor,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_special_chebyshev_polynomial_t_x_scalar_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
x.into().c_scalar,
n.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
// --- special: Chebyshev polynomial U variants (same layout as the T family) --

/// Tensor/tensor form; forwards to `atg_special_chebyshev_polynomial_u`.
pub fn f_special_chebyshev_polynomial_u(x: &Tensor, n: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_special_chebyshev_polynomial_u(
c_tensors.as_mut_ptr(),
x.c_tensor,
n.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// `n` as scalar.
pub fn f_special_chebyshev_polynomial_u_n_scalar<S: Into<Scalar>>(
x: &Tensor,
n: S,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_special_chebyshev_polynomial_u_n_scalar(
c_tensors.as_mut_ptr(),
x.c_tensor,
n.into().c_scalar
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// `n` as scalar, out-variant.
pub fn f_special_chebyshev_polynomial_u_n_scalar_out<S: Into<Scalar>>(
out: &Tensor,
x: &Tensor,
n: S,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_special_chebyshev_polynomial_u_n_scalar_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
x.c_tensor,
n.into().c_scalar
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Tensor/tensor out-variant.
pub fn f_special_chebyshev_polynomial_u_out(
out: &Tensor,
x: &Tensor,
n: &Tensor,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_special_chebyshev_polynomial_u_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
x.c_tensor,
n.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// `x` as scalar.
pub fn f_special_chebyshev_polynomial_u_x_scalar<S: Into<Scalar>>(
x: S,
n: &Tensor,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_special_chebyshev_polynomial_u_x_scalar(
c_tensors.as_mut_ptr(),
x.into().c_scalar,
n.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// `x` as scalar, out-variant.
pub fn f_special_chebyshev_polynomial_u_x_scalar_out<S: Into<Scalar>>(
out: &Tensor,
x: S,
n: &Tensor,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_special_chebyshev_polynomial_u_x_scalar_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
x.into().c_scalar,
n.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
// --- special: Chebyshev polynomial V variants (same layout as the T family) --

/// Tensor/tensor form; forwards to `atg_special_chebyshev_polynomial_v`.
pub fn f_special_chebyshev_polynomial_v(x: &Tensor, n: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_special_chebyshev_polynomial_v(
c_tensors.as_mut_ptr(),
x.c_tensor,
n.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// `n` as scalar.
pub fn f_special_chebyshev_polynomial_v_n_scalar<S: Into<Scalar>>(
x: &Tensor,
n: S,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_special_chebyshev_polynomial_v_n_scalar(
c_tensors.as_mut_ptr(),
x.c_tensor,
n.into().c_scalar
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// `n` as scalar, out-variant.
pub fn f_special_chebyshev_polynomial_v_n_scalar_out<S: Into<Scalar>>(
out: &Tensor,
x: &Tensor,
n: S,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_special_chebyshev_polynomial_v_n_scalar_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
x.c_tensor,
n.into().c_scalar
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Tensor/tensor out-variant.
pub fn f_special_chebyshev_polynomial_v_out(
out: &Tensor,
x: &Tensor,
n: &Tensor,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_special_chebyshev_polynomial_v_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
x.c_tensor,
n.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// `x` as scalar.
pub fn f_special_chebyshev_polynomial_v_x_scalar<S: Into<Scalar>>(
x: S,
n: &Tensor,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_special_chebyshev_polynomial_v_x_scalar(
c_tensors.as_mut_ptr(),
x.into().c_scalar,
n.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// `x` as scalar, out-variant.
pub fn f_special_chebyshev_polynomial_v_x_scalar_out<S: Into<Scalar>>(
out: &Tensor,
x: S,
n: &Tensor,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_special_chebyshev_polynomial_v_x_scalar_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
x.into().c_scalar,
n.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
// --- special: Chebyshev polynomial W variants (same layout as the T family) --

/// Tensor/tensor form; forwards to `atg_special_chebyshev_polynomial_w`.
pub fn f_special_chebyshev_polynomial_w(x: &Tensor, n: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_special_chebyshev_polynomial_w(
c_tensors.as_mut_ptr(),
x.c_tensor,
n.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// `n` as scalar.
pub fn f_special_chebyshev_polynomial_w_n_scalar<S: Into<Scalar>>(
x: &Tensor,
n: S,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_special_chebyshev_polynomial_w_n_scalar(
c_tensors.as_mut_ptr(),
x.c_tensor,
n.into().c_scalar
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// `n` as scalar, out-variant.
pub fn f_special_chebyshev_polynomial_w_n_scalar_out<S: Into<Scalar>>(
out: &Tensor,
x: &Tensor,
n: S,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_special_chebyshev_polynomial_w_n_scalar_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
x.c_tensor,
n.into().c_scalar
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Tensor/tensor out-variant.
pub fn f_special_chebyshev_polynomial_w_out(
out: &Tensor,
x: &Tensor,
n: &Tensor,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_special_chebyshev_polynomial_w_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
x.c_tensor,
n.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// `x` as scalar.
pub fn f_special_chebyshev_polynomial_w_x_scalar<S: Into<Scalar>>(
x: S,
n: &Tensor,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_special_chebyshev_polynomial_w_x_scalar(
c_tensors.as_mut_ptr(),
x.into().c_scalar,
n.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// `x` as scalar, out-variant.
pub fn f_special_chebyshev_polynomial_w_x_scalar_out<S: Into<Scalar>>(
out: &Tensor,
x: S,
n: &Tensor,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_special_chebyshev_polynomial_w_x_scalar_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
x.into().c_scalar,
n.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
// --- special: element-wise unary wrappers ------------------------------------
// Each pair below is a plain method + `_out` method forwarding `self` (and
// `out`, first) to the matching `atg_special_*` C binding.

/// Forwards `self` to `atg_special_digamma`.
pub fn f_special_digamma(&self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_special_digamma(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Out-variant of `f_special_digamma`.
pub fn f_special_digamma_out(&self, out: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_special_digamma_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Forwards `self` to `atg_special_entr`.
pub fn f_special_entr(&self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_special_entr(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Out-variant of `f_special_entr`.
pub fn f_special_entr_out(&self, out: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_special_entr_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Forwards `self` to `atg_special_erf`.
pub fn f_special_erf(&self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_special_erf(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Out-variant of `f_special_erf`.
pub fn f_special_erf_out(&self, out: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_special_erf_out(c_tensors.as_mut_ptr(), out.c_tensor, self.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Forwards `self` to `atg_special_erfc`.
pub fn f_special_erfc(&self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_special_erfc(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Out-variant of `f_special_erfc`.
pub fn f_special_erfc_out(&self, out: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_special_erfc_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Forwards `self` to `atg_special_erfcx`.
pub fn f_special_erfcx(&self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_special_erfcx(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Out-variant of `f_special_erfcx`.
pub fn f_special_erfcx_out(&self, out: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_special_erfcx_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Forwards `self` to `atg_special_erfinv`.
pub fn f_special_erfinv(&self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_special_erfinv(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Out-variant of `f_special_erfinv`.
pub fn f_special_erfinv_out(&self, out: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_special_erfinv_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Forwards `self` to `atg_special_exp2`.
pub fn f_special_exp2(&self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_special_exp2(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Out-variant of `f_special_exp2`.
pub fn f_special_exp2_out(&self, out: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_special_exp2_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Forwards `self` to `atg_special_expit`.
pub fn f_special_expit(&self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_special_expit(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Out-variant of `f_special_expit`.
pub fn f_special_expit_out(&self, out: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_special_expit_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Forwards `self` to `atg_special_expm1`.
pub fn f_special_expm1(&self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_special_expm1(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Out-variant of `f_special_expm1`.
pub fn f_special_expm1_out(&self, out: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_special_expm1_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Forwards `self` and `other` to `atg_special_gammainc`.
pub fn f_special_gammainc(&self, other: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_special_gammainc(
c_tensors.as_mut_ptr(),
self.c_tensor,
other.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Out-variant of `f_special_gammainc`.
pub fn f_special_gammainc_out(&self, out: &Tensor, other: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_special_gammainc_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
other.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Forwards `self` and `other` to `atg_special_gammaincc`.
pub fn f_special_gammaincc(&self, other: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_special_gammaincc(
c_tensors.as_mut_ptr(),
self.c_tensor,
other.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Out-variant of `f_special_gammaincc`.
pub fn f_special_gammaincc_out(
&self,
out: &Tensor,
other: &Tensor,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_special_gammaincc_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
other.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Forwards `self` to `atg_special_gammaln`.
pub fn f_special_gammaln(&self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_special_gammaln(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Out-variant of `f_special_gammaln`.
pub fn f_special_gammaln_out(&self, out: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_special_gammaln_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
// --- special: Hermite polynomial H variants (same layout as the Chebyshev families)

/// Tensor/tensor form; forwards to `atg_special_hermite_polynomial_h`.
pub fn f_special_hermite_polynomial_h(x: &Tensor, n: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_special_hermite_polynomial_h(
c_tensors.as_mut_ptr(),
x.c_tensor,
n.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// `n` as scalar.
pub fn f_special_hermite_polynomial_h_n_scalar<S: Into<Scalar>>(
x: &Tensor,
n: S,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_special_hermite_polynomial_h_n_scalar(
c_tensors.as_mut_ptr(),
x.c_tensor,
n.into().c_scalar
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// `n` as scalar, out-variant.
pub fn f_special_hermite_polynomial_h_n_scalar_out<S: Into<Scalar>>(
out: &Tensor,
x: &Tensor,
n: S,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_special_hermite_polynomial_h_n_scalar_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
x.c_tensor,
n.into().c_scalar
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Tensor/tensor out-variant.
pub fn f_special_hermite_polynomial_h_out(
out: &Tensor,
x: &Tensor,
n: &Tensor,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_special_hermite_polynomial_h_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
x.c_tensor,
n.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// `x` as scalar.
pub fn f_special_hermite_polynomial_h_x_scalar<S: Into<Scalar>>(
x: S,
n: &Tensor,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_special_hermite_polynomial_h_x_scalar(
c_tensors.as_mut_ptr(),
x.into().c_scalar,
n.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// `x` as scalar, out-variant.
pub fn f_special_hermite_polynomial_h_x_scalar_out<S: Into<Scalar>>(
out: &Tensor,
x: S,
n: &Tensor,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_special_hermite_polynomial_h_x_scalar_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
x.into().c_scalar,
n.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
// --- special: Hermite polynomial He (probabilists') variants ------------------

/// Tensor/tensor form; forwards to `atg_special_hermite_polynomial_he`.
pub fn f_special_hermite_polynomial_he(x: &Tensor, n: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_special_hermite_polynomial_he(
c_tensors.as_mut_ptr(),
x.c_tensor,
n.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// `n` as scalar.
pub fn f_special_hermite_polynomial_he_n_scalar<S: Into<Scalar>>(
x: &Tensor,
n: S,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_special_hermite_polynomial_he_n_scalar(
c_tensors.as_mut_ptr(),
x.c_tensor,
n.into().c_scalar
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// `n` as scalar, out-variant.
pub fn f_special_hermite_polynomial_he_n_scalar_out<S: Into<Scalar>>(
out: &Tensor,
x: &Tensor,
n: S,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_special_hermite_polynomial_he_n_scalar_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
x.c_tensor,
n.into().c_scalar
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Tensor/tensor out-variant.
pub fn f_special_hermite_polynomial_he_out(
out: &Tensor,
x: &Tensor,
n: &Tensor,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_special_hermite_polynomial_he_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
x.c_tensor,
n.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// `x` as scalar.
pub fn f_special_hermite_polynomial_he_x_scalar<S: Into<Scalar>>(
x: S,
n: &Tensor,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_special_hermite_polynomial_he_x_scalar(
c_tensors.as_mut_ptr(),
x.into().c_scalar,
n.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// `x` as scalar, out-variant.
pub fn f_special_hermite_polynomial_he_x_scalar_out<S: Into<Scalar>>(
out: &Tensor,
x: S,
n: &Tensor,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_special_hermite_polynomial_he_x_scalar_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
x.into().c_scalar,
n.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Forwards `self` to `atg_special_i0`.
pub fn f_special_i0(&self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_special_i0(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Out-variant of `f_special_i0`.
pub fn f_special_i0_out(&self, out: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_special_i0_out(c_tensors.as_mut_ptr(), out.c_tensor, self.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Forwards `self` to `atg_special_i0e`.
pub fn f_special_i0e(&self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_special_i0e(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Out-variant of `f_special_i0e`.
pub fn f_special_i0e_out(&self, out: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_special_i0e_out(c_tensors.as_mut_ptr(), out.c_tensor, self.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Forwards `self` to `atg_special_i1`.
pub fn f_special_i1(&self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_special_i1(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Out-variant of `f_special_i1`.
pub fn f_special_i1_out(&self, out: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_special_i1_out(c_tensors.as_mut_ptr(), out.c_tensor, self.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Forwards `self` to `atg_special_i1e`.
pub fn f_special_i1e(&self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_special_i1e(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Out-variant of `f_special_i1e`.
pub fn f_special_i1e_out(&self, out: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_special_i1e_out(c_tensors.as_mut_ptr(), out.c_tensor, self.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
// --- special: Laguerre polynomial L variants ----------------------------------

/// Tensor/tensor form; forwards to `atg_special_laguerre_polynomial_l`.
pub fn f_special_laguerre_polynomial_l(x: &Tensor, n: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_special_laguerre_polynomial_l(
c_tensors.as_mut_ptr(),
x.c_tensor,
n.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// `n` as scalar.
pub fn f_special_laguerre_polynomial_l_n_scalar<S: Into<Scalar>>(
x: &Tensor,
n: S,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_special_laguerre_polynomial_l_n_scalar(
c_tensors.as_mut_ptr(),
x.c_tensor,
n.into().c_scalar
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// `n` as scalar, out-variant.
pub fn f_special_laguerre_polynomial_l_n_scalar_out<S: Into<Scalar>>(
out: &Tensor,
x: &Tensor,
n: S,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_special_laguerre_polynomial_l_n_scalar_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
x.c_tensor,
n.into().c_scalar
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Tensor/tensor out-variant.
pub fn f_special_laguerre_polynomial_l_out(
out: &Tensor,
x: &Tensor,
n: &Tensor,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_special_laguerre_polynomial_l_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
x.c_tensor,
n.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// `x` as scalar.
pub fn f_special_laguerre_polynomial_l_x_scalar<S: Into<Scalar>>(
x: S,
n: &Tensor,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_special_laguerre_polynomial_l_x_scalar(
c_tensors.as_mut_ptr(),
x.into().c_scalar,
n.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// `x` as scalar, out-variant.
pub fn f_special_laguerre_polynomial_l_x_scalar_out<S: Into<Scalar>>(
out: &Tensor,
x: S,
n: &Tensor,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_special_laguerre_polynomial_l_x_scalar_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
x.into().c_scalar,
n.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
// --- special: Legendre polynomial P variants ----------------------------------

/// Tensor/tensor form; forwards to `atg_special_legendre_polynomial_p`.
pub fn f_special_legendre_polynomial_p(x: &Tensor, n: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_special_legendre_polynomial_p(
c_tensors.as_mut_ptr(),
x.c_tensor,
n.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// `n` as scalar.
pub fn f_special_legendre_polynomial_p_n_scalar<S: Into<Scalar>>(
x: &Tensor,
n: S,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_special_legendre_polynomial_p_n_scalar(
c_tensors.as_mut_ptr(),
x.c_tensor,
n.into().c_scalar
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// `n` as scalar, out-variant.
pub fn f_special_legendre_polynomial_p_n_scalar_out<S: Into<Scalar>>(
out: &Tensor,
x: &Tensor,
n: S,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_special_legendre_polynomial_p_n_scalar_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
x.c_tensor,
n.into().c_scalar
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Tensor/tensor out-variant.
pub fn f_special_legendre_polynomial_p_out(
out: &Tensor,
x: &Tensor,
n: &Tensor,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_special_legendre_polynomial_p_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
x.c_tensor,
n.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// `x` as scalar.
pub fn f_special_legendre_polynomial_p_x_scalar<S: Into<Scalar>>(
x: S,
n: &Tensor,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_special_legendre_polynomial_p_x_scalar(
c_tensors.as_mut_ptr(),
x.into().c_scalar,
n.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// `x` as scalar, out-variant.
pub fn f_special_legendre_polynomial_p_x_scalar_out<S: Into<Scalar>>(
out: &Tensor,
x: S,
n: &Tensor,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_special_legendre_polynomial_p_x_scalar_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
x.into().c_scalar,
n.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Forwards `self` to `atg_special_log1p`.
pub fn f_special_log1p(&self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_special_log1p(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Out-variant of `f_special_log1p`.
pub fn f_special_log1p_out(&self, out: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_special_log1p_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Forwards `self` to `atg_special_log_ndtr`.
pub fn f_special_log_ndtr(&self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_special_log_ndtr(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Out-variant of `f_special_log_ndtr`.
pub fn f_special_log_ndtr_out(&self, out: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_special_log_ndtr_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Forwards `self`, `dim` and an optional dtype to `atg_special_log_softmax`.
/// `None` dtype is encoded as `-1` on the C side (the binding's sentinel for
/// "no dtype specified").
pub fn f_special_log_softmax(
&self,
dim: i64,
dtype: impl Into<Option<Kind>>,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_special_log_softmax(
c_tensors.as_mut_ptr(),
self.c_tensor,
dim,
dtype.into().map_or(-1, |s| s.c_int())
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Forwards `self` and an optional `eps` to `atg_special_logit`. The optional
/// is encoded as a (value, is_none) pair: NaN stands in for the value when
/// `eps` is `None`, and the trailing `i8` flag tells the C side it was absent.
pub fn f_special_logit(&self, eps: impl Into<Option<f64>>) -> Result<Tensor, TchError> {
let eps = eps.into();
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_special_logit(
c_tensors.as_mut_ptr(),
self.c_tensor,
eps.unwrap_or(std::f64::NAN),
eps.is_none() as i8
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Out-variant of `f_special_logit`; same (value, is_none) encoding for `eps`.
pub fn f_special_logit_out(
&self,
out: &Tensor,
eps: impl Into<Option<f64>>,
) -> Result<Tensor, TchError> {
let eps = eps.into();
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_special_logit_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
eps.unwrap_or(std::f64::NAN),
eps.is_none() as i8
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Forwards `self`, the `dim` list (pointer + i32 length) and `keepdim`
/// (encoded as 0/1) to `atg_special_logsumexp`.
pub fn f_special_logsumexp(
&self,
dim: impl IntList,
keepdim: bool,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_special_logsumexp(
c_tensors.as_mut_ptr(),
self.c_tensor,
dim.as_ptr(),
dim.len_i32(),
if keepdim { 1 } else { 0 }
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Out-variant of `f_special_logsumexp`.
pub fn f_special_logsumexp_out(
&self,
out: &Tensor,
dim: impl IntList,
keepdim: bool,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_special_logsumexp_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
dim.as_ptr(),
dim.len_i32(),
if keepdim { 1 } else { 0 }
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Forwards `self` to `atg_special_modified_bessel_i0`.
pub fn f_special_modified_bessel_i0(&self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_special_modified_bessel_i0(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Out-variant of `f_special_modified_bessel_i0`.
pub fn f_special_modified_bessel_i0_out(&self, out: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_special_modified_bessel_i0_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Forwards `self` to `atg_special_modified_bessel_i1`.
pub fn f_special_modified_bessel_i1(&self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_special_modified_bessel_i1(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Out-variant of `f_special_modified_bessel_i1`.
pub fn f_special_modified_bessel_i1_out(&self, out: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_special_modified_bessel_i1_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Forwards `self` to `atg_special_modified_bessel_k0`.
pub fn f_special_modified_bessel_k0(&self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_special_modified_bessel_k0(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Out-variant of `f_special_modified_bessel_k0`.
pub fn f_special_modified_bessel_k0_out(&self, out: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_special_modified_bessel_k0_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Forwards `self` to `atg_special_modified_bessel_k1`.
pub fn f_special_modified_bessel_k1(&self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_special_modified_bessel_k1(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Out-variant of `f_special_modified_bessel_k1`.
pub fn f_special_modified_bessel_k1_out(&self, out: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_special_modified_bessel_k1_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
pub fn f_special_multigammaln(&self, p: i64) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_special_multigammaln(c_tensors.as_mut_ptr(), self.c_tensor, p));
Ok(Tensor { c_tensor: c_tensors[0] })
}
pub fn f_special_multigammaln_out(&self, out: &Tensor, p: i64) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_special_multigammaln_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
p
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
pub fn f_special_ndtr(&self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_special_ndtr(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
pub fn f_special_ndtr_out(&self, out: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_special_ndtr_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
pub fn f_special_ndtri(&self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_special_ndtri(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
pub fn f_special_ndtri_out(&self, out: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_special_ndtri_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
pub fn f_special_polygamma(&self, n: i64) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_special_polygamma(c_tensors.as_mut_ptr(), n, self.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
pub fn f_special_polygamma_out(&self, out: &Tensor, n: i64) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_special_polygamma_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
n,
self.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
pub fn f_special_psi(&self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_special_psi(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
pub fn f_special_psi_out(&self, out: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_special_psi_out(c_tensors.as_mut_ptr(), out.c_tensor, self.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
pub fn f_special_round(&self, decimals: i64) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_special_round(c_tensors.as_mut_ptr(), self.c_tensor, decimals));
Ok(Tensor { c_tensor: c_tensors[0] })
}
pub fn f_special_round_out(&self, out: &Tensor, decimals: i64) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_special_round_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
decimals
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
// --- `scaled_modified_bessel` and the shifted-Chebyshev-polynomial family. ---
// These are associated functions (no `self`): the primary tensor argument is
// an explicit `x`. Each polynomial kind {t, u, v, w} comes in six variants:
// tensor/tensor, scalar `n` (`_n_scalar`), scalar `x` (`_x_scalar`), and the
// corresponding `_out` forms writing into a caller-supplied tensor. Scalars
// are converted via `Into<Scalar>` and passed as raw `c_scalar` handles.
pub fn f_special_scaled_modified_bessel_k0(x: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_special_scaled_modified_bessel_k0(
c_tensors.as_mut_ptr(),
x.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
pub fn f_special_scaled_modified_bessel_k0_out(
out: &Tensor,
x: &Tensor,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_special_scaled_modified_bessel_k0_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
x.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
pub fn f_special_scaled_modified_bessel_k1(x: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_special_scaled_modified_bessel_k1(
c_tensors.as_mut_ptr(),
x.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
pub fn f_special_scaled_modified_bessel_k1_out(
out: &Tensor,
x: &Tensor,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_special_scaled_modified_bessel_k1_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
x.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
pub fn f_special_shifted_chebyshev_polynomial_t(
x: &Tensor,
n: &Tensor,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_special_shifted_chebyshev_polynomial_t(
c_tensors.as_mut_ptr(),
x.c_tensor,
n.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
pub fn f_special_shifted_chebyshev_polynomial_t_n_scalar<S: Into<Scalar>>(
x: &Tensor,
n: S,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_special_shifted_chebyshev_polynomial_t_n_scalar(
c_tensors.as_mut_ptr(),
x.c_tensor,
n.into().c_scalar
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
pub fn f_special_shifted_chebyshev_polynomial_t_n_scalar_out<S: Into<Scalar>>(
out: &Tensor,
x: &Tensor,
n: S,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_special_shifted_chebyshev_polynomial_t_n_scalar_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
x.c_tensor,
n.into().c_scalar
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
pub fn f_special_shifted_chebyshev_polynomial_t_out(
out: &Tensor,
x: &Tensor,
n: &Tensor,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_special_shifted_chebyshev_polynomial_t_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
x.c_tensor,
n.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
pub fn f_special_shifted_chebyshev_polynomial_t_x_scalar<S: Into<Scalar>>(
x: S,
n: &Tensor,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_special_shifted_chebyshev_polynomial_t_x_scalar(
c_tensors.as_mut_ptr(),
x.into().c_scalar,
n.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
pub fn f_special_shifted_chebyshev_polynomial_t_x_scalar_out<S: Into<Scalar>>(
out: &Tensor,
x: S,
n: &Tensor,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_special_shifted_chebyshev_polynomial_t_x_scalar_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
x.into().c_scalar,
n.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
pub fn f_special_shifted_chebyshev_polynomial_u(
x: &Tensor,
n: &Tensor,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_special_shifted_chebyshev_polynomial_u(
c_tensors.as_mut_ptr(),
x.c_tensor,
n.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
pub fn f_special_shifted_chebyshev_polynomial_u_n_scalar<S: Into<Scalar>>(
x: &Tensor,
n: S,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_special_shifted_chebyshev_polynomial_u_n_scalar(
c_tensors.as_mut_ptr(),
x.c_tensor,
n.into().c_scalar
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
pub fn f_special_shifted_chebyshev_polynomial_u_n_scalar_out<S: Into<Scalar>>(
out: &Tensor,
x: &Tensor,
n: S,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_special_shifted_chebyshev_polynomial_u_n_scalar_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
x.c_tensor,
n.into().c_scalar
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
pub fn f_special_shifted_chebyshev_polynomial_u_out(
out: &Tensor,
x: &Tensor,
n: &Tensor,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_special_shifted_chebyshev_polynomial_u_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
x.c_tensor,
n.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
pub fn f_special_shifted_chebyshev_polynomial_u_x_scalar<S: Into<Scalar>>(
x: S,
n: &Tensor,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_special_shifted_chebyshev_polynomial_u_x_scalar(
c_tensors.as_mut_ptr(),
x.into().c_scalar,
n.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
pub fn f_special_shifted_chebyshev_polynomial_u_x_scalar_out<S: Into<Scalar>>(
out: &Tensor,
x: S,
n: &Tensor,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_special_shifted_chebyshev_polynomial_u_x_scalar_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
x.into().c_scalar,
n.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
pub fn f_special_shifted_chebyshev_polynomial_v(
x: &Tensor,
n: &Tensor,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_special_shifted_chebyshev_polynomial_v(
c_tensors.as_mut_ptr(),
x.c_tensor,
n.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
pub fn f_special_shifted_chebyshev_polynomial_v_n_scalar<S: Into<Scalar>>(
x: &Tensor,
n: S,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_special_shifted_chebyshev_polynomial_v_n_scalar(
c_tensors.as_mut_ptr(),
x.c_tensor,
n.into().c_scalar
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
pub fn f_special_shifted_chebyshev_polynomial_v_n_scalar_out<S: Into<Scalar>>(
out: &Tensor,
x: &Tensor,
n: S,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_special_shifted_chebyshev_polynomial_v_n_scalar_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
x.c_tensor,
n.into().c_scalar
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
pub fn f_special_shifted_chebyshev_polynomial_v_out(
out: &Tensor,
x: &Tensor,
n: &Tensor,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_special_shifted_chebyshev_polynomial_v_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
x.c_tensor,
n.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
pub fn f_special_shifted_chebyshev_polynomial_v_x_scalar<S: Into<Scalar>>(
x: S,
n: &Tensor,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_special_shifted_chebyshev_polynomial_v_x_scalar(
c_tensors.as_mut_ptr(),
x.into().c_scalar,
n.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
pub fn f_special_shifted_chebyshev_polynomial_v_x_scalar_out<S: Into<Scalar>>(
out: &Tensor,
x: S,
n: &Tensor,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_special_shifted_chebyshev_polynomial_v_x_scalar_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
x.into().c_scalar,
n.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
pub fn f_special_shifted_chebyshev_polynomial_w(
x: &Tensor,
n: &Tensor,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_special_shifted_chebyshev_polynomial_w(
c_tensors.as_mut_ptr(),
x.c_tensor,
n.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
pub fn f_special_shifted_chebyshev_polynomial_w_n_scalar<S: Into<Scalar>>(
x: &Tensor,
n: S,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_special_shifted_chebyshev_polynomial_w_n_scalar(
c_tensors.as_mut_ptr(),
x.c_tensor,
n.into().c_scalar
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
pub fn f_special_shifted_chebyshev_polynomial_w_n_scalar_out<S: Into<Scalar>>(
out: &Tensor,
x: &Tensor,
n: S,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_special_shifted_chebyshev_polynomial_w_n_scalar_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
x.c_tensor,
n.into().c_scalar
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
pub fn f_special_shifted_chebyshev_polynomial_w_out(
out: &Tensor,
x: &Tensor,
n: &Tensor,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_special_shifted_chebyshev_polynomial_w_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
x.c_tensor,
n.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
pub fn f_special_shifted_chebyshev_polynomial_w_x_scalar<S: Into<Scalar>>(
x: S,
n: &Tensor,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_special_shifted_chebyshev_polynomial_w_x_scalar(
c_tensors.as_mut_ptr(),
x.into().c_scalar,
n.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
pub fn f_special_shifted_chebyshev_polynomial_w_x_scalar_out<S: Into<Scalar>>(
out: &Tensor,
x: S,
n: &Tensor,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_special_shifted_chebyshev_polynomial_w_x_scalar_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
x.into().c_scalar,
n.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
// --- Remaining `special_*` wrappers: sinc, softmax, spherical_bessel_j0,
// xlog1py, xlogy, zeta. The binary ops come in tensor/tensor, `_other_scalar`
// and `_self_scalar` variants (the latter are associated functions where the
// "self" operand is a Scalar), each with an `_out` form.
pub fn f_special_sinc(&self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_special_sinc(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
pub fn f_special_sinc_out(&self, out: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_special_sinc_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// `dtype` is optional; `None` is encoded as -1 for the C side.
pub fn f_special_softmax(
&self,
dim: i64,
dtype: impl Into<Option<Kind>>,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_special_softmax(
c_tensors.as_mut_ptr(),
self.c_tensor,
dim,
dtype.into().map_or(-1, |s| s.c_int())
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
pub fn f_special_spherical_bessel_j0(x: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_special_spherical_bessel_j0(c_tensors.as_mut_ptr(), x.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
pub fn f_special_spherical_bessel_j0_out(out: &Tensor, x: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_special_spherical_bessel_j0_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
x.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
pub fn f_special_xlog1py(&self, other: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_special_xlog1py(
c_tensors.as_mut_ptr(),
self.c_tensor,
other.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
pub fn f_special_xlog1py_other_scalar<S: Into<Scalar>>(
&self,
other: S,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_special_xlog1py_other_scalar(
c_tensors.as_mut_ptr(),
self.c_tensor,
other.into().c_scalar
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
pub fn f_special_xlog1py_other_scalar_out<S: Into<Scalar>>(
&self,
out: &Tensor,
other: S,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_special_xlog1py_other_scalar_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
other.into().c_scalar
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
pub fn f_special_xlog1py_out(&self, out: &Tensor, other: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_special_xlog1py_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
other.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
pub fn f_special_xlog1py_self_scalar<S: Into<Scalar>>(
self_scalar: S,
other: &Tensor,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_special_xlog1py_self_scalar(
c_tensors.as_mut_ptr(),
self_scalar.into().c_scalar,
other.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
pub fn f_special_xlog1py_self_scalar_out<S: Into<Scalar>>(
out: &Tensor,
self_scalar: S,
other: &Tensor,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_special_xlog1py_self_scalar_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self_scalar.into().c_scalar,
other.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
pub fn f_special_xlogy(&self, other: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_special_xlogy(c_tensors.as_mut_ptr(), self.c_tensor, other.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
pub fn f_special_xlogy_other_scalar<S: Into<Scalar>>(
&self,
other: S,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_special_xlogy_other_scalar(
c_tensors.as_mut_ptr(),
self.c_tensor,
other.into().c_scalar
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
pub fn f_special_xlogy_other_scalar_out<S: Into<Scalar>>(
&self,
out: &Tensor,
other: S,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_special_xlogy_other_scalar_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
other.into().c_scalar
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
pub fn f_special_xlogy_out(&self, out: &Tensor, other: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_special_xlogy_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
other.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
pub fn f_special_xlogy_self_scalar<S: Into<Scalar>>(
self_scalar: S,
other: &Tensor,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_special_xlogy_self_scalar(
c_tensors.as_mut_ptr(),
self_scalar.into().c_scalar,
other.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
pub fn f_special_xlogy_self_scalar_out<S: Into<Scalar>>(
out: &Tensor,
self_scalar: S,
other: &Tensor,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_special_xlogy_self_scalar_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self_scalar.into().c_scalar,
other.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
pub fn f_special_zeta(&self, other: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_special_zeta(c_tensors.as_mut_ptr(), self.c_tensor, other.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
pub fn f_special_zeta_other_scalar<S: Into<Scalar>>(
&self,
other: S,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_special_zeta_other_scalar(
c_tensors.as_mut_ptr(),
self.c_tensor,
other.into().c_scalar
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
pub fn f_special_zeta_other_scalar_out<S: Into<Scalar>>(
&self,
out: &Tensor,
other: S,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_special_zeta_other_scalar_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
other.into().c_scalar
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
pub fn f_special_zeta_out(&self, out: &Tensor, other: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_special_zeta_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
other.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
pub fn f_special_zeta_self_scalar<S: Into<Scalar>>(
self_scalar: S,
other: &Tensor,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_special_zeta_self_scalar(
c_tensors.as_mut_ptr(),
self_scalar.into().c_scalar,
other.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
pub fn f_special_zeta_self_scalar_out<S: Into<Scalar>>(
out: &Tensor,
self_scalar: S,
other: &Tensor,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_special_zeta_self_scalar_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self_scalar.into().c_scalar,
other.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
// --- `split*` wrappers. These C calls return a heap-allocated,
// null-terminated array of tensor pointers: we walk it until the null
// sentinel, take ownership of each element as a `Tensor`, then free the
// array itself (not the tensors) with `libc::free`, matching the C side's
// allocator. The `*_out` variants instead write into caller-provided
// tensors passed as a pointer list and return nothing.
pub fn f_split(&self, split_size: i64, dim: i64) -> Result<Vec<Tensor>, TchError> {
let c_tensors = unsafe_torch_err!(atg_split(self.c_tensor, split_size, dim));
let mut r__ = vec![];
let mut i = 0;
loop {
let c__ = unsafe { *c_tensors.add(i) };
if c__.is_null() {
break;
}
r__.push(Tensor { c_tensor: c__ });
i += 1;
}
unsafe { libc::free(c_tensors as *mut libc::c_void) }
Ok(r__)
}
pub fn f_split_copy(&self, split_size: i64, dim: i64) -> Result<Vec<Tensor>, TchError> {
let c_tensors = unsafe_torch_err!(atg_split_copy(self.c_tensor, split_size, dim));
let mut r__ = vec![];
let mut i = 0;
loop {
let c__ = unsafe { *c_tensors.add(i) };
if c__.is_null() {
break;
}
r__.push(Tensor { c_tensor: c__ });
i += 1;
}
unsafe { libc::free(c_tensors as *mut libc::c_void) }
Ok(r__)
}
pub fn f_split_copy_tensor_out<T: Borrow<Tensor>>(
&self,
out: &[T],
split_size: i64,
dim: i64,
) -> Result<(), TchError> {
// The temporary Vec from `ptr_list(out)` lives until the end of this
// statement, so the raw pointer passed to C stays valid for the call.
unsafe_torch_err!(atg_split_copy_tensor_out(
ptr_list(out).as_ptr(),
out.len() as i32,
self.c_tensor,
split_size,
dim
));
Ok(())
}
pub fn f_split_sizes(
&self,
split_size: impl IntList,
dim: i64,
) -> Result<Vec<Tensor>, TchError> {
let c_tensors = unsafe_torch_err!(atg_split_sizes(
self.c_tensor,
split_size.as_ptr(),
split_size.len_i32(),
dim
));
let mut r__ = vec![];
let mut i = 0;
loop {
let c__ = unsafe { *c_tensors.add(i) };
if c__.is_null() {
break;
}
r__.push(Tensor { c_tensor: c__ });
i += 1;
}
unsafe { libc::free(c_tensors as *mut libc::c_void) }
Ok(r__)
}
pub fn f_split_with_sizes(
&self,
split_sizes: impl IntList,
dim: i64,
) -> Result<Vec<Tensor>, TchError> {
let c_tensors = unsafe_torch_err!(atg_split_with_sizes(
self.c_tensor,
split_sizes.as_ptr(),
split_sizes.len_i32(),
dim
));
let mut r__ = vec![];
let mut i = 0;
loop {
let c__ = unsafe { *c_tensors.add(i) };
if c__.is_null() {
break;
}
r__.push(Tensor { c_tensor: c__ });
i += 1;
}
unsafe { libc::free(c_tensors as *mut libc::c_void) }
Ok(r__)
}
pub fn f_split_with_sizes_copy(
&self,
split_sizes: impl IntList,
dim: i64,
) -> Result<Vec<Tensor>, TchError> {
let c_tensors = unsafe_torch_err!(atg_split_with_sizes_copy(
self.c_tensor,
split_sizes.as_ptr(),
split_sizes.len_i32(),
dim
));
let mut r__ = vec![];
let mut i = 0;
loop {
let c__ = unsafe { *c_tensors.add(i) };
if c__.is_null() {
break;
}
r__.push(Tensor { c_tensor: c__ });
i += 1;
}
unsafe { libc::free(c_tensors as *mut libc::c_void) }
Ok(r__)
}
pub fn f_split_with_sizes_copy_out<T: Borrow<Tensor>>(
&self,
out: &[T],
split_sizes: impl IntList,
dim: i64,
) -> Result<(), TchError> {
unsafe_torch_err!(atg_split_with_sizes_copy_out(
ptr_list(out).as_ptr(),
out.len() as i32,
self.c_tensor,
split_sizes.as_ptr(),
split_sizes.len_i32(),
dim
));
Ok(())
}
// --- sqrt / square / squeeze wrappers. Methods ending in `_` are the
// in-place variants (they take `&mut self` and the C op mutates the
// receiver); the returned `Tensor` wraps the pointer the C call hands back.
// `IntList` dim arguments are passed as (pointer, length) pairs.
pub fn f_sqrt(&self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_sqrt(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
pub fn f_sqrt_(&mut self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_sqrt_(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
pub fn f_sqrt_out(&self, out: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_sqrt_out(c_tensors.as_mut_ptr(), out.c_tensor, self.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
pub fn f_square(&self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_square(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
pub fn f_square_(&mut self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_square_(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
pub fn f_square_out(&self, out: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_square_out(c_tensors.as_mut_ptr(), out.c_tensor, self.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
pub fn f_squeeze(&self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_squeeze(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
pub fn f_squeeze_(&mut self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_squeeze_(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
pub fn f_squeeze_copy(&self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_squeeze_copy(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
pub fn f_squeeze_copy_dim(&self, dim: i64) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_squeeze_copy_dim(c_tensors.as_mut_ptr(), self.c_tensor, dim));
Ok(Tensor { c_tensor: c_tensors[0] })
}
pub fn f_squeeze_copy_dim_out(&self, out: &Tensor, dim: i64) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_squeeze_copy_dim_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
dim
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
pub fn f_squeeze_copy_dims(&self, dim: impl IntList) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_squeeze_copy_dims(
c_tensors.as_mut_ptr(),
self.c_tensor,
dim.as_ptr(),
dim.len_i32()
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
pub fn f_squeeze_copy_dims_out(
&self,
out: &Tensor,
dim: impl IntList,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_squeeze_copy_dims_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
dim.as_ptr(),
dim.len_i32()
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
pub fn f_squeeze_copy_out(&self, out: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_squeeze_copy_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
pub fn f_squeeze_dim(&self, dim: i64) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_squeeze_dim(c_tensors.as_mut_ptr(), self.c_tensor, dim));
Ok(Tensor { c_tensor: c_tensors[0] })
}
pub fn f_squeeze_dim_(&mut self, dim: i64) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_squeeze_dim_(c_tensors.as_mut_ptr(), self.c_tensor, dim));
Ok(Tensor { c_tensor: c_tensors[0] })
}
pub fn f_squeeze_dims(&self, dim: impl IntList) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_squeeze_dims(
c_tensors.as_mut_ptr(),
self.c_tensor,
dim.as_ptr(),
dim.len_i32()
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
pub fn f_squeeze_dims_(&mut self, dim: impl IntList) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_squeeze_dims_(
c_tensors.as_mut_ptr(),
self.c_tensor,
dim.as_ptr(),
dim.len_i32()
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
// --- sspaddmm and stack wrappers. `f_stack*` are associated functions taking
// a slice of tensors; the slice is marshalled to a temporary pointer array by
// `ptr_list` (valid for the duration of the C call statement) plus an i32 length.
pub fn f_sspaddmm(&self, mat1: &Tensor, mat2: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_sspaddmm(
c_tensors.as_mut_ptr(),
self.c_tensor,
mat1.c_tensor,
mat2.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
pub fn f_sspaddmm_out(
&self,
out: &Tensor,
mat1: &Tensor,
mat2: &Tensor,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_sspaddmm_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
mat1.c_tensor,
mat2.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
pub fn f_stack<T: Borrow<Tensor>>(tensors: &[T], dim: i64) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_stack(
c_tensors.as_mut_ptr(),
ptr_list(tensors).as_ptr(),
tensors.len() as i32,
dim
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
pub fn f_stack_out<T: Borrow<Tensor>>(
out: &Tensor,
tensors: &[T],
dim: i64,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_stack_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
ptr_list(tensors).as_ptr(),
tensors.len() as i32,
dim
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
// --- std / std_mean wrappers. Rust `bool` flags are lowered to 0/1 ints for
// the C ABI; `_mean` variants fill a two-slot output array and return the
// (std, mean) pair. `IntListOption` dims are passed as (pointer, length).
pub fn f_std(&self, unbiased: bool) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_std(
c_tensors.as_mut_ptr(),
self.c_tensor,
if unbiased { 1 } else { 0 }
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
pub fn f_std_correction<S: Into<Scalar>>(
&self,
dim: impl IntListOption,
correction: S,
keepdim: bool,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_std_correction(
c_tensors.as_mut_ptr(),
self.c_tensor,
dim.as_ptr(),
dim.len_i32(),
correction.into().c_scalar,
if keepdim { 1 } else { 0 }
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
pub fn f_std_correction_out<S: Into<Scalar>>(
&self,
out: &Tensor,
dim: impl IntListOption,
correction: S,
keepdim: bool,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_std_correction_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
dim.as_ptr(),
dim.len_i32(),
correction.into().c_scalar,
if keepdim { 1 } else { 0 }
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
pub fn f_std_dim(
&self,
dim: impl IntListOption,
unbiased: bool,
keepdim: bool,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_std_dim(
c_tensors.as_mut_ptr(),
self.c_tensor,
dim.as_ptr(),
dim.len_i32(),
if unbiased { 1 } else { 0 },
if keepdim { 1 } else { 0 }
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
pub fn f_std_mean(&self, unbiased: bool) -> Result<(Tensor, Tensor), TchError> {
let mut c_tensors = [std::ptr::null_mut(); 2];
unsafe_torch_err!(atg_std_mean(
c_tensors.as_mut_ptr(),
self.c_tensor,
if unbiased { 1 } else { 0 }
));
Ok((Tensor { c_tensor: c_tensors[0] }, Tensor { c_tensor: c_tensors[1] }))
}
pub fn f_std_mean_correction<S: Into<Scalar>>(
&self,
dim: impl IntListOption,
correction: S,
keepdim: bool,
) -> Result<(Tensor, Tensor), TchError> {
let mut c_tensors = [std::ptr::null_mut(); 2];
unsafe_torch_err!(atg_std_mean_correction(
c_tensors.as_mut_ptr(),
self.c_tensor,
dim.as_ptr(),
dim.len_i32(),
correction.into().c_scalar,
if keepdim { 1 } else { 0 }
));
Ok((Tensor { c_tensor: c_tensors[0] }, Tensor { c_tensor: c_tensors[1] }))
}
pub fn f_std_mean_correction_out<S: Into<Scalar>>(
&self,
out0: &Tensor,
out1: &Tensor,
dim: impl IntListOption,
correction: S,
keepdim: bool,
) -> Result<(Tensor, Tensor), TchError> {
let mut c_tensors = [std::ptr::null_mut(); 2];
unsafe_torch_err!(atg_std_mean_correction_out(
c_tensors.as_mut_ptr(),
out0.c_tensor,
out1.c_tensor,
self.c_tensor,
dim.as_ptr(),
dim.len_i32(),
correction.into().c_scalar,
if keepdim { 1 } else { 0 }
));
Ok((Tensor { c_tensor: c_tensors[0] }, Tensor { c_tensor: c_tensors[1] }))
}
pub fn f_std_mean_dim(
&self,
dim: impl IntListOption,
unbiased: bool,
keepdim: bool,
) -> Result<(Tensor, Tensor), TchError> {
let mut c_tensors = [std::ptr::null_mut(); 2];
unsafe_torch_err!(atg_std_mean_dim(
c_tensors.as_mut_ptr(),
self.c_tensor,
dim.as_ptr(),
dim.len_i32(),
if unbiased { 1 } else { 0 },
if keepdim { 1 } else { 0 }
));
Ok((Tensor { c_tensor: c_tensors[0] }, Tensor { c_tensor: c_tensors[1] }))
}
pub fn f_std_out(
&self,
out: &Tensor,
dim: impl IntListOption,
unbiased: bool,
keepdim: bool,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_std_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
dim.as_ptr(),
dim.len_i32(),
if unbiased { 1 } else { 0 },
if keepdim { 1 } else { 0 }
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Short-time Fourier transform (no centering variant); thin wrapper over
/// the C binding `atg_stft`.
///
/// Optional integers use the generator's (value, is_none) encoding: the C
/// side receives `unwrap_or(0)` plus a separate `is_none` i8 flag, so 0 is
/// only meaningful when the flag is 0. An absent `window` is passed as a
/// null tensor pointer. Booleans are lowered to 0/1 ints.
pub fn f_stft<T: Borrow<Tensor>>(
&self,
n_fft: i64,
hop_length: impl Into<Option<i64>>,
win_length: impl Into<Option<i64>>,
window: Option<T>,
normalized: bool,
onesided: bool,
return_complex: bool,
) -> Result<Tensor, TchError> {
// Normalize the `impl Into<Option<i64>>` sugar to plain Options first.
let hop_length = hop_length.into();
let win_length = win_length.into();
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_stft(
c_tensors.as_mut_ptr(),
self.c_tensor,
n_fft,
hop_length.unwrap_or(0i64),
hop_length.is_none() as i8,
win_length.unwrap_or(0i64),
win_length.is_none() as i8,
window.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
if normalized { 1 } else { 0 },
if onesided { 1 } else { 0 },
if return_complex { 1 } else { 0 }
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// STFT variant exposing `center`/`pad_mode`; wraps `atg_stft_center`.
///
/// `pad_mode` is passed as a raw byte pointer plus explicit i32 length (the
/// `&str` is NOT NUL-terminated — the C side must honor the length argument).
/// Optional integers use the (value, is_none) pair encoding; an absent
/// `window` becomes a null tensor pointer; booleans become 0/1 ints.
pub fn f_stft_center<T: Borrow<Tensor>>(
&self,
n_fft: i64,
hop_length: impl Into<Option<i64>>,
win_length: impl Into<Option<i64>>,
window: Option<T>,
center: bool,
pad_mode: &str,
normalized: bool,
onesided: bool,
return_complex: bool,
) -> Result<Tensor, TchError> {
let hop_length = hop_length.into();
let win_length = win_length.into();
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_stft_center(
c_tensors.as_mut_ptr(),
self.c_tensor,
n_fft,
hop_length.unwrap_or(0i64),
hop_length.is_none() as i8,
win_length.unwrap_or(0i64),
win_length.is_none() as i8,
window.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
if center { 1 } else { 0 },
pad_mode.as_ptr(),
pad_mode.len() as i32,
if normalized { 1 } else { 0 },
if onesided { 1 } else { 0 },
if return_complex { 1 } else { 0 }
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible tensor subtraction (`self - other`), delegating to the C
/// binding `atg_sub`; returns the freshly created result tensor or the
/// torch error reported by the call.
pub fn f_sub(&self, other: &Tensor) -> Result<Tensor, TchError> {
    let mut out__ = [std::ptr::null_mut(); 1];
    unsafe_torch_err!(atg_sub(out__.as_mut_ptr(), self.c_tensor, other.c_tensor));
    Ok(Tensor { c_tensor: out__[0] })
}
/// Fallible in-place subtraction via `atg_sub_`; the returned `Tensor`
/// wraps whatever handle the C call yields.
pub fn f_sub_(&mut self, other: &Tensor) -> Result<Tensor, TchError> {
    let mut out__ = [std::ptr::null_mut(); 1];
    unsafe_torch_err!(atg_sub_(out__.as_mut_ptr(), self.c_tensor, other.c_tensor));
    Ok(Tensor { c_tensor: out__[0] })
}
/// Fallible subtraction writing into the preallocated `out` tensor via
/// `atg_sub_out`; the returned `Tensor` wraps the handle produced by the
/// C call.
pub fn f_sub_out(&self, out: &Tensor, other: &Tensor) -> Result<Tensor, TchError> {
    let mut out__ = [std::ptr::null_mut(); 1];
    unsafe_torch_err!(atg_sub_out(
        out__.as_mut_ptr(),
        out.c_tensor,
        self.c_tensor,
        other.c_tensor
    ));
    Ok(Tensor { c_tensor: out__[0] })
}
pub fn f_sub_scalar<S: Into<Scalar>>(&self, other: S) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_sub_scalar(
c_tensors.as_mut_ptr(),
self.c_tensor,
other.into().c_scalar
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
pub fn f_sub_scalar_<S: Into<Scalar>>(&mut self, other: S) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_sub_scalar_(
c_tensors.as_mut_ptr(),
self.c_tensor,
other.into().c_scalar
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
pub fn f_sub_scalar_out<S: Into<Scalar>>(
&self,
out: &Tensor,
other: S,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_sub_scalar_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
other.into().c_scalar
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
pub fn f_subtract(&self, other: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_subtract(c_tensors.as_mut_ptr(), self.c_tensor, other.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
pub fn f_subtract_(&mut self, other: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_subtract_(c_tensors.as_mut_ptr(), self.c_tensor, other.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
pub fn f_subtract_out(&self, out: &Tensor, other: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_subtract_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
other.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
pub fn f_subtract_scalar<S: Into<Scalar>>(&self, other: S) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_subtract_scalar(
c_tensors.as_mut_ptr(),
self.c_tensor,
other.into().c_scalar
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
pub fn f_subtract_scalar_<S: Into<Scalar>>(&mut self, other: S) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_subtract_scalar_(
c_tensors.as_mut_ptr(),
self.c_tensor,
other.into().c_scalar
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
pub fn f_sum(&self, dtype: impl Into<Option<Kind>>) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_sum(
c_tensors.as_mut_ptr(),
self.c_tensor,
dtype.into().map_or(-1, |s| s.c_int())
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
pub fn f_sum_dim_intlist(
&self,
dim: impl IntListOption,
keepdim: bool,
dtype: impl Into<Option<Kind>>,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_sum_dim_intlist(
c_tensors.as_mut_ptr(),
self.c_tensor,
dim.as_ptr(),
dim.len_i32(),
if keepdim { 1 } else { 0 },
dtype.into().map_or(-1, |s| s.c_int())
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
pub fn f_sum_intlist_out(
&self,
out: &Tensor,
dim: impl IntListOption,
keepdim: bool,
dtype: impl Into<Option<Kind>>,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_sum_intlist_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
dim.as_ptr(),
dim.len_i32(),
if keepdim { 1 } else { 0 },
dtype.into().map_or(-1, |s| s.c_int())
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
pub fn f_sum_out(
&self,
out: &Tensor,
dtype: impl Into<Option<Kind>>,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_sum_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
dtype.into().map_or(-1, |s| s.c_int())
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
pub fn f_sum_to_size(&self, size: impl IntList) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_sum_to_size(
c_tensors.as_mut_ptr(),
self.c_tensor,
size.as_ptr(),
size.len_i32()
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Singular value decomposition; wraps `atg_svd`, which fills three result
/// handles (U, S, V per libtorch's `svd` — confirm ordering against the C
/// declarations). Booleans are lowered to 0/1 ints for the C ABI.
pub fn f_svd(
&self,
some: bool,
compute_uv: bool,
) -> Result<(Tensor, Tensor, Tensor), TchError> {
// Three output slots; all are filled on success.
let mut c_tensors = [std::ptr::null_mut(); 3];
unsafe_torch_err!(atg_svd(
c_tensors.as_mut_ptr(),
self.c_tensor,
if some { 1 } else { 0 },
if compute_uv { 1 } else { 0 }
));
Ok((
Tensor { c_tensor: c_tensors[0] },
Tensor { c_tensor: c_tensors[1] },
Tensor { c_tensor: c_tensors[2] },
))
}
pub fn f_svd_u(
&self,
u: &Tensor,
s: &Tensor,
v: &Tensor,
some: bool,
compute_uv: bool,
) -> Result<(Tensor, Tensor, Tensor), TchError> {
let mut c_tensors = [std::ptr::null_mut(); 3];
unsafe_torch_err!(atg_svd_u(
c_tensors.as_mut_ptr(),
u.c_tensor,
s.c_tensor,
v.c_tensor,
self.c_tensor,
if some { 1 } else { 0 },
if compute_uv { 1 } else { 0 }
));
Ok((
Tensor { c_tensor: c_tensors[0] },
Tensor { c_tensor: c_tensors[1] },
Tensor { c_tensor: c_tensors[2] },
))
}
pub fn f_swapaxes(&self, axis0: i64, axis1: i64) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_swapaxes(c_tensors.as_mut_ptr(), self.c_tensor, axis0, axis1));
Ok(Tensor { c_tensor: c_tensors[0] })
}
pub fn f_swapaxes_(&mut self, axis0: i64, axis1: i64) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_swapaxes_(c_tensors.as_mut_ptr(), self.c_tensor, axis0, axis1));
Ok(Tensor { c_tensor: c_tensors[0] })
}
pub fn f_swapdims(&self, dim0: i64, dim1: i64) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_swapdims(c_tensors.as_mut_ptr(), self.c_tensor, dim0, dim1));
Ok(Tensor { c_tensor: c_tensors[0] })
}
pub fn f_swapdims_(&mut self, dim0: i64, dim1: i64) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_swapdims_(c_tensors.as_mut_ptr(), self.c_tensor, dim0, dim1));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible transpose; wraps the C binding `atg_t` (libtorch's `t()`).
/// NOTE(review): the Rust name is `f_tr` rather than `f_t` — presumably a
/// generator rename to avoid a name clash; confirm against the binding
/// generator before relying on the naming.
pub fn f_tr(&self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_t(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
pub fn f_t_(&mut self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_t_(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
pub fn f_t_copy(&self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_t_copy(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
pub fn f_t_copy_out(&self, out: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_t_copy_out(c_tensors.as_mut_ptr(), out.c_tensor, self.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
pub fn f_take(&self, index: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_take(c_tensors.as_mut_ptr(), self.c_tensor, index.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Gathers values along a dimension using `indices`; wraps
/// `atg_take_along_dim`.
///
/// The optional `dim` uses the generator's (value, is_none) pair encoding:
/// the C side receives `unwrap_or(0)` plus a separate i8 flag, so the 0 is
/// only meaningful when the flag is 0.
pub fn f_take_along_dim(
&self,
indices: &Tensor,
dim: impl Into<Option<i64>>,
) -> Result<Tensor, TchError> {
let dim = dim.into();
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_take_along_dim(
c_tensors.as_mut_ptr(),
self.c_tensor,
indices.c_tensor,
dim.unwrap_or(0i64),
dim.is_none() as i8
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
pub fn f_take_along_dim_out(
&self,
out: &Tensor,
indices: &Tensor,
dim: impl Into<Option<i64>>,
) -> Result<Tensor, TchError> {
let dim = dim.into();
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_take_along_dim_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
indices.c_tensor,
dim.unwrap_or(0i64),
dim.is_none() as i8
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
pub fn f_take_out(&self, out: &Tensor, index: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_take_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
index.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
pub fn f_tan(&self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_tan(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
pub fn f_tan_(&mut self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_tan_(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
pub fn f_tan_out(&self, out: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_tan_out(c_tensors.as_mut_ptr(), out.c_tensor, self.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
pub fn f_tanh(&self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_tanh(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
pub fn f_tanh_(&mut self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_tanh_(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
pub fn f_tanh_backward(grad_output: &Tensor, output: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_tanh_backward(
c_tensors.as_mut_ptr(),
grad_output.c_tensor,
output.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
pub fn f_tanh_backward_grad_input(
grad_input: &Tensor,
grad_output: &Tensor,
output: &Tensor,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_tanh_backward_grad_input(
c_tensors.as_mut_ptr(),
grad_input.c_tensor,
grad_output.c_tensor,
output.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
pub fn f_tanh_out(&self, out: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_tanh_out(c_tensors.as_mut_ptr(), out.c_tensor, self.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Splits `self` into `sections` pieces along `dim` via `atg_tensor_split`.
///
/// The C call returns a heap-allocated, null-terminated array of tensor
/// handles. Each handle is moved into an owning `Tensor`; the array itself
/// (but not the handles) is then released with `libc::free`, matching the
/// allocator used on the C side.
pub fn f_tensor_split(&self, sections: i64, dim: i64) -> Result<Vec<Tensor>, TchError> {
let c_tensors = unsafe_torch_err!(atg_tensor_split(self.c_tensor, sections, dim));
let mut r__ = vec![];
let mut i = 0;
// Walk the array until the null terminator.
loop {
let c__ = unsafe { *c_tensors.add(i) };
if c__.is_null() {
break;
}
r__.push(Tensor { c_tensor: c__ });
i += 1;
}
unsafe { libc::free(c_tensors as *mut libc::c_void) }
Ok(r__)
}
/// Splits `self` at the given `indices` along `dim`; wraps
/// `atg_tensor_split_indices`.
///
/// `indices` is forwarded as a raw pointer + i32 length pair. The C call
/// returns a heap-allocated, null-terminated array of tensor handles that
/// is drained into owning `Tensor`s and then freed with `libc::free`.
pub fn f_tensor_split_indices(
&self,
indices: impl IntList,
dim: i64,
) -> Result<Vec<Tensor>, TchError> {
let c_tensors = unsafe_torch_err!(atg_tensor_split_indices(
self.c_tensor,
indices.as_ptr(),
indices.len_i32(),
dim
));
let mut r__ = vec![];
let mut i = 0;
// Walk the array until the null terminator.
loop {
let c__ = unsafe { *c_tensors.add(i) };
if c__.is_null() {
break;
}
r__.push(Tensor { c_tensor: c__ });
i += 1;
}
unsafe { libc::free(c_tensors as *mut libc::c_void) }
Ok(r__)
}
/// Splits `self` along `dim` using a tensor argument that the C side
/// interprets as either indices or a section count; wraps
/// `atg_tensor_split_tensor_indices_or_sections`.
///
/// Returns the null-terminated handle array drained into owning `Tensor`s;
/// the array itself is released with `libc::free` afterwards.
pub fn f_tensor_split_tensor_indices_or_sections(
&self,
tensor_indices_or_sections: &Tensor,
dim: i64,
) -> Result<Vec<Tensor>, TchError> {
let c_tensors = unsafe_torch_err!(atg_tensor_split_tensor_indices_or_sections(
self.c_tensor,
tensor_indices_or_sections.c_tensor,
dim
));
let mut r__ = vec![];
let mut i = 0;
// Walk the array until the null terminator.
loop {
let c__ = unsafe { *c_tensors.add(i) };
if c__.is_null() {
break;
}
r__.push(Tensor { c_tensor: c__ });
i += 1;
}
unsafe { libc::free(c_tensors as *mut libc::c_void) }
Ok(r__)
}
pub fn f_tensordot(
&self,
other: &Tensor,
dims_self: impl IntList,
dims_other: impl IntList,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_tensordot(
c_tensors.as_mut_ptr(),
self.c_tensor,
other.c_tensor,
dims_self.as_ptr(),
dims_self.len_i32(),
dims_other.as_ptr(),
dims_other.len_i32()
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
pub fn f_tensordot_out(
&self,
out: &Tensor,
other: &Tensor,
dims_self: impl IntList,
dims_other: impl IntList,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_tensordot_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
other.c_tensor,
dims_self.as_ptr(),
dims_self.len_i32(),
dims_other.as_ptr(),
dims_other.len_i32()
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
pub fn f_threshold<S: Into<Scalar>>(&self, threshold: S, value: S) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_threshold(
c_tensors.as_mut_ptr(),
self.c_tensor,
threshold.into().c_scalar,
value.into().c_scalar
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
pub fn f_threshold_<S: Into<Scalar>>(
&mut self,
threshold: S,
value: S,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_threshold_(
c_tensors.as_mut_ptr(),
self.c_tensor,
threshold.into().c_scalar,
value.into().c_scalar
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
pub fn f_threshold_backward<S: Into<Scalar>>(
&self,
grad_output: &Tensor,
threshold: S,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_threshold_backward(
c_tensors.as_mut_ptr(),
grad_output.c_tensor,
self.c_tensor,
threshold.into().c_scalar
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
pub fn f_threshold_backward_grad_input<S: Into<Scalar>>(
&self,
grad_input: &Tensor,
grad_output: &Tensor,
threshold: S,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_threshold_backward_grad_input(
c_tensors.as_mut_ptr(),
grad_input.c_tensor,
grad_output.c_tensor,
self.c_tensor,
threshold.into().c_scalar
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
pub fn f_threshold_out<S: Into<Scalar>>(
&self,
out: &Tensor,
threshold: S,
value: S,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_threshold_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
threshold.into().c_scalar,
value.into().c_scalar
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
pub fn f_tile(&self, dims: impl IntList) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_tile(
c_tensors.as_mut_ptr(),
self.c_tensor,
dims.as_ptr(),
dims.len_i32()
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
pub fn f_to(&self, device: Device) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_to(c_tensors.as_mut_ptr(), self.c_tensor, device.c_int()));
Ok(Tensor { c_tensor: c_tensors[0] })
}
pub fn f_to_dense(
&self,
dtype: impl Into<Option<Kind>>,
masked_grad: bool,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_to_dense(
c_tensors.as_mut_ptr(),
self.c_tensor,
dtype.into().map_or(-1, |s| s.c_int()),
if masked_grad { 1 } else { 0 }
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
pub fn f_to_dense_backward(
&self,
grad: &Tensor,
masked_grad: bool,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_to_dense_backward(
c_tensors.as_mut_ptr(),
grad.c_tensor,
self.c_tensor,
if masked_grad { 1 } else { 0 }
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Converts `self` to the given device and dtype via `atg_to_device`,
/// returning a new `Tensor` handle.
///
/// NOTE(review): despite the trailing underscore in the Rust name, this
/// method takes `&self` and calls the non-suffixed C function
/// `atg_to_device` — the `_` suffix is presumably a generator rename to
/// avoid a clash with another `to_device` variant; confirm against the
/// binding generator.
pub fn f_to_device_(
&self,
device: Device,
dtype: Kind,
non_blocking: bool,
copy: bool,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_to_device(
c_tensors.as_mut_ptr(),
self.c_tensor,
device.c_int(),
dtype.c_int(),
if non_blocking { 1 } else { 0 },
if copy { 1 } else { 0 }
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
pub fn f_to_dtype(
&self,
dtype: Kind,
non_blocking: bool,
copy: bool,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_to_dtype(
c_tensors.as_mut_ptr(),
self.c_tensor,
dtype.c_int(),
if non_blocking { 1 } else { 0 },
if copy { 1 } else { 0 }
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
pub fn f_to_dtype_layout(
&self,
options: (Kind, Device),
non_blocking: bool,
copy: bool,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_to_dtype_layout(
c_tensors.as_mut_ptr(),
self.c_tensor,
options.0.c_int(),
options.1.c_int(),
if non_blocking { 1 } else { 0 },
if copy { 1 } else { 0 }
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
pub fn f_to_mkldnn(&self, dtype: impl Into<Option<Kind>>) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_to_mkldnn(
c_tensors.as_mut_ptr(),
self.c_tensor,
dtype.into().map_or(-1, |s| s.c_int())
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
pub fn f_to_mkldnn_backward(&self, grad: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_to_mkldnn_backward(
c_tensors.as_mut_ptr(),
grad.c_tensor,
self.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
pub fn f_to_mkldnn_out(
&self,
out: &Tensor,
dtype: impl Into<Option<Kind>>,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_to_mkldnn_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
dtype.into().map_or(-1, |s| s.c_int())
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
pub fn f_to_other(
&self,
other: &Tensor,
non_blocking: bool,
copy: bool,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_to_other(
c_tensors.as_mut_ptr(),
self.c_tensor,
other.c_tensor,
if non_blocking { 1 } else { 0 },
if copy { 1 } else { 0 }
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
pub fn f_to_padded_tensor(
&self,
padding: f64,
output_size: impl IntListOption,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_to_padded_tensor(
c_tensors.as_mut_ptr(),
self.c_tensor,
padding,
output_size.as_ptr(),
output_size.len_i32()
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
pub fn f_to_padded_tensor_out(
&self,
out: &Tensor,
padding: f64,
output_size: impl IntListOption,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_to_padded_tensor_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
padding,
output_size.as_ptr(),
output_size.len_i32()
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
pub fn f_to_sparse(
&self,
layout: Option<Layout>,
blocksize: impl IntListOption,
dense_dim: impl Into<Option<i64>>,
) -> Result<Tensor, TchError> {
let dense_dim = dense_dim.into();
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_to_sparse(
c_tensors.as_mut_ptr(),
self.c_tensor,
layout.map_or(-1, |s| s.to_i8()),
blocksize.as_ptr(),
blocksize.len_i32(),
dense_dim.unwrap_or(0i64),
dense_dim.is_none() as i8
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
pub fn f_to_sparse_bsc(
&self,
blocksize: impl IntList,
dense_dim: impl Into<Option<i64>>,
) -> Result<Tensor, TchError> {
let dense_dim = dense_dim.into();
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_to_sparse_bsc(
c_tensors.as_mut_ptr(),
self.c_tensor,
blocksize.as_ptr(),
blocksize.len_i32(),
dense_dim.unwrap_or(0i64),
dense_dim.is_none() as i8
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
pub fn f_to_sparse_bsr(
&self,
blocksize: impl IntList,
dense_dim: impl Into<Option<i64>>,
) -> Result<Tensor, TchError> {
let dense_dim = dense_dim.into();
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_to_sparse_bsr(
c_tensors.as_mut_ptr(),
self.c_tensor,
blocksize.as_ptr(),
blocksize.len_i32(),
dense_dim.unwrap_or(0i64),
dense_dim.is_none() as i8
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
pub fn f_to_sparse_csc(&self, dense_dim: impl Into<Option<i64>>) -> Result<Tensor, TchError> {
let dense_dim = dense_dim.into();
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_to_sparse_csc(
c_tensors.as_mut_ptr(),
self.c_tensor,
dense_dim.unwrap_or(0i64),
dense_dim.is_none() as i8
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
pub fn f_to_sparse_csr(&self, dense_dim: impl Into<Option<i64>>) -> Result<Tensor, TchError> {
let dense_dim = dense_dim.into();
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_to_sparse_csr(
c_tensors.as_mut_ptr(),
self.c_tensor,
dense_dim.unwrap_or(0i64),
dense_dim.is_none() as i8
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
pub fn f_to_sparse_sparse_dim(&self, sparse_dim: i64) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_to_sparse_sparse_dim(
c_tensors.as_mut_ptr(),
self.c_tensor,
sparse_dim
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible top-k: delegates to `atg_topk` and returns the two result
/// tensors the C call produces (values and indices, per libtorch's `topk`).
/// `largest`/`sorted` are lowered to 0/1 ints for the C ABI.
pub fn f_topk(
    &self,
    k: i64,
    dim: i64,
    largest: bool,
    sorted: bool,
) -> Result<(Tensor, Tensor), TchError> {
    let mut out__ = [std::ptr::null_mut(); 2];
    let largest_ = if largest { 1 } else { 0 };
    let sorted_ = if sorted { 1 } else { 0 };
    unsafe_torch_err!(atg_topk(out__.as_mut_ptr(), self.c_tensor, k, dim, largest_, sorted_));
    let values = Tensor { c_tensor: out__[0] };
    let indices = Tensor { c_tensor: out__[1] };
    Ok((values, indices))
}
pub fn f_topk_values(
&self,
values: &Tensor,
indices: &Tensor,
k: i64,
dim: i64,
largest: bool,
sorted: bool,
) -> Result<(Tensor, Tensor), TchError> {
let mut c_tensors = [std::ptr::null_mut(); 2];
unsafe_torch_err!(atg_topk_values(
c_tensors.as_mut_ptr(),
values.c_tensor,
indices.c_tensor,
self.c_tensor,
k,
dim,
if largest { 1 } else { 0 },
if sorted { 1 } else { 0 }
));
Ok((Tensor { c_tensor: c_tensors[0] }, Tensor { c_tensor: c_tensors[1] }))
}
pub fn f_totype(&self, scalar_type: Kind) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_totype(c_tensors.as_mut_ptr(), self.c_tensor, scalar_type.c_int()));
Ok(Tensor { c_tensor: c_tensors[0] })
}
pub fn f_trace(&self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_trace(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
pub fn f_trace_backward(grad: &Tensor, sizes: impl IntList) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_trace_backward(
c_tensors.as_mut_ptr(),
grad.c_tensor,
sizes.as_ptr(),
sizes.len_i32()
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
pub fn f_trace_out(&self, out: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_trace_out(c_tensors.as_mut_ptr(), out.c_tensor, self.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
pub fn f_transpose(&self, dim0: i64, dim1: i64) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_transpose(c_tensors.as_mut_ptr(), self.c_tensor, dim0, dim1));
Ok(Tensor { c_tensor: c_tensors[0] })
}
pub fn f_transpose_(&mut self, dim0: i64, dim1: i64) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_transpose_(c_tensors.as_mut_ptr(), self.c_tensor, dim0, dim1));
Ok(Tensor { c_tensor: c_tensors[0] })
}
pub fn f_transpose_copy(&self, dim0: i64, dim1: i64) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_transpose_copy(c_tensors.as_mut_ptr(), self.c_tensor, dim0, dim1));
Ok(Tensor { c_tensor: c_tensors[0] })
}
pub fn f_transpose_copy_int_out(
&self,
out: &Tensor,
dim0: i64,
dim1: i64,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_transpose_copy_int_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
dim0,
dim1
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
pub fn f_trapezoid(y: &Tensor, dim: i64) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_trapezoid(c_tensors.as_mut_ptr(), y.c_tensor, dim));
Ok(Tensor { c_tensor: c_tensors[0] })
}
pub fn f_trapezoid_x(y: &Tensor, x: &Tensor, dim: i64) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_trapezoid_x(c_tensors.as_mut_ptr(), y.c_tensor, x.c_tensor, dim));
Ok(Tensor { c_tensor: c_tensors[0] })
}
pub fn f_trapz(y: &Tensor, x: &Tensor, dim: i64) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_trapz(c_tensors.as_mut_ptr(), y.c_tensor, x.c_tensor, dim));
Ok(Tensor { c_tensor: c_tensors[0] })
}
pub fn f_trapz_dx(y: &Tensor, dx: f64, dim: i64) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_trapz_dx(c_tensors.as_mut_ptr(), y.c_tensor, dx, dim));
Ok(Tensor { c_tensor: c_tensors[0] })
}
pub fn f_triangular_solve(
&self,
a: &Tensor,
upper: bool,
transpose: bool,
unitriangular: bool,
) -> Result<(Tensor, Tensor), TchError> {
let mut c_tensors = [std::ptr::null_mut(); 2];
unsafe_torch_err!(atg_triangular_solve(
c_tensors.as_mut_ptr(),
self.c_tensor,
a.c_tensor,
if upper { 1 } else { 0 },
if transpose { 1 } else { 0 },
if unitriangular { 1 } else { 0 }
));
Ok((Tensor { c_tensor: c_tensors[0] }, Tensor { c_tensor: c_tensors[1] }))
}
pub fn f_triangular_solve_x(
&self,
x: &Tensor,
m: &Tensor,
a: &Tensor,
upper: bool,
transpose: bool,
unitriangular: bool,
) -> Result<(Tensor, Tensor), TchError> {
let mut c_tensors = [std::ptr::null_mut(); 2];
unsafe_torch_err!(atg_triangular_solve_x(
c_tensors.as_mut_ptr(),
x.c_tensor,
m.c_tensor,
self.c_tensor,
a.c_tensor,
if upper { 1 } else { 0 },
if transpose { 1 } else { 0 },
if unitriangular { 1 } else { 0 }
));
Ok((Tensor { c_tensor: c_tensors[0] }, Tensor { c_tensor: c_tensors[1] }))
}
pub fn f_tril(&self, diagonal: i64) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_tril(c_tensors.as_mut_ptr(), self.c_tensor, diagonal));
Ok(Tensor { c_tensor: c_tensors[0] })
}
pub fn f_tril_(&mut self, diagonal: i64) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_tril_(c_tensors.as_mut_ptr(), self.c_tensor, diagonal));
Ok(Tensor { c_tensor: c_tensors[0] })
}
pub fn f_tril_indices(
row: i64,
col: i64,
offset: i64,
options: (Kind, Device),
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_tril_indices(
c_tensors.as_mut_ptr(),
row,
col,
offset,
options.0.c_int(),
options.1.c_int()
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
pub fn f_tril_indices_out(
out: &Tensor,
row: i64,
col: i64,
offset: i64,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_tril_indices_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
row,
col,
offset
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
pub fn f_tril_out(&self, out: &Tensor, diagonal: i64) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_tril_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
diagonal
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
pub fn f_triplet_margin_loss(
anchor: &Tensor,
positive: &Tensor,
negative: &Tensor,
margin: f64,
p: f64,
eps: f64,
swap: bool,
reduction: crate::Reduction,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_triplet_margin_loss(
c_tensors.as_mut_ptr(),
anchor.c_tensor,
positive.c_tensor,
negative.c_tensor,
margin,
p,
eps,
if swap { 1 } else { 0 },
reduction.to_int()
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
pub fn f_triu(&self, diagonal: i64) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_triu(c_tensors.as_mut_ptr(), self.c_tensor, diagonal));
Ok(Tensor { c_tensor: c_tensors[0] })
}
pub fn f_triu_(&mut self, diagonal: i64) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_triu_(c_tensors.as_mut_ptr(), self.c_tensor, diagonal));
Ok(Tensor { c_tensor: c_tensors[0] })
}
pub fn f_triu_indices(
row: i64,
col: i64,
offset: i64,
options: (Kind, Device),
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_triu_indices(
c_tensors.as_mut_ptr(),
row,
col,
offset,
options.0.c_int(),
options.1.c_int()
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
pub fn f_triu_indices_out(
out: &Tensor,
row: i64,
col: i64,
offset: i64,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_triu_indices_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
row,
col,
offset
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
pub fn f_triu_out(&self, out: &Tensor, diagonal: i64) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_triu_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
diagonal
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
pub fn f_true_divide(&self, other: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_true_divide(c_tensors.as_mut_ptr(), self.c_tensor, other.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
pub fn f_true_divide_(&mut self, other: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_true_divide_(c_tensors.as_mut_ptr(), self.c_tensor, other.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
pub fn f_true_divide_out(&self, out: &Tensor, other: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_true_divide_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
other.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
pub fn f_true_divide_scalar<S: Into<Scalar>>(&self, other: S) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_true_divide_scalar(
c_tensors.as_mut_ptr(),
self.c_tensor,
other.into().c_scalar
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
pub fn f_true_divide_scalar_<S: Into<Scalar>>(&mut self, other: S) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_true_divide_scalar_(
c_tensors.as_mut_ptr(),
self.c_tensor,
other.into().c_scalar
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
pub fn f_trunc(&self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_trunc(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible in-place `trunc_` on `self` via `atg_trunc_`.
pub fn f_trunc_(&mut self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_trunc_(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible `trunc` writing through `out` via `atg_trunc_out`.
pub fn f_trunc_out(&self, out: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_trunc_out(c_tensors.as_mut_ptr(), out.c_tensor, self.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for `type_as(self, other)` via `atg_type_as`.
pub fn f_type_as(&self, other: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_type_as(c_tensors.as_mut_ptr(), self.c_tensor, other.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible `unbind(self, dim)`. The C call returns a null-terminated array of
/// tensor handles, which is walked into a `Vec<Tensor>` and then freed with
/// `libc::free` (ownership of each element moved into the `Tensor` wrappers).
pub fn f_unbind(&self, dim: i64) -> Result<Vec<Tensor>, TchError> {
let c_tensors = unsafe_torch_err!(atg_unbind(self.c_tensor, dim));
let mut r__ = vec![];
let mut i = 0;
loop {
// Null pointer marks the end of the C-allocated array.
let c__ = unsafe { *c_tensors.add(i) };
if c__.is_null() {
break;
}
r__.push(Tensor { c_tensor: c__ });
i += 1;
}
unsafe { libc::free(c_tensors as *mut libc::c_void) }
Ok(r__)
}
/// Fallible `unbind_copy(self, dim)`; same null-terminated-array protocol as
/// `f_unbind` — collect handles, then free the C array.
pub fn f_unbind_copy(&self, dim: i64) -> Result<Vec<Tensor>, TchError> {
let c_tensors = unsafe_torch_err!(atg_unbind_copy(self.c_tensor, dim));
let mut r__ = vec![];
let mut i = 0;
loop {
let c__ = unsafe { *c_tensors.add(i) };
if c__.is_null() {
break;
}
r__.push(Tensor { c_tensor: c__ });
i += 1;
}
unsafe { libc::free(c_tensors as *mut libc::c_void) }
Ok(r__)
}
/// Fallible `unbind_copy` writing into caller-provided `out` tensors; the slice is
/// flattened to a raw pointer array (via `ptr_list`) plus an explicit length.
pub fn f_unbind_copy_int_out<T: Borrow<Tensor>>(
&self,
out: &[T],
dim: i64,
) -> Result<(), TchError> {
unsafe_torch_err!(atg_unbind_copy_int_out(
ptr_list(out).as_ptr(),
out.len() as i32,
self.c_tensor,
dim
));
Ok(())
}
/// Fallible `unflatten(self, dim, sizes)`; `sizes` crosses the FFI boundary as a
/// (pointer, i32 length) pair via the `IntList` trait.
pub fn f_unflatten(&self, dim: i64, sizes: impl IntList) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_unflatten(
c_tensors.as_mut_ptr(),
self.c_tensor,
dim,
sizes.as_ptr(),
sizes.len_i32()
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible associated-function binding for `unflatten_dense_tensors(flat, tensors)`;
/// `tensors` is passed as a raw pointer array + length, and the C result is a
/// null-terminated tensor array collected then freed (same protocol as `f_unbind`).
pub fn f_unflatten_dense_tensors<T: Borrow<Tensor>>(
flat: &Tensor,
tensors: &[T],
) -> Result<Vec<Tensor>, TchError> {
let c_tensors = unsafe_torch_err!(atg_unflatten_dense_tensors(
flat.c_tensor,
ptr_list(tensors).as_ptr(),
tensors.len() as i32
));
let mut r__ = vec![];
let mut i = 0;
loop {
let c__ = unsafe { *c_tensors.add(i) };
if c__.is_null() {
break;
}
r__.push(Tensor { c_tensor: c__ });
i += 1;
}
unsafe { libc::free(c_tensors as *mut libc::c_void) }
Ok(r__)
}
/// Fallible `unfold(self, dimension, size, step)` via `atg_unfold`.
pub fn f_unfold(&self, dimension: i64, size: i64, step: i64) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_unfold(c_tensors.as_mut_ptr(), self.c_tensor, dimension, size, step));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible associated-function binding for `unfold_backward`; `input_sizes` crosses
/// the FFI boundary as a (pointer, i32 length) pair.
pub fn f_unfold_backward(
grad_in: &Tensor,
input_sizes: impl IntList,
dim: i64,
size: i64,
step: i64,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_unfold_backward(
c_tensors.as_mut_ptr(),
grad_in.c_tensor,
input_sizes.as_ptr(),
input_sizes.len_i32(),
dim,
size,
step
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible `unfold_backward` writing through `out`; argument order mirrors
/// `f_unfold_backward` with `out` prepended, per the generated C ABI.
pub fn f_unfold_backward_out(
out: &Tensor,
grad_in: &Tensor,
input_sizes: impl IntList,
dim: i64,
size: i64,
step: i64,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_unfold_backward_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
grad_in.c_tensor,
input_sizes.as_ptr(),
input_sizes.len_i32(),
dim,
size,
step
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible `unfold_copy(self, dimension, size, step)` via `atg_unfold_copy`.
pub fn f_unfold_copy(&self, dimension: i64, size: i64, step: i64) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_unfold_copy(
c_tensors.as_mut_ptr(),
self.c_tensor,
dimension,
size,
step
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible `unfold_copy` writing through `out` via `atg_unfold_copy_out`.
pub fn f_unfold_copy_out(
&self,
out: &Tensor,
dimension: i64,
size: i64,
step: i64,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_unfold_copy_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
dimension,
size,
step
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible `uniform(self, from, to)` via `atg_uniform`.
pub fn f_uniform(&self, from: f64, to: f64) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_uniform(c_tensors.as_mut_ptr(), self.c_tensor, from, to));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible in-place `uniform_(self, from, to)` via `atg_uniform_`.
pub fn f_uniform_(&mut self, from: f64, to: f64) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_uniform_(c_tensors.as_mut_ptr(), self.c_tensor, from, to));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible `uniform` writing through `out` via `atg_uniform_out`.
pub fn f_uniform_out(&self, out: &Tensor, from: f64, to: f64) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_uniform_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
from,
to
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible `unique_consecutive`, returning (output, inverse_indices, counts) handles.
/// The optional `dim` crosses the C ABI as a (value-or-0, is_none flag) pair.
pub fn f_unique_consecutive(
&self,
return_inverse: bool,
return_counts: bool,
dim: impl Into<Option<i64>>,
) -> Result<(Tensor, Tensor, Tensor), TchError> {
let dim = dim.into();
// Three-slot out-pointer array: the C call fills all three results.
let mut c_tensors = [std::ptr::null_mut(); 3];
unsafe_torch_err!(atg_unique_consecutive(
c_tensors.as_mut_ptr(),
self.c_tensor,
if return_inverse { 1 } else { 0 },
if return_counts { 1 } else { 0 },
dim.unwrap_or(0i64),
dim.is_none() as i8
));
Ok((
Tensor { c_tensor: c_tensors[0] },
Tensor { c_tensor: c_tensors[1] },
Tensor { c_tensor: c_tensors[2] },
))
}
/// Fallible `unique_consecutive` writing through `out0`/`out1`/`out2`; same optional-`dim`
/// (value, is_none) encoding as `f_unique_consecutive`.
pub fn f_unique_consecutive_out(
&self,
out0: &Tensor,
out1: &Tensor,
out2: &Tensor,
return_inverse: bool,
return_counts: bool,
dim: impl Into<Option<i64>>,
) -> Result<(Tensor, Tensor, Tensor), TchError> {
let dim = dim.into();
let mut c_tensors = [std::ptr::null_mut(); 3];
unsafe_torch_err!(atg_unique_consecutive_out(
c_tensors.as_mut_ptr(),
out0.c_tensor,
out1.c_tensor,
out2.c_tensor,
self.c_tensor,
if return_inverse { 1 } else { 0 },
if return_counts { 1 } else { 0 },
dim.unwrap_or(0i64),
dim.is_none() as i8
));
Ok((
Tensor { c_tensor: c_tensors[0] },
Tensor { c_tensor: c_tensors[1] },
Tensor { c_tensor: c_tensors[2] },
))
}
/// Fallible `unique_dim`, returning three tensor handles; bools are lowered to the
/// 0/1 integers the C ABI expects.
pub fn f_unique_dim(
&self,
dim: i64,
sorted: bool,
return_inverse: bool,
return_counts: bool,
) -> Result<(Tensor, Tensor, Tensor), TchError> {
let mut c_tensors = [std::ptr::null_mut(); 3];
unsafe_torch_err!(atg_unique_dim(
c_tensors.as_mut_ptr(),
self.c_tensor,
dim,
if sorted { 1 } else { 0 },
if return_inverse { 1 } else { 0 },
if return_counts { 1 } else { 0 }
));
Ok((
Tensor { c_tensor: c_tensors[0] },
Tensor { c_tensor: c_tensors[1] },
Tensor { c_tensor: c_tensors[2] },
))
}
/// Fallible `unique_dim_consecutive`, returning three tensor handles.
pub fn f_unique_dim_consecutive(
&self,
dim: i64,
return_inverse: bool,
return_counts: bool,
) -> Result<(Tensor, Tensor, Tensor), TchError> {
let mut c_tensors = [std::ptr::null_mut(); 3];
unsafe_torch_err!(atg_unique_dim_consecutive(
c_tensors.as_mut_ptr(),
self.c_tensor,
dim,
if return_inverse { 1 } else { 0 },
if return_counts { 1 } else { 0 }
));
Ok((
Tensor { c_tensor: c_tensors[0] },
Tensor { c_tensor: c_tensors[1] },
Tensor { c_tensor: c_tensors[2] },
))
}
/// Fallible `unique_dim_consecutive` writing through `out0`/`out1`/`out2`.
pub fn f_unique_dim_consecutive_out(
&self,
out0: &Tensor,
out1: &Tensor,
out2: &Tensor,
dim: i64,
return_inverse: bool,
return_counts: bool,
) -> Result<(Tensor, Tensor, Tensor), TchError> {
let mut c_tensors = [std::ptr::null_mut(); 3];
unsafe_torch_err!(atg_unique_dim_consecutive_out(
c_tensors.as_mut_ptr(),
out0.c_tensor,
out1.c_tensor,
out2.c_tensor,
self.c_tensor,
dim,
if return_inverse { 1 } else { 0 },
if return_counts { 1 } else { 0 }
));
Ok((
Tensor { c_tensor: c_tensors[0] },
Tensor { c_tensor: c_tensors[1] },
Tensor { c_tensor: c_tensors[2] },
))
}
/// Fallible `unique_dim` writing through `out0`/`out1`/`out2`.
pub fn f_unique_dim_out(
&self,
out0: &Tensor,
out1: &Tensor,
out2: &Tensor,
dim: i64,
sorted: bool,
return_inverse: bool,
return_counts: bool,
) -> Result<(Tensor, Tensor, Tensor), TchError> {
let mut c_tensors = [std::ptr::null_mut(); 3];
unsafe_torch_err!(atg_unique_dim_out(
c_tensors.as_mut_ptr(),
out0.c_tensor,
out1.c_tensor,
out2.c_tensor,
self.c_tensor,
dim,
if sorted { 1 } else { 0 },
if return_inverse { 1 } else { 0 },
if return_counts { 1 } else { 0 }
));
Ok((
Tensor { c_tensor: c_tensors[0] },
Tensor { c_tensor: c_tensors[1] },
Tensor { c_tensor: c_tensors[2] },
))
}
/// Fallible `unsafe_chunk(self, chunks, dim)`; collects the null-terminated C result
/// array into a `Vec<Tensor>`, then frees the array itself.
pub fn f_unsafe_chunk(&self, chunks: i64, dim: i64) -> Result<Vec<Tensor>, TchError> {
let c_tensors = unsafe_torch_err!(atg_unsafe_chunk(self.c_tensor, chunks, dim));
let mut r__ = vec![];
let mut i = 0;
loop {
let c__ = unsafe { *c_tensors.add(i) };
if c__.is_null() {
break;
}
r__.push(Tensor { c_tensor: c__ });
i += 1;
}
unsafe { libc::free(c_tensors as *mut libc::c_void) }
Ok(r__)
}
/// Fallible `unsafe_split(self, split_size, dim)`; null-terminated-array protocol
/// as in `f_unsafe_chunk`.
pub fn f_unsafe_split(&self, split_size: i64, dim: i64) -> Result<Vec<Tensor>, TchError> {
let c_tensors = unsafe_torch_err!(atg_unsafe_split(self.c_tensor, split_size, dim));
let mut r__ = vec![];
let mut i = 0;
loop {
let c__ = unsafe { *c_tensors.add(i) };
if c__.is_null() {
break;
}
r__.push(Tensor { c_tensor: c__ });
i += 1;
}
unsafe { libc::free(c_tensors as *mut libc::c_void) }
Ok(r__)
}
/// Fallible `unsafe_split` writing into caller-provided `out` tensors (pointer array
/// + explicit length); returns no new handles.
pub fn f_unsafe_split_tensor_out<T: Borrow<Tensor>>(
&self,
out: &[T],
split_size: i64,
dim: i64,
) -> Result<(), TchError> {
unsafe_torch_err!(atg_unsafe_split_tensor_out(
ptr_list(out).as_ptr(),
out.len() as i32,
self.c_tensor,
split_size,
dim
));
Ok(())
}
/// Fallible `unsafe_split_with_sizes`; `split_sizes` crosses as (pointer, i32 length),
/// and the C result is a null-terminated tensor array collected then freed.
pub fn f_unsafe_split_with_sizes(
&self,
split_sizes: impl IntList,
dim: i64,
) -> Result<Vec<Tensor>, TchError> {
let c_tensors = unsafe_torch_err!(atg_unsafe_split_with_sizes(
self.c_tensor,
split_sizes.as_ptr(),
split_sizes.len_i32(),
dim
));
let mut r__ = vec![];
let mut i = 0;
loop {
let c__ = unsafe { *c_tensors.add(i) };
if c__.is_null() {
break;
}
r__.push(Tensor { c_tensor: c__ });
i += 1;
}
unsafe { libc::free(c_tensors as *mut libc::c_void) }
Ok(r__)
}
/// Fallible `unsafe_split_with_sizes` writing into caller-provided `out` tensors.
pub fn f_unsafe_split_with_sizes_out<T: Borrow<Tensor>>(
&self,
out: &[T],
split_sizes: impl IntList,
dim: i64,
) -> Result<(), TchError> {
unsafe_torch_err!(atg_unsafe_split_with_sizes_out(
ptr_list(out).as_ptr(),
out.len() as i32,
self.c_tensor,
split_sizes.as_ptr(),
split_sizes.len_i32(),
dim
));
Ok(())
}
/// Fallible `unsqueeze(self, dim)` via `atg_unsqueeze`.
pub fn f_unsqueeze(&self, dim: i64) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_unsqueeze(c_tensors.as_mut_ptr(), self.c_tensor, dim));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible in-place `unsqueeze_(self, dim)` via `atg_unsqueeze_`.
pub fn f_unsqueeze_(&mut self, dim: i64) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_unsqueeze_(c_tensors.as_mut_ptr(), self.c_tensor, dim));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible `unsqueeze_copy(self, dim)` via `atg_unsqueeze_copy`.
pub fn f_unsqueeze_copy(&self, dim: i64) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_unsqueeze_copy(c_tensors.as_mut_ptr(), self.c_tensor, dim));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible `unsqueeze_copy` writing through `out` via `atg_unsqueeze_copy_out`.
pub fn f_unsqueeze_copy_out(&self, out: &Tensor, dim: i64) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_unsqueeze_copy_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
dim
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible `upsample_bicubic2d`. Each optional scale crosses the C ABI as a
/// (value-or-NaN, is_none flag) pair; `output_size` as (pointer, i32 length).
pub fn f_upsample_bicubic2d(
&self,
output_size: impl IntList,
align_corners: bool,
scales_h: impl Into<Option<f64>>,
scales_w: impl Into<Option<f64>>,
) -> Result<Tensor, TchError> {
let scales_h = scales_h.into();
let scales_w = scales_w.into();
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_upsample_bicubic2d(
c_tensors.as_mut_ptr(),
self.c_tensor,
output_size.as_ptr(),
output_size.len_i32(),
if align_corners { 1 } else { 0 },
scales_h.unwrap_or(std::f64::NAN),
scales_h.is_none() as i8,
scales_w.unwrap_or(std::f64::NAN),
scales_w.is_none() as i8
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible `upsample_bicubic2d_backward` (associated function); optional scales
/// are encoded as (value-or-NaN, is_none) pairs.
pub fn f_upsample_bicubic2d_backward(
grad_output: &Tensor,
output_size: impl IntList,
input_size: impl IntList,
align_corners: bool,
scales_h: impl Into<Option<f64>>,
scales_w: impl Into<Option<f64>>,
) -> Result<Tensor, TchError> {
let scales_h = scales_h.into();
let scales_w = scales_w.into();
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_upsample_bicubic2d_backward(
c_tensors.as_mut_ptr(),
grad_output.c_tensor,
output_size.as_ptr(),
output_size.len_i32(),
input_size.as_ptr(),
input_size.len_i32(),
if align_corners { 1 } else { 0 },
scales_h.unwrap_or(std::f64::NAN),
scales_h.is_none() as i8,
scales_w.unwrap_or(std::f64::NAN),
scales_w.is_none() as i8
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible `upsample_bicubic2d_backward` writing through `grad_input`.
pub fn f_upsample_bicubic2d_backward_grad_input(
grad_input: &Tensor,
grad_output: &Tensor,
output_size: impl IntList,
input_size: impl IntList,
align_corners: bool,
scales_h: impl Into<Option<f64>>,
scales_w: impl Into<Option<f64>>,
) -> Result<Tensor, TchError> {
let scales_h = scales_h.into();
let scales_w = scales_w.into();
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_upsample_bicubic2d_backward_grad_input(
c_tensors.as_mut_ptr(),
grad_input.c_tensor,
grad_output.c_tensor,
output_size.as_ptr(),
output_size.len_i32(),
input_size.as_ptr(),
input_size.len_i32(),
if align_corners { 1 } else { 0 },
scales_h.unwrap_or(std::f64::NAN),
scales_h.is_none() as i8,
scales_w.unwrap_or(std::f64::NAN),
scales_w.is_none() as i8
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible `upsample_bicubic2d` writing through `out`.
pub fn f_upsample_bicubic2d_out(
&self,
out: &Tensor,
output_size: impl IntList,
align_corners: bool,
scales_h: impl Into<Option<f64>>,
scales_w: impl Into<Option<f64>>,
) -> Result<Tensor, TchError> {
let scales_h = scales_h.into();
let scales_w = scales_w.into();
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_upsample_bicubic2d_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
output_size.as_ptr(),
output_size.len_i32(),
if align_corners { 1 } else { 0 },
scales_h.unwrap_or(std::f64::NAN),
scales_h.is_none() as i8,
scales_w.unwrap_or(std::f64::NAN),
scales_w.is_none() as i8
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible vec-overload of `upsample_bicubic2d`: optional `output_size` and a
/// `scale_factors` double list, both passed as (pointer, i32 length) pairs.
pub fn f_upsample_bicubic2d_vec(
&self,
output_size: impl IntListOption,
align_corners: bool,
scale_factors: impl DoubleList,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_upsample_bicubic2d_vec(
c_tensors.as_mut_ptr(),
self.c_tensor,
output_size.as_ptr(),
output_size.len_i32(),
if align_corners { 1 } else { 0 },
scale_factors.as_ptr(),
scale_factors.len_i32()
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible `upsample_bilinear2d`; optional scales as (value-or-NaN, is_none) pairs.
pub fn f_upsample_bilinear2d(
&self,
output_size: impl IntList,
align_corners: bool,
scales_h: impl Into<Option<f64>>,
scales_w: impl Into<Option<f64>>,
) -> Result<Tensor, TchError> {
let scales_h = scales_h.into();
let scales_w = scales_w.into();
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_upsample_bilinear2d(
c_tensors.as_mut_ptr(),
self.c_tensor,
output_size.as_ptr(),
output_size.len_i32(),
if align_corners { 1 } else { 0 },
scales_h.unwrap_or(std::f64::NAN),
scales_h.is_none() as i8,
scales_w.unwrap_or(std::f64::NAN),
scales_w.is_none() as i8
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible `upsample_bilinear2d_backward` (associated function).
pub fn f_upsample_bilinear2d_backward(
grad_output: &Tensor,
output_size: impl IntList,
input_size: impl IntList,
align_corners: bool,
scales_h: impl Into<Option<f64>>,
scales_w: impl Into<Option<f64>>,
) -> Result<Tensor, TchError> {
let scales_h = scales_h.into();
let scales_w = scales_w.into();
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_upsample_bilinear2d_backward(
c_tensors.as_mut_ptr(),
grad_output.c_tensor,
output_size.as_ptr(),
output_size.len_i32(),
input_size.as_ptr(),
input_size.len_i32(),
if align_corners { 1 } else { 0 },
scales_h.unwrap_or(std::f64::NAN),
scales_h.is_none() as i8,
scales_w.unwrap_or(std::f64::NAN),
scales_w.is_none() as i8
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible `upsample_bilinear2d_backward` writing through `grad_input`.
pub fn f_upsample_bilinear2d_backward_grad_input(
grad_input: &Tensor,
grad_output: &Tensor,
output_size: impl IntList,
input_size: impl IntList,
align_corners: bool,
scales_h: impl Into<Option<f64>>,
scales_w: impl Into<Option<f64>>,
) -> Result<Tensor, TchError> {
let scales_h = scales_h.into();
let scales_w = scales_w.into();
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_upsample_bilinear2d_backward_grad_input(
c_tensors.as_mut_ptr(),
grad_input.c_tensor,
grad_output.c_tensor,
output_size.as_ptr(),
output_size.len_i32(),
input_size.as_ptr(),
input_size.len_i32(),
if align_corners { 1 } else { 0 },
scales_h.unwrap_or(std::f64::NAN),
scales_h.is_none() as i8,
scales_w.unwrap_or(std::f64::NAN),
scales_w.is_none() as i8
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible `upsample_bilinear2d` writing through `out`.
pub fn f_upsample_bilinear2d_out(
&self,
out: &Tensor,
output_size: impl IntList,
align_corners: bool,
scales_h: impl Into<Option<f64>>,
scales_w: impl Into<Option<f64>>,
) -> Result<Tensor, TchError> {
let scales_h = scales_h.into();
let scales_w = scales_w.into();
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_upsample_bilinear2d_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
output_size.as_ptr(),
output_size.len_i32(),
if align_corners { 1 } else { 0 },
scales_h.unwrap_or(std::f64::NAN),
scales_h.is_none() as i8,
scales_w.unwrap_or(std::f64::NAN),
scales_w.is_none() as i8
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible vec-overload of `upsample_bilinear2d` (optional size list + scale factors).
pub fn f_upsample_bilinear2d_vec(
&self,
output_size: impl IntListOption,
align_corners: bool,
scale_factors: impl DoubleList,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_upsample_bilinear2d_vec(
c_tensors.as_mut_ptr(),
self.c_tensor,
output_size.as_ptr(),
output_size.len_i32(),
if align_corners { 1 } else { 0 },
scale_factors.as_ptr(),
scale_factors.len_i32()
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible `upsample_linear1d`; single optional `scales` as (value-or-NaN, is_none).
pub fn f_upsample_linear1d(
&self,
output_size: impl IntList,
align_corners: bool,
scales: impl Into<Option<f64>>,
) -> Result<Tensor, TchError> {
let scales = scales.into();
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_upsample_linear1d(
c_tensors.as_mut_ptr(),
self.c_tensor,
output_size.as_ptr(),
output_size.len_i32(),
if align_corners { 1 } else { 0 },
scales.unwrap_or(std::f64::NAN),
scales.is_none() as i8
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible `upsample_linear1d_backward` (associated function).
pub fn f_upsample_linear1d_backward(
grad_output: &Tensor,
output_size: impl IntList,
input_size: impl IntList,
align_corners: bool,
scales: impl Into<Option<f64>>,
) -> Result<Tensor, TchError> {
let scales = scales.into();
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_upsample_linear1d_backward(
c_tensors.as_mut_ptr(),
grad_output.c_tensor,
output_size.as_ptr(),
output_size.len_i32(),
input_size.as_ptr(),
input_size.len_i32(),
if align_corners { 1 } else { 0 },
scales.unwrap_or(std::f64::NAN),
scales.is_none() as i8
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible `upsample_linear1d_backward` writing through `grad_input`.
pub fn f_upsample_linear1d_backward_grad_input(
grad_input: &Tensor,
grad_output: &Tensor,
output_size: impl IntList,
input_size: impl IntList,
align_corners: bool,
scales: impl Into<Option<f64>>,
) -> Result<Tensor, TchError> {
let scales = scales.into();
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_upsample_linear1d_backward_grad_input(
c_tensors.as_mut_ptr(),
grad_input.c_tensor,
grad_output.c_tensor,
output_size.as_ptr(),
output_size.len_i32(),
input_size.as_ptr(),
input_size.len_i32(),
if align_corners { 1 } else { 0 },
scales.unwrap_or(std::f64::NAN),
scales.is_none() as i8
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible `upsample_linear1d` writing through `out`.
pub fn f_upsample_linear1d_out(
&self,
out: &Tensor,
output_size: impl IntList,
align_corners: bool,
scales: impl Into<Option<f64>>,
) -> Result<Tensor, TchError> {
let scales = scales.into();
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_upsample_linear1d_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
output_size.as_ptr(),
output_size.len_i32(),
if align_corners { 1 } else { 0 },
scales.unwrap_or(std::f64::NAN),
scales.is_none() as i8
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible vec-overload of `upsample_linear1d`.
pub fn f_upsample_linear1d_vec(
&self,
output_size: impl IntListOption,
align_corners: bool,
scale_factors: impl DoubleList,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_upsample_linear1d_vec(
c_tensors.as_mut_ptr(),
self.c_tensor,
output_size.as_ptr(),
output_size.len_i32(),
if align_corners { 1 } else { 0 },
scale_factors.as_ptr(),
scale_factors.len_i32()
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible `upsample_nearest1d`; optional `scales` as (value-or-NaN, is_none).
pub fn f_upsample_nearest1d(
&self,
output_size: impl IntList,
scales: impl Into<Option<f64>>,
) -> Result<Tensor, TchError> {
let scales = scales.into();
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_upsample_nearest1d(
c_tensors.as_mut_ptr(),
self.c_tensor,
output_size.as_ptr(),
output_size.len_i32(),
scales.unwrap_or(std::f64::NAN),
scales.is_none() as i8
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible `upsample_nearest1d_backward` (associated function).
pub fn f_upsample_nearest1d_backward(
grad_output: &Tensor,
output_size: impl IntList,
input_size: impl IntList,
scales: impl Into<Option<f64>>,
) -> Result<Tensor, TchError> {
let scales = scales.into();
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_upsample_nearest1d_backward(
c_tensors.as_mut_ptr(),
grad_output.c_tensor,
output_size.as_ptr(),
output_size.len_i32(),
input_size.as_ptr(),
input_size.len_i32(),
scales.unwrap_or(std::f64::NAN),
scales.is_none() as i8
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible `upsample_nearest1d_backward` writing through `grad_input`.
pub fn f_upsample_nearest1d_backward_grad_input(
grad_input: &Tensor,
grad_output: &Tensor,
output_size: impl IntList,
input_size: impl IntList,
scales: impl Into<Option<f64>>,
) -> Result<Tensor, TchError> {
let scales = scales.into();
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_upsample_nearest1d_backward_grad_input(
c_tensors.as_mut_ptr(),
grad_input.c_tensor,
grad_output.c_tensor,
output_size.as_ptr(),
output_size.len_i32(),
input_size.as_ptr(),
input_size.len_i32(),
scales.unwrap_or(std::f64::NAN),
scales.is_none() as i8
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible `upsample_nearest1d` writing through `out`.
pub fn f_upsample_nearest1d_out(
&self,
out: &Tensor,
output_size: impl IntList,
scales: impl Into<Option<f64>>,
) -> Result<Tensor, TchError> {
let scales = scales.into();
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_upsample_nearest1d_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
output_size.as_ptr(),
output_size.len_i32(),
scales.unwrap_or(std::f64::NAN),
scales.is_none() as i8
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible vec-overload of `upsample_nearest1d`.
pub fn f_upsample_nearest1d_vec(
&self,
output_size: impl IntListOption,
scale_factors: impl DoubleList,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_upsample_nearest1d_vec(
c_tensors.as_mut_ptr(),
self.c_tensor,
output_size.as_ptr(),
output_size.len_i32(),
scale_factors.as_ptr(),
scale_factors.len_i32()
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible `upsample_nearest2d`; optional h/w scales as (value-or-NaN, is_none) pairs.
pub fn f_upsample_nearest2d(
&self,
output_size: impl IntList,
scales_h: impl Into<Option<f64>>,
scales_w: impl Into<Option<f64>>,
) -> Result<Tensor, TchError> {
let scales_h = scales_h.into();
let scales_w = scales_w.into();
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_upsample_nearest2d(
c_tensors.as_mut_ptr(),
self.c_tensor,
output_size.as_ptr(),
output_size.len_i32(),
scales_h.unwrap_or(std::f64::NAN),
scales_h.is_none() as i8,
scales_w.unwrap_or(std::f64::NAN),
scales_w.is_none() as i8
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible `upsample_nearest2d_backward` (associated function).
pub fn f_upsample_nearest2d_backward(
grad_output: &Tensor,
output_size: impl IntList,
input_size: impl IntList,
scales_h: impl Into<Option<f64>>,
scales_w: impl Into<Option<f64>>,
) -> Result<Tensor, TchError> {
let scales_h = scales_h.into();
let scales_w = scales_w.into();
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_upsample_nearest2d_backward(
c_tensors.as_mut_ptr(),
grad_output.c_tensor,
output_size.as_ptr(),
output_size.len_i32(),
input_size.as_ptr(),
input_size.len_i32(),
scales_h.unwrap_or(std::f64::NAN),
scales_h.is_none() as i8,
scales_w.unwrap_or(std::f64::NAN),
scales_w.is_none() as i8
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible `upsample_nearest2d_backward` writing through `grad_input`.
pub fn f_upsample_nearest2d_backward_grad_input(
grad_input: &Tensor,
grad_output: &Tensor,
output_size: impl IntList,
input_size: impl IntList,
scales_h: impl Into<Option<f64>>,
scales_w: impl Into<Option<f64>>,
) -> Result<Tensor, TchError> {
let scales_h = scales_h.into();
let scales_w = scales_w.into();
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_upsample_nearest2d_backward_grad_input(
c_tensors.as_mut_ptr(),
grad_input.c_tensor,
grad_output.c_tensor,
output_size.as_ptr(),
output_size.len_i32(),
input_size.as_ptr(),
input_size.len_i32(),
scales_h.unwrap_or(std::f64::NAN),
scales_h.is_none() as i8,
scales_w.unwrap_or(std::f64::NAN),
scales_w.is_none() as i8
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible `upsample_nearest2d` writing through `out`.
pub fn f_upsample_nearest2d_out(
&self,
out: &Tensor,
output_size: impl IntList,
scales_h: impl Into<Option<f64>>,
scales_w: impl Into<Option<f64>>,
) -> Result<Tensor, TchError> {
let scales_h = scales_h.into();
let scales_w = scales_w.into();
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_upsample_nearest2d_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
output_size.as_ptr(),
output_size.len_i32(),
scales_h.unwrap_or(std::f64::NAN),
scales_h.is_none() as i8,
scales_w.unwrap_or(std::f64::NAN),
scales_w.is_none() as i8
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible vec-overload of `upsample_nearest2d`.
pub fn f_upsample_nearest2d_vec(
&self,
output_size: impl IntListOption,
scale_factors: impl DoubleList,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_upsample_nearest2d_vec(
c_tensors.as_mut_ptr(),
self.c_tensor,
output_size.as_ptr(),
output_size.len_i32(),
scale_factors.as_ptr(),
scale_factors.len_i32()
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible `upsample_nearest3d`; optional d/h/w scales each encoded as a
/// (value-or-NaN, is_none) pair in d, h, w order.
pub fn f_upsample_nearest3d(
&self,
output_size: impl IntList,
scales_d: impl Into<Option<f64>>,
scales_h: impl Into<Option<f64>>,
scales_w: impl Into<Option<f64>>,
) -> Result<Tensor, TchError> {
let scales_d = scales_d.into();
let scales_h = scales_h.into();
let scales_w = scales_w.into();
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_upsample_nearest3d(
c_tensors.as_mut_ptr(),
self.c_tensor,
output_size.as_ptr(),
output_size.len_i32(),
scales_d.unwrap_or(std::f64::NAN),
scales_d.is_none() as i8,
scales_h.unwrap_or(std::f64::NAN),
scales_h.is_none() as i8,
scales_w.unwrap_or(std::f64::NAN),
scales_w.is_none() as i8
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible `upsample_nearest3d_backward` (associated function).
pub fn f_upsample_nearest3d_backward(
grad_output: &Tensor,
output_size: impl IntList,
input_size: impl IntList,
scales_d: impl Into<Option<f64>>,
scales_h: impl Into<Option<f64>>,
scales_w: impl Into<Option<f64>>,
) -> Result<Tensor, TchError> {
let scales_d = scales_d.into();
let scales_h = scales_h.into();
let scales_w = scales_w.into();
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_upsample_nearest3d_backward(
c_tensors.as_mut_ptr(),
grad_output.c_tensor,
output_size.as_ptr(),
output_size.len_i32(),
input_size.as_ptr(),
input_size.len_i32(),
scales_d.unwrap_or(std::f64::NAN),
scales_d.is_none() as i8,
scales_h.unwrap_or(std::f64::NAN),
scales_h.is_none() as i8,
scales_w.unwrap_or(std::f64::NAN),
scales_w.is_none() as i8
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible `upsample_nearest3d_backward` writing through `grad_input`.
pub fn f_upsample_nearest3d_backward_grad_input(
grad_input: &Tensor,
grad_output: &Tensor,
output_size: impl IntList,
input_size: impl IntList,
scales_d: impl Into<Option<f64>>,
scales_h: impl Into<Option<f64>>,
scales_w: impl Into<Option<f64>>,
) -> Result<Tensor, TchError> {
let scales_d = scales_d.into();
let scales_h = scales_h.into();
let scales_w = scales_w.into();
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_upsample_nearest3d_backward_grad_input(
c_tensors.as_mut_ptr(),
grad_input.c_tensor,
grad_output.c_tensor,
output_size.as_ptr(),
output_size.len_i32(),
input_size.as_ptr(),
input_size.len_i32(),
scales_d.unwrap_or(std::f64::NAN),
scales_d.is_none() as i8,
scales_h.unwrap_or(std::f64::NAN),
scales_h.is_none() as i8,
scales_w.unwrap_or(std::f64::NAN),
scales_w.is_none() as i8
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible `upsample_nearest3d` writing through `out`.
pub fn f_upsample_nearest3d_out(
&self,
out: &Tensor,
output_size: impl IntList,
scales_d: impl Into<Option<f64>>,
scales_h: impl Into<Option<f64>>,
scales_w: impl Into<Option<f64>>,
) -> Result<Tensor, TchError> {
let scales_d = scales_d.into();
let scales_h = scales_h.into();
let scales_w = scales_w.into();
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_upsample_nearest3d_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
output_size.as_ptr(),
output_size.len_i32(),
scales_d.unwrap_or(std::f64::NAN),
scales_d.is_none() as i8,
scales_h.unwrap_or(std::f64::NAN),
scales_h.is_none() as i8,
scales_w.unwrap_or(std::f64::NAN),
scales_w.is_none() as i8
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible vec-overload of `upsample_nearest3d`.
pub fn f_upsample_nearest3d_vec(
&self,
output_size: impl IntListOption,
scale_factors: impl DoubleList,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_upsample_nearest3d_vec(
c_tensors.as_mut_ptr(),
self.c_tensor,
output_size.as_ptr(),
output_size.len_i32(),
scale_factors.as_ptr(),
scale_factors.len_i32()
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible `upsample_trilinear3d`; optional d/h/w scales each encoded as a
/// (value-or-NaN, is_none) pair.
pub fn f_upsample_trilinear3d(
&self,
output_size: impl IntList,
align_corners: bool,
scales_d: impl Into<Option<f64>>,
scales_h: impl Into<Option<f64>>,
scales_w: impl Into<Option<f64>>,
) -> Result<Tensor, TchError> {
let scales_d = scales_d.into();
let scales_h = scales_h.into();
let scales_w = scales_w.into();
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_upsample_trilinear3d(
c_tensors.as_mut_ptr(),
self.c_tensor,
output_size.as_ptr(),
output_size.len_i32(),
if align_corners { 1 } else { 0 },
scales_d.unwrap_or(std::f64::NAN),
scales_d.is_none() as i8,
scales_h.unwrap_or(std::f64::NAN),
scales_h.is_none() as i8,
scales_w.unwrap_or(std::f64::NAN),
scales_w.is_none() as i8
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible `upsample_trilinear3d_backward` (associated function).
pub fn f_upsample_trilinear3d_backward(
grad_output: &Tensor,
output_size: impl IntList,
input_size: impl IntList,
align_corners: bool,
scales_d: impl Into<Option<f64>>,
scales_h: impl Into<Option<f64>>,
scales_w: impl Into<Option<f64>>,
) -> Result<Tensor, TchError> {
let scales_d = scales_d.into();
let scales_h = scales_h.into();
let scales_w = scales_w.into();
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_upsample_trilinear3d_backward(
c_tensors.as_mut_ptr(),
grad_output.c_tensor,
output_size.as_ptr(),
output_size.len_i32(),
input_size.as_ptr(),
input_size.len_i32(),
if align_corners { 1 } else { 0 },
scales_d.unwrap_or(std::f64::NAN),
scales_d.is_none() as i8,
scales_h.unwrap_or(std::f64::NAN),
scales_h.is_none() as i8,
scales_w.unwrap_or(std::f64::NAN),
scales_w.is_none() as i8
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible `upsample_trilinear3d_backward` writing through `grad_input`.
pub fn f_upsample_trilinear3d_backward_grad_input(
grad_input: &Tensor,
grad_output: &Tensor,
output_size: impl IntList,
input_size: impl IntList,
align_corners: bool,
scales_d: impl Into<Option<f64>>,
scales_h: impl Into<Option<f64>>,
scales_w: impl Into<Option<f64>>,
) -> Result<Tensor, TchError> {
let scales_d = scales_d.into();
let scales_h = scales_h.into();
let scales_w = scales_w.into();
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_upsample_trilinear3d_backward_grad_input(
c_tensors.as_mut_ptr(),
grad_input.c_tensor,
grad_output.c_tensor,
output_size.as_ptr(),
output_size.len_i32(),
input_size.as_ptr(),
input_size.len_i32(),
if align_corners { 1 } else { 0 },
scales_d.unwrap_or(std::f64::NAN),
scales_d.is_none() as i8,
scales_h.unwrap_or(std::f64::NAN),
scales_h.is_none() as i8,
scales_w.unwrap_or(std::f64::NAN),
scales_w.is_none() as i8
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible `upsample_trilinear3d` writing through `out`.
pub fn f_upsample_trilinear3d_out(
&self,
out: &Tensor,
output_size: impl IntList,
align_corners: bool,
scales_d: impl Into<Option<f64>>,
scales_h: impl Into<Option<f64>>,
scales_w: impl Into<Option<f64>>,
) -> Result<Tensor, TchError> {
let scales_d = scales_d.into();
let scales_h = scales_h.into();
let scales_w = scales_w.into();
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_upsample_trilinear3d_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
output_size.as_ptr(),
output_size.len_i32(),
if align_corners { 1 } else { 0 },
scales_d.unwrap_or(std::f64::NAN),
scales_d.is_none() as i8,
scales_h.unwrap_or(std::f64::NAN),
scales_h.is_none() as i8,
scales_w.unwrap_or(std::f64::NAN),
scales_w.is_none() as i8
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for the `upsample_trilinear3d.vec` overload, which takes
/// an optional output size plus a list of per-dimension scale factors.
pub fn f_upsample_trilinear3d_vec(
&self,
output_size: impl IntListOption,
align_corners: bool,
scale_factors: impl DoubleList,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_upsample_trilinear3d_vec(
c_tensors.as_mut_ptr(),
self.c_tensor,
output_size.as_ptr(),
output_size.len_i32(),
if align_corners { 1 } else { 0 },
scale_factors.as_ptr(),
scale_factors.len_i32()
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for `value_selecting_reduction_backward`; returns the
/// result tensor or a `TchError`. `keepdim` is lowered to a C int flag.
pub fn f_value_selecting_reduction_backward(
grad: &Tensor,
dim: i64,
indices: &Tensor,
sizes: impl IntList,
keepdim: bool,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_value_selecting_reduction_backward(
c_tensors.as_mut_ptr(),
grad.c_tensor,
dim,
indices.c_tensor,
sizes.as_ptr(),
sizes.len_i32(),
if keepdim { 1 } else { 0 }
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for the `values` op; returns the result tensor or a
/// `TchError`.
pub fn f_values(&self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_values(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for `values_copy`; returns the result tensor or a
/// `TchError`.
pub fn f_values_copy(&self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_values_copy(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible out-variant of `values_copy`; `out` precedes `self` in the C
/// call per the generated out-variant convention.
pub fn f_values_copy_out(&self, out: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_values_copy_out(c_tensors.as_mut_ptr(), out.c_tensor, self.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for `vander` (Vandermonde matrix). The optional `n`
/// crosses the FFI boundary as a 0 placeholder plus an `is_none` flag byte.
pub fn f_vander(
x: &Tensor,
n: impl Into<Option<i64>>,
increasing: bool,
) -> Result<Tensor, TchError> {
let n = n.into();
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_vander(
c_tensors.as_mut_ptr(),
x.c_tensor,
n.unwrap_or(0i64),
n.is_none() as i8,
if increasing { 1 } else { 0 }
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for the whole-tensor `var` op; returns the result tensor
/// or a `TchError`.
pub fn f_var(&self, unbiased: bool) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_var(
c_tensors.as_mut_ptr(),
self.c_tensor,
if unbiased { 1 } else { 0 }
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for the `var.correction` overload: variance over `dim`
/// with an explicit correction scalar instead of the `unbiased` flag.
pub fn f_var_correction<S: Into<Scalar>>(
&self,
dim: impl IntListOption,
correction: S,
keepdim: bool,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_var_correction(
c_tensors.as_mut_ptr(),
self.c_tensor,
dim.as_ptr(),
dim.len_i32(),
correction.into().c_scalar,
if keepdim { 1 } else { 0 }
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible out-variant of `var.correction`; `out` precedes `self` in the C
/// call per the generated out-variant convention.
pub fn f_var_correction_out<S: Into<Scalar>>(
&self,
out: &Tensor,
dim: impl IntListOption,
correction: S,
keepdim: bool,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_var_correction_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
dim.as_ptr(),
dim.len_i32(),
correction.into().c_scalar,
if keepdim { 1 } else { 0 }
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for the `var.dim` overload: variance over the given
/// dimensions with the legacy `unbiased` flag.
pub fn f_var_dim(
&self,
dim: impl IntListOption,
unbiased: bool,
keepdim: bool,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_var_dim(
c_tensors.as_mut_ptr(),
self.c_tensor,
dim.as_ptr(),
dim.len_i32(),
if unbiased { 1 } else { 0 },
if keepdim { 1 } else { 0 }
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for `var_mean`: the C call fills a two-slot output array,
/// returned here as a `(var, mean)` tensor pair.
pub fn f_var_mean(&self, unbiased: bool) -> Result<(Tensor, Tensor), TchError> {
let mut c_tensors = [std::ptr::null_mut(); 2];
unsafe_torch_err!(atg_var_mean(
c_tensors.as_mut_ptr(),
self.c_tensor,
if unbiased { 1 } else { 0 }
));
Ok((Tensor { c_tensor: c_tensors[0] }, Tensor { c_tensor: c_tensors[1] }))
}
/// Fallible binding for `var_mean.correction`: variance and mean over `dim`
/// with an explicit correction scalar; returns a two-tensor tuple.
pub fn f_var_mean_correction<S: Into<Scalar>>(
&self,
dim: impl IntListOption,
correction: S,
keepdim: bool,
) -> Result<(Tensor, Tensor), TchError> {
let mut c_tensors = [std::ptr::null_mut(); 2];
unsafe_torch_err!(atg_var_mean_correction(
c_tensors.as_mut_ptr(),
self.c_tensor,
dim.as_ptr(),
dim.len_i32(),
correction.into().c_scalar,
if keepdim { 1 } else { 0 }
));
Ok((Tensor { c_tensor: c_tensors[0] }, Tensor { c_tensor: c_tensors[1] }))
}
/// Fallible out-variant of `var_mean.correction`; the two pre-allocated
/// destination tensors `out0`/`out1` precede `self` in the C call.
pub fn f_var_mean_correction_out<S: Into<Scalar>>(
&self,
out0: &Tensor,
out1: &Tensor,
dim: impl IntListOption,
correction: S,
keepdim: bool,
) -> Result<(Tensor, Tensor), TchError> {
let mut c_tensors = [std::ptr::null_mut(); 2];
unsafe_torch_err!(atg_var_mean_correction_out(
c_tensors.as_mut_ptr(),
out0.c_tensor,
out1.c_tensor,
self.c_tensor,
dim.as_ptr(),
dim.len_i32(),
correction.into().c_scalar,
if keepdim { 1 } else { 0 }
));
Ok((Tensor { c_tensor: c_tensors[0] }, Tensor { c_tensor: c_tensors[1] }))
}
/// Fallible binding for `var_mean.dim`: variance and mean over the given
/// dimensions with the legacy `unbiased` flag; returns a two-tensor tuple.
pub fn f_var_mean_dim(
&self,
dim: impl IntListOption,
unbiased: bool,
keepdim: bool,
) -> Result<(Tensor, Tensor), TchError> {
let mut c_tensors = [std::ptr::null_mut(); 2];
unsafe_torch_err!(atg_var_mean_dim(
c_tensors.as_mut_ptr(),
self.c_tensor,
dim.as_ptr(),
dim.len_i32(),
if unbiased { 1 } else { 0 },
if keepdim { 1 } else { 0 }
));
Ok((Tensor { c_tensor: c_tensors[0] }, Tensor { c_tensor: c_tensors[1] }))
}
/// Fallible out-variant of `var.dim`; `out` precedes `self` in the C call
/// per the generated out-variant convention.
pub fn f_var_out(
&self,
out: &Tensor,
dim: impl IntListOption,
unbiased: bool,
keepdim: bool,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_var_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
dim.as_ptr(),
dim.len_i32(),
if unbiased { 1 } else { 0 },
if keepdim { 1 } else { 0 }
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for the `vdot` op; returns the result tensor or a
/// `TchError`.
pub fn f_vdot(&self, other: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_vdot(c_tensors.as_mut_ptr(), self.c_tensor, other.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible out-variant of `vdot`; `out` precedes `self` in the C call per
/// the generated out-variant convention.
pub fn f_vdot_out(&self, out: &Tensor, other: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_vdot_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
other.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for the `view` op. NOTE(review): despite the trailing
/// underscore (normally the in-place marker) this calls plain `atg_view` on
/// `&self` — the suffix presumably only disambiguates a name clash; confirm
/// against the binding generator.
pub fn f_view_(&self, size: impl IntList) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_view(
c_tensors.as_mut_ptr(),
self.c_tensor,
size.as_ptr(),
size.len_i32()
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for the `view_as` op; returns the result tensor or a
/// `TchError`.
pub fn f_view_as(&self, other: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_view_as(c_tensors.as_mut_ptr(), self.c_tensor, other.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for the `view_as_complex` op; returns the result tensor
/// or a `TchError`.
pub fn f_view_as_complex(&self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_view_as_complex(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for the `view_as_complex_copy` op; returns the result
/// tensor or a `TchError`.
pub fn f_view_as_complex_copy(&self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_view_as_complex_copy(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible out-variant of `view_as_complex_copy`; `out` precedes `self` in
/// the C call per the generated out-variant convention.
pub fn f_view_as_complex_copy_out(&self, out: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_view_as_complex_copy_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for the `view_as_real` op; returns the result tensor or
/// a `TchError`.
pub fn f_view_as_real(&self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_view_as_real(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for the `view_as_real_copy` op; returns the result
/// tensor or a `TchError`.
pub fn f_view_as_real_copy(&self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_view_as_real_copy(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible out-variant of `view_as_real_copy`; `out` precedes `self` in the
/// C call per the generated out-variant convention.
pub fn f_view_as_real_copy_out(&self, out: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_view_as_real_copy_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for the `view_copy` op; the size list is passed as a
/// pointer + length pair.
pub fn f_view_copy(&self, size: impl IntList) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_view_copy(
c_tensors.as_mut_ptr(),
self.c_tensor,
size.as_ptr(),
size.len_i32()
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for the `view_copy.dtype` overload; the target `Kind` is
/// lowered to its C integer code.
pub fn f_view_copy_dtype(&self, dtype: Kind) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_view_copy_dtype(
c_tensors.as_mut_ptr(),
self.c_tensor,
dtype.c_int()
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible out-variant of `view_copy.dtype`; `out` precedes `self` in the C
/// call per the generated out-variant convention.
pub fn f_view_copy_dtype_out(&self, out: &Tensor, dtype: Kind) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_view_copy_dtype_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
dtype.c_int()
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible out-variant of `view_copy`; `out` precedes `self` in the C call
/// per the generated out-variant convention.
pub fn f_view_copy_out(&self, out: &Tensor, size: impl IntList) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_view_copy_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
size.as_ptr(),
size.len_i32()
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for the `view.dtype` overload; the target `Kind` is
/// lowered to its C integer code.
pub fn f_view_dtype(&self, dtype: Kind) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_view_dtype(c_tensors.as_mut_ptr(), self.c_tensor, dtype.c_int()));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for `vsplit`. The C call returns a heap-allocated,
/// null-terminated array of tensor handles: each handle is wrapped into an
/// owning `Tensor`, then only the array itself is released via `libc::free`.
pub fn f_vsplit(&self, sections: i64) -> Result<Vec<Tensor>, TchError> {
let c_tensors = unsafe_torch_err!(atg_vsplit(self.c_tensor, sections));
let mut r__ = vec![];
let mut i = 0;
loop {
// Walk the array until the null terminator.
let c__ = unsafe { *c_tensors.add(i) };
if c__.is_null() {
break;
}
r__.push(Tensor { c_tensor: c__ });
i += 1;
}
unsafe { libc::free(c_tensors as *mut libc::c_void) }
Ok(r__)
}
/// Fallible binding for `vsplit.array` (split at explicit indices). The C
/// call returns a null-terminated array of tensor handles; the handles are
/// wrapped into owning `Tensor`s and the array itself is `libc::free`d.
pub fn f_vsplit_array(&self, indices: impl IntList) -> Result<Vec<Tensor>, TchError> {
let c_tensors =
unsafe_torch_err!(atg_vsplit_array(self.c_tensor, indices.as_ptr(), indices.len_i32()));
let mut r__ = vec![];
let mut i = 0;
loop {
// Walk the array until the null terminator.
let c__ = unsafe { *c_tensors.add(i) };
if c__.is_null() {
break;
}
r__.push(Tensor { c_tensor: c__ });
i += 1;
}
unsafe { libc::free(c_tensors as *mut libc::c_void) }
Ok(r__)
}
/// Fallible binding for `vstack`; the tensor slice is lowered to a raw
/// pointer array via `ptr_list` plus an explicit length.
pub fn f_vstack<T: Borrow<Tensor>>(tensors: &[T]) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_vstack(
c_tensors.as_mut_ptr(),
ptr_list(tensors).as_ptr(),
tensors.len() as i32
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible out-variant of `vstack`; `out` precedes the pointer-array of
/// input tensors per the generated out-variant convention.
pub fn f_vstack_out<T: Borrow<Tensor>>(
out: &Tensor,
tensors: &[T],
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_vstack_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
ptr_list(tensors).as_ptr(),
tensors.len() as i32
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for the single-argument `where` overload (condition
/// only). The C call returns a null-terminated array of tensor handles; the
/// handles are wrapped into owning `Tensor`s and the array is `libc::free`d.
pub fn f_where_(condition: &Tensor) -> Result<Vec<Tensor>, TchError> {
let c_tensors = unsafe_torch_err!(atg_where(condition.c_tensor));
let mut r__ = vec![];
let mut i = 0;
loop {
// Walk the array until the null terminator.
let c__ = unsafe { *c_tensors.add(i) };
if c__.is_null() {
break;
}
r__.push(Tensor { c_tensor: c__ });
i += 1;
}
unsafe { libc::free(c_tensors as *mut libc::c_void) }
Ok(r__)
}
/// Fallible binding for `where.Scalar`: both branches are scalars, selected
/// element-wise by `condition`.
pub fn f_where_scalar<S: Into<Scalar>>(
condition: &Tensor,
self_scalar: S,
other: S,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_where_scalar(
c_tensors.as_mut_ptr(),
condition.c_tensor,
self_scalar.into().c_scalar,
other.into().c_scalar
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for `where.ScalarOther`: tensor branch from `self`,
/// scalar branch from `other`, selected element-wise by `condition`.
pub fn f_where_scalarother<S: Into<Scalar>>(
&self,
condition: &Tensor,
other: S,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_where_scalarother(
c_tensors.as_mut_ptr(),
condition.c_tensor,
self.c_tensor,
other.into().c_scalar
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for `where.ScalarSelf`: scalar branch from
/// `self_scalar`, tensor branch from `other`, selected by `condition`.
pub fn f_where_scalarself<S: Into<Scalar>>(
condition: &Tensor,
self_scalar: S,
other: &Tensor,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_where_scalarself(
c_tensors.as_mut_ptr(),
condition.c_tensor,
self_scalar.into().c_scalar,
other.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for `where.self`: element-wise select between `self` and
/// `other` according to `condition` (condition is passed first to the C op).
pub fn f_where_self(&self, condition: &Tensor, other: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_where_self(
c_tensors.as_mut_ptr(),
condition.c_tensor,
self.c_tensor,
other.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible out-variant of `where.self`; `out` precedes `condition`/`self`/
/// `other` in the C call per the generated out-variant convention.
pub fn f_where_self_out(
&self,
out: &Tensor,
condition: &Tensor,
other: &Tensor,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_where_self_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
condition.c_tensor,
self.c_tensor,
other.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for the `xlogy` op; returns the result tensor or a
/// `TchError`.
pub fn f_xlogy(&self, other: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_xlogy(c_tensors.as_mut_ptr(), self.c_tensor, other.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for the in-place `xlogy_` op (takes `&mut self`,
/// matching the trailing-underscore in-place convention).
pub fn f_xlogy_(&mut self, other: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_xlogy_(c_tensors.as_mut_ptr(), self.c_tensor, other.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible out-variant of `xlogy.OutScalarOther`: tensor `self`, scalar
/// `other`, result written via the pre-allocated `out` tensor.
pub fn f_xlogy_outscalar_other<S: Into<Scalar>>(
&self,
out: &Tensor,
other: S,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_xlogy_outscalar_other(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
other.into().c_scalar
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible out-variant of `xlogy.OutScalarSelf`: scalar first operand,
/// tensor `other`, result written via the pre-allocated `out` tensor.
pub fn f_xlogy_outscalar_self<S: Into<Scalar>>(
out: &Tensor,
self_scalar: S,
other: &Tensor,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_xlogy_outscalar_self(
c_tensors.as_mut_ptr(),
out.c_tensor,
self_scalar.into().c_scalar,
other.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible out-variant of the tensor-tensor `xlogy`; `out` precedes `self`
/// in the C call per the generated out-variant convention.
pub fn f_xlogy_outtensor(&self, out: &Tensor, other: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_xlogy_outtensor(
c_tensors.as_mut_ptr(),
out.c_tensor,
self.c_tensor,
other.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for `xlogy.Scalar_Other`: tensor `self` with a scalar
/// second operand.
pub fn f_xlogy_scalar_other<S: Into<Scalar>>(&self, other: S) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_xlogy_scalar_other(
c_tensors.as_mut_ptr(),
self.c_tensor,
other.into().c_scalar
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible in-place variant of `xlogy.Scalar_Other` (takes `&mut self`,
/// matching the trailing-underscore in-place convention).
pub fn f_xlogy_scalar_other_<S: Into<Scalar>>(&mut self, other: S) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_xlogy_scalar_other_(
c_tensors.as_mut_ptr(),
self.c_tensor,
other.into().c_scalar
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for `xlogy.Scalar_Self`: scalar first operand with a
/// tensor second operand (associated function — no `self` receiver).
pub fn f_xlogy_scalar_self<S: Into<Scalar>>(
self_scalar: S,
other: &Tensor,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_xlogy_scalar_self(
c_tensors.as_mut_ptr(),
self_scalar.into().c_scalar,
other.c_tensor
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for the (non-in-place) `zero` op; returns the result
/// tensor or a `TchError`.
pub fn f_zero(&self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_zero(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for the in-place `zero_` op (takes `&mut self`, matching
/// the trailing-underscore in-place convention).
pub fn f_zero_(&mut self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_zero_(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible out-variant of `zero`; `out` precedes `self` in the C call per
/// the generated out-variant convention.
pub fn f_zero_out(&self, out: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_zero_out(c_tensors.as_mut_ptr(), out.c_tensor, self.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for the `zeros` factory; `options` is the usual
/// `(Kind, Device)` pair, each lowered to its C integer code.
pub fn f_zeros(size: impl IntList, options: (Kind, Device)) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_zeros(
c_tensors.as_mut_ptr(),
size.as_ptr(),
size.len_i32(),
options.0.c_int(),
options.1.c_int()
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible binding for the `zeros_like` op; returns the result tensor or a
/// `TchError`.
pub fn f_zeros_like(&self) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_zeros_like(c_tensors.as_mut_ptr(), self.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible out-variant of `zeros_like`; `out` precedes `self` in the C call
/// per the generated out-variant convention.
pub fn f_zeros_like_out(&self, out: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_zeros_like_out(c_tensors.as_mut_ptr(), out.c_tensor, self.c_tensor));
Ok(Tensor { c_tensor: c_tensors[0] })
}
/// Fallible out-variant of the `zeros` factory: fills/resizes the
/// pre-allocated `out` tensor, so no `(Kind, Device)` options are needed.
pub fn f_zeros_out(out: &Tensor, size: impl IntList) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_zeros_out(
c_tensors.as_mut_ptr(),
out.c_tensor,
size.as_ptr(),
size.len_i32()
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
}