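(* Low-level Torch tensor bindings (ctypes).  Every wrapper below follows the
   same shape: allocate a [CArray] of output slots, call the corresponding C
   stub with [CArray.start out__] plus the marshalled arguments, read each
   resulting tensor back out of [out__], and attach [Gc.finalise C.Tensor.free]
   so the underlying C tensor is released when the OCaml value is collected. *)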
let __ilshift__ self other = let out__ = CArray.make t 1 in stubs___ilshift__ (CArray.start out__) self other; let t0 = CArray.get out__ 0 in Gc.finalise C.Tensor.free t0; t0
let __ilshift__tensor_ self other = let out__ = CArray.make t 1 in stubs___ilshift__tensor_ (CArray.start out__) self other; let t0 = CArray.get out__ 0 in Gc.finalise C.Tensor.free t0; t0
let __ior__ self other = let out__ = CArray.make t 1 in stubs___ior__ (CArray.start out__) self other; let t0 = CArray.get out__ 0 in Gc.finalise C.Tensor.free t0; t0
let __ior__tensor_ self other = let out__ = CArray.make t 1 in stubs___ior__tensor_ (CArray.start out__) self other; let t0 = CArray.get out__ 0 in Gc.finalise C.Tensor.free t0; t0
let __irshift__ self other = let out__ = CArray.make t 1 in stubs___irshift__ (CArray.start out__) self other; let t0 = CArray.get out__ 0 in Gc.finalise C.Tensor.free t0; t0
let __irshift__tensor_ self other = let out__ = CArray.make t 1 in stubs___irshift__tensor_ (CArray.start out__) self other; let t0 = CArray.get out__ 0 in Gc.finalise C.Tensor.free t0; t0
let __ixor__ self other = let out__ = CArray.make t 1 in stubs___ixor__ (CArray.start out__) self other; let t0 = CArray.get out__ 0 in Gc.finalise C.Tensor.free t0; t0
let __ixor__tensor_ self other = let out__ = CArray.make t 1 in stubs___ixor__tensor_ (CArray.start out__) self other; let t0 = CArray.get out__ 0 in Gc.finalise C.Tensor.free t0; t0
let __lshift__ self other = let out__ = CArray.make t 1 in stubs___lshift__ (CArray.start out__) self other; let t0 = CArray.get out__ 0 in Gc.finalise C.Tensor.free t0; t0
let __lshift__tensor_ self other = let out__ = CArray.make t 1 in stubs___lshift__tensor_ (CArray.start out__) self other; let t0 = CArray.get out__ 0 in Gc.finalise C.Tensor.free t0; t0
let __or__ self other = let out__ = CArray.make t 1 in stubs___or__ (CArray.start out__) self other; let t0 = CArray.get out__ 0 in Gc.finalise C.Tensor.free t0; t0
let __or__tensor_ self other = let out__ = CArray.make t 1 in stubs___or__tensor_ (CArray.start out__) self other; let t0 = CArray.get out__ 0 in Gc.finalise C.Tensor.free t0; t0
let __rshift__ self other = let out__ = CArray.make t 1 in stubs___rshift__ (CArray.start out__) self other; let t0 = CArray.get out__ 0 in Gc.finalise C.Tensor.free t0; t0
let __rshift__tensor_ self other = let out__ = CArray.make t 1 in stubs___rshift__tensor_ (CArray.start out__) self other; let t0 = CArray.get out__ 0 in Gc.finalise C.Tensor.free t0; t0
let __xor__ self other = let out__ = CArray.make t 1 in stubs___xor__ (CArray.start out__) self other; let t0 = CArray.get out__ 0 in Gc.finalise C.Tensor.free t0; t0
let __xor__tensor_ self other = let out__ = CArray.make t 1 in stubs___xor__tensor_ (CArray.start out__) self other; let t0 = CArray.get out__ 0 in Gc.finalise C.Tensor.free t0; t0
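(* Integer-list arguments such as [output_size], [kernel_size] or [stride] are
   passed to the stubs as an [int64] array pointer together with an explicit
   length argument. *)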
let _adaptive_avg_pool2d self ~output_size = let out__ = CArray.make t 1 in stubs__adaptive_avg_pool2d (CArray.start out__) self (List.map Int64.of_int output_size |> CArray.of_list int64_t |> CArray.start) (List.length output_size); let t0 = CArray.get out__ 0 in Gc.finalise C.Tensor.free t0; t0
let _adaptive_avg_pool2d_backward ~grad_output self = let out__ = CArray.make t 1 in stubs__adaptive_avg_pool2d_backward (CArray.start out__) grad_output self; let t0 = CArray.get out__ 0 in Gc.finalise C.Tensor.free t0; t0
let _adaptive_avg_pool3d self ~output_size = let out__ = CArray.make t 1 in stubs__adaptive_avg_pool3d (CArray.start out__) self (List.map Int64.of_int output_size |> CArray.of_list int64_t |> CArray.start) (List.length output_size); let t0 = CArray.get out__ 0 in Gc.finalise C.Tensor.free t0; t0
let _adaptive_avg_pool3d_backward ~grad_output self = let out__ = CArray.make t 1 in stubs__adaptive_avg_pool3d_backward (CArray.start out__) grad_output self; let t0 = CArray.get out__ 0 in Gc.finalise C.Tensor.free t0; t0
let _add_batch_dim self ~batch_dim ~level = let out__ = CArray.make t 1 in stubs__add_batch_dim (CArray.start out__) self (Int64.of_int batch_dim) (Int64.of_int level); let t0 = CArray.get out__ 0 in Gc.finalise C.Tensor.free t0; t0
let _add_relu self other = let out__ = CArray.make t 1 in stubs__add_relu (CArray.start out__) self other; let t0 = CArray.get out__ 0 in Gc.finalise C.Tensor.free t0; t0
let _add_relu_ self other = let out__ = CArray.make t 1 in stubs__add_relu_ (CArray.start out__) self other; let t0 = CArray.get out__ 0 in Gc.finalise C.Tensor.free t0; t0
let _add_relu_out ~out self other = let out__ = CArray.make t 1 in stubs__add_relu_out (CArray.start out__) out self other; let t0 = CArray.get out__ 0 in Gc.finalise C.Tensor.free t0; t0
let _add_relu_scalar self other = let out__ = CArray.make t 1 in stubs__add_relu_scalar (CArray.start out__) self other; let t0 = CArray.get out__ 0 in Gc.finalise C.Tensor.free t0; t0
let _add_relu_scalar_ self other = let out__ = CArray.make t 1 in stubs__add_relu_scalar_ (CArray.start out__) self other; let t0 = CArray.get out__ 0 in Gc.finalise C.Tensor.free t0; t0
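(* Stubs with several results write into consecutive slots of [out__]; a
   finaliser is registered on each tensor and the wrapper returns them as a
   tuple. *)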
let _aminmax self = let out__ = CArray.make t 2 in stubs__aminmax (CArray.start out__) self; let t0 = CArray.get out__ 0 in Gc.finalise C.Tensor.free t0; let t1 = CArray.get out__ 1 in Gc.finalise C.Tensor.free t1; t0, t1
let _aminmax_dim self ~dim ~keepdim = let out__ = CArray.make t 2 in stubs__aminmax_dim (CArray.start out__) self (Int64.of_int dim) (if keepdim then 1 else 0); let t0 = CArray.get out__ 0 in Gc.finalise C.Tensor.free t0; let t1 = CArray.get out__ 1 in Gc.finalise C.Tensor.free t1; t0, t1
let _amp_update_scale_ self ~growth_tracker ~found_inf ~scale_growth_factor ~scale_backoff_factor ~growth_interval = let out__ = CArray.make t 1 in stubs__amp_update_scale_ (CArray.start out__) self growth_tracker found_inf scale_growth_factor scale_backoff_factor (Int64.of_int growth_interval); let t0 = CArray.get out__ 0 in Gc.finalise C.Tensor.free t0; t0
let _baddbmm_mkl_ self ~batch1 ~batch2 = let out__ = CArray.make t 1 in stubs__baddbmm_mkl_ (CArray.start out__) self batch1 batch2; let t0 = CArray.get out__ 0 in Gc.finalise C.Tensor.free t0; t0
let _cast_byte self ~non_blocking = let out__ = CArray.make t 1 in stubs__cast_byte (CArray.start out__) self (if non_blocking then 1 else 0); let t0 = CArray.get out__ 0 in Gc.finalise C.Tensor.free t0; t0
let _cast_char self ~non_blocking = let out__ = CArray.make t 1 in stubs__cast_char (CArray.start out__) self (if non_blocking then 1 else 0); let t0 = CArray.get out__ 0 in Gc.finalise C.Tensor.free t0; t0
let _cast_double self ~non_blocking = let out__ = CArray.make t 1 in stubs__cast_double (CArray.start out__) self (if non_blocking then 1 else 0); let t0 = CArray.get out__ 0 in Gc.finalise C.Tensor.free t0; t0
let _cast_float self ~non_blocking = let out__ = CArray.make t 1 in stubs__cast_float (CArray.start out__) self (if non_blocking then 1 else 0); let t0 = CArray.get out__ 0 in Gc.finalise C.Tensor.free t0; t0
let _cast_half self ~non_blocking = let out__ = CArray.make t 1 in stubs__cast_half (CArray.start out__) self (if non_blocking then 1 else 0); let t0 = CArray.get out__ 0 in Gc.finalise C.Tensor.free t0; t0
let _cast_int self ~non_blocking = let out__ = CArray.make t 1 in stubs__cast_int (CArray.start out__) self (if non_blocking then 1 else 0); let t0 = CArray.get out__ 0 in Gc.finalise C.Tensor.free t0; t0
let _cast_long self ~non_blocking = let out__ = CArray.make t 1 in stubs__cast_long (CArray.start out__) self (if non_blocking then 1 else 0); let t0 = CArray.get out__ 0 in Gc.finalise C.Tensor.free t0; t0
let _cast_short self ~non_blocking = let out__ = CArray.make t 1 in stubs__cast_short (CArray.start out__) self (if non_blocking then 1 else 0); let t0 = CArray.get out__ 0 in Gc.finalise C.Tensor.free t0; t0
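(* Tensor-list arguments (e.g. [tensors] in [_cat]) are marshalled as a
   [CArray] of tensor handles plus an explicit length. *)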
let _cat tensors ~dim = let out__ = CArray.make t 1 in stubs__cat (CArray.start out__) (CArray.of_list t tensors |> CArray.start) (List.length tensors) (Int64.of_int dim); let t0 = CArray.get out__ 0 in Gc.finalise C.Tensor.free t0; t0
let _cat_out ~out tensors ~dim = let out__ = CArray.make t 1 in stubs__cat_out (CArray.start out__) out (CArray.of_list t tensors |> CArray.start) (List.length tensors) (Int64.of_int dim); let t0 = CArray.get out__ 0 in Gc.finalise C.Tensor.free t0; t0
let _cdist_backward ~grad ~x1 ~x2 ~p ~cdist = let out__ = CArray.make t 1 in stubs__cdist_backward (CArray.start out__) grad x1 x2 p cdist; let t0 = CArray.get out__ 0 in Gc.finalise C.Tensor.free t0; t0
let _cholesky_solve_helper self ~a ~upper = let out__ = CArray.make t 1 in stubs__cholesky_solve_helper (CArray.start out__) self a (if upper then 1 else 0); let t0 = CArray.get out__ 0 in Gc.finalise C.Tensor.free t0; t0
let _coalesce self = let out__ = CArray.make t 1 in stubs__coalesce (CArray.start out__) self; let t0 = CArray.get out__ 0 in Gc.finalise C.Tensor.free t0; t0
let _coalesced_ self ~coalesced = let out__ = CArray.make t 1 in stubs__coalesced_ (CArray.start out__) self (if coalesced then 1 else 0); let t0 = CArray.get out__ 0 in Gc.finalise C.Tensor.free t0; t0
let _compute_linear_combination input ~coefficients = let out__ = CArray.make t 1 in stubs__compute_linear_combination (CArray.start out__) input coefficients; let t0 = CArray.get out__ 0 in Gc.finalise C.Tensor.free t0; t0
let _compute_linear_combination_out ~out input ~coefficients = let out__ = CArray.make t 1 in stubs__compute_linear_combination_out (CArray.start out__) out input coefficients; let t0 = CArray.get out__ 0 in Gc.finalise C.Tensor.free t0; t0
let _conj self = let out__ = CArray.make t 1 in stubs__conj (CArray.start out__) self; let t0 = CArray.get out__ 0 in Gc.finalise C.Tensor.free t0; t0
let _conj_physical self = let out__ = CArray.make t 1 in stubs__conj_physical (CArray.start out__) self; let t0 = CArray.get out__ 0 in Gc.finalise C.Tensor.free t0; t0
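(* Optional tensor arguments ([bias], [weight_buf], [dropout_state], ...) are
   passed to the stubs as [null] when [None]. *)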
let _conv_depthwise2d self ~weight ~kernel_size ~bias ~stride ~padding ~dilation = let out__ = CArray.make t 1 in stubs__conv_depthwise2d (CArray.start out__) self weight (List.map Int64.of_int kernel_size |> CArray.of_list int64_t |> CArray.start) (List.length kernel_size) (match bias with | Some v -> v | None -> null) (List.map Int64.of_int stride |> CArray.of_list int64_t |> CArray.start) (List.length stride) (List.map Int64.of_int padding |> CArray.of_list int64_t |> CArray.start) (List.length padding) (List.map Int64.of_int dilation |> CArray.of_list int64_t |> CArray.start) (List.length dilation); let t0 = CArray.get out__ 0 in Gc.finalise C.Tensor.free t0; t0
let _conv_depthwise2d_backward ~grad_input ~grad_weight ~grad_output self ~weight ~kernel_size ~stride ~padding ~dilation = let out__ = CArray.make t 2 in stubs__conv_depthwise2d_backward (CArray.start out__) grad_input grad_weight grad_output self weight (List.map Int64.of_int kernel_size |> CArray.of_list int64_t |> CArray.start) (List.length kernel_size) (List.map Int64.of_int stride |> CArray.of_list int64_t |> CArray.start) (List.length stride) (List.map Int64.of_int padding |> CArray.of_list int64_t |> CArray.start) (List.length padding) (List.map Int64.of_int dilation |> CArray.of_list int64_t |> CArray.start) (List.length dilation); let t0 = CArray.get out__ 0 in Gc.finalise C.Tensor.free t0; let t1 = CArray.get out__ 1 in Gc.finalise C.Tensor.free t1; t0, t1
let _conv_depthwise2d_out ~out self ~weight ~kernel_size ~bias ~stride ~padding ~dilation = let out__ = CArray.make t 1 in stubs__conv_depthwise2d_out (CArray.start out__) out self weight (List.map Int64.of_int kernel_size |> CArray.of_list int64_t |> CArray.start) (List.length kernel_size) (match bias with | Some v -> v | None -> null) (List.map Int64.of_int stride |> CArray.of_list int64_t |> CArray.start) (List.length stride) (List.map Int64.of_int padding |> CArray.of_list int64_t |> CArray.start) (List.length padding) (List.map Int64.of_int dilation |> CArray.of_list int64_t |> CArray.start) (List.length dilation); let t0 = CArray.get out__ 0 in Gc.finalise C.Tensor.free t0; t0
let _convert_indices_from_coo_to_csr self ~size ~out_int32 = let out__ = CArray.make t 1 in stubs__convert_indices_from_coo_to_csr (CArray.start out__) self (Int64.of_int size) (if out_int32 then 1 else 0); let t0 = CArray.get out__ 0 in Gc.finalise C.Tensor.free t0; t0
let _convert_indices_from_coo_to_csr_out ~out self ~size ~out_int32 = let out__ = CArray.make t 1 in stubs__convert_indices_from_coo_to_csr_out (CArray.start out__) out self (Int64.of_int size) (if out_int32 then 1 else 0); let t0 = CArray.get out__ 0 in Gc.finalise C.Tensor.free t0; t0
let _convolution input ~weight ~bias ~stride ~padding ~dilation ~transposed ~output_padding ~groups ~benchmark ~deterministic ~cudnn_enabled ~allow_tf32 = let out__ = CArray.make t 1 in stubs__convolution (CArray.start out__) input weight (match bias with | Some v -> v | None -> null) (List.map Int64.of_int stride |> CArray.of_list int64_t |> CArray.start) (List.length stride) (List.map Int64.of_int padding |> CArray.of_list int64_t |> CArray.start) (List.length padding) (List.map Int64.of_int dilation |> CArray.of_list int64_t |> CArray.start) (List.length dilation) (if transposed then 1 else 0) (List.map Int64.of_int output_padding |> CArray.of_list int64_t |> CArray.start) (List.length output_padding) (Int64.of_int groups) (if benchmark then 1 else 0) (if deterministic then 1 else 0) (if cudnn_enabled then 1 else 0) (if allow_tf32 then 1 else 0); let t0 = CArray.get out__ 0 in Gc.finalise C.Tensor.free t0; t0
let _convolution_deprecated input ~weight ~bias ~stride ~padding ~dilation ~transposed ~output_padding ~groups ~benchmark ~deterministic ~cudnn_enabled = let out__ = CArray.make t 1 in stubs__convolution_deprecated (CArray.start out__) input weight (match bias with | Some v -> v | None -> null) (List.map Int64.of_int stride |> CArray.of_list int64_t |> CArray.start) (List.length stride) (List.map Int64.of_int padding |> CArray.of_list int64_t |> CArray.start) (List.length padding) (List.map Int64.of_int dilation |> CArray.of_list int64_t |> CArray.start) (List.length dilation) (if transposed then 1 else 0) (List.map Int64.of_int output_padding |> CArray.of_list int64_t |> CArray.start) (List.length output_padding) (Int64.of_int groups) (if benchmark then 1 else 0) (if deterministic then 1 else 0) (if cudnn_enabled then 1 else 0); let t0 = CArray.get out__ 0 in Gc.finalise C.Tensor.free t0; t0
let _convolution_mode input ~weight ~bias ~stride ~padding ~dilation ~groups = let out__ = CArray.make t 1 in stubs__convolution_mode (CArray.start out__) input weight (match bias with | Some v -> v | None -> null) (List.map Int64.of_int stride |> CArray.of_list int64_t |> CArray.start) (List.length stride) padding (List.map Int64.of_int dilation |> CArray.of_list int64_t |> CArray.start) (List.length dilation) (Int64.of_int groups); let t0 = CArray.get out__ 0 in Gc.finalise C.Tensor.free t0; t0
let _convolution_nogroup input ~weight ~bias ~stride ~padding ~dilation ~transposed ~output_padding = let out__ = CArray.make t 1 in stubs__convolution_nogroup (CArray.start out__) input weight (match bias with | Some v -> v | None -> null) (List.map Int64.of_int stride |> CArray.of_list int64_t |> CArray.start) (List.length stride) (List.map Int64.of_int padding |> CArray.of_list int64_t |> CArray.start) (List.length padding) (List.map Int64.of_int dilation |> CArray.of_list int64_t |> CArray.start) (List.length dilation) (if transposed then 1 else 0) (List.map Int64.of_int output_padding |> CArray.of_list int64_t |> CArray.start) (List.length output_padding); let t0 = CArray.get out__ 0 in Gc.finalise C.Tensor.free t0; t0
let _copy_from self ~dst ~non_blocking = let out__ = CArray.make t 1 in stubs__copy_from (CArray.start out__) self dst (if non_blocking then 1 else 0); let t0 = CArray.get out__ 0 in Gc.finalise C.Tensor.free t0; t0
let _copy_from_and_resize self ~dst = let out__ = CArray.make t 1 in stubs__copy_from_and_resize (CArray.start out__) self dst; let t0 = CArray.get out__ 0 in Gc.finalise C.Tensor.free t0; t0
let _ctc_loss ~log_probs ~targets ~input_lengths ~target_lengths ~blank ~zero_infinity = let out__ = CArray.make t 2 in stubs__ctc_loss (CArray.start out__) log_probs targets (List.map Int64.of_int input_lengths |> CArray.of_list int64_t |> CArray.start) (List.length input_lengths) (List.map Int64.of_int target_lengths |> CArray.of_list int64_t |> CArray.start) (List.length target_lengths) (Int64.of_int blank) (if zero_infinity then 1 else 0); let t0 = CArray.get out__ 0 in Gc.finalise C.Tensor.free t0; let t1 = CArray.get out__ 1 in Gc.finalise C.Tensor.free t1; t0, t1
let _ctc_loss_backward ~grad ~log_probs ~targets ~input_lengths ~target_lengths ~neg_log_likelihood ~log_alpha ~blank ~zero_infinity = let out__ = CArray.make t 1 in stubs__ctc_loss_backward (CArray.start out__) grad log_probs targets (List.map Int64.of_int input_lengths |> CArray.of_list int64_t |> CArray.start) (List.length input_lengths) (List.map Int64.of_int target_lengths |> CArray.of_list int64_t |> CArray.start) (List.length target_lengths) neg_log_likelihood log_alpha (Int64.of_int blank) (if zero_infinity then 1 else 0); let t0 = CArray.get out__ 0 in Gc.finalise C.Tensor.free t0; t0
let _cudnn_ctc_loss ~log_probs ~targets ~input_lengths ~target_lengths ~blank ~deterministic ~zero_infinity = let out__ = CArray.make t 2 in stubs__cudnn_ctc_loss (CArray.start out__) log_probs targets (List.map Int64.of_int input_lengths |> CArray.of_list int64_t |> CArray.start) (List.length input_lengths) (List.map Int64.of_int target_lengths |> CArray.of_list int64_t |> CArray.start) (List.length target_lengths) (Int64.of_int blank) (if deterministic then 1 else 0) (if zero_infinity then 1 else 0); let t0 = CArray.get out__ 0 in Gc.finalise C.Tensor.free t0; let t1 = CArray.get out__ 1 in Gc.finalise C.Tensor.free t1; t0, t1
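(* [options] is a (kind, device) pair; it is lowered to the integer codes the
   C API expects via [Kind.packed_to_int] and [Device.to_int]. *)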
let _cudnn_init_dropout_state ~dropout ~train ~dropout_seed ~options = let out__ = CArray.make t 1 in stubs__cudnn_init_dropout_state (CArray.start out__) dropout (if train then 1 else 0) (Int64.of_int dropout_seed) (Kind.packed_to_int (fst options)) (Device.to_int (snd options)); let t0 = CArray.get out__ 0 in Gc.finalise C.Tensor.free t0; t0
let _cudnn_rnn input ~weight ~weight_stride0 ~weight_buf ~hx ~cx ~mode ~hidden_size ~proj_size ~num_layers ~batch_first ~dropout ~train ~bidirectional ~batch_sizes ~dropout_state = let out__ = CArray.make t 5 in stubs__cudnn_rnn (CArray.start out__) input (CArray.of_list t weight |> CArray.start) (List.length weight) (Int64.of_int weight_stride0) (match weight_buf with | Some v -> v | None -> null) hx (match cx with | Some v -> v | None -> null) (Int64.of_int mode) (Int64.of_int hidden_size) (Int64.of_int proj_size) (Int64.of_int num_layers) (if batch_first then 1 else 0) dropout (if train then 1 else 0) (if bidirectional then 1 else 0) (List.map Int64.of_int batch_sizes |> CArray.of_list int64_t |> CArray.start) (List.length batch_sizes) (match dropout_state with | Some v -> v | None -> null); let t0 = CArray.get out__ 0 in Gc.finalise C.Tensor.free t0; let t1 = CArray.get out__ 1 in Gc.finalise C.Tensor.free t1; let t2 = CArray.get out__ 2 in Gc.finalise C.Tensor.free t2; let t3 = CArray.get out__ 3 in Gc.finalise C.Tensor.free t3; let t4 = CArray.get out__ 4 in Gc.finalise C.Tensor.free t4; t0, t1, t2, t3, t4
let _cudnn_rnn_flatten_weight ~weight_arr ~weight_stride0 ~input_size ~mode ~hidden_size ~proj_size ~num_layers ~batch_first ~bidirectional = let out__ = CArray.make t 1 in stubs__cudnn_rnn_flatten_weight (CArray.start out__) (CArray.of_list t weight_arr |> CArray.start) (List.length weight_arr) (Int64.of_int weight_stride0) (Int64.of_int input_size) (Int64.of_int mode) (Int64.of_int hidden_size) (Int64.of_int proj_size) (Int64.of_int num_layers) (if batch_first then 1 else 0) (if bidirectional then 1 else 0); let t0 = CArray.get out__ 0 in Gc.finalise C.Tensor.free t0; t0
let _det_lu_based_helper self = let out__ = CArray.make t 3 in stubs__det_lu_based_helper (CArray.start out__) self; let t0 = CArray.get out__ 0 in Gc.finalise C.Tensor.free t0; let t1 = CArray.get out__ 1 in Gc.finalise C.Tensor.free t1; let t2 = CArray.get out__ 2 in Gc.finalise C.Tensor.free t2; t0, t1, t2
let _det_lu_based_helper_backward_helper ~det_grad ~det self ~lu ~pivs = let out__ = CArray.make t 1 in stubs__det_lu_based_helper_backward_helper (CArray.start out__) det_grad det self lu pivs; let t0 = CArray.get out__ 0 in Gc.finalise C.Tensor.free t0; t0
let _dim_arange ~like ~dim = let out__ = CArray.make t 1 in stubs__dim_arange (CArray.start out__) like (Int64.of_int dim); let t0 = CArray.get out__ 0 in Gc.finalise C.Tensor.free t0; t0
let _dirichlet_grad ~x ~alpha ~total = let out__ = CArray.make t 1 in stubs__dirichlet_grad (CArray.start out__) x alpha total; let t0 = CArray.get out__ 0 in Gc.finalise C.Tensor.free t0; t0
let _embedding_bag ~weight ~indices ~offsets ~scale_grad_by_freq ~mode ~sparse ~per_sample_weights ~include_last_offset ~padding_idx = let out__ = CArray.make t 4 in stubs__embedding_bag (CArray.start out__) weight indices offsets (if scale_grad_by_freq then 1 else 0) (Int64.of_int mode) (if sparse then 1 else 0) (match per_sample_weights with | Some v -> v | None -> null) (if include_last_offset then 1 else 0) (Int64.of_int padding_idx); let t0 = CArray.get out__ 0 in Gc.finalise C.Tensor.free t0; let t1 = CArray.get out__ 1 in Gc.finalise C.Tensor.free t1; let t2 = CArray.get out__ 2 in Gc.finalise C.Tensor.free t2; let t3 = CArray.get out__ 3 in Gc.finalise C.Tensor.free t3; t0, t1, t2, t3
let _embedding_bag_backward ~grad ~indices ~offsets ~offset2bag ~bag_size ~maximum_indices ~num_weights ~scale_grad_by_freq ~mode ~sparse ~per_sample_weights ~padding_idx = let out__ = CArray.make t 1 in stubs__embedding_bag_backward (CArray.start out__) grad indices offsets offset2bag bag_size maximum_indices (Int64.of_int num_weights) (if scale_grad_by_freq then 1 else 0) (Int64.of_int mode) (if sparse then 1 else 0) (match per_sample_weights with | Some v -> v | None -> null) (Int64.of_int padding_idx); let t0 = CArray.get out__ 0 in Gc.finalise C.Tensor.free t0; t0
let _embedding_bag_dense_backward ~grad ~indices ~offset2bag ~bag_size ~maximum_indices ~num_weights ~scale_grad_by_freq ~mode ~per_sample_weights ~padding_idx = let out__ = CArray.make t 1 in stubs__embedding_bag_dense_backward (CArray.start out__) grad indices offset2bag bag_size maximum_indices (Int64.of_int num_weights) (if scale_grad_by_freq then 1 else 0) (Int64.of_int mode) (match per_sample_weights with | Some v -> v | None -> null) (Int64.of_int padding_idx); let t0 = CArray.get out__ 0 in Gc.finalise C.Tensor.free t0; t0
let _embedding_bag_forward_only ~weight ~indices ~offsets ~scale_grad_by_freq ~mode ~sparse ~per_sample_weights ~include_last_offset ~padding_idx = let out__ = CArray.make t 4 in stubs__embedding_bag_forward_only (CArray.start out__) weight indices offsets (if scale_grad_by_freq then 1 else 0) (Int64.of_int mode) (if sparse then 1 else 0) (match per_sample_weights with | Some v -> v | None -> null) (if include_last_offset then 1 else 0) (Int64.of_int padding_idx); let t0 = CArray.get out__ 0 in Gc.finalise C.Tensor.free t0; let t1 = CArray.get out__ 1 in Gc.finalise C.Tensor.free t1; let t2 = CArray.get out__ 2 in Gc.finalise C.Tensor.free t2; let t3 = CArray.get out__ 3 in Gc.finalise C.Tensor.free t3; t0, t1, t2, t3
let _embedding_bag_per_sample_weights_backward ~grad ~weight ~indices ~offsets ~offset2bag ~mode ~padding_idx = let out__ = CArray.make t 1 in stubs__embedding_bag_per_sample_weights_backward (CArray.start out__) grad weight indices offsets offset2bag (Int64.of_int mode) (Int64.of_int padding_idx); let t0 = CArray.get out__ 0 in Gc.finalise C.Tensor.free t0; t0
let _embedding_bag_sparse_backward ~grad ~indices ~offsets ~offset2bag ~bag_size ~num_weights ~scale_grad_by_freq ~mode ~per_sample_weights ~padding_idx = let out__ = CArray.make t 1 in stubs__embedding_bag_sparse_backward (CArray.start out__) grad indices offsets offset2bag bag_size (Int64.of_int num_weights) (if scale_grad_by_freq then 1 else 0) (Int64.of_int mode) (match per_sample_weights with | Some v -> v | None -> null) (Int64.of_int padding_idx); let t0 = CArray.get out__ 0 in Gc.finalise C.Tensor.free t0; t0
let _empty_affine_quantized ~size ~options ~scale ~zero_point = let out__ = CArray.make t 1 in stubs__empty_affine_quantized (CArray.start out__) (List.map Int64.of_int size |> CArray.of_list int64_t |> CArray.start) (List.length size) (Kind.packed_to_int (fst options)) (Device.to_int (snd options)) scale (Int64.of_int zero_point); let t0 = CArray.get out__ 0 in Gc.finalise C.Tensor.free t0; t0
let _empty_per_channel_affine_quantized ~size ~scales ~zero_points ~axis ~options = let out__ = CArray.make t 1 in stubs__empty_per_channel_affine_quantized (CArray.start out__) (List.map Int64.of_int size |> CArray.of_list int64_t |> CArray.start) (List.length size) scales zero_points (Int64.of_int axis) (Kind.packed_to_int (fst options)) (Device.to_int (snd options)); let t0 = CArray.get out__ 0 in Gc.finalise C.Tensor.free t0; t0
let _euclidean_dist ~x1 ~x2 = let out__ = CArray.make t 1 in stubs__euclidean_dist (CArray.start out__) x1 x2; let t0 = CArray.get out__ 0 in Gc.finalise C.Tensor.free t0; t0
let _fake_quantize_learnable_per_channel_affine self ~scale ~zero_point ~axis ~quant_min ~quant_max ~grad_factor = let out__ = CArray.make t 1 in stubs__fake_quantize_learnable_per_channel_affine (CArray.start out__) self scale zero_point (Int64.of_int axis) (Int64.of_int quant_min) (Int64.of_int quant_max) grad_factor; let t0 = CArray.get out__ 0 in Gc.finalise C.Tensor.free t0; t0
let _fake_quantize_learnable_per_channel_affine_backward ~grad self ~scale ~zero_point ~axis ~quant_min ~quant_max ~grad_factor = let out__ = CArray.make t 3 in stubs__fake_quantize_learnable_per_channel_affine_backward (CArray.start out__) grad self scale zero_point (Int64.of_int axis) (Int64.of_int quant_min) (Int64.of_int quant_max) grad_factor; let t0 = CArray.get out__ 0 in Gc.finalise C.Tensor.free t0; let t1 = CArray.get out__ 1 in Gc.finalise C.Tensor.free t1; let t2 = CArray.get out__ 2 in Gc.finalise C.Tensor.free t2; t0, t1, t2
let _fake_quantize_learnable_per_tensor_affine self ~scale ~zero_point ~quant_min ~quant_max ~grad_factor = let out__ = CArray.make t 1 in stubs__fake_quantize_learnable_per_tensor_affine (CArray.start out__) self scale zero_point (Int64.of_int quant_min) (Int64.of_int quant_max) grad_factor; let t0 = CArray.get out__ 0 in Gc.finalise C.Tensor.free t0; t0
let _fake_quantize_learnable_per_tensor_affine_backward ~grad self ~scale ~zero_point ~quant_min ~quant_max ~grad_factor = let out__ = CArray.make t 3 in stubs__fake_quantize_learnable_per_tensor_affine_backward (CArray.start out__) grad self scale zero_point (Int64.of_int quant_min) (Int64.of_int quant_max) grad_factor; let t0 = CArray.get out__ 0 in Gc.finalise C.Tensor.free t0; let t1 = CArray.get out__ 1 in Gc.finalise C.Tensor.free t1; let t2 = CArray.get out__ 2 in Gc.finalise C.Tensor.free t2; t0, t1, t2
let _fake_quantize_per_tensor_affine_cachemask_tensor_qparams self ~scale ~zero_point ~fake_quant_enabled ~quant_min ~quant_max = let out__ = CArray.make t 2 in stubs__fake_quantize_per_tensor_affine_cachemask_tensor_qparams (CArray.start out__) self scale zero_point fake_quant_enabled (Int64.of_int quant_min) (Int64.of_int quant_max); let t0 = CArray.get out__ 0 in Gc.finalise C.Tensor.free t0; let t1 = CArray.get out__ 1 in Gc.finalise C.Tensor.free t1; t0, t1
let _fft_c2c self ~dim ~normalization ~forward = let out__ = CArray.make t 1 in stubs__fft_c2c (CArray.start out__) self (List.map Int64.of_int dim |> CArray.of_list int64_t |> CArray.start) (List.length dim) (Int64.of_int normalization) (if forward then 1 else 0); let t0 = CArray.get out__ 0 in Gc.finalise C.Tensor.free t0; t0
let _fft_c2c_out ~out self ~dim ~normalization ~forward = let out__ = CArray.make t 1 in stubs__fft_c2c_out (CArray.start out__) out self (List.map Int64.of_int dim |> CArray.of_list int64_t |> CArray.start) (List.length dim) (Int64.of_int normalization) (if forward then 1 else 0); let t0 = CArray.get out__ 0 in Gc.finalise C.Tensor.free t0; t0
let _fft_c2r self ~dim ~normalization ~last_dim_size = let out__ = CArray.make t 1 in stubs__fft_c2r (CArray.start out__) self (List.map Int64.of_int dim |> CArray.of_list int64_t |> CArray.start) (List.length dim) (Int64.of_int normalization) (Int64.of_int last_dim_size); let t0 = CArray.get out__ 0 in Gc.finalise C.Tensor.free t0; t0
let _fft_c2r_out ~out self ~dim ~normalization ~last_dim_size = let out__ = CArray.make t 1 in stubs__fft_c2r_out (CArray.start out__) out self (List.map Int64.of_int dim |> CArray.of_list int64_t |> CArray.start) (List.length dim) (Int64.of_int normalization) (Int64.of_int last_dim_size); let t0 = CArray.get out__ 0 in Gc.finalise C.Tensor.free t0; t0
let _fft_r2c self ~dim ~normalization ~onesided = let out__ = CArray.make t 1 in stubs__fft_r2c (CArray.start out__) self (List.map Int64.of_int dim |> CArray.of_list int64_t |> CArray.start) (List.length dim) (Int64.of_int normalization) (if onesided then 1 else 0); let t0 = CArray.get out__ 0 in Gc.finalise C.Tensor.free t0; t0
let _fft_r2c_out ~out self ~dim ~normalization ~onesided = let out__ = CArray.make t 1 in stubs__fft_r2c_out (CArray.start out__) out self (List.map Int64.of_int dim |> CArray.of_list int64_t |> CArray.start) (List.length dim) (Int64.of_int normalization) (if onesided then 1 else 0); let t0 = CArray.get out__ 0 in Gc.finalise C.Tensor.free t0; t0
let _fused_dropout self ~p = let out__ = CArray.make t 2 in stubs__fused_dropout (CArray.start out__) self p; let t0 = CArray.get out__ 0 in Gc.finalise C.Tensor.free t0; let t1 = CArray.get out__ 1 in Gc.finalise C.Tensor.free t1; t0, t1
let _fused_moving_avg_obs_fq_helper self ~observer_on ~fake_quant_on ~running_min ~running_max ~scale ~zero_point ~averaging_const ~quant_min ~quant_max ~ch_axis ~per_row_fake_quant ~symmetric_quant = let out__ = CArray.make t 2 in stubs__fused_moving_avg_obs_fq_helper (CArray.start out__) self observer_on fake_quant_on running_min running_max scale zero_point averaging_const (Int64.of_int quant_min) (Int64.of_int quant_max) (Int64.of_int ch_axis) (if per_row_fake_quant then 1 else 0) (if symmetric_quant then 1 else 0); let t0 = CArray.get out__ 0 in Gc.finalise C.Tensor.free t0; let t1 = CArray.get out__ 1 in Gc.finalise C.Tensor.free t1; t0, t1
let _fw_primal self ~level = let out__ = CArray.make t 1 in stubs__fw_primal (CArray.start out__) self (Int64.of_int level); let t0 = CArray.get out__ 0 in Gc.finalise C.Tensor.free t0; t0
let _gather_sparse_backward self ~dim ~index ~grad = let out__ = CArray.make t 1 in stubs__gather_sparse_backward (CArray.start out__) self (Int64.of_int dim) index grad; let t0 = CArray.get out__ 0 in Gc.finalise C.Tensor.free t0; t0
let _grid_sampler_2d_cpu_fallback input ~grid ~interpolation_mode ~padding_mode ~align_corners = let out__ = CArray.make t 1 in stubs__grid_sampler_2d_cpu_fallback (CArray.start out__) input grid (Int64.of_int interpolation_mode) (Int64.of_int padding_mode) (if align_corners then 1 else 0); let t0 = CArray.get out__ 0 in Gc.finalise C.Tensor.free t0; t0
let _grid_sampler_2d_cpu_fallback_backward ~grad_output input ~grid ~interpolation_mode ~padding_mode ~align_corners = let out__ = CArray.make t 2 in stubs__grid_sampler_2d_cpu_fallback_backward (CArray.start out__) grad_output input grid (Int64.of_int interpolation_mode) (Int64.of_int padding_mode) (if align_corners then 1 else 0); let t0 = CArray.get out__ 0 in Gc.finalise C.Tensor.free t0; let t1 = CArray.get out__ 1 in Gc.finalise C.Tensor.free t1; t0, t1
let _index_copy_ self ~dim ~index ~source = let out__ = CArray.make t 1 in stubs__index_copy_ (CArray.start out__) self (Int64.of_int dim) index source; let t0 = CArray.get out__ 0 in Gc.finalise C.Tensor.free t0; t0
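(* [indices] below is a list of optional tensors; [None] entries are passed
   through as [null]. *)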
let _index_put_impl_ self ~indices ~values ~accumulate ~unsafe = let out__ = CArray.make t 1 in stubs__index_put_impl_ (CArray.start out__) self (List.map (function | Some x -> x | None -> null) indices |> CArray.of_list t |> CArray.start) (List.length indices) values (if accumulate then 1 else 0) (if unsafe then 1 else 0); let t0 = CArray.get out__ 0 in Gc.finalise C.Tensor.free t0; t0
let _indices self = let out__ = CArray.make t 1 in stubs__indices (CArray.start out__) self; let t0 = CArray.get out__ 0 in Gc.finalise C.Tensor.free t0; t0
let _inverse_helper self = let out__ = CArray.make t 1 in stubs__inverse_helper (CArray.start out__) self; let t0 = CArray.get out__ 0 in Gc.finalise C.Tensor.free t0; t0
let _linalg_inv_out_helper_ self ~infos_lu ~infos_getri = let out__ = CArray.make t 1 in stubs__linalg_inv_out_helper_ (CArray.start out__) self infos_lu infos_getri; let t0 = CArray.get out__ 0 in Gc.finalise C.Tensor.free t0; t0
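(* Illustrative only: a minimal sketch of how these raw bindings compose,
   assuming [a] and [b] are tensor handles of type [t] obtained elsewhere in
   the wrapper (tensor construction is not part of this excerpt):

     let stacked = _cat [ a; b ] ~dim:0 in
     let min_t, max_t = _aminmax stacked in
     ...

   Both results already carry a [Gc.finalise C.Tensor.free] finaliser, so no
   explicit freeing is needed on the OCaml side. *)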