let read_state fin =
  try
    let s = input_line fin in
    let i = String.index s ',' in
    let p1 = float_of_string (String.sub s 0 i) in
    let p2 = float_of_string (String.sub s (i + 1) (String.length s - i - 1)) in
    Some (p1, p2)
  with _ -> None
let draw_wall xpos =
  glPushMatrix ();
  glTranslate xpos 0.0 0.0;
  glScale wall_width wall_size wall_size;
  glutSolidCube 1.0;
  glPopMatrix ()
let draw_floor () =
  glPushMatrix ();
  glTranslate (distance /. 2.0) (-. wall_size /. 2.0) 0.0;
  glScale (w2 -. w1 +. wall_width) wall_width wall_size;
  glutSolidCube 1.0;
  glPopMatrix ()
let draw_spring x1 x2 =
  let step_size = (x2 -. x1) /. spring_ncoils in
  let length_degrees = spring_ncoils *. 360.0 in
  glPushMatrix ();
  glTranslate x1 0.0 0.0;
  glRotate 90.0 0.0 1.0 0.0;
  glScale 1.0 1.0 1.0;
  gleHelicoid spring_thickness spring_radius 0.0 0.0 step_size None None 0.0 length_degrees;
  glPopMatrix ()
let draw_ball x =
  glPushMatrix ();
  glTranslate x 0.0 0.0;
  glScale 1.0 1.0 1.0;
  glutSolidSphere ball_radius 24 24;
  glPopMatrix ()
let display p1 p2 () =
  glClear [GL_COLOR_BUFFER_BIT; GL_DEPTH_BUFFER_BIT];
  glLoadIdentity ();
  glTranslate 0.0 0.0 (-2.5);
  glRotate !angley 1.0 0.0 0.0;
  glRotate !anglex 0.0 1.0 0.0;
  glRotate !anglez 0.0 0.0 1.0;
  glPushMatrix ();
  glTranslate left_offset 0.0 0.0;
  glLight (GL_LIGHT 0) (Light.GL_POSITION lightOnePosition);
  glScale 0.5 0.5 0.5;
  glColor3 0.0 0.5 0.0;
  glMaterial GL_FRONT (Material.GL_AMBIENT (1.0, 1.0, 1.0, 1.0));
  draw_wall w1;
  draw_wall w2;
  draw_floor ();
  glMaterial GL_FRONT_AND_BACK (Material.GL_SHININESS 1.0);
  glColor3 0.8 0.8 0.8;
  draw_spring w1 (!p1 -. ball_radius *. 1.5);
  draw_spring w2 (!p2 +. ball_radius *. 1.5);
  glMaterial GL_FRONT (Material.GL_SPECULAR (0.0, 0.0, 0.0, 0.0));
  glColor3 1.0 0.0 0.0;
  draw_ball (!p1 -. ball_radius);
  glColor3 0.0 0.0 1.0;
  draw_ball (!p2 +. ball_radius);
  glPopMatrix ();
  glutSwapBuffers ()
let make_model_functions p1i p2i fin =
  let p1 = ref p1i in
  let p2 = ref p2i in
  (* Poll the pipe on a GLUT timer; redraw only when a new state line arrives. *)
  let rec update ~value:() =
    match read_state fin with
    | None -> schedule_update ()
    | Some (p1', p2') ->
      begin
        p1 := p1';
        p2 := p2';
        schedule_update ();
        glutPostRedisplay ()
      end
  and schedule_update () =
    glutTimerFunc ~msecs:update_period ~timer:update ~value:()
  in
  (display p1 p2, update, schedule_update)
let reshape ~width:w ~height:h =
  glViewport 0 0 w h;
  glMatrixMode GL_PROJECTION;
  glLoadIdentity ();
  let aspect = float w /. float (max 1 h) in
  gluPerspective ~fovy:60.0 ~aspect ~zNear:0.5 ~zFar:100.0;
  glMatrixMode GL_MODELVIEW;
  glutPostRedisplay ()
let keyboard ~key ~x ~y =
  match key with
  | 'q' | '\027' -> exit 0
  | _ -> ()
let special ~key ~x ~y =
  match key with
  | GLUT_KEY_LEFT -> (anglex := !anglex -. 5.00; glutPostRedisplay ())
  | GLUT_KEY_RIGHT -> (anglex := !anglex +. 5.00; glutPostRedisplay ())
  | GLUT_KEY_UP -> (angley := !angley -. 5.00; glutPostRedisplay ())
  | GLUT_KEY_DOWN -> (angley := !angley +. 5.00; glutPostRedisplay ())
  | _ -> ()
let drag_last = ref (None : (int * int) option)
let mouse ~button ~state ~x ~y =
  match button, state with
  | GLUT_LEFT_BUTTON, GLUT_DOWN -> drag_last := Some (x, y)
  | GLUT_LEFT_BUTTON, GLUT_UP -> drag_last := None
  | _ -> ()
let motion ~x ~y =
  match !drag_last with
  | None -> ()
  | Some (xo, yo) ->
    anglex := !anglex +. (float (xo - x) *. drag_scale);
    angley := !angley +. (float (yo - y) *. drag_scale);
    drag_last := Some (x, y);
    glutPostRedisplay ()
let gl_init () =
  glShadeModel GL_SMOOTH;
  glClearColor 0.0 0.0 0.3 0.0;
  glClearDepth 1.0;
  glEnable GL_DEPTH_TEST;
  gleSetJoinStyle [TUBE_NORM_EDGE; TUBE_JN_ANGLE; TUBE_JN_CAP];
  glLight (GL_LIGHT 0) (Light.GL_POSITION lightOnePosition);
  glLight (GL_LIGHT 0) (Light.GL_DIFFUSE lightDiffuse);
  glLight (GL_LIGHT 0) (Light.GL_AMBIENT lightAmbient);
  glEnable GL_LIGHT0;
  glEnable GL_LIGHTING;
  glColorMaterial GL_FRONT_AND_BACK GL_AMBIENT_AND_DIFFUSE;
  glEnable GL_COLOR_MATERIAL
let run_glut_loop p1i p2i fin =
  Random.self_init ();
  ignore (glutInit Sys.argv);
  glutInitDisplayMode [GLUT_RGB; GLUT_DOUBLE; GLUT_DEPTH];
  ignore (glutCreateWindow "Sticky Springs Visualisation");
  glutReshapeWindow ~width:800 ~height:600;
  glutSetCursor GLUT_CURSOR_CROSSHAIR;
  gl_init ();
  let (display, _timer, start_timer) = make_model_functions p1i p2i fin in
  glutDisplayFunc ~display;
  glutKeyboardFunc ~keyboard;
  glutSpecialFunc ~special;
  glutReshapeFunc ~reshape;
  glutMouseFunc ~mouse;
  glutMotionFunc ~motion;
  start_timer ();
  glutMainLoop ()
let standalone () =
  Unix.set_nonblock Unix.stdin;
  run_glut_loop 0.0 distance stdin
let create p1i p2i =
  let input, output = Unix.pipe () in
  let outch = Unix.out_channel_of_descr output in
  (* The parent process runs the visualisation, reading states from the pipe;
     the caller continues in the child and writes to the returned channel. *)
  match Unix.fork () with
  | 0 -> (Unix.close input; outch)
  | _ ->
    begin
      Unix.close output;
      Unix.set_nonblock input;
      run_glut_loop p1i p2i (Unix.in_channel_of_descr input);
      outch
    end
let update fout p1 p2 =
  Printf.fprintf fout "%e,%e\n" p1 p2;
  flush fout
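(* A minimal usage sketch, not part of the original source: the simulation side
   calls [create] to fork off the visualisation, keeps the returned channel and
   streams positions to it with [update]. The loop and the positions below are
   illustrative assumptions only; [distance] is the module-level constant used
   above. *)
let _example_drive_visualisation () =
  let out = create 0.0 distance in
  for step = 0 to 99 do
    let tm = float step *. 0.01 in
    (* hypothetical positions; a real caller would take these from its model *)
    update out (sin tm) (distance -. sin tm)
  done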
let makeCheckImage () =
  let checkImage =
    Bigarray.Array3.create Bigarray.int8_unsigned Bigarray.c_layout
      checkImageHeight checkImageWidth 4
  in
  for i = 0 to pred checkImageHeight do
    for j = 0 to pred checkImageWidth do
      let ( = ) a b = if a = b then 1 else 0 in
      let c = (((i land 0x8) = 0) lxor ((j land 0x8) = 0)) * 255 in
      checkImage.{i, j, 0} <- c;
      checkImage.{i, j, 1} <- c;
      checkImage.{i, j, 2} <- c;
      checkImage.{i, j, 3} <- 255
    done
  done;
  Bigarray.genarray_of_array3 checkImage
;;
let init () =
  glClearColor 0.0 0.0 0.0 0.0;
  glShadeModel GL_FLAT;
  glEnable GL_DEPTH_TEST;
  let checkImage = makeCheckImage () in
  glPixelStorei GL_UNPACK_ALIGNMENT 1;
  let texName = glGenTexture () in
  glBindTexture BindTex.GL_TEXTURE_2D texName;
  glTexParameter TexParam.GL_TEXTURE_2D (TexParam.GL_TEXTURE_WRAP_S GL_REPEAT);
  glTexParameter TexParam.GL_TEXTURE_2D (TexParam.GL_TEXTURE_WRAP_T GL_REPEAT);
  glTexParameter TexParam.GL_TEXTURE_2D (TexParam.GL_TEXTURE_MAG_FILTER Mag.GL_NEAREST);
  glTexParameter TexParam.GL_TEXTURE_2D (TexParam.GL_TEXTURE_MIN_FILTER Min.GL_NEAREST);
  glTexImage2D TexTarget.GL_TEXTURE_2D 0 InternalFormat.GL_RGBA
    checkImageWidth checkImageHeight GL_RGBA GL_UNSIGNED_BYTE checkImage;
  texName
;;
let display texName () =
  glClear [GL_COLOR_BUFFER_BIT; GL_DEPTH_BUFFER_BIT];
  glEnable GL_TEXTURE_2D;
  glTexEnv TexEnv.GL_TEXTURE_ENV TexEnv.GL_TEXTURE_ENV_MODE TexEnv.GL_DECAL;
  glBindTexture BindTex.GL_TEXTURE_2D texName;
  glBegin GL_QUADS;
  glTexCoord2 0.0 0.0; glVertex3 (-2.0) (-1.0) 0.0;
  glTexCoord2 0.0 3.0; glVertex3 (-2.0) 1.0 0.0;
  glTexCoord2 3.0 3.0; glVertex3 0.0 1.0 0.0;
  glTexCoord2 3.0 0.0; glVertex3 0.0 (-1.0) 0.0;
  glTexCoord2 0.0 0.0; glVertex3 1.0 (-1.0) 0.0;
  glTexCoord2 0.0 3.0; glVertex3 1.0 1.0 0.0;
  glTexCoord2 3.0 3.0; glVertex3 2.41421 1.0 (-1.41421);
  glTexCoord2 3.0 0.0; glVertex3 2.41421 (-1.0) (-1.41421);
  glEnd ();
  glFlush ();
  glDisable GL_TEXTURE_2D
;;
let reshape ~width:w ~height:h =
  glViewport 0 0 w h;
  glMatrixMode GL_PROJECTION;
  glLoadIdentity ();
  gluPerspective 60.0 (float w /. float h) 1.0 30.0;
  glMatrixMode GL_MODELVIEW;
  glLoadIdentity ();
  glTranslate 0.0 0.0 (-3.6)
;;
let keyboard ~key ~x ~y =
  match key with
  | 's' -> glTexParameter TexParam.GL_TEXTURE_2D (TexParam.GL_TEXTURE_WRAP_S GL_CLAMP); glutPostRedisplay ()
  | 'S' -> glTexParameter TexParam.GL_TEXTURE_2D (TexParam.GL_TEXTURE_WRAP_S GL_REPEAT); glutPostRedisplay ()
  | 't' -> glTexParameter TexParam.GL_TEXTURE_2D (TexParam.GL_TEXTURE_WRAP_T GL_CLAMP); glutPostRedisplay ()
  | 'T' -> glTexParameter TexParam.GL_TEXTURE_2D (TexParam.GL_TEXTURE_WRAP_T GL_REPEAT); glutPostRedisplay ()
  | '\027' -> exit 0
  | _ -> ()
;;
let () =
  let _ = glutInit Sys.argv in
  glutInitDisplayMode [GLUT_SINGLE; GLUT_RGB; GLUT_DEPTH];
  glutInitWindowSize 250 250;
  glutInitWindowPosition 100 100;
  let _ = glutCreateWindow Sys.argv.(0) in
  let texName = init () in
  glutDisplayFunc ~display:(display texName);
  glutReshapeFunc ~reshape;
  glutKeyboardFunc ~keyboard;
  glutMainLoop ()
;;
type t =
  { width : int
  ; initial_indent : string
  ; subsequent_indent : string
  ; expand_tabs : bool
  ; replace_whitespace : bool
  ; fix_sentence_endings : bool
  ; break_long_words : bool
  ; break_on_hyphens : bool
  ; drop_whitespace : bool
  }
let wordsep_simple_re = Str.regexp "[ \r\n\t]+"
let munge_whitespace { expand_tabs; replace_whitespace; _ } text =
  let text =
    if expand_tabs
    then Str.global_replace (Str.regexp "\t") (String.make 8 ' ') text
    else text
  in
  if replace_whitespace then Str.global_replace whitespace_re " " text else text

(* Split text into word chunks and single-space whitespace chunks. *)
let split _w s =
  let res = Str.full_split wordsep_simple_re s in
  List.map (function Str.Delim _ -> " " | Str.Text t -> t) res

(* Ensure that chunks ending a sentence are followed by two spaces. *)
let fix_sentence_endings _w chunks =
  let chunks = Array.of_list chunks in
  let patsearch s =
    try Str.search_forward sentence_end_re s 0 >= 0 with Not_found -> false
  in
  let rec inner i chunks =
    if i >= Array.length chunks - 1 then chunks
    else if chunks.(i + 1) = " " && patsearch chunks.(i)
    then (chunks.(i + 1) <- "  "; inner (i + 2) chunks)
    else inner (i + 1) chunks
  in
  Array.to_list (inner 0 chunks)

(* Break a chunk that is longer than the space remaining on the current line. *)
let handle_long_word w chunks cur_line cur_len width =
  let space_left = if width < 1 then 1 else width - cur_len in
  match chunks with
  | chunk :: chunks when w.break_long_words ->
    ( String.sub chunk 0 space_left :: cur_line
    , String.sub chunk space_left (String.length chunk - space_left) :: chunks )
  | chunk :: chunks when cur_line = [] -> (chunk :: cur_line, chunks)
  | chunks -> (cur_line, chunks)
let wrap_chunks w =
  let is_whitespace = function
    | " " -> true
    | s -> Str.string_match wordsep_simple_re s 0
  in
  let pre_drop_whitespace lines = function
    | chunk :: chunks when w.drop_whitespace && lines <> [] && is_whitespace chunk -> chunks
    | chunks -> chunks
  in
  let post_drop_whitespace = function
    | chunk :: line when w.drop_whitespace && is_whitespace chunk -> line
    | line -> line
  in
  let rec current line len width = function
    | chunk :: chunks when String.length chunk + len <= width ->
      current (chunk :: line) (len + String.length chunk) width chunks
    | chunks -> (line, len, chunks)
  in
  let rec inner lines = function
    | [] -> List.rev lines
    | chunks ->
      let indent = match lines with [] -> w.initial_indent | _ -> w.subsequent_indent in
      let width = w.width - String.length indent in
      let chunks = pre_drop_whitespace lines chunks in
      let cur_line, cur_len, chunks = current [] 0 width chunks in
      let cur_line, chunks =
        if chunks <> [] && String.length (List.hd chunks) > width
        then handle_long_word w chunks cur_line cur_len width
        else (cur_line, chunks)
      in
      (* chunks already carry their whitespace, so a line is joined without a separator *)
      match post_drop_whitespace cur_line with
      | [] -> inner lines chunks
      | cur_line -> inner ((indent ^ String.concat "" (List.rev cur_line)) :: lines) chunks
  in
  inner []
let make
    ?(initial_indent = "")
    ?(subsequent_indent = "")
    ?(expand_tabs = true)
    ?(replace_whitespace = true)
    ?(fix_sentence_endings = false)
    ?(break_long_words = true)
    ?(break_on_hyphens = true)
    ?(drop_whitespace = true)
    width =
  if width <= 0 then raise (Invalid_argument "width <= 0");
  { width
  ; initial_indent
  ; subsequent_indent
  ; expand_tabs
  ; replace_whitespace
  ; fix_sentence_endings
  ; break_long_words
  ; break_on_hyphens
  ; drop_whitespace
  }
let wrap w text =
  let chunks = split w (munge_whitespace w text) in
  let chunks = if w.fix_sentence_endings then fix_sentence_endings w chunks else chunks in
  wrap_chunks w chunks
let fill w text = String.concat "\n" (wrap w text)
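(* A minimal usage sketch, not part of the original module: build a wrapper with
   [make], then use [wrap] to get the wrapped lines or [fill] to get a single
   newline-joined string. The width, indents and sample text below are
   illustrative assumptions only. *)
let _wrap_demo () =
  let w = make ~initial_indent:"> " ~subsequent_indent:"  " 24 in
  List.iter print_endline (wrap w "The quick brown fox jumps over the lazy dog.");
  print_endline (fill w "Hello, world.  This example is wrapped to 24 columns.")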
module C = Torch_bindings.C (Torch_generated)
let to_tensor_list ptr =
  let rec loop ptr acc =
    let tensor = !@ptr in
    if is_null tensor
    then acc
    else (
      Gc.finalise C.Tensor.free tensor;
      loop (ptr +@ 1) (tensor :: acc))
  in
  let result = loop ptr [] in
  C.free (to_voidp ptr);
  List.rev result
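(* Added note: the generated bindings below all share one pattern: allocate a
   [CArray] of [t] with one slot per returned tensor, let the C stub write the
   result pointers into it, attach [Gc.finalise C.Tensor.free] to each returned
   tensor so the native storage is released when the OCaml value is collected,
   and return the tensor(s). *)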
let __and__ self other = let out__ = CArray.make t 1 in stubs___and__ (CArray.start out__) self other; let t0 = CArray.get out__ 0 in Gc.finalise C.Tensor.free t0; t0
let __and__tensor_ self other = let out__ = CArray.make t 1 in stubs___and__tensor_ (CArray.start out__) self other; let t0 = CArray.get out__ 0 in Gc.finalise C.Tensor.free t0; t0
let __iand__ self other = let out__ = CArray.make t 1 in stubs___iand__ (CArray.start out__) self other; let t0 = CArray.get out__ 0 in Gc.finalise C.Tensor.free t0; t0
let __iand__tensor_ self other = let out__ = CArray.make t 1 in stubs___iand__tensor_ (CArray.start out__) self other; let t0 = CArray.get out__ 0 in Gc.finalise C.Tensor.free t0; t0
let __ilshift__ self other = let out__ = CArray.make t 1 in stubs___ilshift__ (CArray.start out__) self other; let t0 = CArray.get out__ 0 in Gc.finalise C.Tensor.free t0; t0
let __ilshift__tensor_ self other = let out__ = CArray.make t 1 in stubs___ilshift__tensor_ (CArray.start out__) self other; let t0 = CArray.get out__ 0 in Gc.finalise C.Tensor.free t0; t0
let __ior__ self other = let out__ = CArray.make t 1 in stubs___ior__ (CArray.start out__) self other; let t0 = CArray.get out__ 0 in Gc.finalise C.Tensor.free t0; t0
let __ior__tensor_ self other = let out__ = CArray.make t 1 in stubs___ior__tensor_ (CArray.start out__) self other; let t0 = CArray.get out__ 0 in Gc.finalise C.Tensor.free t0; t0
let __irshift__ self other = let out__ = CArray.make t 1 in stubs___irshift__ (CArray.start out__) self other; let t0 = CArray.get out__ 0 in Gc.finalise C.Tensor.free t0; t0
let __irshift__tensor_ self other = let out__ = CArray.make t 1 in stubs___irshift__tensor_ (CArray.start out__) self other; let t0 = CArray.get out__ 0 in Gc.finalise C.Tensor.free t0; t0
let __ixor__ self other = let out__ = CArray.make t 1 in stubs___ixor__ (CArray.start out__) self other; let t0 = CArray.get out__ 0 in Gc.finalise C.Tensor.free t0; t0
let __ixor__tensor_ self other = let out__ = CArray.make t 1 in stubs___ixor__tensor_ (CArray.start out__) self other; let t0 = CArray.get out__ 0 in Gc.finalise C.Tensor.free t0; t0
let __lshift__ self other = let out__ = CArray.make t 1 in stubs___lshift__ (CArray.start out__) self other; let t0 = CArray.get out__ 0 in Gc.finalise C.Tensor.free t0; t0
let __lshift__tensor_ self other = let out__ = CArray.make t 1 in stubs___lshift__tensor_ (CArray.start out__) self other; let t0 = CArray.get out__ 0 in Gc.finalise C.Tensor.free t0; t0
let __or__ self other = let out__ = CArray.make t 1 in stubs___or__ (CArray.start out__) self other; let t0 = CArray.get out__ 0 in Gc.finalise C.Tensor.free t0; t0
let __or__tensor_ self other = let out__ = CArray.make t 1 in stubs___or__tensor_ (CArray.start out__) self other; let t0 = CArray.get out__ 0 in Gc.finalise C.Tensor.free t0; t0
let __rshift__ self other = let out__ = CArray.make t 1 in stubs___rshift__ (CArray.start out__) self other; let t0 = CArray.get out__ 0 in Gc.finalise C.Tensor.free t0; t0
let __rshift__tensor_ self other = let out__ = CArray.make t 1 in stubs___rshift__tensor_ (CArray.start out__) self other; let t0 = CArray.get out__ 0 in Gc.finalise C.Tensor.free t0; t0
let __xor__ self other = let out__ = CArray.make t 1 in stubs___xor__ (CArray.start out__) self other; let t0 = CArray.get out__ 0 in Gc.finalise C.Tensor.free t0; t0
let __xor__tensor_ self other = let out__ = CArray.make t 1 in stubs___xor__tensor_ (CArray.start out__) self other; let t0 = CArray.get out__ 0 in Gc.finalise C.Tensor.free t0; t0
let _adaptive_avg_pool2d self ~output_size = let out__ = CArray.make t 1 in stubs__adaptive_avg_pool2d (CArray.start out__) self (List.map Int64.of_int output_size |> CArray.of_list int64_t |> CArray.start) (List.length output_size); let t0 = CArray.get out__ 0 in Gc.finalise C.Tensor.free t0; t0
let _adaptive_avg_pool2d_backward ~grad_output self = let out__ = CArray.make t 1 in stubs__adaptive_avg_pool2d_backward (CArray.start out__) grad_output self; let t0 = CArray.get out__ 0 in Gc.finalise C.Tensor.free t0; t0
let _adaptive_avg_pool3d self ~output_size = let out__ = CArray.make t 1 in stubs__adaptive_avg_pool3d (CArray.start out__) self (List.map Int64.of_int output_size |> CArray.of_list int64_t |> CArray.start) (List.length output_size); let t0 = CArray.get out__ 0 in Gc.finalise C.Tensor.free t0; t0
let _adaptive_avg_pool3d_backward ~grad_output self = let out__ = CArray.make t 1 in stubs__adaptive_avg_pool3d_backward (CArray.start out__) grad_output self; let t0 = CArray.get out__ 0 in Gc.finalise C.Tensor.free t0; t0
let _add_batch_dim self ~batch_dim ~level = let out__ = CArray.make t 1 in stubs__add_batch_dim (CArray.start out__) self (Int64.of_int batch_dim) (Int64.of_int level); let t0 = CArray.get out__ 0 in Gc.finalise C.Tensor.free t0; t0
let _add_relu self other = let out__ = CArray.make t 1 in stubs__add_relu (CArray.start out__) self other; let t0 = CArray.get out__ 0 in Gc.finalise C.Tensor.free t0; t0
let _add_relu_ self other = let out__ = CArray.make t 1 in stubs__add_relu_ (CArray.start out__) self other; let t0 = CArray.get out__ 0 in Gc.finalise C.Tensor.free t0; t0
let _add_relu_out ~out self other = let out__ = CArray.make t 1 in stubs__add_relu_out (CArray.start out__) out self other; let t0 = CArray.get out__ 0 in Gc.finalise C.Tensor.free t0; t0
let _add_relu_scalar self other = let out__ = CArray.make t 1 in stubs__add_relu_scalar (CArray.start out__) self other; let t0 = CArray.get out__ 0 in Gc.finalise C.Tensor.free t0; t0
let _add_relu_scalar_ self other = let out__ = CArray.make t 1 in stubs__add_relu_scalar_ (CArray.start out__) self other; let t0 = CArray.get out__ 0 in Gc.finalise C.Tensor.free t0; t0
let _aminmax self = let out__ = CArray.make t 2 in stubs__aminmax (CArray.start out__) self; let t0 = CArray.get out__ 0 in Gc.finalise C.Tensor.free t0; let t1 = CArray.get out__ 1 in Gc.finalise C.Tensor.free t1; t0, t1
let _aminmax_dim self ~dim ~keepdim = let out__ = CArray.make t 2 in stubs__aminmax_dim (CArray.start out__) self (Int64.of_int dim) (if keepdim then 1 else 0); let t0 = CArray.get out__ 0 in Gc.finalise C.Tensor.free t0; let t1 = CArray.get out__ 1 in Gc.finalise C.Tensor.free t1; t0, t1
let _amp_update_scale_ self ~growth_tracker ~found_inf ~scale_growth_factor ~scale_backoff_factor ~growth_interval = let out__ = CArray.make t 1 in stubs__amp_update_scale_ (CArray.start out__) self growth_tracker found_inf scale_growth_factor scale_backoff_factor (Int64.of_int growth_interval); let t0 = CArray.get out__ 0 in Gc.finalise C.Tensor.free t0; t0
let _baddbmm_mkl_ self ~batch1 ~batch2 = let out__ = CArray.make t 1 in stubs__baddbmm_mkl_ (CArray.start out__) self batch1 batch2; let t0 = CArray.get out__ 0 in Gc.finalise C.Tensor.free t0; t0
let _cast_byte self ~non_blocking = let out__ = CArray.make t 1 in stubs__cast_byte (CArray.start out__) self (if non_blocking then 1 else 0); let t0 = CArray.get out__ 0 in Gc.finalise C.Tensor.free t0; t0
let _cast_char self ~non_blocking = let out__ = CArray.make t 1 in stubs__cast_char (CArray.start out__) self (if non_blocking then 1 else 0); let t0 = CArray.get out__ 0 in Gc.finalise C.Tensor.free t0; t0
let _cast_double self ~non_blocking = let out__ = CArray.make t 1 in stubs__cast_double (CArray.start out__) self (if non_blocking then 1 else 0); let t0 = CArray.get out__ 0 in Gc.finalise C.Tensor.free t0; t0
let _cast_float self ~non_blocking = let out__ = CArray.make t 1 in stubs__cast_float (CArray.start out__) self (if non_blocking then 1 else 0); let t0 = CArray.get out__ 0 in Gc.finalise C.Tensor.free t0; t0
let _cast_half self ~non_blocking = let out__ = CArray.make t 1 in stubs__cast_half (CArray.start out__) self (if non_blocking then 1 else 0); let t0 = CArray.get out__ 0 in Gc.finalise C.Tensor.free t0; t0
let _cast_int self ~non_blocking = let out__ = CArray.make t 1 in stubs__cast_int (CArray.start out__) self (if non_blocking then 1 else 0); let t0 = CArray.get out__ 0 in Gc.finalise C.Tensor.free t0; t0
let _cast_long self ~non_blocking = let out__ = CArray.make t 1 in stubs__cast_long (CArray.start out__) self (if non_blocking then 1 else 0); let t0 = CArray.get out__ 0 in Gc.finalise C.Tensor.free t0; t0
let _cast_short self ~non_blocking = let out__ = CArray.make t 1 in stubs__cast_short (CArray.start out__) self (if non_blocking then 1 else 0); let t0 = CArray.get out__ 0 in Gc.finalise C.Tensor.free t0; t0
let _cat tensors ~dim = let out__ = CArray.make t 1 in stubs__cat (CArray.start out__) (CArray.of_list t tensors |> CArray.start) (List.length tensors) (Int64.of_int dim); let t0 = CArray.get out__ 0 in Gc.finalise C.Tensor.free t0; t0
let _cat_out ~out tensors ~dim = let out__ = CArray.make t 1 in stubs__cat_out (CArray.start out__) out (CArray.of_list t tensors |> CArray.start) (List.length tensors) (Int64.of_int dim); let t0 = CArray.get out__ 0 in Gc.finalise C.Tensor.free t0; t0
let _cdist_backward ~grad ~x1 ~x2 ~p ~cdist = let out__ = CArray.make t 1 in stubs__cdist_backward (CArray.start out__) grad x1 x2 p cdist; let t0 = CArray.get out__ 0 in Gc.finalise C.Tensor.free t0; t0
let _cholesky_solve_helper self ~a ~upper = let out__ = CArray.make t 1 in stubs__cholesky_solve_helper (CArray.start out__) self a (if upper then 1 else 0); let t0 = CArray.get out__ 0 in Gc.finalise C.Tensor.free t0; t0
let _coalesce self = let out__ = CArray.make t 1 in stubs__coalesce (CArray.start out__) self; let t0 = CArray.get out__ 0 in Gc.finalise C.Tensor.free t0; t0
let _coalesced_ self ~coalesced = let out__ = CArray.make t 1 in stubs__coalesced_ (CArray.start out__) self (if coalesced then 1 else 0); let t0 = CArray.get out__ 0 in Gc.finalise C.Tensor.free t0; t0
let _compute_linear_combination input ~coefficients = let out__ = CArray.make t 1 in stubs__compute_linear_combination (CArray.start out__) input coefficients; let t0 = CArray.get out__ 0 in Gc.finalise C.Tensor.free t0; t0
let _compute_linear_combination_out ~out input ~coefficients = let out__ = CArray.make t 1 in stubs__compute_linear_combination_out (CArray.start out__) out input coefficients; let t0 = CArray.get out__ 0 in Gc.finalise C.Tensor.free t0; t0
let _conj self = let out__ = CArray.make t 1 in stubs__conj (CArray.start out__) self; let t0 = CArray.get out__ 0 in Gc.finalise C.Tensor.free t0; t0
let _conj_physical self = let out__ = CArray.make t 1 in stubs__conj_physical (CArray.start out__) self; let t0 = CArray.get out__ 0 in Gc.finalise C.Tensor.free t0; t0

let _conv_depthwise2d self ~weight ~kernel_size ~bias ~stride ~padding ~dilation =
  let out__ = CArray.make t 1 in
  stubs__conv_depthwise2d (CArray.start out__) self weight
    (List.map Int64.of_int kernel_size |> CArray.of_list int64_t |> CArray.start)
    (List.length kernel_size)
    (match bias with | Some v -> v | None -> null)
    (List.map Int64.of_int stride |> CArray.of_list int64_t |> CArray.start)
    (List.length stride)
    (List.map Int64.of_int padding |> CArray.of_list int64_t |> CArray.start)
    (List.length padding)
    (List.map Int64.of_int dilation |> CArray.of_list int64_t |> CArray.start)
    (List.length dilation);
  let t0 = CArray.get out__ 0 in
  Gc.finalise C.Tensor.free t0;
  t0

let _conv_depthwise2d_backward ~grad_input ~grad_weight ~grad_output self ~weight ~kernel_size ~stride ~padding ~dilation =
  let out__ = CArray.make t 2 in
  stubs__conv_depthwise2d_backward (CArray.start out__) grad_input grad_weight grad_output self weight
    (List.map Int64.of_int kernel_size |> CArray.of_list int64_t |> CArray.start)
    (List.length kernel_size)
    (List.map Int64.of_int stride |> CArray.of_list int64_t |> CArray.start)
    (List.length stride)
    (List.map Int64.of_int padding |> CArray.of_list int64_t |> CArray.start)
    (List.length padding)
    (List.map Int64.of_int dilation |> CArray.of_list int64_t |> CArray.start)
    (List.length dilation);
  let t0 = CArray.get out__ 0 in
  Gc.finalise C.Tensor.free t0;
  let t1 = CArray.get out__ 1 in
  Gc.finalise C.Tensor.free t1;
  t0, t1

let _conv_depthwise2d_out ~out self ~weight ~kernel_size ~bias ~stride ~padding ~dilation =
  let out__ = CArray.make t 1 in
  stubs__conv_depthwise2d_out (CArray.start out__) out self weight
    (List.map Int64.of_int kernel_size |> CArray.of_list int64_t |> CArray.start)
    (List.length kernel_size)
    (match bias with | Some v -> v | None -> null)
    (List.map Int64.of_int stride |> CArray.of_list int64_t |> CArray.start)
    (List.length stride)
    (List.map Int64.of_int padding |> CArray.of_list int64_t |> CArray.start)
    (List.length padding)
    (List.map Int64.of_int dilation |> CArray.of_list int64_t |> CArray.start)
    (List.length dilation);
  let t0 = CArray.get out__ 0 in
  Gc.finalise C.Tensor.free t0;
  t0

let _convert_indices_from_coo_to_csr self ~size ~out_int32 = let out__ = CArray.make t 1 in stubs__convert_indices_from_coo_to_csr (CArray.start out__) self (Int64.of_int size) (if out_int32 then 1 else 0); let t0 = CArray.get out__ 0 in Gc.finalise C.Tensor.free t0; t0
let _convert_indices_from_coo_to_csr_out ~out self ~size ~out_int32 = let out__ = CArray.make t 1 in stubs__convert_indices_from_coo_to_csr_out (CArray.start out__) out self (Int64.of_int size) (if out_int32 then 1 else 0); let t0 = CArray.get out__ 0 in Gc.finalise C.Tensor.free t0; t0
let _convolution input ~weight ~bias ~stride ~padding ~dilation ~transposed ~output_padding ~groups ~benchmark ~deterministic ~cudnn_enabled ~allow_tf32 =
  let out__ = CArray.make t 1 in
  stubs__convolution (CArray.start out__) input weight
    (match bias with | Some v -> v | None -> null)
    (List.map Int64.of_int stride |> CArray.of_list int64_t |> CArray.start)
    (List.length stride)
    (List.map Int64.of_int padding |> CArray.of_list int64_t |> CArray.start)
    (List.length padding)
    (List.map Int64.of_int dilation |> CArray.of_list int64_t |> CArray.start)
    (List.length dilation)
    (if transposed then 1 else 0)
    (List.map Int64.of_int output_padding |> CArray.of_list int64_t |> CArray.start)
    (List.length output_padding)
    (Int64.of_int groups)
    (if benchmark then 1 else 0)
    (if deterministic then 1 else 0)
    (if cudnn_enabled then 1 else 0)
    (if allow_tf32 then 1 else 0);
  let t0 = CArray.get out__ 0 in
  Gc.finalise C.Tensor.free t0;
  t0

let _convolution_deprecated input ~weight ~bias ~stride ~padding ~dilation ~transposed ~output_padding ~groups ~benchmark ~deterministic ~cudnn_enabled =
  let out__ = CArray.make t 1 in
  stubs__convolution_deprecated (CArray.start out__) input weight
    (match bias with | Some v -> v | None -> null)
    (List.map Int64.of_int stride |> CArray.of_list int64_t |> CArray.start)
    (List.length stride)
    (List.map Int64.of_int padding |> CArray.of_list int64_t |> CArray.start)
    (List.length padding)
    (List.map Int64.of_int dilation |> CArray.of_list int64_t |> CArray.start)
    (List.length dilation)
    (if transposed then 1 else 0)
    (List.map Int64.of_int output_padding |> CArray.of_list int64_t |> CArray.start)
    (List.length output_padding)
    (Int64.of_int groups)
    (if benchmark then 1 else 0)
    (if deterministic then 1 else 0)
    (if cudnn_enabled then 1 else 0);
  let t0 = CArray.get out__ 0 in
  Gc.finalise C.Tensor.free t0;
  t0

let _convolution_mode input ~weight ~bias ~stride ~padding ~dilation ~groups =
  let out__ = CArray.make t 1 in
  stubs__convolution_mode (CArray.start out__) input weight
    (match bias with | Some v -> v | None -> null)
    (List.map Int64.of_int stride |> CArray.of_list int64_t |> CArray.start)
    (List.length stride)
    padding
    (List.map Int64.of_int dilation |> CArray.of_list int64_t |> CArray.start)
    (List.length dilation)
    (Int64.of_int groups);
  let t0 = CArray.get out__ 0 in
  Gc.finalise C.Tensor.free t0;
  t0

let _convolution_nogroup input ~weight ~bias ~stride ~padding ~dilation ~transposed ~output_padding =
  let out__ = CArray.make t 1 in
  stubs__convolution_nogroup (CArray.start out__) input weight
    (match bias with | Some v -> v | None -> null)
    (List.map Int64.of_int stride |> CArray.of_list int64_t |> CArray.start)
    (List.length stride)
    (List.map Int64.of_int padding |> CArray.of_list int64_t |> CArray.start)
    (List.length padding)
    (List.map Int64.of_int dilation |> CArray.of_list int64_t |> CArray.start)
    (List.length dilation)
    (if transposed then 1 else 0)
    (List.map Int64.of_int output_padding |> CArray.of_list int64_t |> CArray.start)
    (List.length output_padding);
  let t0 = CArray.get out__ 0 in
  Gc.finalise C.Tensor.free t0;
  t0

let _copy_from self ~dst ~non_blocking = let out__ = CArray.make t 1 in stubs__copy_from (CArray.start out__) self dst (if non_blocking then 1 else 0); let t0 = CArray.get out__ 0 in Gc.finalise C.Tensor.free t0; t0
let _copy_from_and_resize self ~dst = let out__ = CArray.make t 1 in stubs__copy_from_and_resize (CArray.start out__) self dst; let t0 = CArray.get out__ 0 in Gc.finalise C.Tensor.free t0; t0

let _ctc_loss ~log_probs ~targets ~input_lengths ~target_lengths ~blank ~zero_infinity =
  let out__ = CArray.make t 2 in
  stubs__ctc_loss (CArray.start out__) log_probs targets
    (List.map Int64.of_int input_lengths |> CArray.of_list int64_t |> CArray.start)
    (List.length input_lengths)
    (List.map Int64.of_int target_lengths |> CArray.of_list int64_t |> CArray.start)
    (List.length target_lengths)
    (Int64.of_int blank)
    (if zero_infinity then 1 else 0);
  let t0 = CArray.get out__ 0 in
  Gc.finalise C.Tensor.free t0;
  let t1 = CArray.get out__ 1 in
  Gc.finalise C.Tensor.free t1;
  t0, t1

let _ctc_loss_backward ~grad ~log_probs ~targets ~input_lengths ~target_lengths ~neg_log_likelihood ~log_alpha ~blank ~zero_infinity =
  let out__ = CArray.make t 1 in
  stubs__ctc_loss_backward (CArray.start out__) grad log_probs targets
    (List.map Int64.of_int input_lengths |> CArray.of_list int64_t |> CArray.start)
    (List.length input_lengths)
    (List.map Int64.of_int target_lengths |> CArray.of_list int64_t |> CArray.start)
    (List.length target_lengths)
    neg_log_likelihood log_alpha
    (Int64.of_int blank)
    (if zero_infinity then 1 else 0);
  let t0 = CArray.get out__ 0 in
  Gc.finalise C.Tensor.free t0;
  t0

let _cudnn_ctc_loss ~log_probs ~targets ~input_lengths ~target_lengths ~blank ~deterministic ~zero_infinity =
  let out__ = CArray.make t 2 in
  stubs__cudnn_ctc_loss (CArray.start out__) log_probs targets
    (List.map Int64.of_int input_lengths |> CArray.of_list int64_t |> CArray.start)
    (List.length input_lengths)
    (List.map Int64.of_int target_lengths |> CArray.of_list int64_t |> CArray.start)
    (List.length target_lengths)
    (Int64.of_int blank)
    (if deterministic then 1 else 0)
    (if zero_infinity then 1 else 0);
  let t0 = CArray.get out__ 0 in
  Gc.finalise C.Tensor.free t0;
  let t1 = CArray.get out__ 1 in
  Gc.finalise C.Tensor.free t1;
  t0, t1

let _cudnn_init_dropout_state ~dropout ~train ~dropout_seed ~options =
  let out__ = CArray.make t 1 in
  stubs__cudnn_init_dropout_state (CArray.start out__) dropout
    (if train then 1 else 0)
    (Int64.of_int dropout_seed)
    (Kind.packed_to_int (fst options))
    (Device.to_int (snd options));
  let t0 = CArray.get out__ 0 in
  Gc.finalise C.Tensor.free t0;
  t0