Columns (all strings; value lengths shown as min - max):

  diff   41 - 2.03M
  msg    1 - 1.5k
  repo   5 - 40
  sha    40 - 40
  time   20 - 20
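Each record below is a (diff, msg, repo, sha, time) tuple. The diff field is stored pre-tokenized: the literal token ` <nl> ` stands for a newline, spaces are inserted around punctuation, and the unified-diff file headers `--- a/...` / `+++ b/...` appear as `mmm a / ...` / `ppp b / ...`. The sketch below is a minimal reader under those assumptions (the helper name is ours, not part of any published loader); it deliberately leaves the lossy de-spacing step to the caller, since the original in-line spacing is not recoverable in general. Note that very long rows in this dump are additionally hard-wrapped mid-token for display, so the physical lines of one record should be concatenated before parsing.

```python
def detokenize_diff(diff: str) -> str:
    """Rejoin a tokenized `diff` row into unified-diff lines.

    Assumes the corpus conventions seen in the samples: ' <nl> '
    encodes a newline, and the 'mmm'/'ppp' markers encode the
    '---'/'+++' file headers. Spacing inside each line is kept
    as stored, because the space-around-punctuation tokenization
    is not reversible without language-aware heuristics.
    """
    lines = []
    for line in diff.split(" <nl> "):
        if line.startswith("mmm "):
            line = "---" + line[3:]   # old-file header: 'mmm a / ...'
        elif line.startswith("ppp "):
            line = "+++" + line[3:]   # new-file header: 'ppp b / ...'
        lines.append(line.rstrip())
    return "\n".join(lines)
```

On the first record below, for example, this recovers the headers `--- / dev / null` and `+++ b / validation - test / ...`, with path components still space-separated until a de-spacing pass is applied.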
new file mode 100644 <nl> index 000000000000 . . 23c2cc5d898d <nl> mmm / dev / null <nl> ppp b / validation - test / compiler_crashers / 28339 - swift - typechecker - addimplicitconstructors . swift <nl> <nl> + / / This source file is part of the Swift . org open source project <nl> + / / Copyright ( c ) 2014 - 2016 Apple Inc . and the Swift project authors <nl> + / / Licensed under Apache License v2 . 0 with Runtime Library Exception <nl> + / / <nl> + / / See http : / / swift . org / LICENSE . txt for license information <nl> + / / See http : / / swift . org / CONTRIBUTORS . txt for the list of Swift project authors <nl> + <nl> + / / RUN : not - - crash % target - swift - frontend % s - parse <nl> + / / REQUIRES : asserts <nl> + var d { protocol A { { } class a : A { } typealias e : A . E { { } } class A : a <nl>
msg:  [swiftc] Add 💥 case (😢 → 54, 😀 → 5095) triggered in swift::TypeChecker::addImplicitConstructors(…)
repo: apple/swift
sha:  611c6f577ca832be1b8efb9bf9c3ddf5dd09ce32
time: 2016-07-02T04:42:06Z

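For illustration, the diff field of the record above, run through the sketch and with path and comment spacing collapsed by hand, recovers a new-file diff adding a compiler-crasher test case. The spacing of the final fuzzer-generated Swift line is kept as stored, since its original form is not recoverable from the tokenization:

```diff
new file mode 100644
index 000000000000..23c2cc5d898d
--- /dev/null
+++ b/validation-test/compiler_crashers/28339-swift-typechecker-addimplicitconstructors.swift

+// This source file is part of the Swift.org open source project
+// Copyright (c) 2014 - 2016 Apple Inc. and the Swift project authors
+// Licensed under Apache License v2.0 with Runtime Library Exception
+//
+// See http://swift.org/LICENSE.txt for license information
+// See http://swift.org/CONTRIBUTORS.txt for the list of Swift project authors
+
+// RUN: not --crash %target-swift-frontend %s -parse
+// REQUIRES: asserts
+var d { protocol A { { } class a : A { } typealias e : A . E { { } } class A : a
```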
mmm a / cocos / 2d / CCLabel . cpp <nl> ppp b / cocos / 2d / CCLabel . cpp <nl> void Label : : createShadowSpriteForSystemFont ( ) <nl> { <nl> _shadowNode - > setBlendFunc ( _blendFunc ) ; <nl> } <nl> + _shadowNode - > setCameraMask ( getCameraMask ( ) ) ; <nl> _shadowNode - > setAnchorPoint ( Vec2 : : ANCHOR_BOTTOM_LEFT ) ; <nl> _shadowNode - > setPosition ( _shadowOffset . width , _shadowOffset . height ) ; <nl> Node : : addChild ( _shadowNode , 0 , Node : : INVALID_TAG ) ; <nl> void Label : : visit ( Renderer * renderer , const Mat4 & parentTransform , uint32_t pare <nl> _shadowDirty = false ; <nl> } <nl> <nl> - if ( ! isVisitableByVisitingCamera ( ) ) <nl> + if ( ! _textSprite & & ! isVisitableByVisitingCamera ( ) ) <nl> { <nl> return ; <nl> } <nl> void Label : : visit ( Renderer * renderer , const Mat4 & parentTransform , uint32_t pare <nl> / / IMPORTANT : <nl> / / To ease the migration to v3 . 0 , we still support the Mat4 stack , <nl> / / but it is deprecated and your code should not rely on it <nl> - Director * director = Director : : getInstance ( ) ; <nl> - CCASSERT ( nullptr ! = director , " Director is null when seting matrix stack " ) ; <nl> + _director - > pushMatrix ( MATRIX_STACK_TYPE : : MATRIX_STACK_MODELVIEW ) ; <nl> + _director - > loadMatrix ( MATRIX_STACK_TYPE : : MATRIX_STACK_MODELVIEW , _modelViewTransform ) ; <nl> <nl> - director - > pushMatrix ( MATRIX_STACK_TYPE : : MATRIX_STACK_MODELVIEW ) ; <nl> - director - > loadMatrix ( MATRIX_STACK_TYPE : : MATRIX_STACK_MODELVIEW , _modelViewTransform ) ; <nl> - <nl> - <nl> if ( _textSprite ) <nl> { <nl> if ( _shadowNode ) <nl> void Label : : visit ( Renderer * renderer , const Mat4 & parentTransform , uint32_t pare <nl> draw ( renderer , _modelViewTransform , flags ) ; <nl> } <nl> <nl> - director - > popMatrix ( MATRIX_STACK_TYPE : : MATRIX_STACK_MODELVIEW ) ; <nl> + _director - > popMatrix ( MATRIX_STACK_TYPE : : MATRIX_STACK_MODELVIEW ) ; <nl> <nl> / / FIX ME : Why need to set _orderOfArrival to 0 ? ? <nl> / / Please refer to https : / / github . com / cocos2d / cocos2d - x / pull / 6920 <nl>
msg:  Merge pull request from WenhaiLin/v3-label-sf-position
repo: cocos2d/cocos2d-x
sha:  d246c33974ad95c7a14275da5ecfb0cbe0b56b6a
time: 2015-04-14T01:59:09Z

mmm a / src / assembler . cc <nl> ppp b / src / assembler . cc <nl> double power_double_int ( double x , int y ) { <nl> <nl> <nl> double power_double_double ( double x , double y ) { <nl> - # if ( defined ( __MINGW64_VERSION_MAJOR ) & & \ <nl> - ( ! defined ( __MINGW64_VERSION_RC ) | | __MINGW64_VERSION_RC < 1 ) ) | | \ <nl> - defined ( V8_OS_AIX ) <nl> - / / MinGW64 and AIX have a custom implementation for pow . This handles certain <nl> - / / special cases that are different . <nl> - if ( ( x = = 0 . 0 | | std : : isinf ( x ) ) & & y ! = 0 . 0 & & std : : isfinite ( y ) ) { <nl> - double f ; <nl> - double result = ( ( x = = 0 . 0 ) ^ ( y > 0 ) ) ? V8_INFINITY : 0 ; <nl> - / * retain sign if odd integer exponent * / <nl> - return ( ( std : : modf ( y , & f ) = = 0 . 0 ) & & ( static_cast < int64_t > ( y ) & 1 ) ) <nl> - ? copysign ( result , x ) <nl> - : result ; <nl> - } <nl> - <nl> - if ( x = = 2 . 0 ) { <nl> - int y_int = static_cast < int > ( y ) ; <nl> - if ( y = = y_int ) { <nl> - return std : : ldexp ( 1 . 0 , y_int ) ; <nl> - } <nl> - } <nl> - # endif <nl> - <nl> / / The checks for special cases can be dropped in ia32 because it has already <nl> / / been done in generated code before bailing out here . <nl> if ( std : : isnan ( y ) | | ( ( x = = 1 | | x = = - 1 ) & & std : : isinf ( y ) ) ) { <nl> return std : : numeric_limits < double > : : quiet_NaN ( ) ; <nl> } <nl> - return std : : pow ( x , y ) ; <nl> + return Pow ( x , y ) ; <nl> } <nl> <nl> <nl> mmm a / src / parsing / parser . cc <nl> ppp b / src / parsing / parser . cc <nl> bool ParserTraits : : ShortcutNumericLiteralBinaryExpression ( <nl> return true ; <nl> } <nl> case Token : : EXP : { <nl> - double value = std : : pow ( x_val , y_val ) ; <nl> + double value = Pow ( x_val , y_val ) ; <nl> int int_value = static_cast < int > ( value ) ; <nl> * x = factory - > NewNumberLiteral ( <nl> int_value = = value & & value ! = - 0 . 0 ? int_value : value , pos , <nl> mmm a / src / utils . h <nl> ppp b / src / utils . h <nl> inline double Floor ( double x ) { <nl> return std : : floor ( x ) ; <nl> } <nl> <nl> + inline double Pow ( double x , double y ) { <nl> + # if ( defined ( __MINGW64_VERSION_MAJOR ) & & \ <nl> + ( ! defined ( __MINGW64_VERSION_RC ) | | __MINGW64_VERSION_RC < 1 ) ) | | \ <nl> + defined ( V8_OS_AIX ) <nl> + / / MinGW64 and AIX have a custom implementation for pow . This handles certain <nl> + / / special cases that are different . <nl> + if ( ( x = = 0 . 0 | | std : : isinf ( x ) ) & & y ! = 0 . 0 & & std : : isfinite ( y ) ) { <nl> + double f ; <nl> + double result = ( ( x = = 0 . 0 ) ^ ( y > 0 ) ) ? V8_INFINITY : 0 ; <nl> + / * retain sign if odd integer exponent * / <nl> + return ( ( std : : modf ( y , & f ) = = 0 . 0 ) & & ( static_cast < int64_t > ( y ) & 1 ) ) <nl> + ? copysign ( result , x ) <nl> + : result ; <nl> + } <nl> + <nl> + if ( x = = 2 . 0 ) { <nl> + int y_int = static_cast < int > ( y ) ; <nl> + if ( y = = y_int ) { <nl> + return std : : ldexp ( 1 . 0 , y_int ) ; <nl> + } <nl> + } <nl> + # endif <nl> + return std : : pow ( x , y ) ; <nl> + } <nl> <nl> / / TODO ( svenpanne ) Clean up the whole power - of - 2 mess . <nl> inline int32_t WhichPowerOf2Abs ( int32_t x ) { <nl>
msg:  [es7] Fix "implement exponentiation operator proposal" for AIX.
repo: v8/v8
sha:  2e4280f25af492dd5dd4bd57c621c15082b7f28c
time: 2016-04-25T19:35:22Z

mmm a / src / libsampler / sampler . cc <nl> ppp b / src / libsampler / sampler . cc <nl> void * ThreadKey ( pthread_t thread_id ) { <nl> <nl> / / Returns hash value for hash map . <nl> uint32_t ThreadHash ( pthread_t thread_id ) { <nl> - # if V8_OS_MACOSX <nl> + # if V8_OS_BSD <nl> return static_cast < uint32_t > ( reinterpret_cast < intptr_t > ( thread_id ) ) ; <nl> # else <nl> return static_cast < uint32_t > ( thread_id ) ; <nl>
msg:  Fix compilation on BSD platforms
repo: v8/v8
sha:  bcac03e69bba48218671768e7fea85ca7246d3c0
time: 2016-08-25T12:28:53Z

mmm a / dbms / src / Processors / Executors / ParallelPipelineExecutor . cpp <nl> ppp b / dbms / src / Processors / Executors / ParallelPipelineExecutor . cpp <nl> <nl> # include < Common / EventCounter . h > <nl> - # include < common / ThreadPool . h > <nl> + # include < Common / ThreadPool . h > <nl> # include < Processors / Executors / ParallelPipelineExecutor . h > <nl> # include < Processors / Executors / traverse . h > <nl> <nl> ParallelPipelineExecutor : : Status ParallelPipelineExecutor : : prepare ( ) <nl> for ( auto & element : processors ) <nl> { <nl> if ( element - > prepare ( ) = = Status : : NeedData ) <nl> - throw Exception ( " Pipeline stuck : " + element - > getName ( ) + " processor needs input data but no one is going to generate it " ) ; <nl> + throw Exception ( " Pipeline stuck : " + element - > getName ( ) + " processor needs input data but no one is going to generate it " , ErrorCodes : : LOGICAL_ERROR ) ; <nl> if ( element - > prepare ( ) = = Status : : PortFull ) <nl> - throw Exception ( " Pipeline stuck : " + element - > getName ( ) + " processor has data in output port but no one is going to consume it " ) ; <nl> + throw Exception ( " Pipeline stuck : " + element - > getName ( ) + " processor has data in output port but no one is going to consume it " , ErrorCodes : : LOGICAL_ERROR ) ; <nl> } <nl> <nl> return Status : : Finished ; <nl> ParallelPipelineExecutor : : Status ParallelPipelineExecutor : : prepare ( ) <nl> void ParallelPipelineExecutor : : schedule ( EventCounter & watch ) <nl> { <nl> if ( ! current_processor ) <nl> - throw Exception ( " Bad pipeline " ) ; <nl> + throw Exception ( " Bad pipeline " , ErrorCodes : : LOGICAL_ERROR ) ; <nl> <nl> if ( current_status = = Status : : Async ) <nl> { <nl> mmm a / dbms / src / Processors / Executors / ParallelPipelineExecutor . h <nl> ppp b / dbms / src / Processors / Executors / ParallelPipelineExecutor . h <nl> <nl> # include < mutex > <nl> # include < Processors / IProcessor . h > <nl> <nl> - <nl> - class ThreadPool ; <nl> + template < typename > <nl> + class ThreadPoolImpl ; <nl> + class ThreadFromGlobalPool ; <nl> + using ThreadPool = ThreadPoolImpl < ThreadFromGlobalPool > ; <nl> <nl> namespace DB <nl> { <nl> mmm a / dbms / src / Processors / Executors / SequentialPipelineExecutor . cpp <nl> ppp b / dbms / src / Processors / Executors / SequentialPipelineExecutor . cpp <nl> SequentialPipelineExecutor : : Status SequentialPipelineExecutor : : prepare ( ) <nl> for ( auto & element : processors ) <nl> { <nl> if ( element - > prepare ( ) = = Status : : NeedData ) <nl> - throw Exception ( " Pipeline stuck : " + element - > getName ( ) + " processor needs input data but no one is going to generate it " ) ; <nl> + throw Exception ( " Pipeline stuck : " + element - > getName ( ) + " processor needs input data but no one is going to generate it " , ErrorCodes : : LOGICAL_ERROR ) ; <nl> if ( element - > prepare ( ) = = Status : : PortFull ) <nl> - throw Exception ( " Pipeline stuck : " + element - > getName ( ) + " processor has data in output port but no one is going to consume it " ) ; <nl> + throw Exception ( " Pipeline stuck : " + element - > getName ( ) + " processor has data in output port but no one is going to consume it " , ErrorCodes : : LOGICAL_ERROR ) ; <nl> } <nl> <nl> return Status : : Finished ; <nl> SequentialPipelineExecutor : : Status SequentialPipelineExecutor : : prepare ( ) <nl> void SequentialPipelineExecutor : : work ( ) <nl> { <nl> if ( ! 
current_processor ) <nl> - throw Exception ( " Bad pipeline " ) ; <nl> + throw Exception ( " Bad pipeline " , ErrorCodes : : LOGICAL_ERROR ) ; <nl> <nl> current_processor - > work ( ) ; <nl> } <nl> void SequentialPipelineExecutor : : work ( ) <nl> void SequentialPipelineExecutor : : schedule ( EventCounter & watch ) <nl> { <nl> if ( ! current_processor ) <nl> - throw Exception ( " Bad pipeline " ) ; <nl> + throw Exception ( " Bad pipeline " , ErrorCodes : : LOGICAL_ERROR ) ; <nl> <nl> current_processor - > schedule ( watch ) ; <nl> } <nl> mmm a / dbms / src / Processors / Formats / IInputFormat . h <nl> ppp b / dbms / src / Processors / Formats / IInputFormat . h <nl> class ReadBuffer ; <nl> class IInputFormat : public ISource <nl> { <nl> private : <nl> + <nl> + / / / Skip GCC warning : ‘ maybe_unused ’ attribute ignored <nl> + # pragma GCC diagnostic push <nl> + # pragma GCC diagnostic ignored " - Wattributes " <nl> + <nl> ReadBuffer & in [ [ maybe_unused ] ] ; <nl> <nl> + # pragma GCC diagnostic pop <nl> + <nl> public : <nl> IInputFormat ( Block header , ReadBuffer & in ) <nl> : ISource ( std : : move ( header ) ) , in ( in ) <nl> mmm a / dbms / src / Processors / Formats / IRowOutputFormat . h <nl> ppp b / dbms / src / Processors / Formats / IRowOutputFormat . h <nl> class IRowOutputFormat : public IOutputFormat <nl> <nl> public : <nl> IRowOutputFormat ( Block header , WriteBuffer & out ) <nl> - : IOutputFormat ( header , out ) , types ( header . getTypes ( ) ) <nl> + : IOutputFormat ( header , out ) , types ( header . getDataTypes ( ) ) <nl> { <nl> } <nl> <nl> mmm a / dbms / src / Processors / IProcessor . h <nl> ppp b / dbms / src / Processors / IProcessor . h <nl> class IProcessor <nl> OutputPorts outputs ; <nl> <nl> public : <nl> - IProcessor ( ) { } <nl> + IProcessor ( ) = default ; <nl> <nl> IProcessor ( InputPorts inputs_ , OutputPorts outputs_ ) <nl> : inputs ( std : : move ( inputs_ ) ) , outputs ( std : : move ( outputs_ ) ) <nl> class IProcessor <nl> * / <nl> virtual void work ( ) <nl> { <nl> - throw Exception ( " Method ' work ' is not implemented for " + getName ( ) + " processor " ) ; <nl> + throw Exception ( " Method ' work ' is not implemented for " + getName ( ) + " processor " , ErrorCodes : : NOT_IMPLEMENTED ) ; <nl> } <nl> <nl> / * * You may call this method if ' prepare ' returned Async . <nl> class IProcessor <nl> * / <nl> virtual void schedule ( EventCounter & / * watch * / ) <nl> { <nl> - throw Exception ( " Method ' schedule ' is not implemented for " + getName ( ) + " processor " ) ; <nl> + throw Exception ( " Method ' schedule ' is not implemented for " + getName ( ) + " processor " , ErrorCodes : : NOT_IMPLEMENTED ) ; <nl> } <nl> <nl> - virtual ~ IProcessor ( ) { } <nl> + virtual ~ IProcessor ( ) = default ; <nl> <nl> auto & getInputs ( ) { return inputs ; } <nl> auto & getOutputs ( ) { return outputs ; } <nl> mmm a / dbms / src / Processors / Port . cpp <nl> ppp b / dbms / src / Processors / Port . cpp <nl> namespace DB <nl> void connect ( OutputPort & output , InputPort & input ) <nl> { <nl> if ( input . state | | output . state ) <nl> - throw Exception ( " Port is already connected " ) ; <nl> + throw Exception ( " Port is already connected " , ErrorCodes : : LOGICAL_ERROR ) ; <nl> <nl> input . output_port = & output ; <nl> output . input_port = & input ; <nl> mmm a / dbms / src / Processors / Port . h <nl> ppp b / dbms / src / Processors / Port . h <nl> class Port <nl> void assumeConnected ( ) const <nl> { <nl> if ( ! 
isConnected ( ) ) <nl> - throw Exception ( " Port is not connected " ) ; <nl> + throw Exception ( " Port is not connected " , ErrorCodes : : LOGICAL_ERROR ) ; <nl> } <nl> <nl> bool hasData ( ) const <nl> class Port <nl> IProcessor & getProcessor ( ) <nl> { <nl> if ( ! processor ) <nl> - throw Exception ( " Port does not belong to Processor " ) ; <nl> + throw Exception ( " Port does not belong to Processor " , ErrorCodes : : LOGICAL_ERROR ) ; <nl> return * processor ; <nl> } <nl> <nl> const IProcessor & getProcessor ( ) const <nl> { <nl> if ( ! processor ) <nl> - throw Exception ( " Port does not belong to Processor " ) ; <nl> + throw Exception ( " Port does not belong to Processor " , ErrorCodes : : LOGICAL_ERROR ) ; <nl> return * processor ; <nl> } <nl> } ; <nl> class InputPort : public Port <nl> Block pull ( ) <nl> { <nl> if ( ! hasData ( ) ) <nl> - throw Exception ( " Port has no data " ) ; <nl> + throw Exception ( " Port has no data " , ErrorCodes : : LOGICAL_ERROR ) ; <nl> <nl> return std : : move ( state - > data ) ; <nl> } <nl> class OutputPort : public Port <nl> void push ( Block block ) <nl> { <nl> if ( hasData ( ) ) <nl> - throw Exception ( " Port already has data " ) ; <nl> + throw Exception ( " Port already has data " , ErrorCodes : : LOGICAL_ERROR ) ; <nl> <nl> state - > data = std : : move ( block ) ; <nl> } <nl> mmm a / dbms / src / Processors / tests / processors_test . cpp <nl> ppp b / dbms / src / Processors / tests / processors_test . cpp <nl> <nl> # include < Processors / printPipeline . h > <nl> <nl> # include < Columns / ColumnsNumber . h > <nl> - # include < common / ThreadPool . h > <nl> + # include < Common / ThreadPool . h > <nl> # include < Common / EventCounter . h > <nl> # include < DataTypes / DataTypesNumber . h > <nl> # include < IO / WriteBufferFromFileDescriptor . h > <nl> class SleepyNumbersSource : public IProcessor <nl> OutputPort & getPort ( ) { return output ; } <nl> <nl> private : <nl> - ThreadPool pool { 1 } ; <nl> + ThreadPool pool { 1 , 1 , 0 } ; <nl> Block current_block ; <nl> std : : atomic_bool active { false } ; <nl> <nl> class PrintSink : public ISink <nl> private : <nl> String prefix ; <nl> WriteBufferFromFileDescriptor out { STDOUT_FILENO } ; <nl> + FormatSettings settings ; <nl> <nl> void consume ( Block block ) override <nl> { <nl> class PrintSink : public ISink <nl> { <nl> if ( column_num ! = 0 ) <nl> writeChar ( ' \ t ' , out ) ; <nl> - getPort ( ) . getHeader ( ) . getByPosition ( column_num ) . type - > serializeText ( * block . getByPosition ( column_num ) . column , row_num , out ) ; <nl> + getPort ( ) . getHeader ( ) . getByPosition ( column_num ) . type - > serializeText ( * block . getByPosition ( column_num ) . column , row_num , out , settings ) ; <nl> } <nl> writeChar ( ' \ n ' , out ) ; <nl> } <nl> try <nl> <nl> printPipeline ( { source0 , source1 , source2 , source3 , source4 , limit0 , limit3 , limit4 , limit , queue , concat , fork , print_after_concat , resize , sink } ) ; <nl> <nl> - ThreadPool pool ( 4 , 10 ) ; <nl> + ThreadPool pool ( 4 , 4 , 10 ) ; <nl> ParallelPipelineExecutor executor ( { sink , print_after_concat } , pool ) ; <nl> / / SequentialPipelineExecutor executor ( { sink } ) ; <nl> <nl> try <nl> else if ( status = = IProcessor : : Status : : Wait ) <nl> watch . wait ( ) ; <nl> else <nl> - throw Exception ( " Bad status " ) ; <nl> + throw Exception ( " Bad status " , ErrorCodes : : LOGICAL_ERROR ) ; <nl> } <nl> <nl> return 0 ; <nl>
msg:  Fix build.
repo: ClickHouse/ClickHouse
sha:  904647f02e7be5fc2024e1ba28f845f352a3ba5a
time: 2019-02-15T16:18:12Z

mmm a / third_party / mlir / BUILD <nl> ppp b / third_party / mlir / BUILD <nl> cc_binary ( <nl> ) <nl> <nl> cc_binary ( <nl> - name = " tools / libcuda - runtime - wrappers . so " , <nl> + name = " tools / mlir - cuda - runner / libcuda - runtime - wrappers . so " , <nl> srcs = [ " tools / mlir - cuda - runner / cuda - runtime - wrappers . cpp " ] , <nl> linkshared = True , <nl> deps = [ <nl> cc_binary ( <nl> name = " mlir - cuda - runner " , <nl> srcs = [ " tools / mlir - cuda - runner / mlir - cuda - runner . cpp " ] , <nl> data = [ <nl> - " : tools / libcuda - runtime - wrappers . so " , <nl> + " : tools / mlir - cuda - runner / libcuda - runtime - wrappers . so " , <nl> " @ local_config_mlir / / test / mlir - cpu - runner : libmlir_runner_utils . so " , <nl> ] , <nl> deps = [ <nl> mmm a / third_party / mlir / test / CMakeLists . txt <nl> ppp b / third_party / mlir / test / CMakeLists . txt <nl> llvm_canonicalize_cmake_booleans ( <nl> <nl> # Passed to lit . site . cfg . py . in to set up the path where to find the libraries <nl> # for linalg integration tests . <nl> - set ( MLIR_DIALECT_LINALG_INTEGRATION_TEST_LIB_DIR $ { CMAKE_LIBRARY_OUTPUT_DIRECTORY } ) <nl> + set ( MLIR_TEST_LIB_DIR $ { CMAKE_LIBRARY_OUTPUT_DIRECTORY } ) <nl> <nl> # Passed to lit . site . cfg . py . in to set up the path where to find the libraries <nl> # for the mlir cuda runner tests . <nl> - set ( MLIR_CUDA_WRAPPER_LIBRARY_DIR $ { CMAKE_LIBRARY_OUTPUT_DIRECTORY } ) <nl> + set ( MLIR_TOOLS_LIB_DIR $ { CMAKE_LIBRARY_OUTPUT_DIRECTORY } ) <nl> <nl> configure_lit_site_cfg ( <nl> $ { CMAKE_CURRENT_SOURCE_DIR } / lit . site . cfg . py . in <nl> mmm a / third_party / mlir / test / lit . cfg . py <nl> ppp b / third_party / mlir / test / lit . cfg . py <nl> <nl> ToolSubst ( ' toy - ch3 ' , unresolved = ' ignore ' ) , <nl> ToolSubst ( ' toy - ch4 ' , unresolved = ' ignore ' ) , <nl> ToolSubst ( ' toy - ch5 ' , unresolved = ' ignore ' ) , <nl> - ToolSubst ( ' % linalg_test_lib_dir ' , config . linalg_test_lib_dir , unresolved = ' ignore ' ) , <nl> - ToolSubst ( ' % cuda_wrapper_library_dir ' , config . cuda_wrapper_library_dir , unresolved = ' ignore ' ) <nl> + ToolSubst ( ' % test_lib_dir ' , config . test_lib_dir , unresolved = ' ignore ' ) , <nl> + ToolSubst ( ' % tools_lib_dir ' , config . tools_lib_dir , unresolved = ' ignore ' ) <nl> ] ) <nl> <nl> llvm_config . add_tool_substitutions ( tools , tool_dirs ) <nl> mmm a / third_party / mlir / test / lit . site . cfg . py . in <nl> ppp b / third_party / mlir / test / lit . site . cfg . py . in <nl> config . host_arch = " @ HOST_ARCH @ " <nl> config . mlir_src_root = " @ MLIR_SOURCE_DIR @ " <nl> config . mlir_obj_root = " @ MLIR_BINARY_DIR @ " <nl> config . mlir_tools_dir = " @ MLIR_TOOLS_DIR @ " <nl> - config . linalg_test_lib_dir = " @ MLIR_DIALECT_LINALG_INTEGRATION_TEST_LIB_DIR @ " <nl> + config . test_lib_dir = " @ MLIR_TEST_LIB_DIR @ " <nl> config . build_examples = @ LLVM_BUILD_EXAMPLES @ <nl> config . run_cuda_tests = @ MLIR_CUDA_CONVERSIONS_ENABLED @ <nl> - config . cuda_wrapper_library_dir = " @ MLIR_CUDA_WRAPPER_LIBRARY_DIR @ " <nl> + config . tools_lib_dir = " @ MLIR_TOOLS_LIB_DIR @ " <nl> config . enable_cuda_runner = @ MLIR_CUDA_RUNNER_ENABLED @ <nl> <nl> # Support substitution of the tools_dir with user parameters . This is <nl>
msg:  Changing directory shortcut for CPU/GPU runner utils.
repo: tensorflow/tensorflow
sha:  2968b9175b40c18784b603e5100501b8d7a8f0cb
time: 2019-11-25T21:05:43Z

mmm a / tensorflow / contrib / metrics / kernels / BUILD <nl> ppp b / tensorflow / contrib / metrics / kernels / BUILD <nl> package ( default_visibility = [ " / / tensorflow : __subpackages__ " ] ) <nl> cc_library ( <nl> name = " set_kernels " , <nl> srcs = [ " set_kernels . cc " ] , <nl> + copts = [ " - Wno - sign - compare " ] , <nl> deps = [ <nl> " / / tensorflow / core : framework_headers_lib " , <nl> " / / third_party / eigen3 " , <nl> mmm a / tensorflow / contrib / tfprof / tools / tfprof / internal / BUILD <nl> ppp b / tensorflow / contrib / tfprof / tools / tfprof / internal / BUILD <nl> cc_library ( <nl> name = " tfprof_utils " , <nl> srcs = [ " tfprof_utils . cc " ] , <nl> hdrs = [ " tfprof_utils . h " ] , <nl> + copts = [ " - Wno - sign - compare " ] , <nl> deps = [ <nl> " : tfprof_options " , <nl> " / / tensorflow / core : lib " , <nl> cc_library ( <nl> name = " tfprof_tensor " , <nl> srcs = [ " tfprof_tensor . cc " ] , <nl> hdrs = [ " tfprof_tensor . h " ] , <nl> + copts = [ " - Wno - sign - compare " ] , <nl> deps = [ <nl> " / / tensorflow / contrib / tfprof / tools / tfprof : protos_all_cc " , <nl> " / / tensorflow / core : framework " , <nl> mmm a / tensorflow / core / kernels / BUILD <nl> ppp b / tensorflow / core / kernels / BUILD <nl> cc_library ( <nl> name = " ops_util " , <nl> srcs = [ " ops_util . cc " ] , <nl> hdrs = [ " ops_util . h " ] , <nl> + copts = [ " - Wno - sign - compare " ] , <nl> deps = [ <nl> " / / tensorflow / core : framework " , <nl> " / / tensorflow / core : lib " , <nl> cc_library ( <nl> name = " save_restore_tensor " , <nl> srcs = [ " save_restore_tensor . cc " ] , <nl> hdrs = [ " save_restore_tensor . h " ] , <nl> + copts = [ " - Wno - sign - compare " ] , <nl> deps = [ <nl> " : bounds_check " , <nl> " / / tensorflow / core : framework " , <nl> mmm a / tensorflow / core / platform / default / build_config . bzl <nl> ppp b / tensorflow / core / platform / default / build_config . bzl <nl> def tf_proto_library_cc ( name , srcs = [ ] , has_services = None , <nl> srcs = srcs + tf_deps ( deps , " _proto_srcs " ) , <nl> deps = deps + [ " @ protobuf / / : cc_wkt_protos " ] , <nl> cc_libs = cc_libs + [ " @ protobuf / / : protobuf " ] , <nl> + copts = [ " - Wno - unused - but - set - variable " , " - Wno - sign - compare " ] , <nl> protoc = " @ protobuf / / : protoc " , <nl> default_runtime = " @ protobuf / / : protobuf " , <nl> use_grpc_plugin = use_grpc_plugin , <nl> mmm a / tensorflow / core / util / tensor_bundle / BUILD <nl> ppp b / tensorflow / core / util / tensor_bundle / BUILD <nl> cc_library ( <nl> name = " tensor_bundle " , <nl> srcs = [ " tensor_bundle . cc " ] , <nl> hdrs = [ " tensor_bundle . h " ] , <nl> + copts = [ " - Wno - sign - compare " ] , <nl> deps = [ <nl> " : naming " , <nl> " / / tensorflow / core : core_cpu_internal " , <nl> mmm a / tensorflow / python / BUILD <nl> ppp b / tensorflow / python / BUILD <nl> cc_library ( <nl> cc_binary ( <nl> name = " framework / test_file_system . so " , <nl> srcs = [ " framework / test_file_system . cc " ] , <nl> + copts = [ " - Wno - sign - compare " ] , <nl> linkopts = select ( { <nl> " / / conditions : default " : [ <nl> " - lm " , <nl> cc_library ( <nl> name = " cpp_shape_inference " , <nl> srcs = [ " framework / cpp_shape_inference . cc " ] , <nl> hdrs = [ " framework / cpp_shape_inference . h " ] , <nl> + copts = [ " - Wno - sign - compare " ] , <nl> visibility = [ " / / visibility : public " ] , <nl> deps = [ <nl> " : numpy_lib " , <nl> mmm a / tensorflow / tensorflow . 
bzl <nl> ppp b / tensorflow / tensorflow . bzl <nl> def if_not_mobile ( a ) : <nl> } ) <nl> <nl> def tf_copts ( ) : <nl> - return ( [ " - fno - exceptions " , <nl> - " - DEIGEN_AVOID_STL_ARRAY " , <nl> - " - Iexternal / gemmlowp " , ] + <nl> + return ( [ " - DEIGEN_AVOID_STL_ARRAY " , <nl> + " - Iexternal / gemmlowp " , <nl> + " - Wno - sign - compare " , <nl> + " - fno - exceptions " ] + <nl> if_cuda ( [ " - DGOOGLE_CUDA = 1 " ] ) + <nl> if_android_arm ( [ " - mfpu = neon " ] ) + <nl> select ( { <nl> def tf_copts ( ) : <nl> " - O2 " , <nl> ] , <nl> " / / tensorflow : darwin " : [ ] , <nl> - " / / tensorflow : ios " : [ " - std = c + + 11 " , ] , <nl> + " / / tensorflow : ios " : [ " - std = c + + 11 " ] , <nl> " / / conditions : default " : [ " - pthread " ] } ) ) <nl> <nl> def tf_opts_nortti_if_android ( ) : <nl> def tf_py_wrap_cc ( name , srcs , swig_includes = [ ] , deps = [ ] , copts = [ ] , * * kwargs ) : <nl> native . cc_binary ( <nl> name = cc_library_name , <nl> srcs = [ module_name + " . cc " ] , <nl> - copts = ( copts + [ " - Wno - self - assign " , " - Wno - write - strings " ] <nl> + copts = ( copts + [ " - Wno - self - assign " , <nl> + " - Wno - sign - compare " , <nl> + " - Wno - write - strings " ] <nl> + tf_extension_copts ( ) ) , <nl> linkopts = tf_extension_linkopts ( ) + extra_linkopts , <nl> linkstatic = 1 , <nl> mmm a / zlib . BUILD <nl> ppp b / zlib . BUILD <nl> cc_library ( <nl> " zutil . h " , <nl> ] , <nl> hdrs = [ " zlib . h " ] , <nl> + copts = [ " - Wno - implicit - function - declaration " ] , <nl> includes = [ " . " ] , <nl> ) <nl>
msg:  Suppress many compiler warnings
repo: tensorflow/tensorflow
sha:  91d8a6f6a774ace46aab8c982b9cb44539a5b74a
time: 2016-10-19T04:22:40Z

mmm a / doc / command_line_tool . md <nl> ppp b / doc / command_line_tool . md <nl> $ cmake - DgRPC_BUILD_TESTS = ON . . / . . <nl> Finally you can build the command line tool with the command : <nl> <nl> ` ` ` <nl> + # run from cmake / build directory <nl> $ make grpc_cli <nl> ` ` ` <nl> <nl> - To speed up compilation time on linux , you can use make with following flag : <nl> - <nl> - ` ` ` <nl> - $ make grpc_cli - j $ ( nproc ) <nl> - ` ` ` <nl> - <nl> The main file can be found at <nl> https : / / github . com / grpc / grpc / blob / master / test / cpp / util / grpc_cli . cc <nl> <nl>
msg:  A few more improvements
repo: grpc/grpc
sha:  7740ed281d858cc2674ed64b6632867e9ac0ce8a
time: 2020-05-13T13:51:50Z

mmm a / Happy_number . cpp <nl> ppp b / Happy_number . cpp <nl> <nl> and this sum turns out to be 1 * / <nl> # include < iostream > <nl> using namespace std ; <nl> + <nl> int main ( ) <nl> { <nl> int n , k , s = 0 , d ; <nl> int main ( ) <nl> cout < < n < < " is a happy number " < < endl ; <nl> else <nl> cout < < n < < " is not a happy number " < < endl ; <nl> + return 0 ; <nl> } <nl>
msg:  Merge pull request from rsenwar/patch-6
repo: TheAlgorithms/C-Plus-Plus
sha:  a378f472ee7e541c5c19c9b4719869ee7523873a
time: 2019-02-09T10:19:47Z

mmm a / Source / Common / Include / Sequences . h <nl> ppp b / Source / Common / Include / Sequences . h <nl> namespace Microsoft { namespace MSR { namespace CNTK { <nl> m_numParallelSequences = numParallelSequences ; <nl> m_numTimeSteps = numTimeSteps ; <nl> / / allocate lookup tables ( note : except at the start , these don ' t really allocate new memory most of the time ) <nl> - # if 1 <nl> + # if 0 <nl> if ( ( m_distanceToStart . GetNumRows ( ) ! = m_numParallelSequences | | m_distanceToStart . GetNumCols ( ) ! = m_numTimeSteps ) & & m_numTimeSteps > 0 ) / / sanity check for debugging a regression <nl> fprintf ( stderr , " MBLayout : : Init : Resizing m_distanceToStart from % d x % d to % d x % d \ n " , <nl> ( int ) m_distanceToStart . GetNumRows ( ) , ( int ) m_distanceToStart . GetNumCols ( ) , ( int ) m_numParallelSequences , ( int ) m_numTimeSteps ) ; / / ( I really want to know about actual allocations , but this is a necessary condition for them ) <nl> mmm a / Source / ComputationNetworkLib / LinearAlgebraNodes . h <nl> ppp b / Source / ComputationNetworkLib / LinearAlgebraNodes . h <nl> namespace Microsoft { namespace MSR { namespace CNTK { <nl> <nl> virtual void / * ComputationNode : : * / ForwardProp ( const FrameRange & fr ) override <nl> { <nl> - # if 1 / / TODO : use # if 0 until this is working <nl> + # if 0 / / TODO : use # if 0 until this is working <nl> auto args = GetTensorsForwardBinary ( fr ) ; <nl> args [ 2 ] . DoSumOf ( 0 . 0f , args [ 0 ] , args [ 1 ] , 1 . 0f ) ; <nl> # else <nl>
msg:  disabled the prototypical use of the new tensor addition in the PlusNode again
repo: microsoft/CNTK
sha:  4da5273738c9d440b3125681152c328563456e35
time: 2015-12-19T04:02:29Z

mmm a / code / mathematical - algorithms / exponentiation_power / Exponent . java <nl> ppp b / code / mathematical - algorithms / exponentiation_power / Exponent . java <nl> public static int exponentBySquare ( int num , int power ) { <nl> <nl> return temp ; <nl> } <nl> + public static void main ( String [ ] args ) { <nl> + System . out . println ( exponentBySquare ( 2 , 9 ) ) ; <nl> + } <nl> } <nl> \ No newline at end of file <nl>
msg:  Added main function
repo: OpenGenus/cosmos
sha:  419c4c6bc65ca6456baa5b4ced74c2c77d7dd55a
time: 2017-10-03T15:50:53Z

mmm a / bazel / repositories . bzl <nl> ppp b / bazel / repositories . bzl <nl> def envoy_api_deps ( skip_targets ) : <nl> native . git_repository ( <nl> name = " envoy_api " , <nl> remote = REPO_LOCATIONS [ " envoy_api " ] , <nl> - commit = " 43e63201717bd6498660600d4898f5da7627ad8a " , <nl> + commit = " 86de1f257534b931232629b855a3ddbabda7bdc5 " , <nl> ) <nl> api_bind_targets = [ <nl> " address " , <nl> def envoy_api_deps ( skip_targets ) : <nl> " cds " , <nl> " eds " , <nl> " health_check " , <nl> + " lds " , <nl> " protocol " , <nl> " rds " , <nl> " tls_context " , <nl> mmm a / include / envoy / server / BUILD <nl> ppp b / include / envoy / server / BUILD <nl> envoy_cc_library ( <nl> envoy_cc_library ( <nl> name = " listener_manager_interface " , <nl> hdrs = [ " listener_manager . h " ] , <nl> + external_deps = [ " envoy_lds " ] , <nl> deps = [ <nl> " : drain_manager_interface " , <nl> " : filter_config_interface " , <nl> " : guarddog_interface " , <nl> - " / / include / envoy / json : json_object_interface " , <nl> " / / include / envoy / network : filter_interface " , <nl> " / / include / envoy / network : listen_socket_interface " , <nl> " / / include / envoy / ssl : context_interface " , <nl> + " / / source / common / protobuf " , <nl> ] , <nl> ) <nl> mmm a / include / envoy / server / listener_manager . h <nl> ppp b / include / envoy / server / listener_manager . h <nl> <nl> # pragma once <nl> <nl> - # include " envoy / json / json_object . h " <nl> # include " envoy / network / filter . h " <nl> # include " envoy / network / listen_socket . h " <nl> # include " envoy / server / drain_manager . h " <nl> <nl> # include " envoy / server / guarddog . h " <nl> # include " envoy / ssl / context . h " <nl> <nl> + # include " common / protobuf / protobuf . h " <nl> + <nl> + # include " api / lds . pb . h " <nl> + <nl> namespace Envoy { <nl> namespace Server { <nl> <nl> class ListenerComponentFactory { <nl> <nl> / * * <nl> * Creates a list of filter factories . <nl> - * @ param filters supplies the JSON configuration . <nl> + * @ param filters supplies the proto configuration . <nl> * @ param context supplies the factory creation context . <nl> * @ return std : : vector < Configuration : : NetworkFilterFactoryCb > the list of filter factories . <nl> * / <nl> virtual std : : vector < Configuration : : NetworkFilterFactoryCb > <nl> - createFilterFactoryList ( const std : : vector < Json : : ObjectSharedPtr > & filters , <nl> + createFilterFactoryList ( const Protobuf : : RepeatedPtrField < envoy : : api : : v2 : : Filter > & filters , <nl> Configuration : : FactoryContext & context ) PURE ; <nl> <nl> / * * <nl> class ListenerManager { <nl> * should be updated . The new listener must have the same configured address . The old listener <nl> * will be gracefully drained once the new listener is ready to take traffic ( e . g . when RDS has <nl> * been initialized ) . <nl> - * @ param json supplies the configuration JSON . <nl> + * @ param config supplies the configuration proto . <nl> * @ return TRUE if a listener was added or FALSE if the listener was not updated because it is <nl> * a duplicate of the existing listener . This routine will throw an EnvoyException if <nl> * there is a fundamental error preventing the listener from being added or updated . 
<nl> * / <nl> - virtual bool addOrUpdateListener ( const Json : : Object & json ) PURE ; <nl> + virtual bool addOrUpdateListener ( const envoy : : api : : v2 : : Listener & config ) PURE ; <nl> <nl> / * * <nl> * @ return std : : vector < std : : reference_wrapper < Listener > > a list of the currently loaded listeners . <nl> mmm a / source / common / config / BUILD <nl> ppp b / source / common / config / BUILD <nl> envoy_cc_library ( <nl> ] , <nl> ) <nl> <nl> + envoy_cc_library ( <nl> + name = " lds_json_lib " , <nl> + srcs = [ " lds_json . cc " ] , <nl> + hdrs = [ " lds_json . h " ] , <nl> + external_deps = [ " envoy_lds " ] , <nl> + deps = [ <nl> + " : json_utility_lib " , <nl> + " : tls_context_json_lib " , <nl> + " / / include / envoy / json : json_object_interface " , <nl> + " / / source / common / common : assert_lib " , <nl> + " / / source / common / json : config_schemas_lib " , <nl> + " / / source / common / network : utility_lib " , <nl> + ] , <nl> + ) <nl> + <nl> envoy_cc_library ( <nl> name = " metadata_lib " , <nl> srcs = [ " metadata . cc " ] , <nl> envoy_cc_library ( <nl> hdrs = [ " tls_context_json . h " ] , <nl> external_deps = [ " envoy_tls_context " ] , <nl> deps = [ <nl> + " : json_utility_lib " , <nl> " / / include / envoy / json : json_object_interface " , <nl> " / / source / common / common : utility_lib " , <nl> ] , <nl> new file mode 100644 <nl> index 00000000000 . . 1981ab92a47 <nl> mmm / dev / null <nl> ppp b / source / common / config / lds_json . cc <nl> <nl> + # include " common / config / lds_json . h " <nl> + <nl> + # include " common / common / assert . h " <nl> + # include " common / config / json_utility . h " <nl> + # include " common / config / tls_context_json . h " <nl> + # include " common / json / config_schemas . h " <nl> + # include " common / network / utility . h " <nl> + <nl> + namespace Envoy { <nl> + namespace Config { <nl> + <nl> + void LdsJson : : translateListener ( const Json : : Object & json_listener , <nl> + envoy : : api : : v2 : : Listener & listener ) { <nl> + json_listener . validateSchema ( Json : : Schema : : LISTENER_SCHEMA ) ; <nl> + <nl> + / / TODO ( htuch ) : Figure out if we really want UnresolvedAddress here . . . <nl> + Network : : Address : : InstanceConstSharedPtr listener_address = <nl> + Network : : Utility : : resolveUrl ( json_listener . getString ( " address " ) ) ; <nl> + auto * named_address = listener . mutable_address ( ) - > mutable_named_address ( ) ; <nl> + named_address - > set_address ( listener_address - > ip ( ) - > addressAsString ( ) ) ; <nl> + named_address - > mutable_port ( ) - > set_value ( listener_address - > ip ( ) - > port ( ) ) ; <nl> + <nl> + auto * filter_chain = listener . mutable_filter_chains ( ) - > Add ( ) ; <nl> + if ( json_listener . hasObject ( " ssl_context " ) ) { <nl> + TlsContextJson : : translateDownstreamTlsContext ( * json_listener . getObject ( " ssl_context " ) , <nl> + * filter_chain - > mutable_tls_context ( ) ) ; <nl> + } <nl> + <nl> + for ( const auto & json_filter : json_listener . 
getObjectArray ( " filters " , true ) ) { <nl> + auto * filter = filter_chain - > mutable_filters ( ) - > Add ( ) ; <nl> + JSON_UTIL_SET_STRING ( * json_filter , * filter , name ) ; <nl> + JSON_UTIL_SET_STRING ( * json_filter , * filter - > mutable_deprecated_v1 ( ) , type ) ; <nl> + <nl> + const auto status = Protobuf : : util : : JsonStringToMessage ( <nl> + json_filter - > getObject ( " config " ) - > asJsonString ( ) , filter - > mutable_config ( ) ) ; <nl> + / / JSON schema has already validated that this is a valid JSON object . <nl> + ASSERT ( status . ok ( ) ) ; <nl> + UNREFERENCED_PARAMETER ( status ) ; <nl> + } <nl> + <nl> + JSON_UTIL_SET_BOOL ( json_listener , * filter_chain , use_proxy_proto ) ; <nl> + <nl> + JSON_UTIL_SET_BOOL ( json_listener , listener , use_original_dst ) ; <nl> + JSON_UTIL_SET_INTEGER ( json_listener , listener , per_connection_buffer_limit_bytes ) ; <nl> + JSON_UTIL_SET_STRING ( json_listener , listener , name ) ; <nl> + <nl> + JSON_UTIL_SET_BOOL ( json_listener , * listener . mutable_deprecated_v1 ( ) , bind_to_port ) ; <nl> + } <nl> + <nl> + } / / namespace Config <nl> + } / / namespace Envoy <nl> new file mode 100644 <nl> index 00000000000 . . 04716ce7c5f <nl> mmm / dev / null <nl> ppp b / source / common / config / lds_json . h <nl> <nl> + # pragma once <nl> + <nl> + # include " envoy / json / json_object . h " <nl> + <nl> + # include " api / lds . pb . h " <nl> + <nl> + namespace Envoy { <nl> + namespace Config { <nl> + <nl> + class LdsJson { <nl> + public : <nl> + / * * <nl> + * Translate a v1 JSON Listener to v2 envoy : : api : : v2 : : Listener . <nl> + * @ param json_listener source v1 JSON Listener object . <nl> + * @ param listener destination v2 envoy : : api : : v2 : : Listener . <nl> + * / <nl> + static void translateListener ( const Json : : Object & json_listener , <nl> + envoy : : api : : v2 : : Listener & listener ) ; <nl> + } ; <nl> + <nl> + } / / namespace Config <nl> + } / / namespace Envoy <nl> mmm a / source / common / config / tls_context_json . cc <nl> ppp b / source / common / config / tls_context_json . cc <nl> <nl> # include " common / config / tls_context_json . h " <nl> <nl> # include " common / common / utility . h " <nl> + # include " common / config / json_utility . h " <nl> <nl> namespace Envoy { <nl> namespace Config { <nl> <nl> + void TlsContextJson : : translateDownstreamTlsContext ( <nl> + const Json : : Object & json_tls_context , <nl> + envoy : : api : : v2 : : DownstreamTlsContext & downstream_tls_context ) { <nl> + translateCommonTlsContext ( json_tls_context , * downstream_tls_context . mutable_common_tls_context ( ) ) ; <nl> + translateTlsCertificate ( json_tls_context , <nl> + * downstream_tls_context . mutable_tls_certificates ( ) - > Add ( ) ) ; <nl> + JSON_UTIL_SET_BOOL ( json_tls_context , downstream_tls_context , require_client_certificate ) ; <nl> + } <nl> + <nl> void TlsContextJson : : translateUpstreamTlsContext ( <nl> const Json : : Object & json_tls_context , <nl> envoy : : api : : v2 : : UpstreamTlsContext & upstream_tls_context ) { <nl> mmm a / source / common / config / tls_context_json . h <nl> ppp b / source / common / config / tls_context_json . h <nl> namespace Config { <nl> <nl> class TlsContextJson { <nl> public : <nl> + / * * <nl> + * Translate a v1 JSON TLS context to v2 envoy : : api : : v2 : : DownstreamTlsContext . <nl> + * @ param json_tls_context source v1 JSON TLS context object . <nl> + * @ param downstream_tls_context destination v2 envoy : : api : : v2 : : Cluster . 
<nl> + * / <nl> + static void <nl> + translateDownstreamTlsContext ( const Json : : Object & json_tls_context , <nl> + envoy : : api : : v2 : : DownstreamTlsContext & downstream_tls_context ) ; <nl> + <nl> / * * <nl> * Translate a v1 JSON TLS context to v2 envoy : : api : : v2 : : UpstreamTlsContext . <nl> * @ param json_tls_context source v1 JSON TLS context object . <nl> mmm a / source / common / ssl / context_config_impl . cc <nl> ppp b / source / common / ssl / context_config_impl . cc <nl> ClientContextConfigImpl : : ClientContextConfigImpl ( const Json : : Object & config ) <nl> return upstream_tls_context ; <nl> } ( ) ) { } <nl> <nl> + ServerContextConfigImpl : : ServerContextConfigImpl ( const envoy : : api : : v2 : : DownstreamTlsContext & config ) <nl> + : ContextConfigImpl ( config . common_tls_context ( ) , config . tls_certificates ( ) [ 0 ] ) , <nl> + require_client_certificate_ ( <nl> + PROTOBUF_GET_WRAPPED_OR_DEFAULT ( config , require_client_certificate , false ) ) { <nl> + / / TODO ( htuch ) : Handle multiple certs # 1319 , add constraint for now to ensure we have at least one <nl> + / / cert # 1308 . <nl> + ASSERT ( config . tls_certificates ( ) . size ( ) = = 1 ) ; <nl> + } <nl> + <nl> ServerContextConfigImpl : : ServerContextConfigImpl ( const Json : : Object & config ) <nl> - : ContextConfigImpl ( <nl> - [ & config ] { <nl> - envoy : : api : : v2 : : CommonTlsContext common_tls_context ; <nl> - Config : : TlsContextJson : : translateCommonTlsContext ( config , common_tls_context ) ; <nl> - return common_tls_context ; <nl> - } ( ) , <nl> - [ & config ] { <nl> - envoy : : api : : v2 : : TlsCertificate tls_certificate ; <nl> - Config : : TlsContextJson : : translateTlsCertificate ( config , tls_certificate ) ; <nl> - return tls_certificate ; <nl> - } ( ) ) , <nl> - require_client_certificate_ ( config . getBoolean ( " require_client_certificate " , false ) ) { } <nl> + : ServerContextConfigImpl ( [ & config ] { <nl> + envoy : : api : : v2 : : DownstreamTlsContext downstream_tls_context ; <nl> + Config : : TlsContextJson : : translateDownstreamTlsContext ( config , downstream_tls_context ) ; <nl> + return downstream_tls_context ; <nl> + } ( ) ) { } <nl> <nl> } / / namespace Ssl <nl> } / / namespace Envoy <nl> mmm a / source / common / ssl / context_config_impl . h <nl> ppp b / source / common / ssl / context_config_impl . h <nl> class ClientContextConfigImpl : public ContextConfigImpl , public ClientContextCo <nl> <nl> class ServerContextConfigImpl : public ContextConfigImpl , public ServerContextConfig { <nl> public : <nl> + ServerContextConfigImpl ( const envoy : : api : : v2 : : DownstreamTlsContext & config ) ; <nl> ServerContextConfigImpl ( const Json : : Object & config ) ; <nl> <nl> / / Ssl : : ServerContextConfig <nl> mmm a / source / server / BUILD <nl> ppp b / source / server / BUILD <nl> envoy_cc_library ( <nl> name = " configuration_lib " , <nl> srcs = [ " configuration_impl . cc " ] , <nl> hdrs = [ " configuration_impl . 
h " ] , <nl> - external_deps = [ " envoy_bootstrap " ] , <nl> + external_deps = [ <nl> + " envoy_bootstrap " , <nl> + " envoy_lds " , <nl> + ] , <nl> deps = [ <nl> " : lds_api_lib " , <nl> " / / include / envoy / http : filter_interface " , <nl> envoy_cc_library ( <nl> " / / source / common / common : assert_lib " , <nl> " / / source / common / common : logger_lib " , <nl> " / / source / common / common : utility_lib " , <nl> + " / / source / common / config : lds_json_lib " , <nl> " / / source / common / json : config_schemas_lib " , <nl> " / / source / common / network : utility_lib " , <nl> " / / source / common / ratelimit : ratelimit_lib " , <nl> envoy_cc_library ( <nl> name = " lds_api_lib " , <nl> srcs = [ " lds_api . cc " ] , <nl> hdrs = [ " lds_api . h " ] , <nl> + external_deps = [ " envoy_lds " ] , <nl> deps = [ <nl> " / / include / envoy / init : init_interface " , <nl> " / / include / envoy / server : listener_manager_interface " , <nl> " / / include / envoy / stats : stats_macros " , <nl> + " / / source / common / config : lds_json_lib " , <nl> " / / source / common / config : utility_lib " , <nl> " / / source / common / http : rest_api_fetcher_lib " , <nl> " / / source / common / json : config_schemas_lib " , <nl> envoy_cc_library ( <nl> name = " listener_manager_lib " , <nl> srcs = [ " listener_manager_impl . cc " ] , <nl> hdrs = [ " listener_manager_impl . h " ] , <nl> + external_deps = [ " envoy_lds " ] , <nl> deps = [ <nl> " : configuration_lib " , <nl> " : drain_manager_lib " , <nl> envoy_cc_library ( <nl> " / / include / envoy / server : filter_config_interface " , <nl> " / / include / envoy / server : listener_manager_interface " , <nl> " / / include / envoy / server : worker_interface " , <nl> - " / / source / common / json : config_schemas_lib " , <nl> - " / / source / common / json : json_validator_lib " , <nl> " / / source / common / network : listen_socket_lib " , <nl> " / / source / common / network : utility_lib " , <nl> + " / / source / common / protobuf : utility_lib " , <nl> " / / source / common / ssl : context_config_lib " , <nl> ] , <nl> ) <nl> mmm a / source / server / config_validation / server . h <nl> ppp b / source / server / config_validation / server . h <nl> class ValidationInstance : Logger : : Loggable < Logger : : Id : : main > , <nl> <nl> / / Server : : ListenerComponentFactory <nl> std : : vector < Configuration : : NetworkFilterFactoryCb > <nl> - createFilterFactoryList ( const std : : vector < Json : : ObjectSharedPtr > & filters , <nl> + createFilterFactoryList ( const Protobuf : : RepeatedPtrField < envoy : : api : : v2 : : Filter > & filters , <nl> Configuration : : FactoryContext & context ) override { <nl> return ProdListenerComponentFactory : : createFilterFactoryList_ ( filters , * this , context ) ; <nl> } <nl> mmm a / source / server / configuration_impl . cc <nl> ppp b / source / server / configuration_impl . cc <nl> <nl> <nl> # include " common / common / assert . h " <nl> # include " common / common / utility . h " <nl> + # include " common / config / lds_json . h " <nl> # include " common / json / config_schemas . h " <nl> # include " common / ratelimit / ratelimit_impl . h " <nl> # include " common / tracing / http_tracer_impl . h " <nl> <nl> + # include " api / lds . pb . h " <nl> # include " spdlog / spdlog . h " <nl> <nl> namespace Envoy { <nl> void MainImpl : : initialize ( const Json : : Object & json , const envoy : : api : : v2 : : Bootst <nl> ENVOY_LOG ( info , " loading { } listener ( s ) " , listeners . 
size ( ) ) ; <nl> for ( size_t i = 0 ; i < listeners . size ( ) ; i + + ) { <nl> ENVOY_LOG ( info , " listener # { } : " , i ) ; <nl> - server . listenerManager ( ) . addOrUpdateListener ( * listeners [ i ] ) ; <nl> + envoy : : api : : v2 : : Listener listener ; <nl> + Config : : LdsJson : : translateListener ( * listeners [ i ] , listener ) ; <nl> + server . listenerManager ( ) . addOrUpdateListener ( listener ) ; <nl> } <nl> <nl> if ( json . hasObject ( " lds " ) ) { <nl> mmm a / source / server / lds_api . cc <nl> ppp b / source / server / lds_api . cc <nl> <nl> <nl> # include < functional > <nl> <nl> + # include " common / config / lds_json . h " <nl> # include " common / config / utility . h " <nl> # include " common / http / headers . h " <nl> # include " common / json / config_schemas . h " <nl> # include " common / json / json_loader . h " <nl> <nl> + # include " api / lds . pb . h " <nl> + <nl> namespace Envoy { <nl> namespace Server { <nl> <nl> void LdsApi : : parseResponse ( const Http : : Message & response ) { <nl> listeners_to_remove . emplace ( listener . get ( ) . name ( ) , listener ) ; <nl> } <nl> <nl> - for ( const auto & listener : json_listeners ) { <nl> - const std : : string listener_name = listener - > getString ( " name " ) ; <nl> + for ( const auto & json_listener : json_listeners ) { <nl> + const std : : string listener_name = json_listener - > getString ( " name " ) ; <nl> listeners_to_remove . erase ( listener_name ) ; <nl> - if ( listener_manager_ . addOrUpdateListener ( * listener ) ) { <nl> + envoy : : api : : v2 : : Listener listener ; <nl> + Config : : LdsJson : : translateListener ( * json_listener , listener ) ; <nl> + if ( listener_manager_ . addOrUpdateListener ( listener ) ) { <nl> ENVOY_LOG ( info , " lds : add / update listener ' { } ' " , listener_name ) ; <nl> } else { <nl> ENVOY_LOG ( debug , " lds : add / update listener ' { } ' skipped " , listener_name ) ; <nl> mmm a / source / server / listener_manager_impl . cc <nl> ppp b / source / server / listener_manager_impl . cc <nl> <nl> # include " envoy / registry / registry . h " <nl> <nl> # include " common / common / assert . h " <nl> - # include " common / json / config_schemas . h " <nl> # include " common / network / listen_socket_impl . h " <nl> # include " common / network / utility . h " <nl> + # include " common / protobuf / utility . h " <nl> # include " common / ssl / context_config_impl . h " <nl> <nl> # include " server / configuration_impl . h " / / TODO ( mattklein123 ) : Remove post 1 . 4 . 0 <nl> namespace Server { <nl> <nl> std : : vector < Configuration : : NetworkFilterFactoryCb > <nl> ProdListenerComponentFactory : : createFilterFactoryList_ ( <nl> - const std : : vector < Json : : ObjectSharedPtr > & filters , Server : : Instance & server , <nl> + const Protobuf : : RepeatedPtrField < envoy : : api : : v2 : : Filter > & filters , Server : : Instance & server , <nl> Configuration : : FactoryContext & context ) { <nl> std : : vector < Configuration : : NetworkFilterFactoryCb > ret ; <nl> - for ( size_t i = 0 ; i < filters . size ( ) ; i + + ) { <nl> - const std : : string string_type = filters [ i ] - > getString ( " type " ) ; <nl> - const std : : string string_name = filters [ i ] - > getString ( " name " ) ; <nl> - Json : : ObjectSharedPtr config = filters [ i ] - > getObject ( " config " ) ; <nl> + for ( ssize_t i = 0 ; i < filters . size ( ) ; i + + ) { <nl> + const std : : string string_type = filters [ i ] . deprecated_v1 ( ) . 
type ( ) ; <nl> + const std : : string string_name = filters [ i ] . name ( ) ; <nl> + const auto & proto_config = filters [ i ] . config ( ) ; <nl> ENVOY_LOG ( info , " filter # { } : " , i ) ; <nl> - ENVOY_LOG ( info , " type : { } " , string_type ) ; <nl> ENVOY_LOG ( info , " name : { } " , string_name ) ; <nl> <nl> + Protobuf : : util : : JsonOptions json_options ; <nl> + ProtobufTypes : : String json_config ; <nl> + const auto status = <nl> + Protobuf : : util : : MessageToJsonString ( proto_config , & json_config , json_options ) ; <nl> + / / This should always succeed unless something crash - worthy such as out - of - memory . <nl> + RELEASE_ASSERT ( status . ok ( ) ) ; <nl> + UNREFERENCED_PARAMETER ( status ) ; <nl> + const Json : : ObjectSharedPtr filter_config = Json : : Factory : : loadFromString ( json_config ) ; <nl> + <nl> / / Map filter type string to enum . <nl> Configuration : : NetworkFilterType type ; <nl> if ( string_type = = " read " ) { <nl> ProdListenerComponentFactory : : createFilterFactoryList_ ( <nl> Configuration : : NamedNetworkFilterConfigFactory * factory = <nl> Registry : : FactoryRegistry < Configuration : : NamedNetworkFilterConfigFactory > : : getFactory ( <nl> string_name ) ; <nl> - if ( factory ! = nullptr & & factory - > type ( ) = = type ) { <nl> + if ( factory ! = nullptr ) { <nl> Configuration : : NetworkFilterFactoryCb callback = <nl> - factory - > createFilterFactory ( * config , context ) ; <nl> + factory - > createFilterFactory ( * filter_config , context ) ; <nl> ret . push_back ( callback ) ; <nl> } else { <nl> / / DEPRECATED <nl> ProdListenerComponentFactory : : createFilterFactoryList_ ( <nl> for ( Configuration : : NetworkFilterConfigFactory * config_factory : <nl> Configuration : : MainImpl : : filterConfigFactories ( ) ) { <nl> Configuration : : NetworkFilterFactoryCb callback = <nl> - config_factory - > tryCreateFilterFactory ( type , string_name , * config , server ) ; <nl> + config_factory - > tryCreateFilterFactory ( type , string_name , * filter_config , server ) ; <nl> if ( callback ) { <nl> ret . push_back ( callback ) ; <nl> found_filter = true ; <nl> ProdListenerComponentFactory : : createFilterFactoryList_ ( <nl> } <nl> <nl> if ( ! found_filter ) { <nl> - throw EnvoyException ( <nl> - fmt : : format ( " unable to create filter factory for ' { } ' / ' { } ' " , string_name , string_type ) ) ; <nl> + throw EnvoyException ( fmt : : format ( " unable to create filter factory for ' { } ' " , string_name ) ) ; <nl> } <nl> } <nl> } <nl> DrainManagerPtr ProdListenerComponentFactory : : createDrainManager ( ) { <nl> return DrainManagerPtr { new DrainManagerImpl ( server_ ) } ; <nl> } <nl> <nl> - ListenerImpl : : ListenerImpl ( const Json : : Object & json , ListenerManagerImpl & parent , <nl> + ListenerImpl : : ListenerImpl ( const envoy : : api : : v2 : : Listener & config , ListenerManagerImpl & parent , <nl> const std : : string & name , bool workers_started , uint64_t hash ) <nl> - : Json : : Validator ( json , Json : : Schema : : LISTENER_SCHEMA ) , parent_ ( parent ) , <nl> - address_ ( Network : : Utility : : resolveUrl ( json . getString ( " address " ) ) ) , <nl> + : parent_ ( parent ) , <nl> + / / TODO ( htuch ) : Cleanup this translation , does it need to be UnresolvedAddress ? Validate not <nl> + / / pipe . <nl> + address_ ( <nl> + Network : : Utility : : parseInternetAddress ( config . address ( ) . named_address ( ) . address ( ) , <nl> + config . address ( ) . named_address ( ) . port ( ) . 
value ( ) ) ) , <nl> global_scope_ ( parent_ . server_ . stats ( ) . createScope ( " " ) ) , <nl> - bind_to_port_ ( json . getBoolean ( " bind_to_port " , true ) ) , <nl> - use_proxy_proto_ ( json . getBoolean ( " use_proxy_proto " , false ) ) , <nl> - use_original_dst_ ( json . getBoolean ( " use_original_dst " , false ) ) , <nl> + bind_to_port_ ( PROTOBUF_GET_WRAPPED_OR_DEFAULT ( config . deprecated_v1 ( ) , bind_to_port , true ) ) , <nl> + use_proxy_proto_ ( <nl> + PROTOBUF_GET_WRAPPED_OR_DEFAULT ( config . filter_chains ( ) [ 0 ] , use_proxy_proto , false ) ) , <nl> + use_original_dst_ ( PROTOBUF_GET_WRAPPED_OR_DEFAULT ( config , use_original_dst , false ) ) , <nl> per_connection_buffer_limit_bytes_ ( <nl> - json . getInteger ( " per_connection_buffer_limit_bytes " , 1024 * 1024 ) ) , <nl> + PROTOBUF_GET_WRAPPED_OR_DEFAULT ( config , per_connection_buffer_limit_bytes , 1024 * 1024 ) ) , <nl> listener_tag_ ( parent_ . factory_ . nextListenerTag ( ) ) , name_ ( name ) , <nl> workers_started_ ( workers_started ) , hash_ ( hash ) , <nl> local_drain_manager_ ( parent . factory_ . createDrainManager ( ) ) { <nl> + / / TODO ( htuch ) : Support multiple filter chains # 1280 , add constraint to ensure we have at least on <nl> + / / filter chain # 1308 . <nl> + ASSERT ( config . filter_chains ( ) . size ( ) = = 1 ) ; <nl> + const auto & filter_chain = config . filter_chains ( ) [ 0 ] ; <nl> <nl> / / ' : ' is a reserved char in statsd . Do the translation here to avoid costly inline translations <nl> / / later . <nl> ListenerImpl : : ListenerImpl ( const Json : : Object & json , ListenerManagerImpl & parent <nl> std : : replace ( final_stat_name . begin ( ) , final_stat_name . end ( ) , ' : ' , ' _ ' ) ; <nl> listener_scope_ = parent_ . server_ . stats ( ) . createScope ( final_stat_name ) ; <nl> <nl> - if ( json . hasObject ( " ssl_context " ) ) { <nl> - Ssl : : ServerContextConfigImpl context_config ( * json . getObject ( " ssl_context " ) ) ; <nl> + if ( filter_chain . has_tls_context ( ) ) { <nl> + Ssl : : ServerContextConfigImpl context_config ( filter_chain . tls_context ( ) ) ; <nl> ssl_context_ = parent_ . server_ . sslContextManager ( ) . createSslServerContext ( * listener_scope_ , <nl> context_config ) ; <nl> } <nl> <nl> - filter_factories_ = <nl> - parent_ . factory_ . createFilterFactoryList ( json . getObjectArray ( " filters " ) , * this ) ; <nl> + filter_factories_ = parent_ . factory_ . createFilterFactoryList ( filter_chain . filters ( ) , * this ) ; <nl> } <nl> <nl> ListenerImpl : : ~ ListenerImpl ( ) { <nl> ListenerManagerStats ListenerManagerImpl : : generateStats ( Stats : : Scope & scope ) { <nl> POOL_GAUGE_PREFIX ( scope , final_prefix ) ) } ; <nl> } <nl> <nl> - bool ListenerManagerImpl : : addOrUpdateListener ( const Json : : Object & json ) { <nl> - const std : : string name = json . getString ( " name " , server_ . random ( ) . uuid ( ) ) ; <nl> - const uint64_t hash = json . hash ( ) ; <nl> + bool ListenerManagerImpl : : addOrUpdateListener ( const envoy : : api : : v2 : : Listener & config ) { <nl> + const std : : string name = config . name ( ) . empty ( ) ? server_ . random ( ) . uuid ( ) : config . 
name ( ) ; <nl> + const uint64_t hash = MessageUtil : : hash ( config ) ; <nl> ENVOY_LOG ( debug , " begin add / update listener : name = { } hash = { } " , name , hash ) ; <nl> <nl> auto existing_active_listener = getListenerByName ( active_listeners_ , name ) ; <nl> bool ListenerManagerImpl : : addOrUpdateListener ( const Json : : Object & json ) { <nl> return false ; <nl> } <nl> <nl> - ListenerImplPtr new_listener ( new ListenerImpl ( json , * this , name , workers_started_ , hash ) ) ; <nl> + ListenerImplPtr new_listener ( new ListenerImpl ( config , * this , name , workers_started_ , hash ) ) ; <nl> ListenerImpl & new_listener_ref = * new_listener ; <nl> <nl> / / We mandate that a listener with the same name must have the same configured address . This <nl> mmm a / source / server / listener_manager_impl . h <nl> ppp b / source / server / listener_manager_impl . h <nl> <nl> # include " envoy / server / worker . h " <nl> <nl> # include " common / common / logger . h " <nl> - # include " common / json / json_validator . h " <nl> <nl> # include " server / init_manager_impl . h " <nl> <nl> + # include " api / lds . pb . h " <nl> + <nl> namespace Envoy { <nl> namespace Server { <nl> <nl> class ProdListenerComponentFactory : public ListenerComponentFactory , <nl> * Static worker for createFilterFactoryList ( ) that can be used directly in tests . <nl> * / <nl> static std : : vector < Configuration : : NetworkFilterFactoryCb > <nl> - createFilterFactoryList_ ( const std : : vector < Json : : ObjectSharedPtr > & filters , <nl> + createFilterFactoryList_ ( const Protobuf : : RepeatedPtrField < envoy : : api : : v2 : : Filter > & filters , <nl> Server : : Instance & server , Configuration : : FactoryContext & context ) ; <nl> <nl> / / Server : : ListenSocketFactory <nl> std : : vector < Configuration : : NetworkFilterFactoryCb > <nl> - createFilterFactoryList ( const std : : vector < Json : : ObjectSharedPtr > & filters , <nl> + createFilterFactoryList ( const Protobuf : : RepeatedPtrField < envoy : : api : : v2 : : Filter > & filters , <nl> Configuration : : FactoryContext & context ) override { <nl> return createFilterFactoryList_ ( filters , server_ , context ) ; <nl> } <nl> class ListenerManagerImpl : public ListenerManager , Logger : : Loggable < Logger : : Id : <nl> void onListenerWarmed ( ListenerImpl & listener ) ; <nl> <nl> / / Server : : ListenerManager <nl> - bool addOrUpdateListener ( const Json : : Object & json ) override ; <nl> + bool addOrUpdateListener ( const envoy : : api : : v2 : : Listener & config ) override ; <nl> std : : vector < std : : reference_wrapper < Listener > > listeners ( ) override ; <nl> uint64_t numConnections ( ) override ; <nl> bool removeListener ( const std : : string & listener_name ) override ; <nl> class ListenerManagerImpl : public ListenerManager , Logger : : Loggable < Logger : : Id : <nl> / / initializing all listeners after workers are started . <nl> <nl> / * * <nl> - * Maps JSON config to runtime config for a listener with a network filter chain . <nl> + * Maps proto config to runtime config for a listener with a network filter chain . <nl> * / <nl> class ListenerImpl : public Listener , <nl> public Configuration : : FactoryContext , <nl> public Network : : DrainDecision , <nl> public Network : : FilterChainFactory , <nl> - Json : : Validator , <nl> Logger : : Loggable < Logger : : Id : : config > { <nl> public : <nl> / * * <nl> * Create a new listener . <nl> - * @ param json supplies the configuration JSON . 
<nl> + * @ param config supplies the configuration proto . <nl> * @ param parent supplies the owning manager . <nl> * @ param name supplies the listener name . <nl> * @ param workers_started supplies whether the listener is being added before or after workers <nl> * have been started . This controls various behavior related to init management . <nl> * @ param hash supplies the hash to use for duplicate checking . <nl> * / <nl> - ListenerImpl ( const Json : : Object & json , ListenerManagerImpl & parent , const std : : string & name , <nl> - bool workers_started , uint64_t hash ) ; <nl> + ListenerImpl ( const envoy : : api : : v2 : : Listener & config , ListenerManagerImpl & parent , <nl> + const std : : string & name , bool workers_started , uint64_t hash ) ; <nl> ~ ListenerImpl ( ) ; <nl> <nl> / * * <nl> mmm a / test / config_test / config_test . cc <nl> ppp b / test / config_test / config_test . cc <nl> class ConfigTest { <nl> } ) ) ; <nl> ON_CALL ( server_ , listenerManager ( ) ) . WillByDefault ( ReturnRef ( listener_manager_ ) ) ; <nl> ON_CALL ( component_factory_ , createFilterFactoryList ( _ , _ ) ) <nl> - . WillByDefault ( Invoke ( [ & ] ( const std : : vector < Json : : ObjectSharedPtr > & filters , <nl> + . WillByDefault ( Invoke ( [ & ] ( const Protobuf : : RepeatedPtrField < envoy : : api : : v2 : : Filter > & filters , <nl> Server : : Configuration : : FactoryContext & context ) <nl> - > std : : vector < Server : : Configuration : : NetworkFilterFactoryCb > { <nl> return Server : : ProdListenerComponentFactory : : createFilterFactoryList_ ( filters , server_ , <nl> mmm a / test / integration / BUILD <nl> ppp b / test / integration / BUILD <nl> envoy_cc_test ( <nl> ] , <nl> deps = [ <nl> " : integration_lib " , <nl> + " / / test / server : utility_lib " , <nl> ] , <nl> ) <nl> <nl> mmm a / test / integration / echo_integration_test . cc <nl> ppp b / test / integration / echo_integration_test . cc <nl> <nl> # include " test / integration / integration . h " <nl> # include " test / integration / utility . h " <nl> + # include " test / server / utility . h " <nl> <nl> namespace Envoy { <nl> class EchoIntegrationTest : public BaseIntegrationTest , <nl> TEST_P ( EchoIntegrationTest , Hello ) { <nl> } <nl> <nl> TEST_P ( EchoIntegrationTest , AddRemoveListener ) { <nl> - std : : string json = R " EOF ( <nl> + const std : : string json = TestEnvironment : : substitute ( R " EOF ( <nl> { <nl> " name " : " new_listener " , <nl> " address " : " tcp : / / { { ip_loopback_address } } : 0 " , <nl> TEST_P ( EchoIntegrationTest , AddRemoveListener ) { <nl> { " type " : " read " , " name " : " echo " , " config " : { } } <nl> ] <nl> } <nl> - ) EOF " ; <nl> + ) EOF " , <nl> + GetParam ( ) ) ; <nl> <nl> / / Add the listener . <nl> ConditionalInitializer listener_added_by_worker ; <nl> ConditionalInitializer listener_added_by_manager ; <nl> test_server_ - > setOnWorkerListenerAddedCb ( <nl> [ & listener_added_by_worker ] ( ) - > void { listener_added_by_worker . setReady ( ) ; } ) ; <nl> - Json : : ObjectSharedPtr loader = TestEnvironment : : jsonLoadFromString ( json , GetParam ( ) ) ; <nl> - test_server_ - > server ( ) . dispatcher ( ) . post ( [ this , loader , & listener_added_by_manager ] ( ) - > void { <nl> - EXPECT_TRUE ( test_server_ - > server ( ) . listenerManager ( ) . addOrUpdateListener ( * loader ) ) ; <nl> + test_server_ - > server ( ) . dispatcher ( ) . post ( [ this , json , & listener_added_by_manager ] ( ) - > void { <nl> + EXPECT_TRUE ( test_server_ - > server ( ) . 
listenerManager ( ) . addOrUpdateListener ( <nl> + Server : : parseListenerFromJson ( json ) ) ) ; <nl> listener_added_by_manager . setReady ( ) ; <nl> } ) ; <nl> listener_added_by_worker . waitReady ( ) ; <nl> TEST_P ( EchoIntegrationTest , AddRemoveListener ) { <nl> ConditionalInitializer listener_removed ; <nl> test_server_ - > setOnWorkerListenerRemovedCb ( <nl> [ & listener_removed ] ( ) - > void { listener_removed . setReady ( ) ; } ) ; <nl> - test_server_ - > server ( ) . dispatcher ( ) . post ( [ this , loader ] ( ) - > void { <nl> + test_server_ - > server ( ) . dispatcher ( ) . post ( [ this ] ( ) - > void { <nl> EXPECT_TRUE ( test_server_ - > server ( ) . listenerManager ( ) . removeListener ( " new_listener " ) ) ; <nl> } ) ; <nl> listener_removed . waitReady ( ) ; <nl> mmm a / test / mocks / server / mocks . h <nl> ppp b / test / mocks / server / mocks . h <nl> class MockListenerComponentFactory : public ListenerComponentFactory { <nl> <nl> DrainManagerPtr createDrainManager ( ) override { return DrainManagerPtr { createDrainManager_ ( ) } ; } <nl> <nl> - MOCK_METHOD2 ( createFilterFactoryList , std : : vector < Configuration : : NetworkFilterFactoryCb > ( <nl> - const std : : vector < Json : : ObjectSharedPtr > & filters , <nl> - Configuration : : FactoryContext & context ) ) ; <nl> + MOCK_METHOD2 ( createFilterFactoryList , <nl> + std : : vector < Configuration : : NetworkFilterFactoryCb > ( <nl> + const Protobuf : : RepeatedPtrField < envoy : : api : : v2 : : Filter > & filters , <nl> + Configuration : : FactoryContext & context ) ) ; <nl> MOCK_METHOD2 ( createListenSocket , <nl> Network : : ListenSocketSharedPtr ( Network : : Address : : InstanceConstSharedPtr address , <nl> bool bind_to_port ) ) ; <nl> class MockListenerManager : public ListenerManager { <nl> MockListenerManager ( ) ; <nl> ~ MockListenerManager ( ) ; <nl> <nl> - MOCK_METHOD1 ( addOrUpdateListener , bool ( const Json : : Object & json ) ) ; <nl> + MOCK_METHOD1 ( addOrUpdateListener , bool ( const envoy : : api : : v2 : : Listener & config ) ) ; <nl> MOCK_METHOD0 ( listeners , std : : vector < std : : reference_wrapper < Listener > > ( ) ) ; <nl> MOCK_METHOD0 ( numConnections , uint64_t ( ) ) ; <nl> MOCK_METHOD1 ( removeListener , bool ( const std : : string & listener_name ) ) ; <nl> mmm a / test / server / BUILD <nl> ppp b / test / server / BUILD <nl> licenses ( [ " notice " ] ) # Apache 2 <nl> load ( <nl> " / / bazel : envoy_build_system . bzl " , <nl> " envoy_cc_test " , <nl> + " envoy_cc_test_library " , <nl> " envoy_package " , <nl> ) <nl> <nl> envoy_cc_test ( <nl> srcs = [ " listener_manager_impl_test . cc " ] , <nl> data = [ " / / test / common / ssl / test_data : certs " ] , <nl> deps = [ <nl> + " : utility_lib " , <nl> " / / source / server : listener_manager_lib " , <nl> " / / test / mocks / server : server_mocks " , <nl> " / / test / test_common : environment_lib " , <nl> envoy_cc_test ( <nl> ] , <nl> ) <nl> <nl> + envoy_cc_test_library ( <nl> + name = " utility_lib " , <nl> + hdrs = [ " utility . h " ] , <nl> + deps = [ <nl> + " / / source / common / config : lds_json_lib " , <nl> + " / / source / common / json : json_loader_lib " , <nl> + ] , <nl> + ) <nl> + <nl> envoy_cc_test ( <nl> name = " worker_impl_test " , <nl> srcs = [ " worker_impl_test . cc " ] , <nl> mmm a / test / server / lds_api_test . cc <nl> ppp b / test / server / lds_api_test . 
cc <nl> class LdsApiTest : public testing : : Test { <nl> <nl> void expectAdd ( const std : : string & listener_name , bool updated ) { <nl> EXPECT_CALL ( listener_manager_ , addOrUpdateListener ( _ ) ) <nl> - . WillOnce ( Invoke ( [ listener_name , updated ] ( const Json : : Object & config ) - > bool { <nl> - EXPECT_EQ ( listener_name , config . getString ( " name " ) ) ; <nl> + . WillOnce ( Invoke ( [ listener_name , updated ] ( const envoy : : api : : v2 : : Listener & config ) - > bool { <nl> + EXPECT_EQ ( listener_name , config . name ( ) ) ; <nl> return updated ; <nl> } ) ) ; <nl> } <nl> TEST_F ( LdsApiTest , Basic ) { <nl> { <nl> " listeners " : [ <nl> { <nl> - " name " : " listener1 " <nl> + " name " : " listener1 " , <nl> + " address " : " tcp : / / 0 . 0 . 0 . 0 : 1 " , <nl> + " filters " : [ ] <nl> } , <nl> { <nl> - " name " : " listener2 " <nl> + " name " : " listener2 " , <nl> + " address " : " tcp : / / 0 . 0 . 0 . 0 : 2 " , <nl> + " filters " : [ ] <nl> } <nl> ] <nl> } <nl> TEST_F ( LdsApiTest , Basic ) { <nl> { <nl> " listeners " : [ <nl> { <nl> - " name " : " listener1 " <nl> + " name " : " listener1 " , <nl> + " address " : " tcp : / / 0 . 0 . 0 . 0 : 1 " , <nl> + " filters " : [ ] <nl> } , <nl> { <nl> - " name " : " listener3 " <nl> + " name " : " listener3 " , <nl> + " address " : " tcp : / / 0 . 0 . 0 . 0 : 3 " , <nl> + " filters " : [ ] <nl> } <nl> ] <nl> } <nl> mmm a / test / server / listener_manager_impl_test . cc <nl> ppp b / test / server / listener_manager_impl_test . cc <nl> <nl> # include " server / listener_manager_impl . h " <nl> <nl> # include " test / mocks / server / mocks . h " <nl> + # include " test / server / utility . h " <nl> # include " test / test_common / environment . h " <nl> # include " test / test_common / utility . h " <nl> <nl> class ListenerManagerImplTest : public testing : : Test { <nl> EXPECT_CALL ( listener_factory_ , createDrainManager_ ( ) ) <nl> . WillOnce ( Return ( raw_listener - > drain_manager_ ) ) ; <nl> EXPECT_CALL ( listener_factory_ , createFilterFactoryList ( _ , _ ) ) <nl> - . WillOnce ( Invoke ( [ raw_listener , need_init ] ( const std : : vector < Json : : ObjectSharedPtr > & , <nl> - Configuration : : FactoryContext & context ) <nl> - - > std : : vector < Configuration : : NetworkFilterFactoryCb > { <nl> - std : : shared_ptr < ListenerHandle > notifier ( raw_listener ) ; <nl> - raw_listener - > context_ = & context ; <nl> - if ( need_init ) { <nl> - context . initManager ( ) . registerTarget ( notifier - > target_ ) ; <nl> - } <nl> - return { [ notifier ] ( Network : : FilterManager & ) - > void { } } ; <nl> - } ) ) ; <nl> + . WillOnce ( Invoke ( <nl> + [ raw_listener , need_init ] ( const Protobuf : : RepeatedPtrField < envoy : : api : : v2 : : Filter > & , <nl> + Configuration : : FactoryContext & context ) <nl> + - > std : : vector < Configuration : : NetworkFilterFactoryCb > { <nl> + std : : shared_ptr < ListenerHandle > notifier ( raw_listener ) ; <nl> + raw_listener - > context_ = & context ; <nl> + if ( need_init ) { <nl> + context . initManager ( ) . registerTarget ( notifier - > target_ ) ; <nl> + } <nl> + return { [ notifier ] ( Network : : FilterManager & ) - > void { } } ; <nl> + } ) ) ; <nl> <nl> return raw_listener ; <nl> } <nl> class ListenerManagerImplWithRealFiltersTest : public ListenerManagerImplTest { <nl> ListenerManagerImplWithRealFiltersTest ( ) { <nl> / / Use real filter loading by default . <nl> ON_CALL ( listener_factory_ , createFilterFactoryList ( _ , _ ) ) <nl> - . 
WillByDefault ( Invoke ( [ this ] ( const std : : vector < Json : : ObjectSharedPtr > & filters , <nl> - Configuration : : FactoryContext & context ) <nl> + . WillByDefault ( Invoke ( [ this ] ( <nl> + const Protobuf : : RepeatedPtrField < envoy : : api : : v2 : : Filter > & filters , <nl> + Configuration : : FactoryContext & context ) <nl> - > std : : vector < Configuration : : NetworkFilterFactoryCb > { <nl> return ProdListenerComponentFactory : : createFilterFactoryList_ ( filters , server_ , context ) ; <nl> } ) ) ; <nl> class ListenerManagerImplWithRealFiltersTest : public ListenerManagerImplTest { <nl> } ; <nl> <nl> TEST_F ( ListenerManagerImplWithRealFiltersTest , EmptyFilter ) { <nl> - std : : string json = R " EOF ( <nl> + const std : : string json = R " EOF ( <nl> { <nl> " address " : " tcp : / / 127 . 0 . 0 . 1 : 1234 " , <nl> " filters " : [ ] <nl> } <nl> ) EOF " ; <nl> <nl> - Json : : ObjectSharedPtr loader = Json : : Factory : : loadFromString ( json ) ; <nl> EXPECT_CALL ( server_ . random_ , uuid ( ) ) ; <nl> EXPECT_CALL ( listener_factory_ , createListenSocket ( _ , true ) ) ; <nl> - manager_ - > addOrUpdateListener ( * loader ) ; <nl> + manager_ - > addOrUpdateListener ( parseListenerFromJson ( json ) ) ; <nl> EXPECT_EQ ( 1U , manager_ - > listeners ( ) . size ( ) ) ; <nl> } <nl> <nl> TEST_F ( ListenerManagerImplWithRealFiltersTest , DefaultListenerPerConnectionBufferLimit ) { <nl> - std : : string json = R " EOF ( <nl> + const std : : string json = R " EOF ( <nl> { <nl> " address " : " tcp : / / 127 . 0 . 0 . 1 : 1234 " , <nl> " filters " : [ ] <nl> } <nl> ) EOF " ; <nl> <nl> - Json : : ObjectSharedPtr loader = Json : : Factory : : loadFromString ( json ) ; <nl> EXPECT_CALL ( listener_factory_ , createListenSocket ( _ , true ) ) ; <nl> - manager_ - > addOrUpdateListener ( * loader ) ; <nl> + manager_ - > addOrUpdateListener ( parseListenerFromJson ( json ) ) ; <nl> EXPECT_EQ ( 1024 * 1024U , manager_ - > listeners ( ) . back ( ) . get ( ) . perConnectionBufferLimitBytes ( ) ) ; <nl> } <nl> <nl> TEST_F ( ListenerManagerImplWithRealFiltersTest , SetListenerPerConnectionBufferLimit ) { <nl> - std : : string json = R " EOF ( <nl> + const std : : string json = R " EOF ( <nl> { <nl> " address " : " tcp : / / 127 . 0 . 0 . 1 : 1234 " , <nl> " filters " : [ ] , <nl> TEST_F ( ListenerManagerImplWithRealFiltersTest , SetListenerPerConnectionBufferLim <nl> } <nl> ) EOF " ; <nl> <nl> - Json : : ObjectSharedPtr loader = Json : : Factory : : loadFromString ( json ) ; <nl> EXPECT_CALL ( listener_factory_ , createListenSocket ( _ , true ) ) ; <nl> - manager_ - > addOrUpdateListener ( * loader ) ; <nl> + manager_ - > addOrUpdateListener ( parseListenerFromJson ( json ) ) ; <nl> EXPECT_EQ ( 8192U , manager_ - > listeners ( ) . back ( ) . get ( ) . perConnectionBufferLimitBytes ( ) ) ; <nl> } <nl> <nl> TEST_F ( ListenerManagerImplWithRealFiltersTest , SslContext ) { <nl> - std : : string json = R " EOF ( <nl> + const std : : string json = TestEnvironment : : substitute ( R " EOF ( <nl> { <nl> " address " : " tcp : / / 127 . 0 . 0 . 
1 : 1234 " , <nl> " filters " : [ ] , <nl> TEST_F ( ListenerManagerImplWithRealFiltersTest , SslContext ) { <nl> ] <nl> } <nl> } <nl> - ) EOF " ; <nl> + ) EOF " , <nl> + Network : : Address : : IpVersion : : v4 ) ; <nl> <nl> - Json : : ObjectSharedPtr loader = TestEnvironment : : jsonLoadFromString ( json ) ; <nl> EXPECT_CALL ( listener_factory_ , createListenSocket ( _ , true ) ) ; <nl> - manager_ - > addOrUpdateListener ( * loader ) ; <nl> + manager_ - > addOrUpdateListener ( parseListenerFromJson ( json ) ) ; <nl> EXPECT_NE ( nullptr , manager_ - > listeners ( ) . back ( ) . get ( ) . sslContext ( ) ) ; <nl> } <nl> <nl> TEST_F ( ListenerManagerImplWithRealFiltersTest , BadListenerConfig ) { <nl> - std : : string json = R " EOF ( <nl> + const std : : string json = R " EOF ( <nl> { <nl> " address " : " tcp : / / 127 . 0 . 0 . 1 : 1234 " , <nl> " filters " : [ ] , <nl> TEST_F ( ListenerManagerImplWithRealFiltersTest , BadListenerConfig ) { <nl> } <nl> ) EOF " ; <nl> <nl> - Json : : ObjectSharedPtr loader = Json : : Factory : : loadFromString ( json ) ; <nl> - EXPECT_THROW ( manager_ - > addOrUpdateListener ( * loader ) , Json : : Exception ) ; <nl> + EXPECT_THROW ( manager_ - > addOrUpdateListener ( parseListenerFromJson ( json ) ) , Json : : Exception ) ; <nl> } <nl> <nl> TEST_F ( ListenerManagerImplWithRealFiltersTest , BadFilterConfig ) { <nl> - std : : string json = R " EOF ( <nl> + const std : : string json = R " EOF ( <nl> { <nl> " address " : " tcp : / / 127 . 0 . 0 . 1 : 1234 " , <nl> " filters " : [ <nl> TEST_F ( ListenerManagerImplWithRealFiltersTest , BadFilterConfig ) { <nl> } <nl> ) EOF " ; <nl> <nl> - Json : : ObjectSharedPtr loader = Json : : Factory : : loadFromString ( json ) ; <nl> - EXPECT_THROW ( manager_ - > addOrUpdateListener ( * loader ) , Json : : Exception ) ; <nl> + EXPECT_THROW ( manager_ - > addOrUpdateListener ( parseListenerFromJson ( json ) ) , Json : : Exception ) ; <nl> } <nl> <nl> TEST_F ( ListenerManagerImplWithRealFiltersTest , BadFilterName ) { <nl> - std : : string json = R " EOF ( <nl> + const std : : string json = R " EOF ( <nl> { <nl> " address " : " tcp : / / 127 . 0 . 0 . 1 : 1234 " , <nl> " filters " : [ <nl> TEST_F ( ListenerManagerImplWithRealFiltersTest , BadFilterName ) { <nl> } <nl> ) EOF " ; <nl> <nl> - Json : : ObjectSharedPtr loader = Json : : Factory : : loadFromString ( json ) ; <nl> - EXPECT_THROW_WITH_MESSAGE ( manager_ - > addOrUpdateListener ( * loader ) , EnvoyException , <nl> - " unable to create filter factory for ' invalid ' / ' write ' " ) ; <nl> - } <nl> - <nl> - TEST_F ( ListenerManagerImplWithRealFiltersTest , BadFilterType ) { <nl> - std : : string json = R " EOF ( <nl> - { <nl> - " address " : " tcp : / / 127 . 0 . 0 . 
1 : 1234 " , <nl> - " filters " : [ <nl> - { <nl> - " type " : " write " , <nl> - " name " : " echo " , <nl> - " config " : { } <nl> - } <nl> - ] <nl> - } <nl> - ) EOF " ; <nl> - <nl> - Json : : ObjectSharedPtr loader = Json : : Factory : : loadFromString ( json ) ; <nl> - EXPECT_THROW_WITH_MESSAGE ( manager_ - > addOrUpdateListener ( * loader ) , EnvoyException , <nl> - " unable to create filter factory for ' echo ' / ' write ' " ) ; <nl> + EXPECT_THROW_WITH_MESSAGE ( manager_ - > addOrUpdateListener ( parseListenerFromJson ( json ) ) , <nl> + EnvoyException , " unable to create filter factory for ' invalid ' " ) ; <nl> } <nl> <nl> class TestStatsConfigFactory : public Configuration : : NamedNetworkFilterConfigFactory { <nl> TEST_F ( ListenerManagerImplWithRealFiltersTest , StatsScopeTest ) { <nl> Registry : : RegisterFactory < TestStatsConfigFactory , Configuration : : NamedNetworkFilterConfigFactory > <nl> registered ; <nl> <nl> - std : : string json = R " EOF ( <nl> + const std : : string json = R " EOF ( <nl> { <nl> " address " : " tcp : / / 127 . 0 . 0 . 1 : 1234 " , <nl> " bind_to_port " : false , <nl> TEST_F ( ListenerManagerImplWithRealFiltersTest , StatsScopeTest ) { <nl> } <nl> ) EOF " ; <nl> <nl> - Json : : ObjectSharedPtr loader = Json : : Factory : : loadFromString ( json ) ; <nl> EXPECT_CALL ( listener_factory_ , createListenSocket ( _ , false ) ) ; <nl> - manager_ - > addOrUpdateListener ( * loader ) ; <nl> + manager_ - > addOrUpdateListener ( parseListenerFromJson ( json ) ) ; <nl> manager_ - > listeners ( ) . front ( ) . get ( ) . listenerScope ( ) . counter ( " foo " ) . inc ( ) ; <nl> <nl> EXPECT_EQ ( 1UL , server_ . stats_store_ . counter ( " bar " ) . value ( ) ) ; <nl> TEST_F ( ListenerManagerImplWithRealFiltersTest , DeprecatedFilterConfigFactoryRegi <nl> / / Register the config factory <nl> Configuration : : RegisterNetworkFilterConfigFactory < TestDeprecatedEchoConfigFactory > registered ; <nl> <nl> - std : : string json = R " EOF ( <nl> + const std : : string json = R " EOF ( <nl> { <nl> " address " : " tcp : / / 127 . 0 . 0 . 1 : 1234 " , <nl> " filters " : [ <nl> TEST_F ( ListenerManagerImplWithRealFiltersTest , DeprecatedFilterConfigFactoryRegi <nl> } <nl> ) EOF " ; <nl> <nl> - Json : : ObjectSharedPtr loader = Json : : Factory : : loadFromString ( json ) ; <nl> EXPECT_CALL ( listener_factory_ , createListenSocket ( _ , true ) ) ; <nl> - manager_ - > addOrUpdateListener ( * loader ) ; <nl> + manager_ - > addOrUpdateListener ( parseListenerFromJson ( json ) ) ; <nl> } <nl> <nl> TEST_F ( ListenerManagerImplTest , AddListenerAddressNotMatching ) { <nl> InSequence s ; <nl> <nl> / / Add foo listener . <nl> - std : : string listener_foo_json = R " EOF ( <nl> + const std : : string listener_foo_json = R " EOF ( <nl> { <nl> " name " : " foo " , <nl> " address " : " tcp : / / 127 . 0 . 0 . 1 : 1234 " , <nl> TEST_F ( ListenerManagerImplTest , AddListenerAddressNotMatching ) { <nl> } <nl> ) EOF " ; <nl> <nl> - Json : : ObjectSharedPtr loader = Json : : Factory : : loadFromString ( listener_foo_json ) ; <nl> ListenerHandle * listener_foo = expectListenerCreate ( false ) ; <nl> EXPECT_CALL ( listener_factory_ , createListenSocket ( _ , true ) ) ; <nl> - EXPECT_TRUE ( manager_ - > addOrUpdateListener ( * loader ) ) ; <nl> + EXPECT_TRUE ( manager_ - > addOrUpdateListener ( parseListenerFromJson ( listener_foo_json ) ) ) ; <nl> checkStats ( 1 , 0 , 0 , 0 , 1 , 0 ) ; <nl> <nl> / / Update foo listener , but with a different address . Should throw . 
<nl> - std : : string listener_foo_different_address_json = R " EOF ( <nl> + const std : : string listener_foo_different_address_json = R " EOF ( <nl> { <nl> " name " : " foo " , <nl> " address " : " tcp : / / 127 . 0 . 0 . 1 : 1235 " , <nl> TEST_F ( ListenerManagerImplTest , AddListenerAddressNotMatching ) { <nl> } <nl> ) EOF " ; <nl> <nl> - loader = Json : : Factory : : loadFromString ( listener_foo_different_address_json ) ; <nl> ListenerHandle * listener_foo_different_address = expectListenerCreate ( false ) ; <nl> EXPECT_CALL ( * listener_foo_different_address , onDestroy ( ) ) ; <nl> - EXPECT_THROW_WITH_MESSAGE ( manager_ - > addOrUpdateListener ( * loader ) , EnvoyException , <nl> - " error updating listener : ' foo ' has a different address " <nl> - " ' 127 . 0 . 0 . 1 : 1235 ' from existing listener " ) ; <nl> + EXPECT_THROW_WITH_MESSAGE ( <nl> + manager_ - > addOrUpdateListener ( parseListenerFromJson ( listener_foo_different_address_json ) ) , <nl> + EnvoyException , <nl> + " error updating listener : ' foo ' has a different address " <nl> + " ' 127 . 0 . 0 . 1 : 1235 ' from existing listener " ) ; <nl> <nl> EXPECT_CALL ( * listener_foo , onDestroy ( ) ) ; <nl> } <nl> TEST_F ( ListenerManagerImplTest , AddOrUpdateListener ) { <nl> InSequence s ; <nl> <nl> / / Add foo listener . <nl> - std : : string listener_foo_json = R " EOF ( <nl> + const std : : string listener_foo_json = R " EOF ( <nl> { <nl> " name " : " foo " , <nl> " address " : " tcp : / / 127 . 0 . 0 . 1 : 1234 " , <nl> TEST_F ( ListenerManagerImplTest , AddOrUpdateListener ) { <nl> } <nl> ) EOF " ; <nl> <nl> - Json : : ObjectSharedPtr loader = Json : : Factory : : loadFromString ( listener_foo_json ) ; <nl> ListenerHandle * listener_foo = expectListenerCreate ( false ) ; <nl> EXPECT_CALL ( listener_factory_ , createListenSocket ( _ , true ) ) ; <nl> - EXPECT_TRUE ( manager_ - > addOrUpdateListener ( * loader ) ) ; <nl> + EXPECT_TRUE ( manager_ - > addOrUpdateListener ( parseListenerFromJson ( listener_foo_json ) ) ) ; <nl> checkStats ( 1 , 0 , 0 , 0 , 1 , 0 ) ; <nl> <nl> / / Update duplicate should be a NOP . <nl> - EXPECT_FALSE ( manager_ - > addOrUpdateListener ( * loader ) ) ; <nl> + EXPECT_FALSE ( manager_ - > addOrUpdateListener ( parseListenerFromJson ( listener_foo_json ) ) ) ; <nl> checkStats ( 1 , 0 , 0 , 0 , 1 , 0 ) ; <nl> <nl> / / Update foo listener . Should share socket . <nl> - std : : string listener_foo_update1_json = R " EOF ( <nl> + const std : : string listener_foo_update1_json = R " EOF ( <nl> { <nl> " name " : " foo " , <nl> " address " : " tcp : / / 127 . 0 . 0 . 1 : 1234 " , <nl> TEST_F ( ListenerManagerImplTest , AddOrUpdateListener ) { <nl> } <nl> ) EOF " ; <nl> <nl> - loader = Json : : Factory : : loadFromString ( listener_foo_update1_json ) ; <nl> ListenerHandle * listener_foo_update1 = expectListenerCreate ( false ) ; <nl> EXPECT_CALL ( * listener_foo , onDestroy ( ) ) ; <nl> - EXPECT_TRUE ( manager_ - > addOrUpdateListener ( * loader ) ) ; <nl> + EXPECT_TRUE ( manager_ - > addOrUpdateListener ( parseListenerFromJson ( listener_foo_update1_json ) ) ) ; <nl> checkStats ( 1 , 1 , 0 , 0 , 1 , 0 ) ; <nl> <nl> / / Start workers . <nl> TEST_F ( ListenerManagerImplTest , AddOrUpdateListener ) { <nl> worker_ - > callAddCompletion ( true ) ; <nl> <nl> / / Update duplicate should be a NOP . 
<nl> - EXPECT_FALSE ( manager_ - > addOrUpdateListener ( * loader ) ) ; <nl> + EXPECT_FALSE ( manager_ - > addOrUpdateListener ( parseListenerFromJson ( listener_foo_update1_json ) ) ) ; <nl> checkStats ( 1 , 1 , 0 , 0 , 1 , 0 ) ; <nl> <nl> / / Update foo . Should go into warming , have an immediate warming callback , and start immediate <nl> / / removal . <nl> - loader = Json : : Factory : : loadFromString ( listener_foo_json ) ; <nl> ListenerHandle * listener_foo_update2 = expectListenerCreate ( false ) ; <nl> EXPECT_CALL ( * worker_ , addListener ( _ , _ ) ) ; <nl> EXPECT_CALL ( * worker_ , stopListener ( _ ) ) ; <nl> EXPECT_CALL ( * listener_foo_update1 - > drain_manager_ , startDrainSequence ( _ ) ) ; <nl> - EXPECT_TRUE ( manager_ - > addOrUpdateListener ( * loader ) ) ; <nl> + EXPECT_TRUE ( manager_ - > addOrUpdateListener ( parseListenerFromJson ( listener_foo_json ) ) ) ; <nl> worker_ - > callAddCompletion ( true ) ; <nl> checkStats ( 1 , 2 , 0 , 0 , 1 , 1 ) ; <nl> <nl> TEST_F ( ListenerManagerImplTest , AddOrUpdateListener ) { <nl> checkStats ( 1 , 2 , 0 , 0 , 1 , 0 ) ; <nl> <nl> / / Add bar listener . <nl> - std : : string listener_bar_json = R " EOF ( <nl> + const std : : string listener_bar_json = R " EOF ( <nl> { <nl> " name " : " bar " , <nl> " address " : " tcp : / / 127 . 0 . 0 . 1 : 1235 " , <nl> TEST_F ( ListenerManagerImplTest , AddOrUpdateListener ) { <nl> } <nl> ) EOF " ; <nl> <nl> - loader = Json : : Factory : : loadFromString ( listener_bar_json ) ; <nl> ListenerHandle * listener_bar = expectListenerCreate ( false ) ; <nl> EXPECT_CALL ( listener_factory_ , createListenSocket ( _ , true ) ) ; <nl> EXPECT_CALL ( * worker_ , addListener ( _ , _ ) ) ; <nl> - EXPECT_TRUE ( manager_ - > addOrUpdateListener ( * loader ) ) ; <nl> + EXPECT_TRUE ( manager_ - > addOrUpdateListener ( parseListenerFromJson ( listener_bar_json ) ) ) ; <nl> EXPECT_EQ ( 2UL , manager_ - > listeners ( ) . size ( ) ) ; <nl> worker_ - > callAddCompletion ( true ) ; <nl> checkStats ( 2 , 2 , 0 , 0 , 2 , 0 ) ; <nl> <nl> / / Add baz listener , this time requiring initializing . <nl> - std : : string listener_baz_json = R " EOF ( <nl> + const std : : string listener_baz_json = R " EOF ( <nl> { <nl> " name " : " baz " , <nl> " address " : " tcp : / / 127 . 0 . 0 . 1 : 1236 " , <nl> TEST_F ( ListenerManagerImplTest , AddOrUpdateListener ) { <nl> } <nl> ) EOF " ; <nl> <nl> - loader = Json : : Factory : : loadFromString ( listener_baz_json ) ; <nl> ListenerHandle * listener_baz = expectListenerCreate ( true ) ; <nl> EXPECT_CALL ( listener_factory_ , createListenSocket ( _ , true ) ) ; <nl> EXPECT_CALL ( listener_baz - > target_ , initialize ( _ ) ) ; <nl> - EXPECT_TRUE ( manager_ - > addOrUpdateListener ( * loader ) ) ; <nl> + EXPECT_TRUE ( manager_ - > addOrUpdateListener ( parseListenerFromJson ( listener_baz_json ) ) ) ; <nl> EXPECT_EQ ( 2UL , manager_ - > listeners ( ) . size ( ) ) ; <nl> checkStats ( 3 , 2 , 0 , 1 , 2 , 0 ) ; <nl> <nl> / / Update a duplicate baz that is currently warming . <nl> - EXPECT_FALSE ( manager_ - > addOrUpdateListener ( * loader ) ) ; <nl> + EXPECT_FALSE ( manager_ - > addOrUpdateListener ( parseListenerFromJson ( listener_baz_json ) ) ) ; <nl> checkStats ( 3 , 2 , 0 , 1 , 2 , 0 ) ; <nl> <nl> / / Update baz while it is warming . <nl> - std : : string listener_baz_update1_json = R " EOF ( <nl> + const std : : string listener_baz_update1_json = R " EOF ( <nl> { <nl> " name " : " baz " , <nl> " address " : " tcp : / / 127 . 0 . 0 . 
1 : 1236 " , <nl> TEST_F ( ListenerManagerImplTest , AddOrUpdateListener ) { <nl> } <nl> ) EOF " ; <nl> <nl> - loader = Json : : Factory : : loadFromString ( listener_baz_update1_json ) ; <nl> ListenerHandle * listener_baz_update1 = expectListenerCreate ( true ) ; <nl> EXPECT_CALL ( * listener_baz , onDestroy ( ) ) . WillOnce ( Invoke ( [ listener_baz ] ( ) - > void { <nl> / / Call the initialize callback during destruction like RDS will . <nl> listener_baz - > target_ . callback_ ( ) ; <nl> } ) ) ; <nl> EXPECT_CALL ( listener_baz_update1 - > target_ , initialize ( _ ) ) ; <nl> - EXPECT_TRUE ( manager_ - > addOrUpdateListener ( * loader ) ) ; <nl> + EXPECT_TRUE ( manager_ - > addOrUpdateListener ( parseListenerFromJson ( listener_baz_update1_json ) ) ) ; <nl> EXPECT_EQ ( 2UL , manager_ - > listeners ( ) . size ( ) ) ; <nl> checkStats ( 3 , 3 , 0 , 1 , 2 , 0 ) ; <nl> <nl> TEST_F ( ListenerManagerImplTest , AddDrainingListener ) { <nl> manager_ - > startWorkers ( guard_dog_ ) ; <nl> <nl> / / Add foo listener directly into active . <nl> - std : : string listener_foo_json = R " EOF ( <nl> + const std : : string listener_foo_json = R " EOF ( <nl> { <nl> " name " : " foo " , <nl> " address " : " tcp : / / 127 . 0 . 0 . 1 : 1234 " , <nl> TEST_F ( ListenerManagerImplTest , AddDrainingListener ) { <nl> new Network : : Address : : Ipv4Instance ( " 127 . 0 . 0 . 1 " , 1234 ) ) ; <nl> ON_CALL ( * listener_factory_ . socket_ , localAddress ( ) ) . WillByDefault ( Return ( local_address ) ) ; <nl> <nl> - Json : : ObjectSharedPtr loader = Json : : Factory : : loadFromString ( listener_foo_json ) ; <nl> ListenerHandle * listener_foo = expectListenerCreate ( false ) ; <nl> EXPECT_CALL ( listener_factory_ , createListenSocket ( _ , true ) ) ; <nl> EXPECT_CALL ( * worker_ , addListener ( _ , _ ) ) ; <nl> - EXPECT_TRUE ( manager_ - > addOrUpdateListener ( * loader ) ) ; <nl> + EXPECT_TRUE ( manager_ - > addOrUpdateListener ( parseListenerFromJson ( listener_foo_json ) ) ) ; <nl> worker_ - > callAddCompletion ( true ) ; <nl> checkStats ( 1 , 0 , 0 , 0 , 1 , 0 ) ; <nl> <nl> TEST_F ( ListenerManagerImplTest , AddDrainingListener ) { <nl> checkStats ( 1 , 0 , 1 , 0 , 0 , 1 ) ; <nl> <nl> / / Add foo again . We should use the socket from draining . <nl> - loader = Json : : Factory : : loadFromString ( listener_foo_json ) ; <nl> ListenerHandle * listener_foo2 = expectListenerCreate ( false ) ; <nl> EXPECT_CALL ( * worker_ , addListener ( _ , _ ) ) ; <nl> - EXPECT_TRUE ( manager_ - > addOrUpdateListener ( * loader ) ) ; <nl> + EXPECT_TRUE ( manager_ - > addOrUpdateListener ( parseListenerFromJson ( listener_foo_json ) ) ) ; <nl> worker_ - > callAddCompletion ( true ) ; <nl> checkStats ( 2 , 0 , 1 , 0 , 1 , 1 ) ; <nl> <nl> TEST_F ( ListenerManagerImplTest , CantBindSocket ) { <nl> EXPECT_CALL ( * worker_ , start ( _ ) ) ; <nl> manager_ - > startWorkers ( guard_dog_ ) ; <nl> <nl> - std : : string listener_foo_json = R " EOF ( <nl> + const std : : string listener_foo_json = R " EOF ( <nl> { <nl> " name " : " foo " , <nl> " address " : " tcp : / / 127 . 0 . 0 . 1 : 1234 " , <nl> TEST_F ( ListenerManagerImplTest , CantBindSocket ) { <nl> } <nl> ) EOF " ; <nl> <nl> - Json : : ObjectSharedPtr loader = Json : : Factory : : loadFromString ( listener_foo_json ) ; <nl> ListenerHandle * listener_foo = expectListenerCreate ( true ) ; <nl> EXPECT_CALL ( listener_factory_ , createListenSocket ( _ , true ) ) <nl> . 
WillOnce ( Throw ( EnvoyException ( " can ' t bind " ) ) ) ; <nl> EXPECT_CALL ( * listener_foo , onDestroy ( ) ) ; <nl> - EXPECT_THROW ( manager_ - > addOrUpdateListener ( * loader ) , EnvoyException ) ; <nl> + EXPECT_THROW ( manager_ - > addOrUpdateListener ( parseListenerFromJson ( listener_foo_json ) ) , <nl> + EnvoyException ) ; <nl> } <nl> <nl> TEST_F ( ListenerManagerImplTest , ListenerDraining ) { <nl> TEST_F ( ListenerManagerImplTest , ListenerDraining ) { <nl> EXPECT_CALL ( * worker_ , start ( _ ) ) ; <nl> manager_ - > startWorkers ( guard_dog_ ) ; <nl> <nl> - std : : string listener_foo_json = R " EOF ( <nl> + const std : : string listener_foo_json = R " EOF ( <nl> { <nl> " name " : " foo " , <nl> " address " : " tcp : / / 127 . 0 . 0 . 1 : 1234 " , <nl> TEST_F ( ListenerManagerImplTest , ListenerDraining ) { <nl> } <nl> ) EOF " ; <nl> <nl> - Json : : ObjectSharedPtr loader = Json : : Factory : : loadFromString ( listener_foo_json ) ; <nl> ListenerHandle * listener_foo = expectListenerCreate ( false ) ; <nl> EXPECT_CALL ( listener_factory_ , createListenSocket ( _ , true ) ) ; <nl> EXPECT_CALL ( * worker_ , addListener ( _ , _ ) ) ; <nl> - EXPECT_TRUE ( manager_ - > addOrUpdateListener ( * loader ) ) ; <nl> + EXPECT_TRUE ( manager_ - > addOrUpdateListener ( parseListenerFromJson ( listener_foo_json ) ) ) ; <nl> worker_ - > callAddCompletion ( true ) ; <nl> checkStats ( 1 , 0 , 0 , 0 , 1 , 0 ) ; <nl> <nl> TEST_F ( ListenerManagerImplTest , RemoveListener ) { <nl> EXPECT_FALSE ( manager_ - > removeListener ( " unknown " ) ) ; <nl> <nl> / / Add foo listener into warming . <nl> - std : : string listener_foo_json = R " EOF ( <nl> + const std : : string listener_foo_json = R " EOF ( <nl> { <nl> " name " : " foo " , <nl> " address " : " tcp : / / 127 . 0 . 0 . 1 : 1234 " , <nl> TEST_F ( ListenerManagerImplTest , RemoveListener ) { <nl> } <nl> ) EOF " ; <nl> <nl> - Json : : ObjectSharedPtr loader = Json : : Factory : : loadFromString ( listener_foo_json ) ; <nl> ListenerHandle * listener_foo = expectListenerCreate ( true ) ; <nl> EXPECT_CALL ( listener_factory_ , createListenSocket ( _ , true ) ) ; <nl> EXPECT_CALL ( listener_foo - > target_ , initialize ( _ ) ) ; <nl> - EXPECT_TRUE ( manager_ - > addOrUpdateListener ( * loader ) ) ; <nl> + EXPECT_TRUE ( manager_ - > addOrUpdateListener ( parseListenerFromJson ( listener_foo_json ) ) ) ; <nl> EXPECT_EQ ( 0UL , manager_ - > listeners ( ) . size ( ) ) ; <nl> checkStats ( 1 , 0 , 0 , 1 , 0 , 0 ) ; <nl> <nl> TEST_F ( ListenerManagerImplTest , RemoveListener ) { <nl> listener_foo = expectListenerCreate ( true ) ; <nl> EXPECT_CALL ( listener_factory_ , createListenSocket ( _ , true ) ) ; <nl> EXPECT_CALL ( listener_foo - > target_ , initialize ( _ ) ) ; <nl> - EXPECT_TRUE ( manager_ - > addOrUpdateListener ( * loader ) ) ; <nl> + EXPECT_TRUE ( manager_ - > addOrUpdateListener ( parseListenerFromJson ( listener_foo_json ) ) ) ; <nl> checkStats ( 2 , 0 , 1 , 1 , 0 , 0 ) ; <nl> EXPECT_CALL ( * worker_ , addListener ( _ , _ ) ) ; <nl> listener_foo - > target_ . callback_ ( ) ; <nl> TEST_F ( ListenerManagerImplTest , RemoveListener ) { <nl> checkStats ( 2 , 0 , 1 , 0 , 1 , 0 ) ; <nl> <nl> / / Update foo into warming . <nl> - std : : string listener_foo_update1_json = R " EOF ( <nl> + const std : : string listener_foo_update1_json = R " EOF ( <nl> { <nl> " name " : " foo " , <nl> " address " : " tcp : / / 127 . 0 . 0 . 
1 : 1234 " , <nl> TEST_F ( ListenerManagerImplTest , RemoveListener ) { <nl> } <nl> ) EOF " ; <nl> <nl> - loader = Json : : Factory : : loadFromString ( listener_foo_update1_json ) ; <nl> ListenerHandle * listener_foo_update1 = expectListenerCreate ( true ) ; <nl> EXPECT_CALL ( listener_foo_update1 - > target_ , initialize ( _ ) ) ; <nl> - EXPECT_TRUE ( manager_ - > addOrUpdateListener ( * loader ) ) ; <nl> + EXPECT_TRUE ( manager_ - > addOrUpdateListener ( parseListenerFromJson ( listener_foo_update1_json ) ) ) ; <nl> EXPECT_EQ ( 1UL , manager_ - > listeners ( ) . size ( ) ) ; <nl> checkStats ( 2 , 1 , 1 , 1 , 1 , 0 ) ; <nl> <nl> TEST_F ( ListenerManagerImplTest , AddListenerFailure ) { <nl> } <nl> ) EOF " ; <nl> <nl> - Json : : ObjectSharedPtr loader = Json : : Factory : : loadFromString ( listener_foo_json ) ; <nl> ListenerHandle * listener_foo = expectListenerCreate ( false ) ; <nl> EXPECT_CALL ( listener_factory_ , createListenSocket ( _ , true ) ) ; <nl> EXPECT_CALL ( * worker_ , addListener ( _ , _ ) ) ; <nl> - EXPECT_TRUE ( manager_ - > addOrUpdateListener ( * loader ) ) ; <nl> + EXPECT_TRUE ( manager_ - > addOrUpdateListener ( parseListenerFromJson ( listener_foo_json ) ) ) ; <nl> <nl> EXPECT_CALL ( * worker_ , stopListener ( _ ) ) ; <nl> EXPECT_CALL ( * listener_foo - > drain_manager_ , startDrainSequence ( _ ) ) ; <nl> TEST_F ( ListenerManagerImplTest , DuplicateAddressDontBind ) { <nl> } <nl> ) EOF " ; <nl> <nl> - Json : : ObjectSharedPtr loader = Json : : Factory : : loadFromString ( listener_foo_json ) ; <nl> ListenerHandle * listener_foo = expectListenerCreate ( true ) ; <nl> EXPECT_CALL ( listener_factory_ , createListenSocket ( _ , false ) ) ; <nl> EXPECT_CALL ( listener_foo - > target_ , initialize ( _ ) ) ; <nl> - EXPECT_TRUE ( manager_ - > addOrUpdateListener ( * loader ) ) ; <nl> + EXPECT_TRUE ( manager_ - > addOrUpdateListener ( parseListenerFromJson ( listener_foo_json ) ) ) ; <nl> <nl> / / Add bar with same non - binding address . Should fail . <nl> const std : : string listener_bar_json = R " EOF ( <nl> TEST_F ( ListenerManagerImplTest , DuplicateAddressDontBind ) { <nl> } <nl> ) EOF " ; <nl> <nl> - loader = Json : : Factory : : loadFromString ( listener_bar_json ) ; <nl> ListenerHandle * listener_bar = expectListenerCreate ( true ) ; <nl> EXPECT_CALL ( * listener_bar , onDestroy ( ) ) ; <nl> EXPECT_THROW_WITH_MESSAGE ( <nl> - manager_ - > addOrUpdateListener ( * loader ) , EnvoyException , <nl> + manager_ - > addOrUpdateListener ( parseListenerFromJson ( listener_bar_json ) ) , EnvoyException , <nl> " error adding listener : ' bar ' has duplicate address ' 0 . 0 . 0 . 0 : 1234 ' as existing listener " ) ; <nl> <nl> / / Move foo to active and then try to add again . This should still fail . <nl> TEST_F ( ListenerManagerImplTest , DuplicateAddressDontBind ) { <nl> listener_bar = expectListenerCreate ( true ) ; <nl> EXPECT_CALL ( * listener_bar , onDestroy ( ) ) ; <nl> EXPECT_THROW_WITH_MESSAGE ( <nl> - manager_ - > addOrUpdateListener ( * loader ) , EnvoyException , <nl> + manager_ - > addOrUpdateListener ( parseListenerFromJson ( listener_bar_json ) ) , EnvoyException , <nl> " error adding listener : ' bar ' has duplicate address ' 0 . 0 . 0 . 0 : 1234 ' as existing listener " ) ; <nl> <nl> EXPECT_CALL ( * listener_foo , onDestroy ( ) ) ; <nl> new file mode 100644 <nl> index 00000000000 . . c37b988c72d <nl> mmm / dev / null <nl> ppp b / test / server / utility . h <nl> <nl> + # pragma once <nl> + <nl> + # include " common / config / lds_json . 
h " <nl> + # include " common / json / json_loader . h " <nl> + <nl> + namespace Envoy { <nl> + namespace Server { <nl> + namespace { <nl> + <nl> + inline envoy : : api : : v2 : : Listener parseListenerFromJson ( const std : : string & json_string ) { <nl> + envoy : : api : : v2 : : Listener listener ; <nl> + auto json_object_ptr = Json : : Factory : : loadFromString ( json_string ) ; <nl> + Config : : LdsJson : : translateListener ( * json_object_ptr , listener ) ; <nl> + return listener ; <nl> + } <nl> + <nl> + } / / namespace <nl> + } / / namespace Server <nl> + } / / namespace Envoy <nl>
config : Listener v1 JSON - > proto translation . ( )
envoyproxy/envoy
65066e1798a9cabc7914fd51e6258adbd0a5034c
2017-08-16T00:43:24Z
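For context on the envoyproxy/envoy commit above: it routes the deprecated v1 JSON listener schema through Config::LdsJson::translateListener into an envoy::api::v2::Listener proto, which ListenerManager::addOrUpdateListener now consumes, and the tests funnel their JSON fixtures through the new parseListenerFromJson helper in test/server/utility.h. A minimal sketch of that round trip follows; parseListenerFromJson and the JSON shape come straight from the diff, while the test name and the single assertion are illustrative additions, not part of the commit.

#include "test/server/utility.h"

#include "gtest/gtest.h"

#include <string>

namespace Envoy {
namespace Server {

// Minimal sketch: pushes a v1-style JSON listener through the translation
// helper added by this commit and checks that the proto picked up the name.
TEST(ListenerJsonTranslationSketch, PopulatesProto) {
  const std::string json = R"EOF(
  {
    "name": "example_listener",
    "address": "tcp://127.0.0.1:1234",
    "filters": []
  }
  )EOF";

  const envoy::api::v2::Listener listener = parseListenerFromJson(json);
  EXPECT_EQ("example_listener", listener.name());
}

} // namespace Server
} // namespace Envoy

The assertion mirrors what the updated expectAdd helper in lds_api_test.cc does against config.name() in the diff above.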
mmm a / src / bittorrent . cpp <nl> ppp b / src / bittorrent . cpp <nl> QTorrentHandle Bittorrent : : addMagnetUri ( QString magnet_uri , bool resumed ) { <nl> qDebug ( " Resuming magnet URI : % s " , qPrintable ( hash ) ) ; <nl> / / Load metadata <nl> if ( QFile : : exists ( torrentBackup . path ( ) + QDir : : separator ( ) + hash + QString ( " . torrent " ) ) ) <nl> - return addTorrent ( torrentBackup . path ( ) + QDir : : separator ( ) + hash + QString ( " . torrent " ) , false , false , true ) ; <nl> + return addTorrent ( torrentBackup . path ( ) + QDir : : separator ( ) + hash + QString ( " . torrent " ) , false , QString ( ) , true ) ; <nl> } else { <nl> qDebug ( " Adding new magnet URI " ) ; <nl> } <nl>
Fix compilation warning
qbittorrent/qBittorrent
ee30a75b5731b28634a980e07a61ce718c5dd329
2010-08-22T18:11:19Z
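For the qbittorrent/qBittorrent one-liner above, the fix swaps a bool literal for QString() in the third argument of addTorrent. A plausible reading, assuming that position takes a QString (the real signature is not shown in the diff): in C++03, false is a null pointer constant, so it converted through the implicit QString(const char*) constructor and the compiler warned about a bool-to-pointer conversion. The sketch below reproduces the two call shapes with a hypothetical stand-in signature; all names are illustrative.

#include <QString>

// Assumed stand-in for Bittorrent::addTorrent(); the diff shows only that the
// third argument accepts QString(), so the parameter names here are guesses.
static void addTorrent(const QString& path, bool from_scan_dir,
                       const QString& from_url, bool resumed) {
  (void)path;
  (void)from_scan_dir;
  (void)from_url;
  (void)resumed;
}

int main() {
  // Pre-fix call shape, kept commented out: under C++03 'false' is a null
  // pointer constant, so it reaches the QString parameter via the implicit
  // QString(const char*) constructor and GCC-era compilers warn about the
  // bool-to-pointer conversion (later standards reject it outright).
  //   addTorrent(QString("x.torrent"), false, false, true);

  // Post-fix call shape from the diff: an explicit empty QString.
  addTorrent(QString("x.torrent"), false, QString(), true);
  return 0;
}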
mmm a / language / Finnish / strings . xml <nl> ppp b / language / Finnish / strings . xml <nl> <nl>  < ? xml version = " 1 . 0 " encoding = " utf - 8 " standalone = " yes " ? > <nl> < ! - - Language file translated with Team XBMC Translator - - > <nl> < ! - - Translator : Mika Pirinen - - > <nl> - < ! - - Date of translation : 15 / 12 / 2009 - - > <nl> - < ! - - Finnish strings based on English strings revision 25728 - - > <nl> + < ! - - Date of translation : 19 / 12 / 2009 - - > <nl> + < ! - - Finnish strings based on English strings revision 25792 - - > <nl> < strings > <nl> < string id = " 0 " > Ohjelmat < / string > <nl> < string id = " 1 " > Kuvat < / string > <nl> <nl> < string id = " 110 " > Luo pienoiskuvakkeet < / string > <nl> < string id = " 111 " > Kirjanmerkit < / string > <nl> < string id = " 112 " > Tauko < / string > <nl> - < string id = " 113 " > < / string > <nl> - < string id = " 114 " > < / string > <nl> + <nl> < string id = " 115 " > Kopioi < / string > <nl> < string id = " 116 " > Siirrä < / string > <nl> < string id = " 117 " > Poista < / string > <nl> <nl> < string id = " 168 " > Kellonajan päivitys internetistä < / string > <nl> < string id = " 169 " > Näytön resoluutio < / string > <nl> < string id = " 170 " > Säädä näytön päivitysnopeus elokuvaan sopivaksi < / string > <nl> - < string id = " 171 " > < / string > <nl> + <nl> < string id = " 172 " > Julkaistu < / string > <nl> - < string id = " 173 " > < / string > <nl> - < string id = " 174 " > < / string > <nl> + <nl> < string id = " 175 " > Musiikkityylit < / string > <nl> < string id = " 176 " > Tyylilaji < / string > <nl> - < string id = " 177 " > < / string > <nl> - < string id = " 178 " > < / string > <nl> + <nl> < string id = " 179 " > Kappale < / string > <nl> < string id = " 180 " > Kesto < / string > <nl> < string id = " 181 " > Valitse albumi < / string > <nl> <nl> < string id = " 196 " > Valitse elokuva : < / string > <nl> < string id = " 197 " > Haetaan tietoja % s : sta < / string > <nl> < string id = " 198 " > Noudetaan elokuvatietoja < / string > <nl> - < string id = " 199 " > < / string > <nl> - < string id = " 200 " > < / string > <nl> - < string id = " 201 " > < / string > <nl> + <nl> < string id = " 202 " > Iskulause < / string > <nl> < string id = " 203 " > Juoni < / string > <nl> - < string id = " 204 " > < / string > <nl> + <nl> < string id = " 205 " > Äänestysmäärä : < / string > <nl> < string id = " 206 " > Näyttelijät < / string > <nl> < string id = " 207 " > Juoni < / string > <nl> <nl> < string id = " 221 " > Verkkoa ei ole kytketty < / string > <nl> < string id = " 222 " > Peruuta < / string > <nl> < string id = " 224 " > Nopeus < / string > <nl> - < string id = " 225 " > Siirtymäaika < / string > <nl> + <nl> < string id = " 226 " > Testikuviot . . . 
< / string > <nl> < string id = " 227 " > Hae ääni - CD - levyn kappaletiedot internetistä < / string > <nl> < string id = " 228 " > Sekoita soittolista ladattaessa < / string > <nl> <nl> < string id = " 237 " > Pienennys < / string > <nl> < string id = " 238 " > Suurennus < / string > <nl> < string id = " 239 " > Tyhjennä soittolista lopuksi < / string > <nl> - < string id = " 240 " > DVD - levyjen automaattinen käynnistys < / string > <nl> - < string id = " 241 " > VCD - / SVCD - levyjen automaattinen käynnistys < / string > <nl> - < string id = " 242 " > CD - levyjen automaattinen käynnistys < / string > <nl> - < string id = " 243 " > Xbox - levyjen automaattinen käynnistys < / string > <nl> - < string id = " 244 " > Toista videot automaattisesti < / string > <nl> - < string id = " 245 " > Toista musiikki automaattisesti < / string > <nl> - < string id = " 246 " > Toista kuvat automaattisesti < / string > <nl> + <nl> < string id = " 247 " > Skriptit < / string > <nl> < string id = " 248 " > Kieli < / string > <nl> < string id = " 249 " > Musiikki < / string > <nl> <nl> < string id = " 415 " > Ladataan pienoiskuvaketta . . . < / string > <nl> < string id = " 416 " > Ei saatavilla < / string > <nl> < string id = " 417 " > Suuret kuvak . < / string > <nl> - < string id = " 418 " > Kirjasto käytössä < / string > <nl> - < string id = " 419 " > < / string > <nl> - < string id = " 420 " > < / string > <nl> - < string id = " 421 " > < / string > <nl> + <nl> < string id = " 422 " > Poista albumin tiedot < / string > <nl> < string id = " 423 " > Poista CD - tiedot < / string > <nl> < string id = " 424 " > Valitse < / string > <nl> <nl> < string id = " 431 " > Ei välimuistia < / string > <nl> < string id = " 432 " > Poista elokuva kirjastosta < / string > <nl> < string id = " 433 " > Haluatko varmasti poistaa ' % s : n ' ? < / string > <nl> - < string id = " 434 " > < / string > <nl> - < string id = " 435 " > < / string > <nl> - < string id = " 436 " > < / string > <nl> + <nl> < string id = " 437 " > Siirrettävä levy < / string > <nl> < string id = " 438 " > Avataan tiedosto < / string > <nl> < string id = " 439 " > Välimuistit < / string > <nl> <nl> < string id = " 480 " > Käyttöliittymä < / string > <nl> < string id = " 481 " > Ääniasetukset < / string > <nl> < string id = " 482 " > Tietoja XBMC : stä < / string > <nl> - < string id = " 483 " > < / string > <nl> - < string id = " 484 " > < / string > <nl> + <nl> < string id = " 485 " > Poista albumin tiedot < / string > <nl> < string id = " 486 " > Jatkuva toisto päällä < / string > <nl> < string id = " 487 " > Kappaleen toisto < / string > <nl> <nl> < string id = " 513 " > Päävalikko < / string > <nl> < string id = " 514 " > Manuaaliset asetukset < / string > <nl> < string id = " 515 " > Lajityyppi < / string > <nl> - < string id = " 516 " > Avaa yksittäispakatut tiedostot automaattisesti < / string > <nl> + <nl> < string id = " 517 " > Viimeksi toistetut albumit < / string > <nl> < string id = " 518 " > Käynnistä < / string > <nl> < string id = " 519 " > Käynnistä järjestelmässä . . . < / string > <nl> <nl> < string id = " 534 " > Näytä : % s < / string > <nl> < string id = " 535 " > Lista < / string > <nl> < string id = " 536 " > Kuvakkeet < / string > <nl> - < string id = " 537 " > Leveä < / string > <nl> + < string id = " 537 " > Suuri lista < / string > <nl> < string id = " 538 " > Suuret kuvak . 
< / string > <nl> - < string id = " 539 " > Ruudut < / string > <nl> - < string id = " 540 " > Leveä < / string > <nl> - < string id = " 541 " > Albumi - kuvakkeet < / string > <nl> + < string id = " 539 " > Leveä < / string > <nl> + < string id = " 540 " > Suuri ja leveä < / string > <nl> + < string id = " 541 " > Albumikuvakkeet < / string > <nl> < string id = " 542 " > DVD - kuvakkeet < / string > <nl> < string id = " 543 " > DVD < / string > <nl> < string id = " 544 " > Mediatiedot < / string > <nl> <nl> < string id = " 656 " > Valitse soittolista < / string > <nl> < string id = " 657 " > Valitse kansio < / string > <nl> < string id = " 658 " > Kappaleen tiedot < / string > <nl> - < string id = " 659 " > < / string > <nl> + <nl> < string id = " 660 " > Voimakkuuden vahvistus < / string > <nl> < string id = " 661 " > Valitse vientikansio < / string > <nl> < string id = " 662 " > Valittu tiedosto ei ole enää saatavilla < / string > <nl> <nl> < string id = " 701 " > Poistetaan vanhat kappaleet kirjastosta < / string > <nl> < string id = " 702 " > Tämä polku on luettu aiemmin < / string > <nl> < string id = " 705 " > Verkko < / string > <nl> - < string id = " 706 " > HTTP - välipalvelin < / string > <nl> - < string id = " 707 " > HTTP - välipalvelimen portti < / string > <nl> + < string id = " 706 " > - Palvelin < / string > <nl> + <nl> < string id = " 708 " > Internet yhteys HTTP - välipalvelimen kautta < / string > <nl> - < string id = " 709 " > HTTP - välipalvelimen käyttäjätunnus < / string > <nl> - < string id = " 710 " > HTTP - välipalvelimen salasana < / string > <nl> + <nl> < string id = " 711 " > Internet - asetukset < / string > <nl> < string id = " 712 " > Portti ei kelpaa . Arvon on oltava 1 ja 65535 välillä . < / string > <nl> < string id = " 713 " > HTTP - välipalvelin < / string > <nl> - < string id = " 714 " > < / string > <nl> + <nl> < string id = " 715 " > Verkko - osoitteen määritys < / string > <nl> < string id = " 716 " > Automaattinen ( DHCP ) < / string > <nl> < string id = " 717 " > Manuaalinen ( Kiinteä ) < / string > <nl> < string id = " 718 " > Oletusasetukset ( Dashboard ) < / string > <nl> - < string id = " 719 " > IP - osoite < / string > <nl> - < string id = " 720 " > Aliverkon peite < / string > <nl> - < string id = " 721 " > Oletusyhdyskäytävä < / string > <nl> - < string id = " 722 " > Nimipalvelin ( DNS ) < / string > <nl> + < string id = " 719 " > - IP - osoite < / string > <nl> + < string id = " 720 " > - Aliverkon peite < / string > <nl> + < string id = " 721 " > - Oletusyhdyskäytävä < / string > <nl> + < string id = " 722 " > - Nimipalvelin ( DNS ) < / string > <nl> < string id = " 723 " > Tallenna ja uudelleenkäynnistä < / string > <nl> < string id = " 724 " > Osoite on oltava muodossa AAA . BBB . CCC . DDD < / string > <nl> < string id = " 725 " > ja numerot väliltä 0 - 255 . < / string > <nl> <nl> < string id = " 749 " > Peilikuva < / string > <nl> < string id = " 750 " > Oletko varma asiasta ? 
< / string > <nl> < string id = " 751 " > Poistetaan sijainti < / string > <nl> - < string id = " 752 " > < / string > <nl> - < string id = " 753 " > < / string > <nl> + <nl> < string id = " 754 " > Lisää linkki ohjelmaan < / string > <nl> < string id = " 755 " > Muokkaa ohjelman polkua < / string > <nl> < string id = " 756 " > Muokkaa ohjelman nimeä < / string > <nl> < string id = " 757 " > Muokkaa polun syvyyttä < / string > <nl> - < string id = " 758 " > < / string > <nl> + <nl> < string id = " 759 " > Suuri luettelo < / string > <nl> < string id = " 760 " > Keltainen < / string > <nl> < string id = " 761 " > Valkoinen < / string > <nl> <nl> < string id = " 788 " > Verkkoliitäntä poistettu käytöstä . < / string > <nl> < string id = " 789 " > Langattoman verkon nimi ( ESSID ) < / string > <nl> <nl> - < string id = " 790 " > Ohjaustapahtumat < / string > <nl> < string id = " 791 " > Tämän järjestelmän ohjelmat voi ohjata XBMC : aa < / string > <nl> < string id = " 792 " > Portti < / string > <nl> < string id = " 793 " > Porttialue < / string > <nl> <nl> < string id = " 851 " > Sallittu portin alue on 1 - 65535 < / string > <nl> < string id = " 852 " > Sallittu portin alue on 1024 - 65535 < / string > <nl> <nl> - < string id = " 998 " > < / string > <nl> - < string id = " 999 " > < / string > <nl> < string id = " 1000 " > Näytönsäästäjän esikatselu < / string > <nl> < string id = " 1001 " > Ei saada yhteyttä < / string > <nl> < string id = " 1002 " > XBMC ei saanut yhteyttä verkko - osoitteeseen . < / string > <nl> < string id = " 1003 " > Saattaa olla , ettei verkkoyhteyttä ole . < / string > <nl> < string id = " 1004 " > Haluatko silti lisätä sijainnin ? < / string > <nl> - < string id = " 1005 " > < / string > <nl> + <nl> < string id = " 1006 " > IP - osoite < / string > <nl> < string id = " 1007 " > Lisää verkko - osoite < / string > <nl> < string id = " 1008 " > Protokolla < / string > <nl> <nl> < string id = " 1045 " > Lisäosan asetukset < / string > <nl> < string id = " 1046 " > Yhteyspisteet < / string > <nl> < string id = " 1047 " > Jokin muu . . . < / string > <nl> - < string id = " 1048 " > - Käyttäjätunnus < / string > <nl> + < string id = " 1048 " > - Käyttäjätunnus < / string > <nl> < string id = " 1049 " > Skripti - asetukset < / string > <nl> < string id = " 1050 " > Singlet < / string > <nl> <nl> <nl> < string id = " 1250 " > Tunnistus < / string > <nl> < string id = " 1251 " > Järjestelmän automaattitunnistus < / string > <nl> < string id = " 1252 " > Lempinimi < / string > <nl> - < string id = " 1253 " > < / string > <nl> + <nl> < string id = " 1254 " > Kysy ennen yhdistämistä < / string > <nl> < string id = " 1255 " > Lähetä FTP - käyttäjätunnus ja salasana < / string > <nl> < string id = " 1256 " > Pingauksen väli < / string > <nl> <nl> <nl> < string id = " 2050 " > Kesto < / string > <nl> <nl> - < string id = " 2100 " > Skripti virhe ! : % s < / string > <nl> + < string id = " 2100 " > Skripti virhe ! 
: % s < / string > <nl> <nl> < string id = " 4501 " > LCD / VFD käytössä < / string > <nl> <nl> <nl> < string id = " 10018 " > Asetukset - Verkko < / string > <nl> < string id = " 10019 " > Asetukset - Käyttöliittymä < / string > <nl> < string id = " 10020 " > Skriptit < / string > <nl> - < string id = " 10021 " > < / string > <nl> - < string id = " 10022 " > < / string > <nl> - < string id = " 10023 " > < / string > <nl> - < string id = " 10025 " > < / string > <nl> + <nl> < string id = " 10028 " > Videot / Soittolista < / string > <nl> < string id = " 10034 " > Asetukset - Profiilit < / string > <nl> <nl> <nl> < string id = " 10509 " > Verkkopelaaminen < / string > <nl> < string id = " 10510 " > Lisäosat < / string > <nl> < string id = " 10511 " > Järjestelmän tiedot < / string > <nl> - < string id = " 10512 " > < / string > <nl> - < string id = " 10513 " > < / string > <nl> - < string id = " 10514 " > < / string > <nl> - < string id = " 10515 " > < / string > <nl> + <nl> < string id = " 10516 " > Musiikki - Kirjasto < / string > <nl> < string id = " 10517 " > Musiikki - Nyt toistetaan < / string > <nl> - < string id = " 10518 " > < / string > <nl> - < string id = " 10519 " > < / string > <nl> - < string id = " 10520 " > < / string > <nl> - < string id = " 10521 " > < / string > <nl> + <nl> < string id = " 10522 " > Videot - Nyt toistetaan < / string > <nl> < string id = " 10523 " > Albumin tiedot < / string > <nl> < string id = " 10524 " > Elokuvan tiedot < / string > <nl> <nl> < string id = " 12004 " > Skriptit / Tiedot < / string > <nl> < string id = " 12005 " > Kokoruudun video < / string > <nl> < string id = " 12006 " > Äänen visualisointi < / string > <nl> - < string id = " 12007 " > < / string > <nl> + <nl> < string id = " 12008 " > Tiedostojen pinoamis - dialogi < / string > <nl> < string id = " 12009 " > Luo indeksi uudestaan . . . < / string > <nl> < string id = " 12010 " > Palaa Musiikki - valikkoon < / string > <nl> <nl> < string id = " 12021 " > Aloita alusta < / string > <nl> < string id = " 12022 " > Jatka % s : sta < / string > <nl> < string id = " 12023 " > Tarkastetaan nykyisiä trainereita . . . < / string > <nl> - < string id = " 12024 " > Luo pienoiskuvakkeet videoille , joilla ei ole kansikuvaa < / string > <nl> <nl> < string id = " 12310 " > 0 < / string > <nl> < string id = " 12311 " > 1 < / string > <nl> <nl> < string id = " 12384 " > 24 - tuntinen kello < / string > <nl> < string id = " 12385 " > Päivä / Kuukausi < / string > <nl> < string id = " 12386 " > Kuukausi / Päivä < / string > <nl> - < string id = " 12387 " > < / string > <nl> + <nl> < string id = " 12390 " > Järjestelmän käyntiaika < / string > <nl> < string id = " 12391 " > minuuttia < / string > <nl> < string id = " 12392 " > tuntia < / string > <nl> <nl> < string id = " 13009 " > Poistu < / string > <nl> < string id = " 13010 " > Horrostila < / string > <nl> < string id = " 13011 " > Valmiustila < / string > <nl> - < string id = " 13012 " > Lopeta < / string > <nl> - < string id = " 13013 " > Käynnistä uudelleen < / string > <nl> + < string id = " 13012 " > Lopeta < / string > <nl> + < string id = " 13013 " > Käynnistä uudelleen < / string > <nl> < string id = " 13014 " > Pienennä < / string > <nl> - < string id = " 13015 " > Virtakytkimen toiminto < / string > <nl> + < string id = " 13015 " > Virtakytkimen toiminto < / string > <nl> <nl> < string id = " 13020 " > Onko toinen istunto käynnissä , ehkä ssh : n kautta ? 
< / string > <nl> < string id = " 13021 " > Liitetty siirrettävä kiintolevy < / string > <nl> <nl> < string id = " 13025 " > Peliohjain irroitettu < / string > <nl> <nl> < string id = " 13100 " > Näytön välkynnän poisto < / string > <nl> - < string id = " 13101 " > Ajuri valitsee ( vaatii uudelleenkäynnistyksen ) < / string > <nl> + < string id = " 13101 " > Ajuri valitsee ( vaatii uudelleenkäynnistyksen ) < / string > <nl> < string id = " 13105 " > Pystytahdistus < / string > <nl> < string id = " 13106 " > Ei käytössä < / string > <nl> < string id = " 13107 " > Käytössä videoita toistettaessa < / string > <nl> <nl> < string id = " 13404 " > Jatka < / string > <nl> < string id = " 13405 " > Pienoiskuvake < / string > <nl> < string id = " 13406 " > Kuvan tiedot < / string > <nl> - < string id = " 13407 " > % s : in esiasetukset < / string > <nl> + < string id = " 13407 " > % s : in esiasetukset < / string > <nl> < string id = " 13408 " > ( IMDb - käyttäjien arvosana ) < / string > <nl> - < string id = " 13409 " > 250 suosituinta < / string > <nl> - < string id = " 13410 " > Aloita Last . fm : n kuuntelu < / string > <nl> + < string id = " 13409 " > 250 suosituinta < / string > <nl> + < string id = " 13410 " > Aloita Last . fm : n kuuntelu < / string > <nl> < string id = " 13411 " > Tuulettimen vähimmäisnopeus < / string > <nl> < string id = " 13412 " > RSS - syötteet sisältävät oikealta vasemmalle - tekstiä < / string > <nl> < string id = " 13413 " > Ladataan < / string > <nl> <nl> < string id = " 14074 " > Aseta aikavyöhyke < / string > <nl> < string id = " 14075 " > Kesäaika käytössä < / string > <nl> < string id = " 14076 " > Lisää suosikkeihin < / string > <nl> - < string id = " 14077 " > Poista suosikeista < / string > <nl> + < string id = " 14077 " > Poista suosikeista < / string > <nl> < string id = " 14078 " > Väritys < / string > <nl> < string id = " 14079 " > Aikavyöhyke maa < / string > <nl> < string id = " 14080 " > Aikavyöhyke < / string > <nl> - < string id = " 14081 " > Tiedostolistat < / string > <nl> + < string id = " 14081 " > Tiedostolistat < / string > <nl> < string id = " 14082 " > Näytä kuvan EXIF - tiedot < / string > <nl> < string id = " 14083 " > Käytä kokoruudun ikkunaa ennemmin kuin täyttä ruutua < / string > <nl> < string id = " 14084 " > Lisää valitut kappaleet toistojonoon < / string > <nl> <nl> < string id = " 14094 " > Syöttölaitteet < / string > <nl> < string id = " 14095 " > Tehonsäästö < / string > <nl> <nl> - < string id = " 15000 " > < / string > <nl> - < string id = " 15001 " > < / string > <nl> - < string id = " 15002 " > < / string > <nl> - < string id = " 15003 " > < / string > <nl> - < string id = " 15004 " > < / string > <nl> - < string id = " 15005 " > < / string > <nl> - < string id = " 15006 " > < / string > <nl> - < string id = " 15007 " > < / string > <nl> - < string id = " 15008 " > < / string > <nl> - < string id = " 15009 " > < / string > <nl> - < string id = " 15010 " > < / string > <nl> - < string id = " 15011 " > < / string > <nl> - < string id = " 15012 " > < / string > <nl> - < string id = " 15013 " > < / string > <nl> - < string id = " 15014 " > < / string > <nl> < string id = " 15015 " > Poista < / string > <nl> < string id = " 15016 " > Pelit < / string > <nl> - < string id = " 15017 " > < / string > <nl> - < string id = " 15018 " > < / string > <nl> + <nl> < string id = " 15019 " > Lisää < / string > <nl> - < string id = " 15020 " > < / string > <nl> - < string id = " 15021 " > < / string > <nl> - < string id = " 15022 " > < / 
string > <nl> - < string id = " 15023 " > < / string > <nl> - < string id = " 15024 " > < / string > <nl> - < string id = " 15025 " > < / string > <nl> - < string id = " 15026 " > < / string > <nl> - < string id = " 15027 " > < / string > <nl> - < string id = " 15028 " > < / string > <nl> - < string id = " 15029 " > < / string > <nl> - < string id = " 15030 " > < / string > <nl> - < string id = " 15031 " > < / string > <nl> - < string id = " 15032 " > < / string > <nl> - < string id = " 15033 " > < / string > <nl> - < string id = " 15034 " > < / string > <nl> - < string id = " 15035 " > < / string > <nl> - < string id = " 15036 " > < / string > <nl> - < string id = " 15037 " > < / string > <nl> - < string id = " 15038 " > < / string > <nl> - < string id = " 15039 " > < / string > <nl> - < string id = " 15040 " > < / string > <nl> - < string id = " 15041 " > < / string > <nl> - < string id = " 15042 " > < / string > <nl> - < string id = " 15043 " > < / string > <nl> - < string id = " 15044 " > < / string > <nl> - < string id = " 15045 " > < / string > <nl> - < string id = " 15046 " > < / string > <nl> - < string id = " 15047 " > < / string > <nl> - < string id = " 15048 " > < / string > <nl> - < string id = " 15049 " > < / string > <nl> - < string id = " 15050 " > < / string > <nl> - < string id = " 15051 " > < / string > <nl> + <nl> < string id = " 15052 " > Salasana < / string > <nl> - < string id = " 15053 " > < / string > <nl> - < string id = " 15054 " > < / string > <nl> - < string id = " 15055 " > < / string > <nl> - < string id = " 15056 " > < / string > <nl> - < string id = " 15057 " > < / string > <nl> - < string id = " 15058 " > < / string > <nl> - < string id = " 15059 " > < / string > <nl> - < string id = " 15060 " > < / string > <nl> - < string id = " 15061 " > < / string > <nl> + <nl> < string id = " 15100 " > Kirjasto < / string > <nl> < string id = " 15101 " > Tietokanta < / string > <nl> < string id = " 15102 " > * Kaikki albumit < / string > <nl> < string id = " 15103 " > * Kaikki esittäjät < / string > <nl> < string id = " 15104 " > * Kaikki kappaleet < / string > <nl> < string id = " 15105 " > * Kaikki lajityypit < / string > <nl> - < string id = " 15106 " > Muista valittu tiedosto selattaessa eteenpäin < / string > <nl> + <nl> < string id = " 15107 " > Siirretään puskurimuistiin . . . 
< / string > <nl> < string id = " 15108 " > Valikkoäänet < / string > <nl> < string id = " 15109 " > Oletus < / string > <nl> <nl> < string id = " 16306 " > Sinc8 < / string > <nl> <nl> < string id = " 16307 " > Bicubic ( ohjelmallinen ) < / string > <nl> - < string id = " 16308 " > Lanczos ( ohjelmallinen ) < / string > <nl> + < string id = " 16308 " > Lanczos ( ohjelmallinen ) < / string > <nl> < string id = " 16309 " > Sinc ( ohjelmallinen ) < / string > <nl> <nl> < string id = " 16310 " > ( VDPAU ) Temporal < / string > <nl> <nl> < string id = " 20002 " > Ulkoinen DVD - toisto - ohjelma < / string > <nl> < string id = " 20003 " > Trainer - kansio < / string > <nl> < string id = " 20004 " > Kuvakaappauskansio < / string > <nl> - < string id = " 20005 " > Nauhoituskansio < / string > <nl> + <nl> < string id = " 20006 " > Soittolistakansio < / string > <nl> < string id = " 20007 " > Nauhoitukset < / string > <nl> < string id = " 20008 " > Kuvakaappaukset < / string > <nl> < string id = " 20009 " > Käytä XBMC : ia < / string > <nl> - < string id = " 20010 " > < / string > <nl> + <nl> < string id = " 20011 " > Musiikkisoittolistat < / string > <nl> < string id = " 20012 " > Videosoittolistat < / string > <nl> < string id = " 20013 " > Käynnistetäänkö peli ? < / string > <nl> <nl> < string id = " 20017 " > Paikallinen pienoiskuvake < / string > <nl> < string id = " 20018 " > Ei pienoiskuvaketta < / string > <nl> < string id = " 20019 " > Valitse kuvaketiedosto < / string > <nl> - < string id = " 20020 " > < / string > <nl> - < string id = " 20021 " > < / string > <nl> + <nl> < ! - - string id 20022 will always be set to an empty string ( LocalizeStrings . cpp ) - - > <nl> < string id = " 20022 " > < / string > <nl> < string id = " 20023 " > Ei onnistu < / string > <nl> <nl> < string id = " 20053 " > Poistuit pääkäyttäjätilaan < / string > <nl> < string id = " 20054 " > Siirryit pääkäyttäjätilasta < / string > <nl> < string id = " 20055 " > Allmusic . com - pienoiskuvake < / string > <nl> - < string id = " 20056 " > < / string > <nl> + <nl> < string id = " 20057 " > Poista pienoiskuvake < / string > <nl> < string id = " 20058 " > Lisää uusi profiili . . . < / string > <nl> < string id = " 20059 " > Nouda albumien tiedot < / string > <nl> <nl> < string id = " 20190 " > Mukautus < / string > <nl> < string id = " 20191 " > Suorituslokin tallennus ( debug log ) < / string > <nl> < string id = " 20192 " > Nouda lisätiedot kirjastoa päivitettäessä < / string > <nl> - < string id = " 20193 " > Nouda esittäjän tiedot kirjastoon lisättäessä < / string > <nl> + <nl> < string id = " 20194 " > Tietojen hakupaikka < / string > <nl> < string id = " 20195 " > Vaihda hakupaikkaa < / string > <nl> < string id = " 20196 " > Vie musiikkikirjasto < / string > <nl> <nl> < string id = " 20302 " > Kansioon kirjoitus epäonnistui : < / string > <nl> < string id = " 20303 " > Ohitetaanko ja jatketaanko suoritusta ? 
< / string > <nl> < string id = " 20304 " > RSS - syöte < / string > <nl> - < string id = " 20305 " > < / string > <nl> <nl> < string id = " 20306 " > Ei voida tunnistaa < / string > <nl> < string id = " 20307 " > Toissijainen DNS < / string > <nl> <nl> < string id = " 20326 " > Tämä palauttaa asettamasi % s kalibroinnin arvot < / string > <nl> < string id = " 20327 " > oletusarvoiksi < / string > <nl> < string id = " 20328 " > Valitse kohdesijainti < / string > <nl> - < string id = " 20329 " > < / string > <nl> + <nl> < string id = " 20330 " > Hae kansionimiä < / string > <nl> < string id = " 20331 " > Tiedostonimet < / string > <nl> < string id = " 20332 " > Käytetäänkö haettaessa tiedosto - vai kansionimiä ? < / string > <nl> <nl> < string id = " 20373 " > Tuotantokausi < / string > <nl> < string id = " 20374 " > Noudetaan elokuvan tiedot < / string > <nl> < string id = " 20375 " > Määrittelemätön sisältö < / string > <nl> - < string id = " 20376 " > < / string > <nl> + <nl> < string id = " 20377 " > Päivitä TV - ohjelman tiedot < / string > <nl> < string id = " 20378 " > Päivitetäänkö kaikkien jaksojen tiedot ? < / string > <nl> < string id = " 20379 " > Valittu kansio sisältää yhden TV - ohjelman < / string > <nl> <nl> < string id = " 20401 " > Toista musiikkivideo < / string > <nl> < string id = " 20402 " > Lataa ohjaajan pienoiskuvakkeet kirjastoon lisättäessä < / string > <nl> < string id = " 20403 " > Aseta ohjaajan pienoiskuvake < / string > <nl> - < string id = " 20404 " > < / string > <nl> + <nl> < string id = " 20405 " > Poista jakson kirjanmerkki < / string > <nl> < string id = " 20406 " > Aseta jakson kirjanmerkki < / string > <nl> < string id = " 20407 " > Hakupaikan asetukset < / string > <nl> <nl> < string id = " 20416 " > Ensimmäinen esitys < / string > <nl> < string id = " 20417 " > Käsikirjoittaja < / string > <nl> < string id = " 20418 " > Siisti tiedostojen ja kansioiden nimet < / string > <nl> - < string id = " 20419 " > Pinoa TV - ohjelmien kaksoiskappaleet < / string > <nl> + <nl> < string id = " 20420 " > Ei koskaan < / string > <nl> < string id = " 20421 " > Jos samalta tuotantokaudelta < / string > <nl> < string id = " 20422 " > Aina < / string > <nl> < string id = " 20423 " > On esittely < / string > <nl> < string id = " 20424 " > Ei ole < / string > <nl> - < string id = " 20425 " > Fanikuvien kuvaesitys < / string > <nl> + < string id = " 20425 " > Fanikuvien kuvaesitys < / string > <nl> < string id = " 20426 " > Tallennetaanko yhdistettynä tiedostona vai erillisinä < / string > <nl> < string id = " 20427 " > tiedostoina ? Valitse tallennusmuoto . < / string > <nl> < string id = " 20428 " > Yhdistettynä < / string > <nl> <nl> < string id = " 20434 " > Setit < / string > <nl> < string id = " 20435 " > Aseta elokuvasetin pienoiskuvake < / string > <nl> < string id = " 20436 " > Vie ohjaajan pienoiskuvakkeet < / string > <nl> - < string id = " 20437 " > Valitse fanitaide < / string > <nl> + < string id = " 20437 " > Valitse fanitaide < / string > <nl> < string id = " 20438 " > Paikallinen fanitaide < / string > <nl> < string id = " 20439 " > Ei fanitaidetta < / string > <nl> < string id = " 20440 " > Nykyinen fanitaide < / string > <nl> < string id = " 20441 " > Poista fanitaide < / string > <nl> - < string id = " 20442 " > Vaihda sisältö < / string > <nl> - < string id = " 20443 " > Haluatko päivittää kaikkien kohteiden < / string > <nl> - < string id = " 20444 " > tiedot tästä polusta ? 
< / string > <nl> + < string id = " 20442 " > Vaihda sisältö < / string > <nl> + < string id = " 20443 " > Haluatko päivittää kaikkien kohteiden < / string > <nl> + < string id = " 20444 " > tiedot tästä polusta ? < / string > <nl> < string id = " 20445 " > Fanitaide < / string > <nl> < ! - - up to 21329 is reserved for the video db ! ! ! - - > <nl> <nl> <nl> < string id = " 21337 " > TuxBox - laite < / string > <nl> < ! - - up to 21355 is reserved for the TuxBox Client ! ! ! - - > <nl> <nl> - < string id = " 21356 " > UPnP - musiikki < / string > <nl> - < string id = " 21357 " > UPnP - video < / string > <nl> - < string id = " 21358 " > UPnP - kuvat < / string > <nl> < string id = " 21359 " > Lisää sijainti . . . < / string > <nl> < string id = " 21360 " > Jaa video - ja musiikkikirjastot UPnP : n kautta < / string > <nl> - < string id = " 21361 " > Muokkaa UPnP - musiikin sijaintia < / string > <nl> - < string id = " 21362 " > Muokkaa UPnP - videoiden sijaintia < / string > <nl> - < string id = " 21363 " > Muokkaa UPnP - kuvien sijaintia < / string > <nl> + <nl> < string id = " 21364 " > Muokkaa sijaintia < / string > <nl> < string id = " 21365 " > Poista sijainti < / string > <nl> - < string id = " 21366 " > Tekstitysten hakemisto < / string > <nl> + < string id = " 21366 " > Tekstityskansio < / string > <nl> < string id = " 21367 " > Elokuvien ja vaihtoehtoisten tekstitysten hakemisto < / string > <nl> <nl> < string id = " 21369 " > Hiiri käytössä < / string > <nl> <nl> < string id = " 21382 " > Näytä " Lisää sijainti " - painike tiedostolistoissa < / string > <nl> < string id = " 21383 " > Vierityspalkit käytössä < / string > <nl> < string id = " 21384 " > Tee katsotut suodatus vaihdettaessa videokirjastoon < / string > <nl> - < string id = " 21385 " > Avaa < / string > <nl> + < string id = " 21385 " > Avaa < / string > <nl> < string id = " 21386 " > Äänekkyyden tason säätö < / string > <nl> < string id = " 21387 " > Nopea < / string > <nl> < string id = " 21388 " > Hiljainen < / string > <nl> <nl> < string id = " 21393 " > Korkea valmiustila < / string > <nl> < string id = " 21394 " > Matala valmiustila < / string > <nl> < string id = " 21395 " > Yli 4GB : n tiedostoja ei voida välimuistittaa < / string > <nl> - < string id = " 21396 " > Luku < / string > <nl> + < string id = " 21396 " > Luku < / string > <nl> < string id = " 21397 " > Pixel Shader V2 ( korkea laatu ) < / string > <nl> < string id = " 21398 " > Käynnistyessä soittolista käytössä < / string > <nl> < string id = " 21399 " > Tween - animointi käytössä < / string > <nl> <nl> < string id = " 21835 " > Valon lähde < / string > <nl> < string id = " 21836 " > Mittaustapa < / string > <nl> < string id = " 21837 " > ISO - arvo < / string > <nl> - < string id = " 21838 " > Digitaalinen zoomaus < / string > <nl> + < string id = " 21838 " > Digitaalinen zoom < / string > <nl> < string id = " 21839 " > Kuvakennon leveys < / string > <nl> < string id = " 21840 " > GPS - leveysaste < / string > <nl> < string id = " 21841 " > GPS - pituusaste < / string > <nl> <nl> < string id = " 22000 " > Päivitä kirjasto ohjelman käynnistyessä < / string > <nl> < string id = " 22001 " > Suorita päivitys taustalla < / string > <nl> < string id = " 22002 " > DNS - loppupääte < / string > <nl> - < string id = " 22003 " > EDl - leikkaustietoluettelot käytössä < / string > <nl> + <nl> < string id = " 22004 " > Viivästetty : % 2 . 3fs < / string > <nl> < string id = " 22005 " > Aiennettu : % 2 . 
3fs < / string > <nl> < string id = " 22006 " > Tekstityksen ajoitus < / string > <nl> <nl> < string id = " 22010 " > GPU : n lämpötila : < / string > <nl> < string id = " 22011 " > CPU : n lämpötila : < / string > <nl> < string id = " 22012 " > Kokonaismuisti < / string > <nl> - < string id = " 22013 " > Profiilin tiedot < / string > <nl> + < string id = " 22013 " > Profiilin tiedot < / string > <nl> < string id = " 22014 " > Himmennä jos videon toisto keskeytetään < / string > <nl> < string id = " 22015 " > Kaikki nauhoitukset < / string > <nl> < string id = " 22016 " > Nimet < / string > <nl> <nl> < string id = " 22018 " > Lähetyskanavat < / string > <nl> < string id = " 22019 " > Levytykset nimen mukaan < / string > <nl> < string id = " 22020 " > Ohjelmaopas < / string > <nl> - < string id = " 22021 " > Suurin kuvasuhteen virhe maksimi kuva - alan säädössä < / string > <nl> + < string id = " 22021 " > Sallittu kuvasuhdevirhe maksimi kuva - alan säädössä < / string > <nl> < string id = " 22022 " > Näytä videotiedostot listauksessa < / string > <nl> < string id = " 22023 " > DirectX valmistaja : < / string > <nl> < string id = " 22024 " > Direct3D versio : < / string > <nl> <nl> <nl> < string id = " 23050 " > Teksti - tv käytössä < / string > <nl> < string id = " 23051 " > Osa % i < / string > <nl> - < string id = " 23052 " > Puskuroidaan % i tavua < / string > <nl> + < string id = " 23052 " > Puskuroidaan % i tavua < / string > <nl> <nl> < ! - - strings 23100 thru 23150 reserved for external player - - > <nl> < string id = " 23100 " > Ulkoinen toisto - ohjelma käytössä < / string > <nl> < string id = " 23101 " > Paina OK sulkeaksesi toisto - ohjelman < / string > <nl> - < string id = " 23102 " > < / string > <nl> - < string id = " 23103 " > < / string > <nl> + <nl> < string id = " 23104 " > Paina OK kun toisto on loppunut < / string > <nl> - < string id = " 23105 " > < / string > <nl> - < string id = " 23106 " > < / string > <nl> <nl> - < ! - - strings 29800 thru 29998 reserved strings used only in the default Project Mayhem III skin and not c + + code - - > <nl> - < string id = " 29800 " > Kirjasto - tila < / string > <nl> + < ! - - strings 29800 thru 29998 reserved strings used only in the default Project Mayhem III skin and not c + + code - - > <nl> + < string id = " 29800 " > Kirjasto - tila < / string > <nl> < string id = " 29801 " > QWERTY - näppäimistö < / string > <nl> < string id = " 29802 " > Läpisyötetty ääni käytössä < / string > <nl> <nl> <nl> < string id = " 33078 " > Seuraava sivu < / string > <nl> < string id = " 33079 " > Pitää < / string > <nl> < string id = " 33080 " > Inhoaa < / string > <nl> - < string id = " 33081 " > Tämä tiedosto on pinottu , valitse osa jonka haluat toistaa < / string > <nl> + < string id = " 33081 " > Tämä tiedosto on pinottu , valitse osa jonka haluat toistaa < / string > <nl> < string id = " 33082 " > Polku < / string > <nl> - < string id = " 33083 " > Skriptin omavalintainen painike käytössä < / string > <nl> + < string id = " 33083 " > Skriptin omavalintainen painike käytössä < / string > <nl> < / strings > <nl>
Finnish translation , based on English r25792 ( thanks to mikko70 )
xbmc/xbmc
e07aa3c2dca6480bbfb94d5069cca3f955d51558
2009-12-20T09:05:53Z
mmm a / include / osquery / dispatcher . h <nl> ppp b / include / osquery / dispatcher . h <nl> class InternalRunnable : public apache : : thrift : : concurrency : : Runnable { <nl> / / / Check if the thread ' s entrypoint ( run ) executed , meaning thread context <nl> / / / was allocated . <nl> bool hasRun ( ) { return run_ ; } <nl> - / / / Sleep in a boost : : thread interruptable state . <nl> - void interruptableSleep ( size_t milli ) ; <nl> <nl> protected : <nl> / / / Require the runnable thread define an entrypoint . <nl> class Dispatcher { <nl> * / <nl> Status add ( std : : shared_ptr < InternalRunnable > task ) ; <nl> <nl> + / / / See ` add ` , but services are not limited to a thread pool size . <nl> + Status addService ( std : : shared_ptr < InternalRunnable > service ) ; <nl> <nl> / * * <nl> class Dispatcher { <nl> * / <nl> void join ( ) ; <nl> <nl> + / / / See ` join ` , but applied to osquery services . <nl> + void joinServices ( ) ; <nl> <nl> + / / / Destroy and stop all osquery service threads and service objects . <nl> + void removeServices ( ) ; <nl> <nl> / * * <nl> class Dispatcher { <nl> * @ see getThreadManager <nl> * / <nl> InternalThreadManagerRef thread_manager_ ; <nl> + / / / The set of shared osquery service threads . <nl> + std : : vector < std : : shared_ptr < boost : : thread > > service_threads_ ; <nl> + / / / The set of shared osquery services . <nl> + std : : vector < std : : shared_ptr < InternalRunnable > > services_ ; <nl> } ; <nl> + <nl> + / / / Sleep in a boost : : thread interruptable state . <nl> + void interruptableSleep ( size_t milli ) ; <nl> } <nl> mmm a / include / osquery / events . h <nl> ppp b / include / osquery / events . h <nl> <nl> # include < boost / thread / mutex . hpp > <nl> <nl> # include < osquery / database . h > <nl> + # include < osquery / dispatcher . h > <nl> # include < osquery / registry . h > <nl> # include < osquery / status . h > <nl> # include < osquery / tables . h > <nl> mmm a / osquery / dispatcher / dispatcher . cpp <nl> ppp b / osquery / dispatcher / dispatcher . cpp <nl> DEFINE_osquery_flag ( int32 , <nl> 4 , <nl> " Number of work dispatch threads " ) ; <nl> <nl> - void InternalRunnable : : interruptableSleep ( size_t milli ) { <nl> + void interruptableSleep ( size_t milli ) { <nl> boost : : this_thread : : sleep ( boost : : posix_time : : milliseconds ( milli ) ) ; <nl> } <nl> <nl> mmm a / osquery / events / darwin / iokit_hid . cpp <nl> ppp b / osquery / events / darwin / iokit_hid . cpp <nl> Status IOKitHIDEventPublisher : : run ( ) { <nl> CFRunLoopRun ( ) ; <nl> <nl> / / Add artificial latency to run loop . <nl> - : : sleep ( 1 ) ; <nl> + osquery : : interruptableSleep ( 1000 ) ; <nl> return Status ( 0 , " OK " ) ; <nl> } <nl> <nl> mmm a / osquery / events / darwin / scnetwork . cpp <nl> ppp b / osquery / events / darwin / scnetwork . cpp <nl> Status SCNetworkEventPublisher : : run ( ) { <nl> CFRunLoopRun ( ) ; <nl> <nl> / / Do not expect the run loop to exit often , if so , add artificial latency . <nl> - : : sleep ( 1 ) ; <nl> + osquery : : interruptableSleep ( 1000 ) ; <nl> return Status ( 0 , " OK " ) ; <nl> } <nl> } ; <nl> mmm a / osquery / events / events . cpp <nl> ppp b / osquery / events / events . cpp <nl> <nl> # include < boost / lexical_cast . hpp > <nl> <nl> # include < osquery / core . h > <nl> - # include < osquery / dispatcher . h > <nl> # include < osquery / events . h > <nl> # include < osquery / flags . h > <nl> # include < osquery / logger . 
h > <nl> <nl> <nl> namespace osquery { <nl> <nl> + / / / Helper cooloff ( ms ) macro to prevent thread failure thrashing . <nl> + # define EVENTS_COOLOFF 20 <nl> + <nl> DEFINE_osquery_flag ( bool , <nl> disable_events , <nl> false , <nl> Status EventFactory : : run ( EventPublisherID & type_id ) { <nl> while ( ! publisher - > isEnding ( ) & & status . ok ( ) ) { <nl> / / Can optionally implement a global cooloff latency here . <nl> status = publisher - > run ( ) ; <nl> - : : usleep ( 20 ) ; <nl> + osquery : : interruptableSleep ( EVENTS_COOLOFF ) ; <nl> } <nl> <nl> / / The runloop status is not reflective of the event type ' s . <nl> mmm a / osquery / events / linux / inotify . cpp <nl> ppp b / osquery / events / linux / inotify . cpp <nl> <nl> <nl> namespace osquery { <nl> <nl> - int kINotifyULatency = 200 ; <nl> + int kINotifyMLatency = 200 ; <nl> + <nl> static const uint32_t BUFFER_SIZE = <nl> ( 10 * ( ( sizeof ( struct inotify_event ) ) + NAME_MAX + 1 ) ) ; <nl> <nl> Status INotifyEventPublisher : : run ( ) { <nl> FD_ZERO ( & set ) ; <nl> FD_SET ( getHandle ( ) , & set ) ; <nl> <nl> - struct timeval timeout = { 0 , kINotifyULatency } ; <nl> + struct timeval timeout = { 0 , kINotifyMLatency } ; <nl> int selector = : : select ( getHandle ( ) + 1 , & set , nullptr , nullptr , & timeout ) ; <nl> if ( selector = = - 1 ) { <nl> LOG ( ERROR ) < < " Could not read inotify handle " ; <nl> Status INotifyEventPublisher : : run ( ) { <nl> p + = ( sizeof ( struct inotify_event ) ) + event - > len ; <nl> } <nl> <nl> - : : usleep ( kINotifyULatency ) ; <nl> + osquery : : interruptableSleep ( kINotifyMLatency ) ; <nl> return Status ( 0 , " Continue " ) ; <nl> } <nl> <nl> mmm a / osquery / events / linux / udev . cpp <nl> ppp b / osquery / events / linux / udev . cpp <nl> <nl> <nl> namespace osquery { <nl> <nl> - int kUdevULatency = 200 ; <nl> + int kUdevMLatency = 200 ; <nl> <nl> REGISTER ( UdevEventPublisher , " event_publisher " , " udev " ) ; <nl> <nl> Status UdevEventPublisher : : run ( ) { <nl> <nl> udev_device_unref ( device ) ; <nl> <nl> - : : usleep ( kUdevULatency ) ; <nl> + osquery : : interruptableSleep ( kUdevMLatency ) ; <nl> return Status ( 0 , " Continue " ) ; <nl> } <nl> <nl>
Merge pull request from theopolis / pubs_as_runnables
osquery/osquery
7f7b2acd371dd128b53d49e50ee21d4680aa7a6d
2015-02-10T21:06:16Z
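The osquery commit above hoists interruptableSleep() out of InternalRunnable into a free function and swaps the raw ::sleep()/::usleep() calls in publisher run loops for it, so service threads sleep in an interruption-aware state and can be joined promptly. A minimal portable C++ sketch of the same idea, assuming a condition-variable design rather than osquery's boost::this_thread::sleep (the class and member names are illustrative, not osquery's API):

    #include <chrono>
    #include <condition_variable>
    #include <mutex>

    // Sketch: wait on a condition variable with a timeout instead of a raw
    // sleep, so a stopping thread can wake sleepers immediately.
    class InterruptibleSleeper {
     public:
      // Returns false if interrupted early, true if the full interval elapsed.
      bool sleepFor(std::chrono::milliseconds interval) {
        std::unique_lock<std::mutex> lock(mutex_);
        return !cv_.wait_for(lock, interval, [this] { return stop_; });
      }

      // Called by whoever wants the sleeping threads to exit promptly.
      void interrupt() {
        {
          std::lock_guard<std::mutex> lock(mutex_);
          stop_ = true;
        }
        cv_.notify_all();
      }

     private:
      std::mutex mutex_;
      std::condition_variable cv_;
      bool stop_ = false;
    };

A service's run loop would call sleepFor() for its cooloff and the owner would call interrupt() before joining, which is the behavior Dispatcher::joinServices() relies on; boost's thread interruption points achieve the same thing in the actual commit.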
mmm a / ports / realsense2 / CONTROL <nl> ppp b / ports / realsense2 / CONTROL <nl> <nl> Source : realsense2 <nl> - Version : 2 . 33 . 1 - 1 <nl> + Version : 2 . 34 . 0 <nl> Homepage : https : / / github . com / IntelRealSense / librealsense <nl> Description : Intel ® RealSense ™ SDK 2 . 0 is a cross - platform library for Intel ® RealSense ™ depth cameras ( D400 series and the SR300 ) . <nl> <nl> deleted file mode 100644 <nl> index 4a9543ed00a . . 00000000000 <nl> mmm a / ports / realsense2 / fix - tools - compile - on - vs2019 . patch <nl> ppp / dev / null <nl> <nl> - diff - - git a / common / rs - config . cpp b / common / rs - config . cpp <nl> - index ff3a757 . . c9efa1a 100644 <nl> mmm - a / common / rs - config . cpp <nl> - ppp b / common / rs - config . cpp <nl> - config_file : : config_file ( std : : string filename ) <nl> - auto j = json : : parse ( str ) ; <nl> - for ( json : : iterator it = j . begin ( ) ; it ! = j . end ( ) ; + + it ) <nl> - { <nl> - - _values [ it . key ( ) ] = it . value ( ) ; <nl> - + _values [ it . key ( ) ] = it . value ( ) . get < std : : string > ( ) ; <nl> - } <nl> - } <nl> - catch ( . . . ) <nl> mmm a / ports / realsense2 / portfile . cmake <nl> ppp b / ports / realsense2 / portfile . cmake <nl> <nl> vcpkg_from_github ( <nl> OUT_SOURCE_PATH SOURCE_PATH <nl> REPO IntelRealSense / librealsense <nl> - REF 842ee1e1e5c4bb96d63582a7fde061dbc1bebf69 # v2 . 33 . 1 <nl> - SHA512 70f6f9c2f1c5925532b2ff22779579b3610a7f616d66ac92e8e85c6f30df334bf8fb125355a0706bacef0be8370acc62bb7623f3f200326e71fe53e07726fa6a <nl> + REF 025fccf76803ee6a6e60de9f18ac6193b7ff8597 # v2 . 34 . 0 <nl> + SHA512 c502fba6b3dbb34b0ac0094deef9ffce330faf435bbc7612148fd8ba3d5b380f7990604a67236e7da815c8e6988ae58c17fa597571a2462f75c8f5000007cc0a <nl> HEAD_REF master <nl> PATCHES <nl> fix_openni2 . patch <nl> fix - dependency - glfw3 . patch <nl> - fix - tools - compile - on - vs2019 . patch <nl> ) <nl> <nl> file ( COPY $ { SOURCE_PATH } / src / win7 / drivers / IntelRealSense_D400_series_win7 . inf DESTINATION $ { SOURCE_PATH } ) <nl>
[ realsense2 ] Update to 2 . 34 . 0 ( )
microsoft/vcpkg
cb388456a29e3c1ab434e00adce6e121d3aa86b9
2020-05-19T22:15:33Z
mmm a / src / btree / node . cc <nl> ppp b / src / btree / node . cc <nl> void node_handler : : merge ( block_size_t block_size , btree_node * node , btree_node * <nl> } <nl> } <nl> <nl> - void node_handler : : remove ( block_size_t block_size , btree_node * node , btree_key * key ) { <nl> - if ( node_handler : : is_leaf ( node ) ) { <nl> - leaf_node_handler : : remove ( block_size , leaf_node_handler : : leaf_node ( node ) , key ) ; <nl> - } else { <nl> - internal_node_handler : : remove ( block_size , internal_node_handler : : internal_node ( node ) , key ) ; <nl> - } <nl> - } <nl> - <nl> bool node_handler : : level ( block_size_t block_size , btree_node * node , btree_node * rnode , btree_key * key_to_replace , btree_key * replacement_key , btree_node * parent ) { <nl> if ( node_handler : : is_leaf ( node ) ) { <nl> return leaf_node_handler : : level ( block_size , leaf_node_handler : : leaf_node ( node ) , leaf_node_handler : : leaf_node ( rnode ) , key_to_replace , replacement_key ) ; <nl> mmm a / src / btree / node . hpp <nl> ppp b / src / btree / node . hpp <nl> class node_handler { <nl> static bool is_mergable ( block_size_t block_size , const btree_node * node , const btree_node * sibling , const btree_node * parent ) ; <nl> static int nodecmp ( const btree_node * node1 , const btree_node * node2 ) ; <nl> static void merge ( block_size_t block_size , btree_node * node , btree_node * rnode , btree_key * key_to_remove , btree_node * parent ) ; <nl> - static void remove ( block_size_t block_size , btree_node * node , btree_key * key ) ; <nl> static bool level ( block_size_t block_size , btree_node * node , btree_node * rnode , btree_key * key_to_replace , btree_key * replacement_key , btree_node * parent ) ; <nl> <nl> static void print ( const btree_node * node ) ; <nl>
Removed node_handler : : remove . Leaf node timestamp tracking is in a working state .
rethinkdb/rethinkdb
2df61031899efb42ab55b50a14f0753de4684a14
2010-12-07T22:10:55Z
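In the rethinkdb change above, the generic node_handler::remove() wrapper is deleted while merge() and level() keep the same shape: inspect the node kind, then forward to the leaf or internal handler. A stripped-down sketch of that dispatch idiom, with hypothetical types and handlers standing in for the btree code (not the actual rethinkdb implementation):

    #include <cstdio>

    // Hypothetical stand-ins for the btree node type and its two handlers.
    struct btree_node {
      bool is_leaf;
    };

    static void leaf_merge(btree_node *, btree_node *) { std::puts("leaf merge"); }
    static void internal_merge(btree_node *, btree_node *) {
      std::puts("internal merge");
    }

    // The node_handler idiom: one generic entry point that checks the node
    // kind and forwards to the specialized handler. The removed remove() had
    // exactly this shape.
    void merge_nodes(btree_node *node, btree_node *sibling) {
      if (node->is_leaf) {
        leaf_merge(node, sibling);
      } else {
        internal_merge(node, sibling);
      }
    }

    int main() {
      btree_node leaf{true}, internal{false};
      merge_nodes(&leaf, &leaf);
      merge_nodes(&internal, &internal);
    }

Dropping the generic wrapper forces call sites to pick a concrete handler, which is a plausible motivation once the leaf path needs extra arguments for the timestamp tracking the commit message mentions.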
new file mode 100644 <nl> index 0000000000 . . 83b71052ee <nl> mmm / dev / null <nl> ppp b / code / dynamic_programming / matrix_chain_multiplication / MatrixChainMultiplication . java <nl> <nl> + / / Dynamic Programming Java implementation of Matrix <nl> + / / Chain Multiplication . <nl> + / / See the Cormen book for details of the following algorithm <nl> + class MatrixChainMultiplication <nl> + { <nl> + / / Matrix Ai has dimension p [ i - 1 ] x p [ i ] for i = 1 . . n <nl> + static int MatrixChainOrder ( int p [ ] , int n ) <nl> + { <nl> + / * For simplicity of the program , one extra row and one <nl> + extra column are allocated in m [ ] [ ] . 0th row and 0th <nl> + column of m [ ] [ ] are not used * / <nl> + int m [ ] [ ] = new int [ n ] [ n ] ; <nl> + <nl> + int i , j , k , L , q ; <nl> + <nl> + / * m [ i , j ] = Minimum number of scalar multiplications needed <nl> + to compute the matrix A [ i ] A [ i + 1 ] . . . A [ j ] = A [ i . . j ] where <nl> + dimension of A [ i ] is p [ i - 1 ] x p [ i ] * / <nl> + <nl> + / / cost is zero when multiplying one matrix . <nl> + for ( i = 1 ; i < n ; i + + ) <nl> + m [ i ] [ i ] = 0 ; <nl> + <nl> + / / L is chain length . <nl> + for ( L = 2 ; L < n ; L + + ) <nl> + { <nl> + for ( i = 1 ; i < n - L + 1 ; i + + ) <nl> + { <nl> + j = i + L - 1 ; <nl> + if ( j = = n ) continue ; <nl> + m [ i ] [ j ] = Integer . MAX_VALUE ; <nl> + for ( k = i ; k < = j - 1 ; k + + ) <nl> + { <nl> + / / q = cost / scalar multiplications <nl> + q = m [ i ] [ k ] + m [ k + 1 ] [ j ] + p [ i - 1 ] * p [ k ] * p [ j ] ; <nl> + if ( q < m [ i ] [ j ] ) <nl> + m [ i ] [ j ] = q ; <nl> + } <nl> + } <nl> + } <nl> + <nl> + return m [ 1 ] [ n - 1 ] ; <nl> + } <nl> + <nl> + / / Driver program to test above function <nl> + public static void main ( String args [ ] ) <nl> + { <nl> + int arr [ ] = new int [ ] { 1 , 2 , 3 , 4 } ; <nl> + int size = arr . length ; <nl> + <nl> + System . out . println ( " Minimum number of multiplications is " + <nl> + MatrixChainOrder ( arr , size ) ) ; <nl> + } <nl> + } <nl> \ No newline at end of file <nl>
Matrix Chain Multiplication in Java
OpenGenus/cosmos
91550e123b9d2e7c7d4a19bf3e798cfd97f45c1b
2017-10-14T15:42:25Z
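The Java file above fills m[i][j], the minimum number of scalar multiplications for the chain A_i ... A_j where A_i has dimension p[i-1] x p[i], and returns m[1][n-1]. The recurrence it implements is the standard CLRS one:

    m[i][j] =
    \begin{cases}
    0 & \text{if } i = j \\
    \min_{i \le k < j} \left( m[i][k] + m[k+1][j] + p_{i-1}\,p_k\,p_j \right) & \text{if } i < j
    \end{cases}

As a worked check for the driver's arr = {1, 2, 3, 4} (A1: 1x2, A2: 2x3, A3: 3x4): (A1 A2) A3 costs 1*2*3 + 1*3*4 = 18 while A1 (A2 A3) costs 2*3*4 + 1*2*4 = 32, so the program prints "Minimum number of multiplications is 18".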
mmm a / test / backward_compatibility / check_backward_compatibility . py <nl> ppp b / test / backward_compatibility / check_backward_compatibility . py <nl> <nl> ( ' aten : : gt ' , datetime . date ( 2020 , 6 , 30 ) ) , <nl> ( ' aten : : le ' , datetime . date ( 2020 , 6 , 30 ) ) , <nl> ( ' aten : : ge ' , datetime . date ( 2020 , 6 , 30 ) ) , <nl> + ( ' aten : : pow ' , datetime . date ( 2020 , 6 , 30 ) ) , <nl> ] <nl> <nl> + <nl> # The nightly will fail to parse newly added syntax to schema declarations <nl> # Add new schemas that will fail the nightly here <nl> dont_parse_list = [ <nl> mmm a / torch / csrc / jit / runtime / register_prim_ops . cpp <nl> ppp b / torch / csrc / jit / runtime / register_prim_ops . cpp <nl> RegisterOperators reg ( <nl> static_cast < double > ( pow ( a , b ) ) , <nl> static_cast < double > ( pow ( a , b ) ) , <nl> float ) , <nl> - <nl> - DEFINE_BINARY_OP ( aten : : pow , pow ( a , b ) ) , <nl> + DEFINE_SCALAR_BINARY_OP ( <nl> + aten : : pow . Scalar , <nl> + static_cast < double > ( pow ( a , b ) ) , <nl> + static_cast < double > ( pow ( a , b ) ) , <nl> + Scalar ) , <nl> + DEFINE_INT_OP ( aten : : pow . int_to_int , pow ( a , b ) ) , <nl> / / min and max are in prim : : because there is a difference between <nl> / / the python builtin ' min ' and ' torch . min ' <nl> DEFINE_BINARY_OP ( prim : : min , a < b ? a : b ) , <nl>
remove duplicated op schema for aten : : pow ( )
pytorch/pytorch
8177637374bd57d007dc18c73f3d0a6eb1b2d289
2020-06-07T23:17:34Z
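The PyTorch diff splits the single aten::pow registration into a Scalar overload that computes in double and an int_to_int overload that stays in the integer domain, with the check_backward_compatibility allowlist entry giving the schema change a grace period. A rough C++ sketch of why the split matters, using illustrative free functions rather than the real DEFINE_*_OP registration macros (negative-exponent and overflow handling is deliberately simplified here):

    #include <cmath>
    #include <cstdint>
    #include <cstdio>

    // Illustrative stand-in for the int_to_int overload: integer base and
    // exponent produce an integer, with no float round-trip. Assumes b >= 0.
    int64_t pow_int_to_int(int64_t a, int64_t b) {
      int64_t result = 1;
      for (int64_t i = 0; i < b; ++i) {
        result *= a;
      }
      return result;
    }

    // Illustrative stand-in for the Scalar overload: promote to double, as
    // the static_cast<double>(pow(a, b)) in the registration above does.
    double pow_scalar(double a, double b) { return std::pow(a, b); }

    int main() {
      std::printf("%lld\n", static_cast<long long>(pow_int_to_int(3, 4)));  // 81
      std::printf("%f\n", pow_scalar(3.0, 0.5));  // 1.732051
    }

Keeping the two overloads separate lets int ** int return an exact integer while everything else promotes, instead of forcing one result type on both.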
mmm a / Code / Sandbox / Plugins / EditorCommon / EditorFramework / EditorWidget . cpp <nl> ppp b / Code / Sandbox / Plugins / EditorCommon / EditorFramework / EditorWidget . cpp <nl> bool CEditorWidget : : event ( QEvent * pEvent ) <nl> / / If we found a match for the shortcut , then trigger the action and mark the event as handled <nl> if ( ite ! = m_actions . cend ( ) ) <nl> { <nl> - QCommandAction * pCommandAction = qobject_cast < QCommandAction * > ( ite - > second ) ; <nl> - pCommandAction - > trigger ( ) ; <nl> - pEvent - > setAccepted ( true ) ; <nl> + / / Catch the action we want to execute on the following keypress and signal that we will handle the shortcut override <nl> + m_pShortcutOverride = qobject_cast < QCommandAction * > ( ite - > second ) ; <nl> + pEvent - > accept ( ) ; <nl> return true ; <nl> } <nl> } <nl> bool CEditorWidget : : event ( QEvent * pEvent ) <nl> return QWidget : : event ( pEvent ) ; <nl> } <nl> <nl> + void CEditorWidget : : keyPressEvent ( QKeyEvent * pEvent ) <nl> + { <nl> + if ( m_pShortcutOverride ) <nl> + { <nl> + / / Reset the shortcut override action <nl> + QCommandAction * pShortcutOverrideAction = m_pShortcutOverride ; <nl> + m_pShortcutOverride = nullptr ; <nl> + <nl> + / / If there ' s an incoming shortcut override key press , double check that the key sequence matches the shortcut action <nl> + QKeySequence keySequence ( pEvent - > key ( ) | pEvent - > modifiers ( ) ) ; <nl> + if ( pShortcutOverrideAction - > shortcut ( ) = = keySequence ) <nl> + { <nl> + pShortcutOverrideAction - > trigger ( ) ; <nl> + pEvent - > accept ( ) ; <nl> + return ; <nl> + } <nl> + } <nl> + <nl> + QWidget : : keyPressEvent ( pEvent ) ; <nl> + } <nl> + <nl> void CEditorWidget : : customEvent ( QEvent * pEvent ) <nl> { <nl> if ( pEvent - > type ( ) = = SandboxEvent : : Command ) <nl> mmm a / Code / Sandbox / Plugins / EditorCommon / EditorFramework / EditorWidget . h <nl> ppp b / Code / Sandbox / Plugins / EditorCommon / EditorFramework / EditorWidget . h <nl> class EDITOR_COMMON_API CEditorWidget : public QWidget <nl> CEditorWidget ( QWidget * pParent = nullptr ) <nl> : QWidget ( pParent ) <nl> , m_widgetActionRegistry ( * this ) <nl> + , m_pShortcutOverride ( nullptr ) <nl> { } <nl> <nl> const std : : vector < CCommand * > & GetCommands ( ) const { return m_commands ; } <nl> class EDITOR_COMMON_API CEditorWidget : public QWidget <nl> protected : <nl> virtual bool event ( QEvent * pEvent ) override ; <nl> virtual void customEvent ( QEvent * pEvent ) override ; <nl> + virtual void keyPressEvent ( QKeyEvent * pEvent ) override ; <nl> <nl> / / ! Registers an action on the widget . <nl> / / ! / param actionId id of the action to register . <nl> class EDITOR_COMMON_API CEditorWidget : public QWidget <nl> CWidgetActionRegistry m_widgetActionRegistry ; <nl> std : : vector < CCommand * > m_commands ; <nl> StringMap < QCommandAction * > m_actions ; <nl> + QCommandAction * m_pShortcutOverride ; <nl> } ; <nl>
! XI Integrating CL 2019510 from main
CRYTEK/CRYENGINE
6ab3b9c5f49e19a8d53aaf857907a23769a1f44c
2019-10-25T09:48:46Z
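The CRYENGINE change above turns shortcut handling into a two-phase handshake: event() accepts the QEvent::ShortcutOverride and remembers the matching action, and keyPressEvent() re-validates the key sequence before triggering it. A self-contained sketch of that pattern, assuming Qt 5 and a single QAction in place of CEditorWidget's action registry (names are illustrative):

    #include <QAction>
    #include <QEvent>
    #include <QKeyEvent>
    #include <QKeySequence>
    #include <QWidget>

    // Sketch: defer a shortcut-override match until the key press that follows.
    class ShortcutWidget : public QWidget {
     public:
      explicit ShortcutWidget(QAction *pAction) : m_pAction(pAction) {}

     protected:
      bool event(QEvent *pEvent) override {
        if (pEvent->type() == QEvent::ShortcutOverride) {
          auto *pKeyEvent = static_cast<QKeyEvent *>(pEvent);
          QKeySequence sequence(pKeyEvent->key() | pKeyEvent->modifiers());
          if (m_pAction->shortcut() == sequence) {
            m_pPending = m_pAction;  // trigger on the following key press
            pEvent->accept();
            return true;
          }
        }
        return QWidget::event(pEvent);
      }

      void keyPressEvent(QKeyEvent *pEvent) override {
        QAction *pPending = m_pPending;
        m_pPending = nullptr;  // always reset, even if the sequence changed
        if (pPending &&
            pPending->shortcut() ==
                QKeySequence(pEvent->key() | pEvent->modifiers())) {
          pPending->trigger();
          pEvent->accept();
          return;
        }
        QWidget::keyPressEvent(pEvent);
      }

     private:
      QAction *m_pAction = nullptr;
      QAction *m_pPending = nullptr;
    };

Re-checking the sequence in keyPressEvent guards against a different key arriving between the override event and the press, which is exactly the case the integrated changelist handles.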
mmm a / docs / StandardLibraryProgrammersManual . md <nl> ppp b / docs / StandardLibraryProgrammersManual . md <nl> The standard library cannot import the Darwin module ( much less an ICU module ) , <nl> <nl> # # # # ` _FixedArray ` <nl> <nl> - The standard library has internal fixed size arrays of some limited sizes . This provides fast random access into contiguous ( usually stack - allocated ) memory . These are metaprogrammed based on size , so if you need a new size not currently defined , add it to the ` sizes ` gyb variable . See [ FixedArray . swift . gyb ] ( https : / / github . com / apple / swift / blob / master / stdlib / public / core / FixedArray . swift . gyb ) for implementation . <nl> + The standard library has an internal array of fixed size 16 . This provides fast random access into contiguous ( usually stack - allocated ) memory . See [ FixedArray . swift ] ( https : / / github . com / apple / swift / blob / master / stdlib / public / core / FixedArray . swift ) for implementation . <nl> <nl>
Fixes info about internal _FixedArray16 in StandardLibraryProgrammersManual . md
apple/swift
97c9b71f9db40b0fdb6dc85dd55fe301a648a5d4
2020-04-20T10:47:01Z
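The corrected passage describes a fixed-capacity, stack-allocated array: contiguous storage, no heap allocation, capacity baked in at 16. A rough C++ analogue of that shape, illustrative only and not derived from Swift's actual _FixedArray16 in FixedArray.swift:

    #include <cassert>
    #include <cstddef>

    // Sketch: inline storage for up to 16 elements, so random access is a
    // plain array index and no allocation ever happens. T must be
    // default-constructible in this simplified version.
    template <typename T>
    class FixedArray16 {
     public:
      void append(const T &value) {
        assert(count_ < kCapacity && "FixedArray16 capacity exceeded");
        storage_[count_++] = value;
      }

      T &operator[](std::size_t i) {
        assert(i < count_);
        return storage_[i];
      }

      std::size_t size() const { return count_; }

     private:
      static constexpr std::size_t kCapacity = 16;
      T storage_[kCapacity] = {};
      std::size_t count_ = 0;
    };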
mmm a / Jenkinsfile <nl> ppp b / Jenkinsfile <nl> void build_taichi ( ) { <nl> $ CC - - version <nl> $ CXX - - version <nl> echo $ WORKSPACE <nl> - $ PYTHON_EXECUTABLE - m pip install twine numpy Pillow scipy pybind11 colorama setuptools astor matplotlib pytest autograd GitPython - - user <nl> + $ PYTHON_EXECUTABLE - m pip install twine numpy Pillow scipy pybind11 colorama setuptools astor matplotlib pytest autograd GitPython dill - - user <nl> export TAICHI_REPO_DIR = $ WORKSPACE / <nl> echo $ TAICHI_REPO_DIR <nl> export PYTHONPATH = $ TAICHI_REPO_DIR / python <nl> mmm a / docs / dev_install . rst <nl> ppp b / docs / dev_install . rst <nl> Installing Dependencies <nl> <nl> . . code - block : : bash <nl> <nl> - python3 - m pip install - - user setuptools astpretty astor pybind11 Pillow <nl> + python3 - m pip install - - user setuptools astpretty astor pybind11 Pillow dill <nl> python3 - m pip install - - user pytest pytest - rerunfailures pytest - xdist yapf <nl> python3 - m pip install - - user numpy GitPython coverage colorama autograd <nl> <nl> mmm a / examples / mpm128 . py <nl> ppp b / examples / mpm128 . py <nl> def reset ( ) : <nl> gravity [ None ] = [ 0 , - 1 ] <nl> <nl> for frame in range ( 20000 ) : <nl> - while gui . get_event ( ti . GUI . PRESS ) : <nl> + if gui . get_event ( ti . GUI . PRESS ) : <nl> if gui . event . key = = ' r ' : reset ( ) <nl> - elif gui . event . key in [ ti . GUI . ESCAPE , ti . GUI . EXIT ] : exit ( 0 ) <nl> + elif gui . event . key in [ ti . GUI . ESCAPE , ti . GUI . EXIT ] : break <nl> if gui . event is not None : gravity [ None ] = [ 0 , 0 ] # if had any event <nl> if gui . is_pressed ( ti . GUI . LEFT , ' a ' ) : gravity [ None ] [ 0 ] = - 1 <nl> if gui . is_pressed ( ti . GUI . RIGHT , ' d ' ) : gravity [ None ] [ 0 ] = 1 <nl> mmm a / examples / stable_fluid . py <nl> ppp b / examples / stable_fluid . py <nl> def main ( ) : <nl> md_gen = MouseDataGen ( ) <nl> paused = False <nl> while True : <nl> - while gui . get_event ( ti . GUI . PRESS ) : <nl> + if gui . get_event ( ti . GUI . PRESS ) : <nl> e = gui . event <nl> if e . key = = ti . GUI . ESCAPE : <nl> - exit ( 0 ) <nl> + break <nl> elif e . key = = ' r ' : <nl> paused = False <nl> reset ( ) <nl> mmm a / misc / ci_setup . py <nl> ppp b / misc / ci_setup . py <nl> def run ( self ) : <nl> " distro " , <nl> " autograd " , <nl> " astor " , <nl> + " dill " , <nl> " pytest " , <nl> " pytest - xdist " , <nl> " pytest - rerunfailures " , <nl> mmm a / misc / prtags . json <nl> ppp b / misc / prtags . json <nl> <nl> " mac " : " Mac OS X " , <nl> " windows " : " Windows " , <nl> " perf " : " Performance improvements " , <nl> + " ipython " : " IPython and other shells " , <nl> " release " : " Release " <nl> } <nl> mmm a / python / taichi / lang / ast_checker . py <nl> ppp b / python / taichi / lang / ast_checker . py <nl> <nl> import ast <nl> + from . shell import oinspect <nl> <nl> <nl> class KernelSimplicityASTChecker ( ast . NodeVisitor ) : <nl> def __exit__ ( self , exc_type , exc_val , exc_tb ) : <nl> def __init__ ( self , func ) : <nl> super ( ) . __init__ ( ) <nl> import inspect <nl> - self . _func_file = inspect . getsourcefile ( func ) <nl> - self . _func_lineno = inspect . getsourcelines ( func ) [ 1 ] <nl> + self . _func_file = oinspect . getsourcefile ( func ) <nl> + self . _func_lineno = oinspect . getsourcelines ( func ) [ 1 ] <nl> self . _func_name = func . __name__ <nl> self . _scope_guards = [ ] <nl> <nl> mmm a / python / taichi / lang / kernel . 
py <nl> ppp b / python / taichi / lang / kernel . py <nl> <nl> import ast <nl> from . kernel_arguments import * <nl> from . util import * <nl> + from . shell import oinspect <nl> import functools <nl> <nl> <nl> def __call__ ( self , * args ) : <nl> <nl> def do_compile ( self ) : <nl> from . impl import get_runtime <nl> - src = remove_indent ( inspect . getsource ( self . func ) ) <nl> + src = remove_indent ( oinspect . getsource ( self . func ) ) <nl> tree = ast . parse ( src ) <nl> <nl> func_body = tree . body [ 0 ] <nl> def do_compile ( self ) : <nl> print ( ' After preprocessing : ' ) <nl> print ( astor . to_source ( tree . body [ 0 ] , indent_with = ' ' ) ) <nl> <nl> - ast . increment_lineno ( tree , inspect . getsourcelines ( self . func ) [ 1 ] - 1 ) <nl> + ast . increment_lineno ( tree , oinspect . getsourcelines ( self . func ) [ 1 ] - 1 ) <nl> <nl> local_vars = { } <nl> # frame = inspect . currentframe ( ) . f_back <nl> def do_compile ( self ) : <nl> global_vars = copy . copy ( self . func . __globals__ ) <nl> exec ( <nl> compile ( tree , <nl> - filename = inspect . getsourcefile ( self . func ) , <nl> + filename = oinspect . getsourcefile ( self . func ) , <nl> mode = ' exec ' ) , global_vars , local_vars ) <nl> self . compiled = local_vars [ self . func . __name__ ] <nl> <nl> def materialize ( self , key = None , args = None , arg_features = None ) : <nl> import taichi as ti <nl> ti . trace ( " Compiling kernel { } . . . " . format ( kernel_name ) ) <nl> <nl> - src = remove_indent ( inspect . getsource ( self . func ) ) <nl> + src = remove_indent ( oinspect . getsource ( self . func ) ) <nl> tree = ast . parse ( src ) <nl> if self . runtime . print_preprocessed : <nl> import astor <nl> def materialize ( self , key = None , args = None , arg_features = None ) : <nl> print ( ' After preprocessing : ' ) <nl> print ( astor . to_source ( tree . body [ 0 ] , indent_with = ' ' ) ) <nl> <nl> - ast . increment_lineno ( tree , inspect . getsourcelines ( self . func ) [ 1 ] - 1 ) <nl> + ast . increment_lineno ( tree , oinspect . getsourcelines ( self . func ) [ 1 ] - 1 ) <nl> <nl> freevar_names = self . func . __code__ . co_freevars <nl> closure = self . func . __closure__ <nl> def materialize ( self , key = None , args = None , arg_features = None ) : <nl> <nl> exec ( <nl> compile ( tree , <nl> - filename = inspect . getsourcefile ( self . func ) , <nl> + filename = oinspect . getsourcefile ( self . func ) , <nl> mode = ' exec ' ) , global_vars , local_vars ) <nl> compiled = local_vars [ self . func . __name__ ] <nl> <nl> new file mode 100644 <nl> index 00000000000 . . 0d4258df88b <nl> mmm / dev / null <nl> ppp b / python / taichi / lang / shell . py <nl> <nl> + import sys , os <nl> + <nl> + <nl> + class ShellType : <nl> + NATIVE = ' Python shell ' <nl> + IPYTHON = ' IPython TerminalInteractiveShell ' <nl> + JUPYTER = ' IPython ZMQInteractiveShell ' <nl> + IPYBASED = ' IPython Based Shell ' <nl> + SCRIPT = None <nl> + <nl> + <nl> + def get_shell_name ( ) : <nl> + " " " <nl> + Detect which type of shell is using . <nl> + Can be IPython , IDLE , Python native , or none . <nl> + " " " <nl> + shell = os . environ . get ( ' TI_SHELL_TYPE ' ) <nl> + if shell is not None : <nl> + return getattr ( ShellType , shell . upper ( ) ) <nl> + <nl> + try : <nl> + import __main__ as main <nl> + if hasattr ( main , ' __file__ ' ) : # Called from a script ? <nl> + return ShellType . SCRIPT <nl> + except : <nl> + pass <nl> + <nl> + # Let ' s detect which type of interactive shell is being used . 
<nl> + # As you can see , huge engineering efforts are done here just to <nl> + # make IDLE and IPython happy . Hope our users really love them : ) <nl> + <nl> + try : # IPython / Jupyter ? <nl> + return ' IPython ' + get_ipython ( ) . __class__ . __name__ <nl> + except : <nl> + # Note that we can ' t simply do ` ' IPython ' in sys . modules ` , <nl> + # since it seems ` torch ` will import IPython on it ' s own too . . <nl> + if hasattr ( __builtins__ , ' __IPYTHON__ ' ) : <nl> + return ShellType . IPYBASED <nl> + <nl> + try : <nl> + if getattr ( sys , ' ps1 ' , sys . flags . interactive ) : <nl> + return ShellType . NATIVE <nl> + except : <nl> + pass <nl> + <nl> + return ShellType . SCRIPT <nl> + <nl> + <nl> + class ShellInspectorWrapper : <nl> + " " " <nl> + Wrapper of the ` inspect ` module . When interactive shell detected , <nl> + we will redirect getsource ( ) calls to the corresponding inspector <nl> + provided by / suitable for each type of shell . <nl> + " " " <nl> + def __init__ ( self ) : <nl> + self . name = get_shell_name ( ) <nl> + <nl> + if self . name is not None : <nl> + print ( ' [ Taichi ] Interactive shell detected : ' , self . name ) <nl> + <nl> + if self . name is None : <nl> + # ` inspect ` for " Python script " <nl> + import inspect <nl> + self . getsource = inspect . getsource <nl> + self . getsourcelines = inspect . getsourcelines <nl> + self . getsourcefile = inspect . getsourcefile <nl> + <nl> + elif self . name = = ShellType . NATIVE : <nl> + # ` dill . source ` for " Python native shell " <nl> + import dill <nl> + self . getsource = dill . source . getsource <nl> + self . getsourcelines = dill . source . getsourcelines <nl> + self . getsourcefile = dill . source . getsourcefile <nl> + <nl> + elif self . name . startswith ( ' IPython ' ) : <nl> + # ` IPython . core . oinspect ` for " IPython advanced shell " <nl> + def getsource ( o ) : <nl> + import IPython <nl> + return IPython . core . oinspect . getsource ( o ) <nl> + <nl> + def getsourcelines ( o ) : <nl> + import IPython <nl> + lineno = IPython . core . oinspect . find_source_lines ( o ) <nl> + lines = IPython . core . oinspect . getsource ( o ) . split ( ' \ n ' ) <nl> + return lines , lineno <nl> + <nl> + def getsourcefile ( o ) : <nl> + return ' < IPython > ' <nl> + <nl> + self . getsource = getsource <nl> + self . getsourcelines = getsourcelines <nl> + self . getsourcefile = getsourcefile <nl> + <nl> + else : <nl> + raise RuntimeError ( f ' Shell type " { self . name } " not supported ' ) <nl> + <nl> + <nl> + oinspect = ShellInspectorWrapper ( ) <nl> mmm a / python / taichi / main . py <nl> ppp b / python / taichi / main . py <nl> def __init__ ( self , debug : bool = False , test_mode : bool = False ) : <nl> parser . add_argument ( ' command ' , <nl> help = " command from the above list to run " ) <nl> <nl> - # Print help if no command provided <nl> - if len ( sys . argv [ 1 : 2 ] ) = = 0 : <nl> - parser . print_help ( ) <nl> - exit ( 1 ) <nl> - <nl> # Flag for unit testing <nl> self . test_mode = test_mode <nl> <nl> def __init__ ( self , debug : bool = False , test_mode : bool = False ) : <nl> <nl> @ timer <nl> def __call__ ( self ) : <nl> + # Print help if no command provided <nl> + if len ( sys . argv [ 1 : 2 ] ) = = 0 : <nl> + self . main_parser . print_help ( ) <nl> + return 1 <nl> + <nl> # Parse the command <nl> args = self . main_parser . parse_args ( sys . argv [ 1 : 2 ] ) <nl> <nl> def __call__ ( self ) : <nl> TaichiMain . _exec_python_file ( args . command ) <nl> print ( f " { args . 
command } is not a valid command ! " ) <nl> self . main_parser . print_help ( ) <nl> - exit ( 1 ) <nl> + return 1 <nl> <nl> return getattr ( self , args . command ) ( sys . argv [ 2 : ] ) <nl> <nl> def main_debug ( ) : <nl> <nl> <nl> if __name__ = = " __main__ " : <nl> - exit ( main ( ) ) <nl> + sys . exit ( main ( ) ) <nl> mmm a / setup . py <nl> ppp b / setup . py <nl> <nl> ' colorama ' , <nl> ' setuptools ' , <nl> ' astor ' , <nl> + ' dill ' , <nl> # For testing : <nl> ' pytest ' , <nl> ' pytest - xdist ' , <nl> mmm a / tests / python / test_cli . py <nl> ppp b / tests / python / test_cli . py <nl> def patch_sys_argv_helper ( custom_argv : list ) : <nl> <nl> def test_cli_exit_one_with_no_command_provided ( ) : <nl> with patch_sys_argv_helper ( [ " ti " ] ) : <nl> - with pytest . raises ( SystemExit ) as pytest_wrapped_err : <nl> - cli = TaichiMain ( test_mode = True ) <nl> - cli ( ) <nl> - assert pytest_wrapped_err . type = = SystemExit <nl> - assert pytest_wrapped_err . value . code = = 1 <nl> + cli = TaichiMain ( test_mode = True ) <nl> + assert cli ( ) = = 1 <nl> <nl> <nl> def test_cli_exit_one_with_bogus_command_provided ( ) : <nl> with patch_sys_argv_helper ( [ " ti " , " bogus - command - not - registered - yet " ] ) : <nl> - with pytest . raises ( SystemExit ) as pytest_wrapped_err : <nl> - cli = TaichiMain ( test_mode = True ) <nl> - cli ( ) <nl> - assert pytest_wrapped_err . type = = SystemExit <nl> - assert pytest_wrapped_err . value . code = = 1 <nl> + cli = TaichiMain ( test_mode = True ) <nl> + assert cli ( ) = = 1 <nl> <nl> <nl> def test_cli_can_dispatch_commands_to_methods_correctly ( ) : <nl>
[ IPython ] Source inspection dispatcher for better IDLE compatibility ( )
taichi-dev/taichi
37976e750e1579ff9328f3d25f1a63cb3b3292e2
2020-06-24T07:00:42Z
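Alongside the source-inspection dispatcher, the taichi commit stops calling exit() inside TaichiMain: __call__ now returns a status code and only the sys.exit(main()) at module scope terminates the process, which is what lets test_cli.py assert directly on the return value. The same structure in C++ terms, with a hypothetical command set standing in for taichi's CLI:

    #include <cstdio>
    #include <string>
    #include <vector>

    // Sketch: the dispatcher reports failure through its return value instead
    // of terminating the process, so tests can call it directly.
    int runCli(const std::vector<std::string> &args) {
      if (args.empty()) {
        std::puts("usage: ti <command> [args...]");
        return 1;  // print help and report failure, but never exit() here
      }
      const std::string &command = args[0];
      if (command != "run" && command != "test") {  // hypothetical commands
        std::printf("%s is not a valid command!\n", command.c_str());
        return 1;
      }
      return 0;  // a real CLI would dispatch to the matching handler here
    }

    int main(int argc, char **argv) {
      // Only main() translates the status into a process exit code.
      return runCli(std::vector<std::string>(argv + 1, argv + argc));
    }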
mmm a / tensorflow / compiler / xla / service / heap_simulator . cc <nl> ppp b / tensorflow / compiler / xla / service / heap_simulator . cc <nl> GlobalDecreasingSizeBestFitHeap : : FindChunkCandidate ( <nl> offset = std : : max ( offset , RoundUpToNearest ( chunk . chunk_end ( ) , alignment_ ) ) ; <nl> } <nl> use_free_chunk_if_smaller ( offset , result_ . heap_size - offset ) ; <nl> + / / When preferred offset is provided and the preferred offset is larger than <nl> + / / the current heap size , simply use the preferred offset provided . <nl> + if ( result_ . heap_size < = preferred_offset ) { <nl> + chunk_candidate . heap_size = preferred_offset + buffer_interval . size ; <nl> + min_fit_chunk = { preferred_offset , buffer_interval . size } ; <nl> + } <nl> <nl> if ( min_fit_chunk . offset = = - 1 ) { <nl> / / Increase the heap size to fit in the last free chunk . <nl> mmm a / tensorflow / compiler / xla / service / memory_space_assignment . cc <nl> ppp b / tensorflow / compiler / xla / service / memory_space_assignment . cc <nl> namespace xla { <nl> <nl> namespace { <nl> / / Define a dummy chunk for chunks that will be allocated in the default memory <nl> - / / space . <nl> - const HeapSimulator : : Chunk kDefaultMemorySpaceDummyChunk { - 1 , - 1 } ; <nl> + / / space and for keeping track of number of asynchronous copies . <nl> + const HeapSimulator : : Chunk kDummyChunk { - 1 , - 1 } ; <nl> } / / namespace <nl> <nl> std : : vector < const GlobalDecreasingSizeBestFitHeap : : BufferInterval * > <nl> HeapSimulator : : Result AlternateMemoryBestFitHeap : : Finish ( ) { <nl> <nl> MemorySpaceAssignment : : AllocationSequence * allocation_sequence = <nl> & ( * allocation_map_ ) [ & buffer ] ; <nl> - if ( keep_in_default_memory ) { <nl> - continue ; <nl> - } <nl> <nl> / / At this point , none of the colocated buffers contain any phi buffers . <nl> for ( const BufferInterval * colocated_interval : colocated_intervals ) { <nl> + if ( keep_in_default_memory ) { <nl> + break ; <nl> + } <nl> const HloValue * value = colocated_interval - > buffer ; <nl> int64 definition_time = <nl> instruction_schedule_ - > at ( value - > defining_instruction ( ) ) ; <nl> HeapSimulator : : Result AlternateMemoryBestFitHeap : : Finish ( ) { <nl> / / Skip allocating buffers for bitcast uses . The uses that feed from <nl> / / bitcasts will be handled specially . <nl> if ( use . instruction - > opcode ( ) ! = HloOpcode : : kBitcast ) { <nl> - FindAllocation ( definition_time , use_time , value - > defining_position ( ) , <nl> - use , value , colocated_interval - > size , <nl> - allocation_sequence ) ; <nl> + if ( ! FindAllocation ( definition_time , use_time , <nl> + value - > defining_position ( ) , use , value , <nl> + colocated_interval - > size , allocation_sequence ) ) { <nl> + / / If the allocation finding failed ( e . g . , due to running out of <nl> + / / asynchronous copies ) , then fall back to allocating the buffer <nl> + / / entirely in the default memory . <nl> + pending_chunks_ . clear ( ) ; <nl> + pending_async_copies_ . clear ( ) ; <nl> + allocation_sequence - > clear ( ) ; <nl> + keep_in_default_memory = true ; <nl> + break ; <nl> + } <nl> + <nl> / / If there are multiple uses , they can try using the memory <nl> / / allocation already at the alternate memory . 
<nl> definition_time = use_time ; <nl> } <nl> } <nl> } <nl> + <nl> + CommitPendingChunks ( ) ; <nl> } <nl> <nl> if ( VLOG_IS_ON ( 3 ) ) { <nl> HloInstruction * AlternateMemoryBestFitHeap : : GetInstructionAt ( int64 time ) const { <nl> return flattened_instruction_sequence_ - > instructions ( ) [ time ] ; <nl> } <nl> <nl> - void AlternateMemoryBestFitHeap : : FindAllocation ( <nl> + void AlternateMemoryBestFitHeap : : CommitPendingChunks ( ) { <nl> + for ( auto interval_and_chunk : pending_chunks_ ) { <nl> + VLOG ( 3 ) < < " Committing chunk : " < < interval_and_chunk . first . start < < " - " <nl> + < < interval_and_chunk . first . end < < " : [ " <nl> + < < interval_and_chunk . second . chunk . offset < < " , " <nl> + < < interval_and_chunk . second . chunk . size < < " ] " ; <nl> + CommitChunk ( interval_and_chunk . first , interval_and_chunk . second ) ; <nl> + } <nl> + pending_chunks_ . clear ( ) ; <nl> + / / Also add the pending async copies to the interval tree . <nl> + if ( max_outstanding_async_copies_ > = 0 ) { <nl> + for ( auto interval : pending_async_copies_ ) { <nl> + async_copy_interval_tree_ . Add ( interval . first , interval . second , <nl> + kDummyChunk ) ; <nl> + } <nl> + } <nl> + pending_async_copies_ . clear ( ) ; <nl> + } <nl> + <nl> + void AlternateMemoryBestFitHeap : : AddToPendingChunks ( <nl> + const BufferInterval & buffer_interval , <nl> + const ChunkCandidate & chunk_candidate ) { <nl> + pending_chunks_ . emplace_back ( buffer_interval , chunk_candidate ) ; <nl> + } <nl> + <nl> + bool AlternateMemoryBestFitHeap : : FindAllocation ( <nl> int64 start_time , int64 end_time , HloPosition defining_position , HloUse use , <nl> const HloValue * buffer , int64 size , <nl> MemorySpaceAssignment : : AllocationSequence * allocations ) { <nl> void AlternateMemoryBestFitHeap : : FindAllocation ( <nl> if ( TryAllocatingInAlternateMemoryNoCopy ( <nl> start_time , end_time , defining_position , use , alternate_mem_interval , <nl> non_bitcast_operand , allocations ) ) { <nl> - return ; <nl> + return true ; <nl> } <nl> <nl> MemorySpaceAssignment : : Allocation * prev_allocation = nullptr ; <nl> void AlternateMemoryBestFitHeap : : FindAllocation ( <nl> / / TODO ( berkin ) : For now evictions happen relative to the most recent <nl> / / allocation in the alternate memory . We can potentially start evictions <nl> / / earlier and end later . <nl> - HloInstruction * earliest_instruction = <nl> - GetInstructionAt ( prev_allocation - > start_time ( ) ) ; <nl> - HloInstruction * latest_instruction = <nl> - GetInstructionAt ( prev_allocation - > end_time ( ) ) ; <nl> - <nl> VLOG ( 3 ) < < " Evicting buffer at " < < prev_allocation - > chunk ( ) . offset < < " ( " <nl> < < prev_allocation - > start_time ( ) < < " , " <nl> < < prev_allocation - > end_time ( ) < < " ) " ; <nl> - VLOG ( 3 ) < < " Copy to default mem between instructions " <nl> - < < earliest_instruction - > ToString ( ) < < " - " <nl> - < < latest_instruction - > ToString ( ) ; <nl> - <nl> - / / The live range of this buffer is from the start time of the previous <nl> - / / buffer that was in the alternate memory so that a buffer is allocated <nl> - / / during the copy . 
<nl> - allocations - > push_back ( <nl> - absl : : make_unique < MemorySpaceAssignment : : CopyAllocation > ( <nl> - * prev_allocation , MemorySpace : : kDefault , <nl> - kDefaultMemorySpaceDummyChunk , prev_allocation - > start_time ( ) , <nl> - end_time , earliest_instruction , latest_instruction ) ) ; <nl> + <nl> + / / See if this interval would violate the asynchronous copy limit . <nl> + if ( ! ViolatesMaximumOutstandingAsyncCopies ( prev_allocation - > start_time ( ) , <nl> + prev_allocation - > end_time ( ) ) ) { <nl> + AddAsyncCopy ( * prev_allocation , MemorySpace : : kDefault , kDummyChunk , <nl> + prev_allocation - > start_time ( ) , prev_allocation - > end_time ( ) , <nl> + allocations ) ; <nl> + <nl> + } else { <nl> + VLOG ( 3 ) < < " This violates the maximum async copies . " ; <nl> + / / If the original interval violated the limit , try sub - intervals within <nl> + / / this interval . <nl> + bool eviction_scheduled = false ; <nl> + for ( int64 time = prev_allocation - > start_time ( ) ; <nl> + time < = prev_allocation - > end_time ( ) ; + + time ) { <nl> + VLOG ( 3 ) < < " Try evicting ( " < < time < < " , " < < time < < " ) " ; <nl> + if ( ! ViolatesMaximumOutstandingAsyncCopies ( time , time ) ) { <nl> + VLOG ( 3 ) < < " Eviction successful . " ; <nl> + AddAsyncCopy ( * prev_allocation , MemorySpace : : kDefault , kDummyChunk , <nl> + time , time , allocations ) ; <nl> + eviction_scheduled = true ; <nl> + break ; <nl> + } <nl> + } <nl> + <nl> + if ( ! eviction_scheduled ) { <nl> + / / If the eviction couldn ' t be scheduled , then fail . This buffer will be <nl> + / / kept in the default memory . <nl> + VLOG ( 3 ) < < " Bailing : Could not evict " < < use . ToString ( ) <nl> + < < " because we hit the limit of maximum asynchronous copies " <nl> + < < " between " <nl> + < < GetInstructionAt ( prev_allocation - > start_time ( ) ) - > ToString ( ) <nl> + < < " and " <nl> + < < GetInstructionAt ( prev_allocation - > end_time ( ) ) - > ToString ( ) ; <nl> + return false ; <nl> + } <nl> + } <nl> } else if ( prev_allocation ! = nullptr & & <nl> prev_allocation - > memory_space ( ) = = MemorySpace : : kDefault & & <nl> prev_allocation - > instruction ( ) = = non_bitcast_operand ) { <nl> void AlternateMemoryBestFitHeap : : FindAllocation ( <nl> } else { <nl> allocations - > push_back ( absl : : make_unique < MemorySpaceAssignment : : Allocation > ( <nl> non_bitcast_operand , defining_position , MemorySpace : : kDefault , <nl> - kDefaultMemorySpaceDummyChunk , start_time , end_time ) ) ; <nl> + kDummyChunk , start_time , end_time ) ) ; <nl> } <nl> <nl> / / Try partially placing the buffer in the alternate space . The time that is <nl> void AlternateMemoryBestFitHeap : : FindAllocation ( <nl> VLOG ( 4 ) < < " Trying alternate memory allocation ( " <nl> < < alternate_mem_interval . start < < " , " <nl> < < alternate_mem_interval . end < < " ) " ; <nl> + / / If this additional asynchronous copy would violate the limit , try a <nl> + / / different interval . <nl> + if ( ViolatesMaximumOutstandingAsyncCopies ( alternate_mem_interval . start , <nl> + alternate_mem_interval . end ) ) { <nl> + VLOG ( 4 ) < < " This would violate the outstanding async copy limit . " ; <nl> + continue ; <nl> + } <nl> ChunkCandidate chunk_candidate = FindChunkCandidate ( alternate_mem_interval ) ; <nl> / / Check if the new heap size fits within limits . <nl> if ( chunk_candidate . 
heap_size < max_size_in_bytes_ ) { <nl> - HloInstruction * earliest_instruction = <nl> - GetInstructionAt ( alternate_mem_interval . start ) ; <nl> VLOG ( 3 ) < < " Move the buffer to alternate memory at " <nl> < < alternate_mem_interval . start <nl> < < " . Offset = " < < chunk_candidate . chunk . offset <nl> < < " , size = " < < chunk_candidate . chunk . size <nl> < < " , heap_size = " < < chunk_candidate . heap_size ; <nl> - VLOG ( 3 ) < < " Copy to alternate mem between instructions " <nl> - < < earliest_instruction - > ToString ( ) < < " - " <nl> - < < use . instruction - > ToString ( ) ; <nl> - CommitChunk ( alternate_mem_interval , chunk_candidate ) ; <nl> + AddToPendingChunks ( alternate_mem_interval , chunk_candidate ) ; <nl> + <nl> + AddAsyncCopy ( * allocations - > back ( ) . get ( ) , MemorySpace : : kAlternate , <nl> + chunk_candidate . chunk , alternate_mem_interval . start , <nl> + end_time , allocations ) ; <nl> <nl> - / / Since copies couldn ' t be removed , create an allocation in the <nl> - / / default memory space . <nl> - allocations - > push_back ( <nl> - absl : : make_unique < MemorySpaceAssignment : : CopyAllocation > ( <nl> - * allocations - > back ( ) . get ( ) , MemorySpace : : kAlternate , <nl> - chunk_candidate . chunk , alternate_mem_interval . start , end_time , <nl> - earliest_instruction , use . instruction ) ) ; <nl> allocations - > back ( ) - > AddUse ( use ) ; <nl> - return ; <nl> + return true ; <nl> } <nl> } <nl> <nl> / / If a copy wasn ' t inserted , then add this use to the latest allocation . <nl> allocations - > back ( ) - > AddUse ( use ) ; <nl> + return true ; <nl> + } <nl> + <nl> + void AlternateMemoryBestFitHeap : : AddAsyncCopy ( <nl> + const MemorySpaceAssignment : : Allocation & prev_allocation , <nl> + MemorySpace memory_space , Chunk chunk , int64 start_time , int64 end_time , <nl> + MemorySpaceAssignment : : AllocationSequence * allocations ) { <nl> + HloInstruction * earliest_instruction = GetInstructionAt ( start_time ) ; <nl> + HloInstruction * latest_instruction = GetInstructionAt ( end_time ) ; <nl> + <nl> + VLOG ( 3 ) < < " Copy to " <nl> + < < ( memory_space = = MemorySpaceAssignment : : MemorySpace : : kDefault <nl> + ? " default " <nl> + : " alternate " ) <nl> + < < " memory between instructions " < < earliest_instruction - > ToString ( ) <nl> + < < " - " < < latest_instruction - > ToString ( ) ; <nl> + <nl> + allocations - > push_back ( <nl> + absl : : make_unique < MemorySpaceAssignment : : CopyAllocation > ( <nl> + prev_allocation , memory_space , chunk , start_time , end_time , <nl> + earliest_instruction , latest_instruction ) ) ; <nl> + <nl> + / / Register the additional async copy with the interval tree to keep track of <nl> + / / the limit at any given time . <nl> + pending_async_copies_ . emplace_back ( start_time , end_time ) ; <nl> + } <nl> + <nl> + bool AlternateMemoryBestFitHeap : : ViolatesMaximumOutstandingAsyncCopies ( <nl> + int64 start_time , int64 end_time ) const { <nl> + if ( max_outstanding_async_copies_ < 0 ) { <nl> + return false ; <nl> + } <nl> + <nl> + / / Count both the asynchronous copies in the interval tree as well as the <nl> + / / pending asynchronous copies belonging to this buffer . <nl> + int64 num_async_copies = <nl> + async_copy_interval_tree_ . ChunksOverlappingInTime ( start_time , end_time ) <nl> + . size ( ) ; <nl> + <nl> + for ( auto interval : pending_async_copies_ ) { <nl> + if ( interval . second > start_time & & interval . 
first < end_time ) { <nl> + num_async_copies + + ; <nl> + } <nl> + } <nl> + / / Add one because we are checking if adding an additional asynchronous copy <nl> + / / would violate the limit . <nl> + return num_async_copies + 1 > max_outstanding_async_copies_ ; <nl> } <nl> <nl> bool AlternateMemoryBestFitHeap : : TryAllocatingInAlternateMemoryNoCopy ( <nl> bool AlternateMemoryBestFitHeap : : TryAllocatingInAlternateMemoryNoCopy ( <nl> < < chunk_candidate . chunk . offset <nl> < < " , size = " < < chunk_candidate . chunk . size <nl> < < " , heap_size = " < < chunk_candidate . heap_size ; <nl> - CommitChunk ( alternate_mem_interval , chunk_candidate ) ; <nl> + AddToPendingChunks ( alternate_mem_interval , chunk_candidate ) ; <nl> <nl> / / If there was a previous allocation , the buffer location is the <nl> / / same as the previous . Otherwise , it is the operand . <nl> bool AlternateMemoryBestFitHeap : : TryAllocatingInAlternateMemoryNoCopy ( <nl> return false ; <nl> } <nl> <nl> + / * static * / int64 MemorySpaceAssignment : : CountMaximumOutstandingAsyncCopies ( <nl> + const HloModule & module ) { <nl> + int64 max_copies = 0 ; <nl> + int64 current_copies = 0 ; <nl> + for ( HloInstruction * instruction : <nl> + module . schedule ( ) . sequence ( module . entry_computation ( ) ) . instructions ( ) ) { <nl> + if ( instruction - > opcode ( ) = = HloOpcode : : kCopyStart ) { <nl> + current_copies + + ; <nl> + } else if ( instruction - > opcode ( ) = = HloOpcode : : kCopyDone ) { <nl> + current_copies - - ; <nl> + } <nl> + max_copies = std : : max ( max_copies , current_copies ) ; <nl> + } <nl> + return max_copies ; <nl> + } <nl> + <nl> / * static * / StatusOr < std : : unique_ptr < PresetAssignments > > <nl> MemorySpaceAssignment : : Run ( <nl> HloModule * module , int64 alternate_memory_space , int64 max_size_in_bytes , <nl> MemorySpaceAssignment : : Run ( <nl> int64 alternate_memory_space_alignment_in_bytes , <nl> BufferValue : : SizeFunction size_fn , <nl> AlternateMemoryBestFitHeap : : IsAllowedInAlternateMemoryFunction <nl> - is_allowed_in_alternate_mem ) { <nl> + is_allowed_in_alternate_mem , <nl> + int64 max_outstanding_async_copies ) { <nl> CHECK ( module - > has_schedule ( ) ) ; <nl> VLOG ( 4 ) < < " Module before memory space assignment : " ; <nl> XLA_VLOG_LINES ( 4 , module - > ToString ( ) ) ; <nl> MemorySpaceAssignment : : Run ( <nl> min_prefetch_interval , max_prefetch_interval , * alias_analysis , <nl> alternate_memory_space_alignment_in_bytes , <nl> GlobalDecreasingSizeBestFitHeap : : Type : : kSpatial , <nl> - is_allowed_in_alternate_mem ) ; <nl> + is_allowed_in_alternate_mem , max_outstanding_async_copies ) ; <nl> <nl> TF_RETURN_IF_ERROR ( HeapSimulator : : Run ( std : : move ( algorithm ) , * module , <nl> module - > schedule ( ) , <nl> MemorySpaceAssignment : : Run ( <nl> VLOG ( 4 ) < < " Module after memory space assignment : " ; <nl> XLA_VLOG_LINES ( 4 , module - > ToString ( ) ) ; <nl> TF_CHECK_OK ( module - > schedule ( ) . Verify ( ) ) ; <nl> + VLOG ( 1 ) < < " Maximum number of outstanding async copies : " <nl> + < < CountMaximumOutstandingAsyncCopies ( * module ) ; <nl> <nl> return std : : move ( memory_space_assignment . preset_assignments_ ) ; <nl> } <nl> mmm a / tensorflow / compiler / xla / service / memory_space_assignment . h <nl> ppp b / tensorflow / compiler / xla / service / memory_space_assignment . 
h <nl> class MemorySpaceAssignment { <nl> / / in the alternate memory space , size_fn is the size function for buffer <nl> / / values , and is_allowed_in_alternate_mem can be used to prevent certain <nl> / / HloValues ( e . g . , based on the opcode ) to be placed on the alternate memory . <nl> + / / max_outstanding_async_copies specifies the upper bound for number of <nl> + / / outstanding asynchronous copies , - 1 for unlimited . <nl> / / TODO ( berkin ) : Use the cost model instead of using number of instructions to <nl> / / decide how early to prefetch . <nl> static StatusOr < std : : unique_ptr < PresetAssignments > > Run ( <nl> class MemorySpaceAssignment { <nl> int64 min_prefetch_interval , int64 max_prefetch_interval , <nl> int64 alternate_memory_space_alignment_in_bytes , <nl> BufferValue : : SizeFunction size_fn , <nl> - std : : function < bool ( const HloValue & ) > is_allowed_in_alternate_mem ) ; <nl> + std : : function < bool ( const HloValue & ) > is_allowed_in_alternate_mem , <nl> + int64 max_outstanding_async_copies = - 1 ) ; <nl> + <nl> + / / Returns the maximum number of outstanding asynchronous copies in the <nl> + / / module . <nl> + static int64 CountMaximumOutstandingAsyncCopies ( const HloModule & module ) ; <nl> <nl> private : <nl> MemorySpaceAssignment ( HloModule * module , int64 alternate_memory_space ) <nl> class AlternateMemoryBestFitHeap : public GlobalDecreasingSizeBestFitHeap { <nl> int64 max_size_in_bytes , int64 min_prefetch_interval , <nl> int64 max_prefetch_interval , const HloAliasAnalysis & alias_analysis , <nl> int64 alignment , GlobalDecreasingSizeBestFitHeap : : Type type , <nl> - IsAllowedInAlternateMemoryFunction is_allowed_in_alternate_mem ) <nl> + IsAllowedInAlternateMemoryFunction is_allowed_in_alternate_mem , <nl> + int64 max_outstanding_async_copies ) <nl> : GlobalDecreasingSizeBestFitHeap ( alignment , type ) , <nl> allocation_map_ ( allocation_map ) , <nl> max_size_in_bytes_ ( max_size_in_bytes ) , <nl> min_prefetch_interval_ ( min_prefetch_interval ) , <nl> max_prefetch_interval_ ( max_prefetch_interval ) , <nl> alias_analysis_ ( alias_analysis ) , <nl> - is_allowed_in_alternate_mem_ ( is_allowed_in_alternate_mem ) { } <nl> + is_allowed_in_alternate_mem_ ( is_allowed_in_alternate_mem ) , <nl> + max_outstanding_async_copies_ ( max_outstanding_async_copies ) { } <nl> <nl> HeapSimulator : : Result Finish ( ) override ; <nl> <nl> class AlternateMemoryBestFitHeap : public GlobalDecreasingSizeBestFitHeap { <nl> / / find a suitable chunk candidate within the heap size and prefetch interval <nl> / / limits , and append the new allocation ( s ) to allocations . The new <nl> / / allocations can be in default or alternate memory spaces , or can be <nl> - / / prefetches or evictions . <nl> - void FindAllocation ( int64 start_time , int64 end_time , <nl> + / / prefetches or evictions . Returns true if successful . <nl> + bool FindAllocation ( int64 start_time , int64 end_time , <nl> HloPosition defining_position , HloUse use , <nl> const HloValue * buffer , int64 size , <nl> MemorySpaceAssignment : : AllocationSequence * allocations ) ; <nl> class AlternateMemoryBestFitHeap : public GlobalDecreasingSizeBestFitHeap { <nl> / / unnecessarily adding the chunk to the chunk map . <nl> void AddToChunkMap ( const HloValue * buffer , Chunk chunk ) override { } <nl> <nl> + / / Returns true if the addition of an asynchronous copy in the given time <nl> + / / interval would violate the maximum number of asynchronous copies . 
<nl> + bool ViolatesMaximumOutstandingAsyncCopies ( int64 start_time , <nl> + int64 end_time ) const ; <nl> + <nl> + / / Adds an asynchronous copy to the allocations . <nl> + void AddAsyncCopy ( const MemorySpaceAssignment : : Allocation & prev_allocation , <nl> + MemorySpace memory_space , Chunk chunk , int64 start_time , <nl> + int64 end_time , <nl> + MemorySpaceAssignment : : AllocationSequence * allocations ) ; <nl> + <nl> + / / These methods are used for delaying committing the chunk candidate until <nl> + / / the entire live range of the buffer has been considered . <nl> + void AddToPendingChunks ( const BufferInterval & buffer_interval , <nl> + const ChunkCandidate & chunk_candidate ) ; <nl> + void CommitPendingChunks ( ) ; <nl> + <nl> MemorySpaceAssignment : : AllocationMap * allocation_map_ ; <nl> int64 max_size_in_bytes_ ; <nl> / / The min and max prefetch intervals describe the number of independent HLOs <nl> class AlternateMemoryBestFitHeap : public GlobalDecreasingSizeBestFitHeap { <nl> int64 max_prefetch_interval_ ; <nl> const HloAliasAnalysis & alias_analysis_ ; <nl> IsAllowedInAlternateMemoryFunction is_allowed_in_alternate_mem_ ; <nl> + / / We use an interval tree to keep track of the number of outstanding <nl> + / / asynchronous copies . <nl> + BufferIntervalTree async_copy_interval_tree_ ; <nl> + int64 max_outstanding_async_copies_ ; <nl> + std : : vector < std : : pair < BufferInterval , ChunkCandidate > > pending_chunks_ ; <nl> + std : : vector < std : : pair < int64 , int64 > > pending_async_copies_ ; <nl> } ; <nl> <nl> } / / namespace xla <nl> mmm a / tensorflow / compiler / xla / service / memory_space_assignment_test . cc <nl> ppp b / tensorflow / compiler / xla / service / memory_space_assignment_test . cc <nl> class MemorySpaceAssignmentTest : public HloTestBase { <nl> const int64 kDefaultMemorySpace = 0 ; <nl> const int64 kAlternateMemorySpace = 1 ; <nl> <nl> - std : : unique_ptr < PresetAssignments > AssignMemorySpace ( HloModule * module ) { <nl> + std : : unique_ptr < PresetAssignments > AssignMemorySpace ( <nl> + HloModule * module , int64 max_outstanding_async_copies = - 1 ) { <nl> auto size_fn = [ ] ( const BufferValue & buffer ) { <nl> return ShapeUtil : : ByteSizeOf ( buffer . shape ( ) , / * pointer_size = * / 8 ) ; <nl> } ; <nl> class MemorySpaceAssignmentTest : public HloTestBase { <nl> / * min_prefetch_interval = * / 2 , <nl> / * max_prefetch_interval = * / 10 , <nl> / * alternate_memory_space_alignment_in_bytes = * / 8 , size_fn , <nl> - is_allowed_in_alternate_mem ) <nl> + is_allowed_in_alternate_mem , max_outstanding_async_copies ) <nl> . ValueOrDie ( ) ; <nl> CheckPresetAssignments ( preset_assignments . get ( ) ) ; <nl> return preset_assignments ; <nl> } <nl> class MemorySpaceAssignmentTest : public HloTestBase { <nl> < < position . ToString ( ) ; <nl> } <nl> } <nl> + <nl> + std : : unique_ptr < HloModule > CreateEvictAndPrefetchModule ( ) { <nl> + HloComputation : : Builder builder ( TestName ( ) ) ; <nl> + Shape shape = ShapeUtil : : MakeShape ( F32 , { 2 , 3 } ) ; <nl> + HloInstruction * p0 = <nl> + builder . AddInstruction ( HloInstruction : : CreateParameter ( 0 , shape , " p0 " ) ) ; <nl> + HloInstruction * p1 = <nl> + builder . AddInstruction ( HloInstruction : : CreateParameter ( 1 , shape , " p1 " ) ) ; <nl> + HloInstruction * tanh = builder .
AddInstruction ( <nl> + HloInstruction : : CreateUnary ( shape , HloOpcode : : kTanh , p0 ) ) ; <nl> + / / tanh should be placed in the alternate memory since there isn ' t much <nl> + / / contention in the beginning . However , tanh has another consumer at the <nl> + / / end . So it should be kicked out to default memory and prefetched back in . <nl> + / / The graph below is meant to increase the contention to force <nl> + / / eviction / prefetch behavior . <nl> + HloInstruction * a = builder . AddInstruction ( <nl> + HloInstruction : : CreateBinary ( shape , HloOpcode : : kAdd , p0 , tanh ) ) ; <nl> + HloInstruction * b = builder . AddInstruction ( <nl> + HloInstruction : : CreateBinary ( shape , HloOpcode : : kSubtract , p0 , p1 ) ) ; <nl> + HloInstruction * c = builder . AddInstruction ( <nl> + HloInstruction : : CreateBinary ( shape , HloOpcode : : kMultiply , p0 , p1 ) ) ; <nl> + HloInstruction * d = builder . AddInstruction ( <nl> + HloInstruction : : CreateBinary ( shape , HloOpcode : : kSubtract , p0 , p1 ) ) ; <nl> + HloInstruction * e = builder . AddInstruction ( <nl> + HloInstruction : : CreateBinary ( shape , HloOpcode : : kMultiply , a , b ) ) ; <nl> + HloInstruction * f = builder . AddInstruction ( <nl> + HloInstruction : : CreateBinary ( shape , HloOpcode : : kMultiply , a , c ) ) ; <nl> + HloInstruction * g = builder . AddInstruction ( <nl> + HloInstruction : : CreateBinary ( shape , HloOpcode : : kMultiply , a , d ) ) ; <nl> + HloInstruction * h = builder . AddInstruction ( <nl> + HloInstruction : : CreateBinary ( shape , HloOpcode : : kMultiply , b , c ) ) ; <nl> + HloInstruction * i = builder . AddInstruction ( <nl> + HloInstruction : : CreateBinary ( shape , HloOpcode : : kMultiply , b , d ) ) ; <nl> + HloInstruction * j = builder . AddInstruction ( <nl> + HloInstruction : : CreateBinary ( shape , HloOpcode : : kMultiply , c , d ) ) ; <nl> + HloInstruction * k = builder . AddInstruction ( <nl> + HloInstruction : : CreateBinary ( shape , HloOpcode : : kAdd , e , f ) ) ; <nl> + HloInstruction * l = builder . AddInstruction ( <nl> + HloInstruction : : CreateBinary ( shape , HloOpcode : : kAdd , g , h ) ) ; <nl> + HloInstruction * m = builder . AddInstruction ( <nl> + HloInstruction : : CreateBinary ( shape , HloOpcode : : kAdd , i , j ) ) ; <nl> + HloInstruction * n = builder . AddInstruction ( <nl> + HloInstruction : : CreateBinary ( shape , HloOpcode : : kAdd , k , l ) ) ; <nl> + HloInstruction * o = builder . AddInstruction ( <nl> + HloInstruction : : CreateBinary ( shape , HloOpcode : : kAdd , n , m ) ) ; <nl> + / / tanh is being used at the root instruction , and this should be <nl> + / / prefetched . <nl> + HloInstruction * add = builder . AddInstruction ( <nl> + HloInstruction : : CreateBinary ( shape , HloOpcode : : kAdd , o , tanh ) ) ; <nl> + <nl> + auto module = CreateNewVerifiedModule ( ) ; <nl> + HloComputation * computation = module - > AddEntryComputation ( builder . Build ( ) ) ; <nl> + <nl> + HloSchedule schedule ( module . get ( ) ) ; <nl> + schedule . set_sequence ( computation , { p0 , p1 , tanh , a , b , c , d , e , f , g , h , i , <nl> + j , k , l , m , n , o , add } ) ; <nl> + TF_CHECK_OK ( module - > set_schedule ( schedule ) ) ; <nl> + return module ; <nl> + } <nl> } ; <nl> <nl> TEST_F ( MemorySpaceAssignmentTest , ParameterOnly ) { <nl> TEST_F ( MemorySpaceAssignmentTest , Simple ) { <nl> EXPECT_THAT ( sub , op : : ShapeWithLayout ( shape_in_alternate_mem ) ) ; <nl> <nl> / / Make sure the preset assignments is sane . 
<nl> - EXPECT_THAT ( preset_assignments - > chunks ( ) . size ( ) , 2 ) ; <nl> - EXPECT_THAT ( preset_assignments - > sizes ( ) . size ( ) , 1 ) ; <nl> + EXPECT_EQ ( preset_assignments - > chunks ( ) . size ( ) , 2 ) ; <nl> + EXPECT_EQ ( preset_assignments - > sizes ( ) . size ( ) , 1 ) ; <nl> + / / Ensure the offset assigned to add and sub are different . <nl> + EXPECT_NE ( preset_assignments - > chunks ( ) [ 0 ] . second . offset , <nl> + preset_assignments - > chunks ( ) [ 1 ] . second . offset ) ; <nl> } <nl> <nl> TEST_F ( MemorySpaceAssignmentTest , NegateChain ) { <nl> TEST_F ( MemorySpaceAssignmentTest , NegateChain ) { <nl> } <nl> <nl> TEST_F ( MemorySpaceAssignmentTest , EvictAndPrefetch ) { <nl> - HloComputation : : Builder builder ( TestName ( ) ) ; <nl> - Shape shape = ShapeUtil : : MakeShape ( F32 , { 2 , 3 } ) ; <nl> - HloInstruction * p0 = <nl> - builder . AddInstruction ( HloInstruction : : CreateParameter ( 0 , shape , " p0 " ) ) ; <nl> - HloInstruction * p1 = <nl> - builder . AddInstruction ( HloInstruction : : CreateParameter ( 1 , shape , " p1 " ) ) ; <nl> - HloInstruction * tanh = builder . AddInstruction ( <nl> - HloInstruction : : CreateUnary ( shape , HloOpcode : : kTanh , p0 ) ) ; <nl> - / / tanh should be placed in the alternate memory since there isn ' t much <nl> - / / contention in the beginning . However , tanh has another consumer at the end . <nl> - / / So it should be kicked out to default memory and prefetched back in . <nl> - / / The graph below is meant to increase the contention to force <nl> - / / eviction / prefetch behavior . <nl> - HloInstruction * a = builder . AddInstruction ( <nl> - HloInstruction : : CreateBinary ( shape , HloOpcode : : kAdd , p0 , tanh ) ) ; <nl> - HloInstruction * b = builder . AddInstruction ( <nl> - HloInstruction : : CreateBinary ( shape , HloOpcode : : kSubtract , p0 , p1 ) ) ; <nl> - HloInstruction * c = builder . AddInstruction ( <nl> - HloInstruction : : CreateBinary ( shape , HloOpcode : : kMultiply , p0 , p1 ) ) ; <nl> - HloInstruction * d = builder . AddInstruction ( <nl> - HloInstruction : : CreateBinary ( shape , HloOpcode : : kSubtract , p0 , p1 ) ) ; <nl> - HloInstruction * e = builder . AddInstruction ( <nl> - HloInstruction : : CreateBinary ( shape , HloOpcode : : kMultiply , a , b ) ) ; <nl> - HloInstruction * f = builder . AddInstruction ( <nl> - HloInstruction : : CreateBinary ( shape , HloOpcode : : kMultiply , a , c ) ) ; <nl> - HloInstruction * g = builder . AddInstruction ( <nl> - HloInstruction : : CreateBinary ( shape , HloOpcode : : kMultiply , a , d ) ) ; <nl> - HloInstruction * h = builder . AddInstruction ( <nl> - HloInstruction : : CreateBinary ( shape , HloOpcode : : kMultiply , b , c ) ) ; <nl> - HloInstruction * i = builder . AddInstruction ( <nl> - HloInstruction : : CreateBinary ( shape , HloOpcode : : kMultiply , b , d ) ) ; <nl> - HloInstruction * j = builder . AddInstruction ( <nl> - HloInstruction : : CreateBinary ( shape , HloOpcode : : kMultiply , c , d ) ) ; <nl> - HloInstruction * k = builder . AddInstruction ( <nl> - HloInstruction : : CreateBinary ( shape , HloOpcode : : kAdd , e , f ) ) ; <nl> - HloInstruction * l = builder . AddInstruction ( <nl> - HloInstruction : : CreateBinary ( shape , HloOpcode : : kAdd , g , h ) ) ; <nl> - HloInstruction * m = builder . AddInstruction ( <nl> - HloInstruction : : CreateBinary ( shape , HloOpcode : : kAdd , i , j ) ) ; <nl> - HloInstruction * n = builder . 
AddInstruction ( <nl> - HloInstruction : : CreateBinary ( shape , HloOpcode : : kAdd , k , l ) ) ; <nl> - HloInstruction * o = builder . AddInstruction ( <nl> - HloInstruction : : CreateBinary ( shape , HloOpcode : : kAdd , n , m ) ) ; <nl> - / / tanh is being used at the root instruction , and this should be prefetched . <nl> - HloInstruction * add = builder . AddInstruction ( <nl> - HloInstruction : : CreateBinary ( shape , HloOpcode : : kAdd , o , tanh ) ) ; <nl> - <nl> - auto module = CreateNewVerifiedModule ( ) ; <nl> - HloComputation * computation = module - > AddEntryComputation ( builder . Build ( ) ) ; <nl> - <nl> - HloSchedule schedule ( module . get ( ) ) ; <nl> - schedule . set_sequence ( computation , { p0 , p1 , tanh , a , b , c , d , e , f , g , h , i , <nl> - j , k , l , m , n , o , add } ) ; <nl> - TF_CHECK_OK ( module - > set_schedule ( schedule ) ) ; <nl> + std : : unique_ptr < HloModule > module = CreateEvictAndPrefetchModule ( ) ; <nl> <nl> AssignMemorySpace ( module . get ( ) ) ; <nl> <nl> EXPECT_THAT ( <nl> - add , <nl> + module - > entry_computation ( ) - > root_instruction ( ) , <nl> op : : Add ( op : : Add ( ) , <nl> op : : AsyncCopy ( kAlternateMemorySpace , kDefaultMemorySpace , <nl> op : : AsyncCopy ( kDefaultMemorySpace , <nl> kAlternateMemorySpace , op : : Tanh ( ) ) ) ) ) ; <nl> + <nl> + EXPECT_EQ ( MemorySpaceAssignment : : CountMaximumOutstandingAsyncCopies ( * module ) , <nl> + 2 ) ; <nl> + } <nl> + <nl> + TEST_F ( MemorySpaceAssignmentTest , EvictAndPrefetchLimitAsyncCopies0 ) { <nl> + std : : unique_ptr < HloModule > module = CreateEvictAndPrefetchModule ( ) ; <nl> + <nl> + AssignMemorySpace ( module . get ( ) , / * max_outstanding_async_copies = * / 0 ) ; <nl> + <nl> + EXPECT_EQ ( MemorySpaceAssignment : : CountMaximumOutstandingAsyncCopies ( * module ) , <nl> + 0 ) ; <nl> + } <nl> + <nl> + TEST_F ( MemorySpaceAssignmentTest , EvictAndPrefetchLimitAsyncCopies1 ) { <nl> + std : : unique_ptr < HloModule > module = CreateEvictAndPrefetchModule ( ) ; <nl> + <nl> + AssignMemorySpace ( module . get ( ) , / * max_outstanding_async_copies = * / 1 ) ; <nl> + <nl> + EXPECT_EQ ( MemorySpaceAssignment : : CountMaximumOutstandingAsyncCopies ( * module ) , <nl> + 1 ) ; <nl> } <nl> <nl> TEST_F ( MemorySpaceAssignmentTest , While ) { <nl> mmm a / tensorflow / opensource_only . files <nl> ppp b / tensorflow / opensource_only . files <nl> tensorflow / python / tpu / profiler / pip_package / build_pip_package . sh <nl> tensorflow / python / tpu / profiler / pip_package / setup . py <nl> tensorflow / stream_executor / build_defs . bzl <nl> tensorflow / third_party / BUILD <nl> - tensorflow / third_party / __init__ . py <nl> tensorflow / third_party / android / BUILD <nl> tensorflow / third_party / android / android . bzl . tpl <nl> tensorflow / third_party / android / android_configure . BUILD . tpl <nl> tensorflow / third_party / android / android_configure . bzl <nl> + tensorflow / third_party / __init__ . py <nl> tensorflow / third_party / arm_neon_2_x86_sse . BUILD <nl> tensorflow / third_party / astor . BUILD <nl> - tensorflow / third_party / backports_weakref . BUILD <nl> tensorflow / third_party / boringssl / BUILD <nl> + tensorflow / third_party / backports_weakref . BUILD <nl> tensorflow / third_party / clang_toolchain / BUILD <nl> tensorflow / third_party / clang_toolchain / cc_configure_clang . bzl <nl> tensorflow / third_party / clang_toolchain / download_clang . bzl <nl> tensorflow / third_party / codegen . 
BUILD <nl> tensorflow / third_party / com_google_absl . BUILD <nl> tensorflow / third_party / common . bzl <nl> tensorflow / third_party / cub . BUILD <nl> - tensorflow / third_party / cython . BUILD <nl> tensorflow / third_party / curl . BUILD <nl> - tensorflow / third_party / eigen . BUILD <nl> + tensorflow / third_party / cython . BUILD <nl> tensorflow / third_party / double_conversion . BUILD <nl> - tensorflow / third_party / eigen3 / BUILD <nl> - tensorflow / third_party / eigen3 / Eigen / Core <nl> tensorflow / third_party / eigen3 / Eigen / Cholesky <nl> + tensorflow / third_party / eigen3 / Eigen / Core <nl> tensorflow / third_party / eigen3 / Eigen / Eigenvalues <nl> tensorflow / third_party / eigen3 / Eigen / LU <nl> tensorflow / third_party / eigen3 / Eigen / QR <nl> tensorflow / third_party / eigen3 / Eigen / SVD <nl> + tensorflow / third_party / eigen3 / BUILD <nl> tensorflow / third_party / eigen3 / LICENSE <nl> tensorflow / third_party / eigen3 / gpu_packet_math . patch <nl> + tensorflow / third_party / eigen3 / unsupported / Eigen / CXX11 / FixedPoint <nl> tensorflow / third_party / eigen3 / unsupported / Eigen / CXX11 / ThreadPool <nl> tensorflow / third_party / eigen3 / unsupported / Eigen / CXX11 / Tensor <nl> - tensorflow / third_party / eigen3 / unsupported / Eigen / CXX11 / FixedPoint <nl> tensorflow / third_party / eigen3 / unsupported / Eigen / CXX11 / src / FixedPoint / MatMatProduct . h <nl> - tensorflow / third_party / eigen3 / unsupported / Eigen / CXX11 / src / FixedPoint / MatMatProductNEON . h <nl> - tensorflow / third_party / eigen3 / unsupported / Eigen / CXX11 / src / FixedPoint / FixedPointTypes . h <nl> tensorflow / third_party / eigen3 / unsupported / Eigen / CXX11 / src / FixedPoint / MatMatProductAVX2 . h <nl> + tensorflow / third_party / eigen3 / unsupported / Eigen / CXX11 / src / FixedPoint / FixedPointTypes . h <nl> + tensorflow / third_party / eigen3 / unsupported / Eigen / CXX11 / src / FixedPoint / MatMatProductNEON . h <nl> + tensorflow / third_party / eigen3 / unsupported / Eigen / CXX11 / src / FixedPoint / PacketMathAVX512 . h <nl> tensorflow / third_party / eigen3 / unsupported / Eigen / CXX11 / src / FixedPoint / MatVecProduct . h <nl> tensorflow / third_party / eigen3 / unsupported / Eigen / CXX11 / src / FixedPoint / PacketMathAVX2 . h <nl> - tensorflow / third_party / eigen3 / unsupported / Eigen / CXX11 / src / FixedPoint / PacketMathAVX512 . h <nl> tensorflow / third_party / eigen3 / unsupported / Eigen / CXX11 / src / FixedPoint / TypeCastingAVX2 . h <nl> tensorflow / third_party / eigen3 / unsupported / Eigen / CXX11 / src / FixedPoint / TypeCastingAVX512 . h <nl> tensorflow / third_party / eigen3 / unsupported / Eigen / MatrixFunctions <nl> tensorflow / third_party / eigen3 / unsupported / Eigen / SpecialFunctions <nl> - tensorflow / third_party / enum34 . BUILD <nl> - tensorflow / third_party / farmhash . BUILD <nl> + tensorflow / third_party / eigen . BUILD <nl> tensorflow / third_party / fft2d / BUILD <nl> tensorflow / third_party / fft2d / LICENSE <nl> - tensorflow / third_party / fft2d / fft2d . BUILD <nl> tensorflow / third_party / fft2d / fft . h <nl> + tensorflow / third_party / fft2d / fft2d . BUILD <nl> tensorflow / third_party / fft2d / fft2d . h <nl> + tensorflow / third_party / enum34 . BUILD <nl> + tensorflow / third_party / farmhash . BUILD <nl> + tensorflow / third_party / git / BUILD <nl> + tensorflow / third_party / git / BUILD . tpl <nl> + tensorflow / third_party / git / git_configure . 
bzl <nl> tensorflow / third_party / functools32 . BUILD <nl> tensorflow / third_party / gast . BUILD <nl> tensorflow / third_party / gif . BUILD <nl> - tensorflow / third_party / git / BUILD . tpl <nl> - tensorflow / third_party / git / BUILD <nl> - tensorflow / third_party / git / git_configure . bzl <nl> - tensorflow / third_party / googleapis . BUILD <nl> - tensorflow / third_party / gpus / BUILD <nl> tensorflow / third_party / gpus / crosstool / BUILD <nl> tensorflow / third_party / gpus / crosstool / BUILD . tpl <nl> tensorflow / third_party / gpus / crosstool / LICENSE <nl> tensorflow / third_party / gpus / crosstool / clang / bin / crosstool_wrapper_driver_is_not_gcc . tpl <nl> tensorflow / third_party / gpus / crosstool / clang / bin / crosstool_wrapper_driver_rocm . tpl <nl> tensorflow / third_party / gpus / crosstool / windows / msvc_wrapper_for_nvcc . py . tpl <nl> + tensorflow / third_party / gpus / BUILD <nl> tensorflow / third_party / gpus / cuda / BUILD <nl> - tensorflow / third_party / gpus / cuda / BUILD . tpl <nl> tensorflow / third_party / gpus / cuda / BUILD . windows . tpl <nl> - tensorflow / third_party / gpus / cuda / build_defs . bzl . tpl <nl> + tensorflow / third_party / gpus / cuda / BUILD . tpl <nl> tensorflow / third_party / gpus / cuda / LICENSE <nl> + tensorflow / third_party / gpus / cuda / build_defs . bzl . tpl <nl> tensorflow / third_party / gpus / cuda / cuda_config . h . tpl <nl> - tensorflow / third_party / gpus / cuda_configure . bzl <nl> tensorflow / third_party / gpus / rocm / BUILD <nl> - tensorflow / third_party / gpus / rocm / build_defs . bzl . tpl <nl> tensorflow / third_party / gpus / rocm / BUILD . tpl <nl> tensorflow / third_party / gpus / rocm / rocm_config . h . tpl <nl> + tensorflow / third_party / gpus / rocm / build_defs . bzl . tpl <nl> + tensorflow / third_party / gpus / cuda_configure . bzl <nl> tensorflow / third_party / gpus / find_cuda_config . py <nl> tensorflow / third_party / gpus / rocm_configure . bzl <nl> + tensorflow / third_party / googleapis . BUILD <nl> tensorflow / third_party / grpc / BUILD <nl> tensorflow / third_party / icu / udata . patch <nl> - tensorflow / third_party / jsoncpp . BUILD <nl> - tensorflow / third_party / kafka / config . patch <nl> tensorflow / third_party / kafka / BUILD <nl> - tensorflow / third_party / libxsmm . BUILD <nl> - tensorflow / third_party / linenoise . BUILD <nl> + tensorflow / third_party / kafka / config . patch <nl> + tensorflow / third_party / jsoncpp . BUILD <nl> tensorflow / third_party / llvm / BUILD <nl> tensorflow / third_party / llvm / expand_cmake_vars . py <nl> tensorflow / third_party / llvm / llvm . autogenerated . BUILD <nl> tensorflow / third_party / llvm / llvm . bzl <nl> - tensorflow / third_party / mkl / LICENSE <nl> + tensorflow / third_party / libxsmm . BUILD <nl> + tensorflow / third_party / linenoise . BUILD <nl> + tensorflow / third_party / lmdb . BUILD <nl> tensorflow / third_party / mkl / BUILD <nl> + tensorflow / third_party / mkl / LICENSE <nl> tensorflow / third_party / mkl / MKL_LICENSE <nl> tensorflow / third_party / mkl / build_defs . bzl <nl> tensorflow / third_party / mkl / mkl . BUILD <nl> - tensorflow / third_party / lmdb . BUILD <nl> - tensorflow / third_party / mkl_dnn / mkldnn . BUILD <nl> tensorflow / third_party / mkl_dnn / LICENSE <nl> + tensorflow / third_party / mkl_dnn / mkldnn . BUILD <nl> tensorflow / third_party / mpi / . 
gitignore <nl> tensorflow / third_party / mpi / BUILD <nl> tensorflow / third_party / mpi_collectives / BUILD <nl> tensorflow / third_party / nccl / system . BUILD . tpl <nl> tensorflow / third_party / ngraph / BUILD <nl> tensorflow / third_party / ngraph / LICENSE <nl> tensorflow / third_party / ngraph / NGRAPH_LICENSE <nl> + tensorflow / third_party / ngraph / ngraph_tf . BUILD <nl> tensorflow / third_party / ngraph / build_defs . bzl <nl> tensorflow / third_party / ngraph / ngraph . BUILD <nl> tensorflow / third_party / ngraph / nlohmann_json . BUILD <nl> - tensorflow / third_party / ngraph / ngraph_tf . BUILD <nl> tensorflow / third_party / ngraph / tbb . BUILD <nl> tensorflow / third_party / opt_einsum . BUILD <nl> tensorflow / third_party / pcre . BUILD <nl> tensorflow / third_party / png . BUILD <nl> tensorflow / third_party / png_fix_rpi . patch <nl> - tensorflow / third_party / pprof . BUILD <nl> tensorflow / third_party / protobuf / BUILD <nl> - tensorflow / third_party / py / BUILD . tpl <nl> - tensorflow / third_party / py / BUILD <nl> + tensorflow / third_party / pprof . BUILD <nl> tensorflow / third_party / py / numpy / BUILD <nl> + tensorflow / third_party / py / BUILD <nl> + tensorflow / third_party / py / BUILD . tpl <nl> tensorflow / third_party / py / python_configure . bzl <nl> - tensorflow / third_party / pybind11 . BUILD <nl> tensorflow / third_party / python_runtime / BUILD <nl> + tensorflow / third_party / pybind11 . BUILD <nl> tensorflow / third_party / repo . bzl <nl> tensorflow / third_party / six . BUILD <nl> tensorflow / third_party / snappy . BUILD <nl> tensorflow / third_party / sqlite . BUILD <nl> tensorflow / third_party / swig . BUILD <nl> tensorflow / third_party / sycl / crosstool / BUILD <nl> - tensorflow / third_party / systemlibs / BUILD <nl> tensorflow / third_party / systemlibs / BUILD . tpl <nl> + tensorflow / third_party / systemlibs / BUILD <nl> tensorflow / third_party / systemlibs / absl_py . BUILD <nl> - tensorflow / third_party / systemlibs / absl_py . absl . flags . BUILD <nl> tensorflow / third_party / systemlibs / absl_py . absl . testing . BUILD <nl> + tensorflow / third_party / systemlibs / absl_py . absl . flags . BUILD <nl> tensorflow / third_party / systemlibs / astor . BUILD <nl> - tensorflow / third_party / systemlibs / build_defs . bzl . tpl <nl> tensorflow / third_party / systemlibs / boringssl . BUILD <nl> - tensorflow / third_party / systemlibs / cython . BUILD <nl> + tensorflow / third_party / systemlibs / build_defs . bzl . tpl <nl> tensorflow / third_party / systemlibs / curl . BUILD <nl> + tensorflow / third_party / systemlibs / cython . BUILD <nl> tensorflow / third_party / systemlibs / double_conversion . BUILD <nl> tensorflow / third_party / systemlibs / gast . BUILD <nl> tensorflow / third_party / systemlibs / gif . BUILD <nl> - tensorflow / third_party / systemlibs / google_cloud_cpp . BUILD <nl> tensorflow / third_party / systemlibs / google_cloud_cpp . google . cloud . bigtable . BUILD <nl> - tensorflow / third_party / systemlibs / grpc . BUILD <nl> + tensorflow / third_party / systemlibs / google_cloud_cpp . BUILD <nl> tensorflow / third_party / systemlibs / googleapis . BUILD <nl> - tensorflow / third_party / systemlibs / lmdb . BUILD <nl> - tensorflow / third_party / systemlibs / nsync . BUILD <nl> tensorflow / third_party / systemlibs / jsoncpp . BUILD <nl> + tensorflow / third_party / systemlibs / grpc . BUILD <nl> + tensorflow / third_party / systemlibs / lmdb . 
BUILD <nl> tensorflow / third_party / systemlibs / opt_einsum . BUILD <nl> + tensorflow / third_party / systemlibs / nsync . BUILD <nl> tensorflow / third_party / systemlibs / pcre . BUILD <nl> - tensorflow / third_party / systemlibs / png . BUILD <nl> tensorflow / third_party / systemlibs / protobuf . BUILD <nl> + tensorflow / third_party / systemlibs / six . BUILD <nl> tensorflow / third_party / systemlibs / protobuf . bzl <nl> + tensorflow / third_party / systemlibs / png . BUILD <nl> tensorflow / third_party / systemlibs / re2 . BUILD <nl> - tensorflow / third_party / systemlibs / six . BUILD <nl> - tensorflow / third_party / systemlibs / snappy . BUILD <nl> tensorflow / third_party / systemlibs / sqlite . BUILD <nl> tensorflow / third_party / systemlibs / swig . BUILD <nl> + tensorflow / third_party / systemlibs / snappy . BUILD <nl> tensorflow / third_party / systemlibs / syslibs_configure . bzl <nl> tensorflow / third_party / systemlibs / termcolor . BUILD <nl> tensorflow / third_party / systemlibs / zlib . BUILD <nl> tensorflow / third_party / tensorrt / BUILD <nl> - tensorflow / third_party / tensorrt / LICENSE <nl> tensorflow / third_party / tensorrt / BUILD . tpl <nl> tensorflow / third_party / tensorrt / build_defs . bzl . tpl <nl> + tensorflow / third_party / tensorrt / LICENSE <nl> tensorflow / third_party / tensorrt / tensorrt / include / tensorrt_config . h . tpl <nl> tensorflow / third_party / tensorrt / tensorrt_configure . bzl <nl> tensorflow / third_party / termcolor . BUILD <nl> tensorflow / third_party / tflite_mobilenet . BUILD <nl> tensorflow / third_party / tflite_mobilenet_float . BUILD <nl> tensorflow / third_party / tflite_mobilenet_quant . BUILD <nl> - tensorflow / third_party / tflite_ovic_testdata . BUILD <nl> - tensorflow / third_party / tflite_smartreply . BUILD <nl> - tensorflow / third_party / toolchains / BUILD <nl> tensorflow / third_party / toolchains / clang6 / BUILD <nl> tensorflow / third_party / toolchains / clang6 / CROSSTOOL . tpl <nl> tensorflow / third_party / toolchains / clang6 / README . md <nl> tensorflow / third_party / toolchains / clang6 / clang . BUILD <nl> tensorflow / third_party / toolchains / clang6 / repo . bzl <nl> - tensorflow / third_party / toolchains / cpus / arm / cc_config . bzl . tpl <nl> - tensorflow / third_party / toolchains / cpus / arm / arm_compiler_configure . bzl <nl> + tensorflow / third_party / toolchains / BUILD <nl> tensorflow / third_party / toolchains / cpus / arm / BUILD <nl> + tensorflow / third_party / toolchains / cpus / arm / arm_compiler_configure . bzl <nl> + tensorflow / third_party / toolchains / cpus / arm / cc_config . bzl . tpl <nl> tensorflow / third_party / toolchains / cpus / py / BUILD <nl> tensorflow / third_party / toolchains / cpus / py3 / BUILD <nl> tensorflow / third_party / toolchains / preconfig / centos6 / cuda10 . 0 - cudnn7 / cuda / BUILD <nl> tensorflow / third_party / toolchains / preconfig / centos6 / cuda10 . 0 - cudnn7 / cuda / build_defs . bzl <nl> - tensorflow / third_party / toolchains / preconfig / centos6 / cuda10 . 1 - cudnn7 / cuda / build_defs . bzl <nl> tensorflow / third_party / toolchains / preconfig / centos6 / cuda10 . 1 - cudnn7 / cuda / BUILD <nl> + tensorflow / third_party / toolchains / preconfig / centos6 / cuda10 . 1 - cudnn7 / cuda / build_defs . bzl <nl> tensorflow / third_party / toolchains / preconfig / centos6 / gcc7 / BUILD <nl> tensorflow / third_party / toolchains / preconfig / centos6 / gcc7 / cc_toolchain_config . 
bzl <nl> tensorflow / third_party / toolchains / preconfig / centos6 / gcc7 / dummy_toolchain . bzl <nl> tensorflow / third_party / toolchains / preconfig / centos6 / py3 / BUILD <nl> tensorflow / third_party / toolchains / preconfig / centos6 / tensorrt5 / BUILD <nl> tensorflow / third_party / toolchains / preconfig / centos6 / tensorrt5 / build_defs . bzl <nl> tensorflow / third_party / toolchains / preconfig / generate / BUILD <nl> - tensorflow / third_party / toolchains / preconfig / generate / containers . bzl <nl> tensorflow / third_party / toolchains / preconfig / generate / archives . bzl <nl> + tensorflow / third_party / toolchains / preconfig / generate / containers . bzl <nl> tensorflow / third_party / toolchains / preconfig / generate / generate . bzl <nl> tensorflow / third_party / toolchains / preconfig / generate / workspace . bzl <nl> tensorflow / third_party / toolchains / preconfig / ubuntu14 . 04 / cuda10 . 0 - cudnn7 / cuda / BUILD <nl> tensorflow / third_party / toolchains / preconfig / ubuntu14 . 04 / gcc - nvcc - cuda10 . 0 / cc_too <nl> tensorflow / third_party / toolchains / preconfig / ubuntu14 . 04 / py3 / BUILD <nl> tensorflow / third_party / toolchains / preconfig / ubuntu14 . 04 / tensorrt5 / BUILD <nl> tensorflow / third_party / toolchains / preconfig / ubuntu14 . 04 / tensorrt5 / build_defs . bzl <nl> - tensorflow / third_party / toolchains / preconfig / ubuntu16 . 04 / clang / BUILD <nl> tensorflow / third_party / toolchains / preconfig / ubuntu16 . 04 / clang / cc_toolchain_config . bzl <nl> tensorflow / third_party / toolchains / preconfig / ubuntu16 . 04 / clang / dummy_toolchain . bzl <nl> + tensorflow / third_party / toolchains / preconfig / ubuntu16 . 04 / clang / BUILD <nl> tensorflow / third_party / toolchains / preconfig / ubuntu16 . 04 / cuda10 . 0 - cudnn7 / cuda / BUILD <nl> tensorflow / third_party / toolchains / preconfig / ubuntu16 . 04 / cuda10 . 0 - cudnn7 / cuda / build_defs . bzl <nl> tensorflow / third_party / toolchains / preconfig / ubuntu16 . 04 / gcc5 - rocm / BUILD <nl> tensorflow / third_party / toolchains / preconfig / ubuntu16 . 04 / gcc7_manylinux2010 - nvcc - <nl> tensorflow / third_party / toolchains / preconfig / ubuntu16 . 04 / py / BUILD <nl> tensorflow / third_party / toolchains / preconfig / ubuntu16 . 04 / py3 / BUILD <nl> tensorflow / third_party / toolchains / preconfig / ubuntu16 . 04 / py3_opt / BUILD <nl> - tensorflow / third_party / toolchains / preconfig / ubuntu16 . 04 / rocm / rocm / build_defs . bzl <nl> tensorflow / third_party / toolchains / preconfig / ubuntu16 . 04 / rocm / rocm / BUILD <nl> + tensorflow / third_party / toolchains / preconfig / ubuntu16 . 04 / rocm / rocm / build_defs . bzl <nl> tensorflow / third_party / toolchains / preconfig / ubuntu16 . 04 / tensorrt5 / BUILD <nl> tensorflow / third_party / toolchains / preconfig / ubuntu16 . 04 / tensorrt5 . 1 / BUILD <nl> tensorflow / third_party / toolchains / preconfig / ubuntu16 . 04 / tensorrt5 . 1 / build_defs . bzl <nl> - tensorflow / third_party / toolchains / preconfig / win_1803 / BUILD <nl> tensorflow / third_party / toolchains / preconfig / win_1803 / bazel_025 / BUILD <nl> + tensorflow / third_party / toolchains / preconfig / win_1803 / BUILD <nl> tensorflow / third_party / toolchains / preconfig / win_1803 / py36 / BUILD <nl> tensorflow / third_party / toolchains / remote / BUILD <nl> - tensorflow / third_party / toolchains / remote / BUILD . tpl <nl> tensorflow / third_party / toolchains / remote / configure . 
bzl <nl> + tensorflow / third_party / toolchains / remote / BUILD . tpl <nl> tensorflow / third_party / toolchains / remote / execution . bzl . tpl <nl> + tensorflow / third_party / tflite_ovic_testdata . BUILD <nl> + tensorflow / third_party / tflite_smartreply . BUILD <nl> tensorflow / third_party / wrapt . BUILD <nl> tensorflow / third_party / zlib . BUILD <nl> tensorflow / tools / ci_build / remote / BUILD <nl> tensorflow / tools / lib_package / libtensorflow_test . c <nl> tensorflow / tools / lib_package / libtensorflow_test . sh <nl> tensorflow / tools / pip_package / BUILD <nl> tensorflow / tools / pip_package / MANIFEST . in <nl> - tensorflow / tools / pip_package / README <nl> - tensorflow / tools / pip_package / check_load_py_test . py <nl> tensorflow / tools / pip_package / build_pip_package . sh <nl> + tensorflow / tools / pip_package / check_load_py_test . py <nl> + tensorflow / tools / pip_package / README <nl> tensorflow / tools / pip_package / pip_smoke_test . py <nl> tensorflow / tools / pip_package / setup . py <nl> tensorflow / tools / pip_package / simple_console . py <nl>
[ XLA ] Add support to limit outstanding async copies in memory space assignment .
tensorflow/tensorflow
a0ee95db2298bc8839ebae17b3be3ab56194c9e0
2019-08-19T23:11:45Z
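The commit above gates each new prefetch or eviction on an overlap count, and the schedule's peak copy count is re-derived from copy-start/copy-done events. Below is a minimal self-contained sketch of both checks. It is illustrative only: `AsyncCopyLimiter` and `MaxOutstanding` are hypothetical names, and a flat vector stands in for XLA's `BufferIntervalTree` plus its separate per-buffer pending list.

```cpp
// Illustrative sketch, not XLA code.
#include <algorithm>
#include <cstdint>
#include <utility>
#include <vector>

class AsyncCopyLimiter {
 public:
  explicit AsyncCopyLimiter(int64_t max_outstanding)
      : max_outstanding_(max_outstanding) {}

  // Analogous to ViolatesMaximumOutstandingAsyncCopies: true if scheduling
  // one more copy over [start, end] would exceed the limit (-1 = unlimited).
  bool WouldViolate(int64_t start, int64_t end) const {
    if (max_outstanding_ < 0) return false;
    int64_t overlapping = 0;
    for (const auto& c : copies_) {
      // Same overlap test as the diff: c.second > start && c.first < end.
      if (c.second > start && c.first < end) ++overlapping;
    }
    return overlapping + 1 > max_outstanding_;
  }

  void Commit(int64_t start, int64_t end) { copies_.emplace_back(start, end); }

 private:
  int64_t max_outstanding_;
  std::vector<std::pair<int64_t, int64_t>> copies_;  // (start, end) times
};

// Analogous to CountMaximumOutstandingAsyncCopies: walk the schedule with
// +1 at each copy-start and -1 at each copy-done, tracking the peak.
int64_t MaxOutstanding(const std::vector<int>& events) {
  int64_t current = 0, peak = 0;
  for (int e : events) {
    current += e;
    peak = std::max(peak, current);
  }
  return peak;
}
```

The eviction fallback in the diff follows directly from such a check: if the buffer's full live range trips the limit, single-timestep sub-intervals are tried in order, and only when none fits does the pass bail and keep the buffer in default memory.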
deleted file mode 100644 <nl> index e494a301f8a . . 00000000000 <nl> Binary files a / Documentation / Books / Cookbook / Arangodb_Logo . png and / dev / null differ <nl> deleted file mode 100644 <nl> index 333ffcf73eb . . 00000000000 <nl> mmm a / Documentation / Books / Cookbook / SUMMARY . md <nl> ppp / dev / null <nl> <nl> - # Summary <nl> - * [ Replicating Data ] ( ReplicatingData . md ) <nl> - * [ Accessing Shapes Data ] ( AccessingShapesData . md ) <nl> - * [ Query Documents ] ( QueryDocuments . md ) <nl> - * [ Fulldepth ] ( Fulldepth . md ) <nl>
Removed cookbook from documentation
arangodb/arangodb
751d6851eb77907306777b0acda76cf69f5c90b7
2014-12-01T12:37:48Z
mmm a / src / js / i18n . js <nl> ppp b / src / js / i18n . js <nl> var ArrayIndexOf ; <nl> var ArrayJoin ; <nl> var ArrayPush ; <nl> var FLAG_intl_extra ; <nl> - var GlobalBoolean = global . Boolean ; <nl> var GlobalDate = global . Date ; <nl> var GlobalNumber = global . Number ; <nl> var GlobalRegExp = global . RegExp ; <nl> var InstallGetter = utils . InstallGetter ; <nl> var InternalArray = utils . InternalArray ; <nl> var InternalRegExpMatch ; <nl> var InternalRegExpReplace <nl> - var IsFinite ; <nl> var IsNaN ; <nl> var MakeError ; <nl> var MakeRangeError ; <nl> utils . Import ( function ( from ) { <nl> ArrayIndexOf = from . ArrayIndexOf ; <nl> ArrayJoin = from . ArrayJoin ; <nl> ArrayPush = from . ArrayPush ; <nl> - IsFinite = from . IsFinite ; <nl> IsNaN = from . IsNaN ; <nl> MakeError = from . MakeError ; <nl> MakeRangeError = from . MakeRangeError ; <nl> function supportedLocalesOf ( service , locales , options ) { <nl> <nl> var matcher = options . localeMatcher ; <nl> if ( ! IS_UNDEFINED ( matcher ) ) { <nl> - matcher = GlobalString ( matcher ) ; <nl> + matcher = TO_STRING ( matcher ) ; <nl> if ( matcher ! = = ' lookup ' & & matcher ! = = ' best fit ' ) { <nl> throw MakeRangeError ( kLocaleMatcher , matcher ) ; <nl> } <nl> function getGetOption ( options , caller ) { <nl> var value = options [ property ] ; <nl> switch ( type ) { <nl> case ' boolean ' : <nl> - value = GlobalBoolean ( value ) ; <nl> + value = TO_BOOLEAN ( value ) ; <nl> break ; <nl> case ' string ' : <nl> - value = GlobalString ( value ) ; <nl> + value = TO_STRING ( value ) ; <nl> break ; <nl> case ' number ' : <nl> - value = GlobalNumber ( value ) ; <nl> + value = TO_NUMBER ( value ) ; <nl> break ; <nl> default : <nl> throw MakeError ( kWrongValueType ) ; <nl> function setOptions ( inOptions , extensionMap , keyValues , getOption , outOptions ) { <nl> var extension = ' ' ; <nl> <nl> var updateExtension = function updateExtension ( key , value ) { <nl> - return ' - ' + key + ' - ' + GlobalString ( value ) ; <nl> + return ' - ' + key + ' - ' + TO_STRING ( value ) ; <nl> } <nl> <nl> var updateProperty = function updateProperty ( property , type , value ) { <nl> function canonicalizeLanguageTag ( localeID ) { <nl> return localeID ; <nl> } <nl> <nl> - var localeString = GlobalString ( localeID ) ; <nl> + var localeString = TO_STRING ( localeID ) ; <nl> <nl> if ( isValidLanguageTag ( localeString ) = = = false ) { <nl> throw MakeRangeError ( kInvalidLanguageTag , localeString ) ; <nl> InstallFunction ( Intl . Collator , ' supportedLocalesOf ' , function ( locales ) { <nl> * / <nl> function compare ( collator , x , y ) { <nl> return % InternalCompare ( % GetImplFromInitializedIntlObject ( collator ) , <nl> - GlobalString ( x ) , GlobalString ( y ) ) ; <nl> + TO_STRING ( x ) , TO_STRING ( y ) ) ; <nl> } ; <nl> <nl> <nl> function isWellFormedCurrencyCode ( currency ) { <nl> function getNumberOption ( options , property , min , max , fallback ) { <nl> var value = options [ property ] ; <nl> if ( ! 
IS_UNDEFINED ( value ) ) { <nl> - value = GlobalNumber ( value ) ; <nl> - if ( IsNaN ( value ) | | value < min | | value > max ) { <nl> + value = TO_NUMBER ( value ) ; <nl> + if ( NUMBER_IS_NAN ( value ) | | value < min | | value > max ) { <nl> throw MakeRangeError ( kPropertyValueOutOfRange , property ) ; <nl> } <nl> return % math_floor ( value ) ; <nl> function formatNumber ( formatter , value ) { <nl> * / <nl> function IntlParseNumber ( formatter , value ) { <nl> return % InternalNumberParse ( % GetImplFromInitializedIntlObject ( formatter ) , <nl> - GlobalString ( value ) ) ; <nl> + TO_STRING ( value ) ) ; <nl> } <nl> <nl> AddBoundMethod ( Intl . NumberFormat , ' format ' , formatNumber , 1 , ' numberformat ' ) ; <nl> function formatDate ( formatter , dateValue ) { <nl> dateMs = TO_NUMBER ( dateValue ) ; <nl> } <nl> <nl> - if ( ! IsFinite ( dateMs ) ) throw MakeRangeError ( kDateRange ) ; <nl> + if ( ! NUMBER_IS_FINITE ( dateMs ) ) throw MakeRangeError ( kDateRange ) ; <nl> <nl> return % InternalDateFormat ( % GetImplFromInitializedIntlObject ( formatter ) , <nl> new GlobalDate ( dateMs ) ) ; <nl> function formatDate ( formatter , dateValue ) { <nl> * / <nl> function IntlParseDate ( formatter , value ) { <nl> return % InternalDateParse ( % GetImplFromInitializedIntlObject ( formatter ) , <nl> - GlobalString ( value ) ) ; <nl> + TO_STRING ( value ) ) ; <nl> } <nl> <nl> <nl> InstallFunction ( Intl . v8BreakIterator , ' supportedLocalesOf ' , <nl> * / <nl> function adoptText ( iterator , text ) { <nl> % BreakIteratorAdoptText ( % GetImplFromInitializedIntlObject ( iterator ) , <nl> - GlobalString ( text ) ) ; <nl> + TO_STRING ( text ) ) ; <nl> } <nl> <nl> <nl> mmm a / src / js / v8natives . js <nl> ppp b / src / js / v8natives . js <nl> function GetIterator ( obj , method ) { <nl> utils . Export ( function ( to ) { <nl> to . GetIterator = GetIterator ; <nl> to . GetMethod = GetMethod ; <nl> - to . IsFinite = GlobalIsFinite ; <nl> to . IsNaN = GlobalIsNaN ; <nl> to . NumberIsNaN = NumberIsNaN ; <nl> to . NumberIsInteger = NumberIsInteger ; <nl>
[ i18n ] use intrinsics for conversion instead of wrappers .
v8/v8
18ba2d1c9127e85df779cc73137148700fa7b92b
2016-05-30T20:25:23Z
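The v8 patch above replaces calls through captured global constructors (`GlobalString ( x )`, `GlobalNumber ( x )`, `GlobalBoolean ( x )`, plus `IsFinite`/`IsNaN`) with the engine's conversion macros (`TO_STRING`, `TO_NUMBER`, `TO_BOOLEAN`, `NUMBER_IS_FINITE`, `NUMBER_IS_NAN`), which expand in place to the spec conversions instead of going through a JS-level call. A loose C++ analogy of the indirection being removed is sketched below; all names are invented and none of this is v8 machinery.

```cpp
// Loose analogy only: a call through an indirect binding vs. a direct call.
#include <string>

using ToString = std::string (*)(double);

// Indirect: resolved through a pointer at run time, generally not inlinable
// (akin to the captured global wrapper).
ToString bound_to_string = [](double v) -> std::string {
  return std::to_string(v);
};

// Direct: the compiler sees the target and can expand it in place
// (akin to a macro expanding to the intrinsic).
inline std::string direct_to_string(double v) { return std::to_string(v); }

std::string via_wrapper(double v) { return bound_to_string(v); }
std::string via_macro_style(double v) { return direct_to_string(v); }
```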
mmm a / ChangeLog <nl> ppp b / ChangeLog <nl> <nl> + 2008 - 05 - 21 Tatsuhiro Tsujikawa < tujikawa at rednoah dot com > <nl> + <nl> + * src / ChunkChecksum . h ( getChecksum ) : Return const reference . <nl> + <nl> 2008 - 05 - 20 Tatsuhiro Tsujikawa < tujikawa at rednoah dot com > <nl> <nl> Fixed compile error when configured with - - disable - metalink . <nl> mmm a / src / ChunkChecksum . h <nl> ppp b / src / ChunkChecksum . h <nl> class ChunkChecksum { <nl> return _checksums . size ( ) ; <nl> } <nl> <nl> - std : : string getChecksum ( size_t index ) const <nl> + const std : : string & getChecksum ( size_t index ) const <nl> { <nl> if ( index < _checksums . size ( ) ) { <nl> return _checksums [ index ] ; <nl>
2008 - 05 - 21 Tatsuhiro Tsujikawa < tujikawa at rednoah dot com >
aria2/aria2
90846bdafe3e67aed9865f1642dc752b0cfc9060
2008-05-20T15:22:53Z
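The one-line aria2 change above returns the checksum by `const` reference instead of by value, saving one string copy per `getChecksum` call. A minimal sketch of the trade-off follows; the class is illustrative rather than aria2's, and the real getter's out-of-range branch falls outside the visible hunk.

```cpp
#include <cstddef>
#include <string>
#include <vector>

// Illustrative only -- not aria2's ChunkChecksum.
class Checksums {
 public:
  // By value: materializes a copy of the string on every call.
  std::string getCopy(size_t i) const { return sums_.at(i); }

  // By const reference: no copy, but the caller must not keep the
  // reference past the lifetime (or mutation) of this object.
  const std::string& get(size_t i) const { return sums_.at(i); }

 private:
  std::vector<std::string> sums_;
};
```

The usual caveat applies: a caller-held reference dangles once the owning object is destroyed or its vector reallocates, which is why by-reference getters suit long-lived owners such as a checksum holder.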
mmm a / db / instance . cpp <nl> ppp b / db / instance . cpp <nl> namespace mongo { <nl> } <nl> } <nl> pass + + ; <nl> - DEV <nl> - sleepmillis ( 20 ) ; <nl> + if ( debug ) <nl> + sleepmillis ( 20 ) ; <nl> else <nl> sleepmillis ( 2 ) ; <nl> continue ; <nl>
cleaner
mongodb/mongo
8593d9caadea8d0c39d609787c34c0872cdd1d45
2011-10-04T20:49:23Z
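The mongod change above swaps the `DEV` statement-prefix macro for an explicit `if ( debug )`, so the trailing `else` visibly pairs with a real `if`. `DEV`'s actual definition is not shown in this hunk; the sketch below assumes a plausible `if`-style macro purely to demonstrate why the original pattern is fragile.

```cpp
#include <iostream>

constexpr bool kDebugBuild = false;  // stand-in for mongod's debug flag
#define DEV if (kDebugBuild)         // hypothetical definition, for illustration

void pauseBefore() {
  // Compiles only because DEV happens to expand to an `if`; a bare `else`
  // hanging off a macro is easy to misread and breaks if DEV ever changes.
  DEV
    std::cout << "sleepmillis(20)\n";
  else
    std::cout << "sleepmillis(2)\n";
}

void pauseAfter(bool debug) {
  if (debug)  // the committed version: the control flow is explicit
    std::cout << "sleepmillis(20)\n";
  else
    std::cout << "sleepmillis(2)\n";
}

int main() {
  pauseBefore();
  pauseAfter(false);
}
```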
mmm a / build / cocos2d_libs . xcodeproj / project . pbxproj . REMOVED . git - id <nl> ppp b / build / cocos2d_libs . xcodeproj / project . pbxproj . REMOVED . git - id <nl> @ @ - 1 + 1 @ @ <nl> - f0d0e2815c4a581e7a1d8895efca2c2e8bc71679 <nl> \ No newline at end of file <nl> + ce0e937f5399e1b52ffc6084295bc18ceb220195 <nl> \ No newline at end of file <nl> mmm a / cocos / editor - support / cocostudio / CCComAttribute . cpp <nl> ppp b / cocos / editor - support / cocostudio / CCComAttribute . cpp <nl> THE SOFTWARE . <nl> * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * / <nl> <nl> # include " cocostudio / CCComAttribute . h " <nl> + <nl> using namespace cocos2d ; <nl> <nl> namespace cocostudio { <nl> <nl> + IMPLEMENT_CLASS_COMPONENT_INFO ( ComAttribute ) <nl> ComAttribute : : ComAttribute ( void ) <nl> { <nl> _name = " CCComAttribute " ; <nl> ComAttribute * ComAttribute : : create ( void ) <nl> return pRet ; <nl> } <nl> <nl> + bool ComAttribute : : serialize ( void * r ) <nl> + { <nl> + bool bRet = false ; <nl> + do <nl> + { <nl> + CC_BREAK_IF ( r = = nullptr ) ; <nl> + rapidjson : : Value * v = ( rapidjson : : Value * ) r ; <nl> + const char * className = DICTOOL - > getStringValue_json ( * v , " classname " ) ; <nl> + CC_BREAK_IF ( className = = nullptr ) ; <nl> + const char * comName = DICTOOL - > getStringValue_json ( * v , " name " ) ; <nl> + if ( comName ! = nullptr ) <nl> + { <nl> + setName ( comName ) ; <nl> + } <nl> + else <nl> + { <nl> + setName ( className ) ; <nl> + } <nl> + const rapidjson : : Value & fileData = DICTOOL - > getSubDictionary_json ( * v , " fileData " ) ; <nl> + CC_BREAK_IF ( ! DICTOOL - > checkObjectExist_json ( fileData ) ) ; <nl> + const char * file = DICTOOL - > getStringValue_json ( fileData , " path " ) ; <nl> + CC_BREAK_IF ( file = = nullptr ) ; <nl> + std : : string filePath ; <nl> + if ( file ! = nullptr ) <nl> + { <nl> + filePath . assign ( cocos2d : : CCFileUtils : : getInstance ( ) - > fullPathForFilename ( file ) ) ; <nl> + } <nl> + int resType = DICTOOL - > getIntValue_json ( fileData , " resourceType " , - 1 ) ; <nl> + CC_BREAK_IF ( resType ! = 0 ) ; <nl> + parse ( filePath . c_str ( ) ) ; <nl> + bRet = true ; <nl> + } while ( 0 ) ; <nl> + <nl> + return bRet ; <nl> + } <nl> + <nl> bool ComAttribute : : parse ( const std : : string & jsonFile ) <nl> { <nl> bool ret = false ; <nl> mmm a / cocos / editor - support / cocostudio / CCComAttribute . h <nl> ppp b / cocos / editor - support / cocostudio / CCComAttribute . h <nl> THE SOFTWARE . <nl> # ifndef __CC_EXTENTIONS_CCCOMATTRIBUTE_H__ <nl> # define __CC_EXTENTIONS_CCCOMATTRIBUTE_H__ <nl> <nl> - # include " cocos2d . h " <nl> - # include < string > <nl> - # include " cocostudio / DictionaryHelper . h " <nl> + # include " CCComBase . h " <nl> <nl> namespace cocostudio { <nl> <nl> class ComAttribute : public cocos2d : : Component <nl> { <nl> + DECLARE_CLASS_COMPONENT_INFO <nl> protected : <nl> / * * <nl> * @ js ctor <nl> class ComAttribute : public cocos2d : : Component <nl> public : <nl> virtual bool init ( ) ; <nl> static ComAttribute * create ( void ) ; <nl> + virtual bool serialize ( void * r ) ; <nl> <nl> void setInt ( const std : : string & key , int value ) ; <nl> void setFloat ( const std : : string & key , float value ) ; <nl> mmm a / cocos / editor - support / cocostudio / CCComAudio . cpp <nl> ppp b / cocos / editor - support / cocostudio / CCComAudio . cpp <nl> THE SOFTWARE . 
<nl> <nl> namespace cocostudio { <nl> <nl> + IMPLEMENT_CLASS_COMPONENT_INFO ( ComAudio ) <nl> ComAudio : : ComAudio ( void ) <nl> : _filePath ( " " ) <nl> , _loop ( false ) <nl> void ComAudio : : setEnabled ( bool b ) <nl> _enabled = b ; <nl> } <nl> <nl> + <nl> + bool ComAudio : : serialize ( void * r ) <nl> + { <nl> + bool bRet = false ; <nl> + do <nl> + { <nl> + CC_BREAK_IF ( r = = nullptr ) ; <nl> + rapidjson : : Value * v = ( rapidjson : : Value * ) r ; <nl> + const char * className = DICTOOL - > getStringValue_json ( * v , " classname " ) ; <nl> + CC_BREAK_IF ( className = = nullptr ) ; <nl> + const char * comName = DICTOOL - > getStringValue_json ( * v , " name " ) ; <nl> + if ( comName ! = nullptr ) <nl> + { <nl> + setName ( comName ) ; <nl> + } <nl> + else <nl> + { <nl> + setName ( className ) ; <nl> + } <nl> + const rapidjson : : Value & fileData = DICTOOL - > getSubDictionary_json ( * v , " fileData " ) ; <nl> + CC_BREAK_IF ( ! DICTOOL - > checkObjectExist_json ( fileData ) ) ; <nl> + const char * file = DICTOOL - > getStringValue_json ( fileData , " path " ) ; <nl> + CC_BREAK_IF ( file = = nullptr ) ; <nl> + std : : string filePath ; <nl> + if ( file ! = nullptr ) <nl> + { <nl> + filePath . assign ( cocos2d : : CCFileUtils : : getInstance ( ) - > fullPathForFilename ( file ) ) ; <nl> + } <nl> + int resType = DICTOOL - > getIntValue_json ( fileData , " resourceType " , - 1 ) ; <nl> + CC_BREAK_IF ( resType ! = 0 ) ; <nl> + if ( strcmp ( className , " CCBackgroundAudio " ) = = 0 ) <nl> + { <nl> + preloadBackgroundMusic ( filePath . c_str ( ) ) ; <nl> + bool loop = DICTOOL - > getIntValue_json ( * v , " loop " ) ! = 0 ? true : false ; <nl> + setLoop ( loop ) ; <nl> + playBackgroundMusic ( filePath . c_str ( ) , loop ) ; <nl> + } <nl> + else if ( strcmp ( className , " CCComAudio " ) = = 0 ) <nl> + { <nl> + preloadEffect ( filePath . c_str ( ) ) ; <nl> + } <nl> + else <nl> + { <nl> + CC_BREAK_IF ( true ) ; <nl> + } <nl> + bRet = true ; <nl> + } while ( 0 ) ; <nl> + <nl> + return bRet ; <nl> + } <nl> + <nl> ComAudio * ComAudio : : create ( void ) <nl> { <nl> ComAudio * pRet = new ComAudio ( ) ; <nl> void ComAudio : : preloadBackgroundMusic ( const char * pszFilePath ) <nl> setLoop ( false ) ; <nl> } <nl> <nl> - void ComAudio : : playBackgroundMusic ( const char * pszFilePath , bool bLoop ) <nl> + void ComAudio : : playBackgroundMusic ( const char * pszFilePath , bool loop ) <nl> { <nl> - CocosDenshion : : SimpleAudioEngine : : getInstance ( ) - > playBackgroundMusic ( pszFilePath , bLoop ) ; <nl> + CocosDenshion : : SimpleAudioEngine : : getInstance ( ) - > playBackgroundMusic ( pszFilePath , loop ) ; <nl> <nl> } <nl> <nl> void ComAudio : : setEffectsVolume ( float volume ) <nl> CocosDenshion : : SimpleAudioEngine : : getInstance ( ) - > setEffectsVolume ( volume ) ; <nl> } <nl> <nl> - unsigned int ComAudio : : playEffect ( const char * pszFilePath , bool bLoop ) <nl> + unsigned int ComAudio : : playEffect ( const char * pszFilePath , bool loop ) <nl> { <nl> - return CocosDenshion : : SimpleAudioEngine : : getInstance ( ) - > playEffect ( pszFilePath , bLoop ) ; <nl> + return CocosDenshion : : SimpleAudioEngine : : getInstance ( ) - > playEffect ( pszFilePath , loop ) ; <nl> } <nl> <nl> unsigned int ComAudio : : playEffect ( const char * pszFilePath ) <nl> void ComAudio : : setFile ( const char * pszFilePath ) <nl> _filePath . 
assign ( pszFilePath ) ; <nl> } <nl> <nl> - void ComAudio : : setLoop ( bool bLoop ) <nl> + void ComAudio : : setLoop ( bool loop ) <nl> { <nl> - _loop = bLoop ; <nl> + _loop = loop ; <nl> } <nl> <nl> const char * ComAudio : : getFile ( ) <nl> mmm a / cocos / editor - support / cocostudio / CCComAudio . h <nl> ppp b / cocos / editor - support / cocostudio / CCComAudio . h <nl> THE SOFTWARE . <nl> # ifndef __CC_EXTENTIONS_CCCOMAUDIO_H__ <nl> # define __CC_EXTENTIONS_CCCOMAUDIO_H__ <nl> <nl> - # include " cocos2d . h " <nl> + # include " CCComBase . h " <nl> <nl> namespace cocostudio { <nl> <nl> class ComAudio : public cocos2d : : Component <nl> { <nl> + <nl> + DECLARE_CLASS_COMPONENT_INFO <nl> protected : <nl> / * * <nl> * @ js ctor <nl> class ComAudio : public cocos2d : : Component <nl> virtual void onExit ( ) ; <nl> virtual bool isEnabled ( ) const ; <nl> virtual void setEnabled ( bool b ) ; <nl> + virtual bool serialize ( void * r ) ; <nl> <nl> static ComAudio * create ( void ) ; <nl> <nl> new file mode 100644 <nl> index 000000000000 . . 82c4bd11a8e3 <nl> mmm / dev / null <nl> ppp b / cocos / editor - support / cocostudio / CCComBase . h <nl> <nl> + / * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * <nl> + Copyright ( c ) 2013 cocos2d - x . org <nl> + <nl> + http : / / www . cocos2d - x . org <nl> + <nl> + Permission is hereby granted , free of charge , to any person obtaining a copy <nl> + of this software and associated documentation files ( the " Software " ) , to deal <nl> + in the Software without restriction , including without limitation the rights <nl> + to use , copy , modify , merge , publish , distribute , sublicense , and / or sell <nl> + copies of the Software , and to permit persons to whom the Software is <nl> + furnished to do so , subject to the following conditions : <nl> + <nl> + The above copyright notice and this permission notice shall be included in <nl> + all copies or substantial portions of the Software . <nl> + <nl> + THE SOFTWARE IS PROVIDED " AS IS " , WITHOUT WARRANTY OF ANY KIND , EXPRESS OR <nl> + IMPLIED , INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY , <nl> + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT . IN NO EVENT SHALL THE <nl> + AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM , DAMAGES OR OTHER <nl> + LIABILITY , WHETHER IN AN ACTION OF CONTRACT , TORT OR OTHERWISE , ARISING FROM , <nl> + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN <nl> + THE SOFTWARE . <nl> + * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * / <nl> + <nl> + # ifndef __CC_EXTENTIONS_CCCOMBASE_H__ <nl> + # define __CC_EXTENTIONS_CCCOMBASE_H__ <nl> + <nl> + # include " cocos2d . h " <nl> + # include " ObjectFactory . h " <nl> + # include " DictionaryHelper . 
h " <nl> + # include < string > <nl> + <nl> + <nl> + # define DECLARE_CLASS_COMPONENT_INFO \ <nl> + public : \ <nl> + static cocostudio : : ObjectFactory : : TInfo Type ; \ <nl> + static cocos2d : : Object * createInstance ( void ) ; \ <nl> + <nl> + # define IMPLEMENT_CLASS_COMPONENT_INFO ( className ) \ <nl> + cocos2d : : Object * className : : createInstance ( void ) \ <nl> + { \ <nl> + return className : : create ( ) ; \ <nl> + } \ <nl> + cocostudio : : ObjectFactory : : TInfo className : : Type ( # className , & className : : createInstance ) ; \ <nl> + <nl> + # define CREATE_CLASS_COMPONENT_INFO ( className ) \ <nl> + cocostudio : : ObjectFactory : : TInfo ( # className , & className : : createInstance ) <nl> + <nl> + <nl> + # endif <nl> mmm a / cocos / editor - support / cocostudio / CCComController . cpp <nl> ppp b / cocos / editor - support / cocostudio / CCComController . cpp <nl> THE SOFTWARE . <nl> <nl> namespace cocostudio { <nl> <nl> + IMPLEMENT_CLASS_COMPONENT_INFO ( ComController ) <nl> ComController : : ComController ( void ) <nl> { <nl> _name = " CCComController " ; <nl> mmm a / cocos / editor - support / cocostudio / CCComController . h <nl> ppp b / cocos / editor - support / cocostudio / CCComController . h <nl> THE SOFTWARE . <nl> # ifndef __CC_EXTENTIONS_CCCOMCONTROLLER_H__ <nl> # define __CC_EXTENTIONS_CCCOMCONTROLLER_H__ <nl> <nl> - # include " cocos2d . h " <nl> + # include " CCComBase . h " <nl> # include " cocostudio / CCInputDelegate . h " <nl> <nl> namespace cocostudio { <nl> <nl> class ComController : public cocos2d : : Component , public InputDelegate <nl> { <nl> + <nl> + DECLARE_CLASS_COMPONENT_INFO <nl> public : <nl> / * * <nl> * @ js ctor <nl> mmm a / cocos / editor - support / cocostudio / CCComRender . cpp <nl> ppp b / cocos / editor - support / cocostudio / CCComRender . cpp <nl> THE SOFTWARE . <nl> * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * / <nl> <nl> # include " cocostudio / CCComRender . h " <nl> + # include " cocostudio / CocoStudio . h " <nl> + <nl> + using namespace cocos2d ; <nl> <nl> namespace cocostudio { <nl> <nl> + IMPLEMENT_CLASS_COMPONENT_INFO ( ComRender ) <nl> ComRender : : ComRender ( void ) <nl> : _render ( nullptr ) <nl> { <nl> void ComRender : : setNode ( cocos2d : : Node * node ) <nl> _render = node ; <nl> } <nl> <nl> - ComRender * ComRender : : create ( cocos2d : : Node * node , const char * comName ) <nl> + <nl> + bool ComRender : : serialize ( void * r ) <nl> { <nl> - ComRender * ret = new ComRender ( node , comName ) ; <nl> + bool bRet = false ; <nl> + do <nl> + { <nl> + CC_BREAK_IF ( r = = nullptr ) ; <nl> + rapidjson : : Value * v = ( rapidjson : : Value * ) r ; <nl> + const char * className = DICTOOL - > getStringValue_json ( * v , " classname " ) ; <nl> + CC_BREAK_IF ( className = = nullptr ) ; <nl> + const char * comName = DICTOOL - > getStringValue_json ( * v , " name " ) ; <nl> + if ( comName ! = nullptr ) <nl> + { <nl> + setName ( comName ) ; <nl> + } <nl> + else <nl> + { <nl> + setName ( className ) ; <nl> + } <nl> + const rapidjson : : Value & fileData = DICTOOL - > getSubDictionary_json ( * v , " fileData " ) ; <nl> + CC_BREAK_IF ( ! 
DICTOOL - > checkObjectExist_json ( fileData ) ) ; <nl> + const char * file = DICTOOL - > getStringValue_json ( fileData , " path " ) ; <nl> + const char * plist = DICTOOL - > getStringValue_json ( fileData , " plistFile " ) ; <nl> + CC_BREAK_IF ( file = = nullptr & & plist = = nullptr ) ; <nl> + std : : string filePath ; <nl> + std : : string plistPath ; <nl> + if ( file ! = nullptr ) <nl> + { <nl> + filePath . assign ( cocos2d : : CCFileUtils : : getInstance ( ) - > fullPathForFilename ( file ) ) ; <nl> + } <nl> + if ( plist ! = nullptr ) <nl> + { <nl> + plistPath . assign ( cocos2d : : CCFileUtils : : getInstance ( ) - > fullPathForFilename ( plist ) ) ; <nl> + } <nl> + int resType = DICTOOL - > getIntValue_json ( fileData , " resourceType " , - 1 ) ; <nl> + if ( resType = = 0 ) <nl> + { <nl> + if ( strcmp ( className , " CCSprite " ) = = 0 & & filePath . find ( " . png " ) ! = filePath . npos ) <nl> + { <nl> + _render = Sprite : : create ( filePath . c_str ( ) ) ; <nl> + } <nl> + else if ( strcmp ( className , " CCTMXTiledMap " ) = = 0 & & filePath . find ( " . tmx " ) ! = filePath . npos ) <nl> + { <nl> + _render = TMXTiledMap : : create ( filePath . c_str ( ) ) ; <nl> + } <nl> + else if ( strcmp ( className , " CCParticleSystemQuad " ) = = 0 & & filePath . find ( " . plist " ) ! = filePath . npos ) <nl> + { <nl> + _render = ParticleSystemQuad : : create ( filePath . c_str ( ) ) ; <nl> + _render - > setPosition ( Point ( 0 . 0f , 0 . 0f ) ) ; <nl> + } <nl> + else if ( strcmp ( className , " CCArmature " ) = = 0 ) <nl> + { <nl> + std : : string reDir = filePath ; <nl> + std : : string file_path = " " ; <nl> + size_t pos = reDir . find_last_of ( ' / ' ) ; <nl> + if ( pos ! = std : : string : : npos ) <nl> + { <nl> + file_path = reDir . substr ( 0 , pos + 1 ) ; <nl> + } <nl> + rapidjson : : Document doc ; <nl> + if ( ! readJson ( filePath . c_str ( ) , doc ) ) <nl> + { <nl> + log ( " read json file [ % s ] error ! \ n " , filePath . c_str ( ) ) ; <nl> + continue ; <nl> + } <nl> + const rapidjson : : Value & subData = DICTOOL - > getDictionaryFromArray_json ( doc , " armature_data " , 0 ) ; <nl> + const char * name = DICTOOL - > getStringValue_json ( subData , " name " ) ; <nl> + ArmatureDataManager : : getInstance ( ) - > addArmatureFileInfo ( filePath . c_str ( ) ) ; <nl> + Armature * pAr = Armature : : create ( name ) ; <nl> + _render = pAr ; <nl> + const char * actionName = DICTOOL - > getStringValue_json ( * v , " selectedactionname " ) ; <nl> + if ( actionName ! = nullptr & & pAr - > getAnimation ( ) ! = nullptr ) <nl> + { <nl> + pAr - > getAnimation ( ) - > play ( actionName ) ; <nl> + } <nl> + } <nl> + else if ( strcmp ( className , " GUIComponent " ) = = 0 ) <nl> + { <nl> + cocos2d : : gui : : Widget * widget = GUIReader : : getInstance ( ) - > widgetFromJsonFile ( filePath . c_str ( ) ) ; <nl> + _render = widget ; <nl> + } <nl> + else <nl> + { <nl> + CC_BREAK_IF ( true ) ; <nl> + } <nl> + } <nl> + else if ( resType = = 1 ) <nl> + { <nl> + if ( strcmp ( className , " CCSprite " ) = = 0 ) <nl> + { <nl> + std : : string strPngFile = plistPath ; <nl> + std : : string : : size_type pos = strPngFile . find ( " . plist " ) ; <nl> + if ( pos = = strPngFile . npos ) <nl> + { <nl> + continue ; <nl> + } <nl> + strPngFile . replace ( pos , strPngFile . length ( ) , " . png " ) ; <nl> + SpriteFrameCache : : getInstance ( ) - > addSpriteFramesWithFile ( plistPath . c_str ( ) , strPngFile . c_str ( ) ) ; <nl> + _render = Sprite : : createWithSpriteFrameName ( filePath . 
c_str ( ) ) ; <nl> + } <nl> + else <nl> + { <nl> + CC_BREAK_IF ( true ) ; <nl> + } <nl> + } <nl> + else <nl> + { <nl> + CC_BREAK_IF ( true ) ; <nl> + } <nl> + bRet = true ; <nl> + } while ( 0 ) ; <nl> + <nl> + return bRet ; <nl> + } <nl> + <nl> + <nl> + ComRender * ComRender : : create ( void ) <nl> + { <nl> + ComRender * ret = new ComRender ( ) ; <nl> if ( ret ! = nullptr & & ret - > init ( ) ) <nl> { <nl> ret - > autorelease ( ) ; <nl> ComRender * ComRender : : create ( cocos2d : : Node * node , const char * comName ) <nl> return ret ; <nl> } <nl> <nl> + bool ComRender : : readJson ( const std : : string & fileName , rapidjson : : Document & doc ) <nl> + { <nl> + bool ret = false ; <nl> + do { <nl> + std : : string contentStr = FileUtils : : getInstance ( ) - > getStringFromFile ( fileName ) ; <nl> + doc . Parse < 0 > ( contentStr . c_str ( ) ) ; <nl> + CC_BREAK_IF ( doc . HasParseError ( ) ) ; <nl> + ret = true ; <nl> + } while ( 0 ) ; <nl> + return ret ; <nl> + } <nl> + <nl> } <nl> mmm a / cocos / editor - support / cocostudio / CCComRender . h <nl> ppp b / cocos / editor - support / cocostudio / CCComRender . h <nl> OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN <nl> THE SOFTWARE . <nl> * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * / <nl> <nl> - # ifndef __CC_EXTENTIONS_CCCOMNODE_H__ <nl> - # define __CC_EXTENTIONS_CCCOMNODE_H__ <nl> + # ifndef __CC_EXTENTIONS_CCCOMRENDER_H__ <nl> + # define __CC_EXTENTIONS_CCCOMRENDER_H__ <nl> <nl> - # include " cocos2d . h " <nl> + # include " CCComBase . h " <nl> <nl> namespace cocostudio { <nl> <nl> class ComRender : public cocos2d : : Component <nl> { <nl> + DECLARE_CLASS_COMPONENT_INFO <nl> protected : <nl> / * * <nl> * @ js ctor <nl> class ComRender : public cocos2d : : Component <nl> * @ lua NA <nl> * / <nl> virtual void onExit ( ) ; <nl> - cocos2d : : Node * getNode ( ) ; <nl> - void setNode ( cocos2d : : Node * node ) ; <nl> + virtual bool serialize ( void * r ) ; <nl> + virtual cocos2d : : Node * getNode ( ) ; <nl> + virtual void setNode ( cocos2d : : Node * node ) ; <nl> <nl> - static ComRender * create ( cocos2d : : Node * node , const char * comName ) ; <nl> + static ComRender * create ( void ) ; <nl> + private : <nl> + bool readJson ( const std : : string & fileName , rapidjson : : Document & doc ) ; <nl> <nl> private : <nl> cocos2d : : Node * _render ; <nl> mmm a / cocos / editor - support / cocostudio / CCSSceneReader . cpp <nl> ppp b / cocos / editor - support / cocostudio / CCSSceneReader . cpp <nl> <nl> # include " cocostudio / CocoStudio . h " <nl> # include " gui / CocosGUI . h " <nl> # include " SimpleAudioEngine . h " <nl> + # include " ObjectFactory . 
h " <nl> <nl> using namespace cocos2d ; <nl> using namespace gui ; <nl> SceneReader : : SceneReader ( ) <nl> : _fnSelector ( nullptr ) <nl> , _node ( nullptr ) <nl> { <nl> + ObjectFactory : : getInstance ( ) - > registerType ( CREATE_CLASS_COMPONENT_INFO ( ComAttribute ) ) ; <nl> + ObjectFactory : : getInstance ( ) - > registerType ( CREATE_CLASS_COMPONENT_INFO ( ComRender ) ) ; <nl> + ObjectFactory : : getInstance ( ) - > registerType ( CREATE_CLASS_COMPONENT_INFO ( ComAudio ) ) ; <nl> + ObjectFactory : : getInstance ( ) - > registerType ( CREATE_CLASS_COMPONENT_INFO ( ComController ) ) ; <nl> } <nl> <nl> SceneReader : : ~ SceneReader ( ) <nl> Node * SceneReader : : createObject ( const rapidjson : : Value & dict , cocos2d : : Node * par <nl> break ; <nl> } <nl> const char * comName = DICTOOL - > getStringValue_json ( subDict , " classname " ) ; <nl> - const char * pComName = DICTOOL - > getStringValue_json ( subDict , " name " ) ; <nl> - <nl> - const rapidjson : : Value & fileData = DICTOOL - > getSubDictionary_json ( subDict , " fileData " ) ; <nl> - std : : string pPath ; <nl> - std : : string pPlistFile ; <nl> - int nResType = 0 ; <nl> - if ( DICTOOL - > checkObjectExist_json ( fileData ) ) <nl> + Component * com = ObjectFactory : : getInstance ( ) - > createComponent ( comName ) ; <nl> + if ( com ! = NULL ) <nl> + { <nl> + if ( com - > serialize ( ( void * ) ( & subDict ) ) ) <nl> + { <nl> + gb - > addComponent ( com ) ; <nl> + } <nl> + else <nl> + { <nl> + CC_SAFE_RELEASE ( com ) ; <nl> + } <nl> + } <nl> + if ( _fnSelector ! = nullptr ) <nl> { <nl> - const char * file = DICTOOL - > getStringValue_json ( fileData , " path " ) ; <nl> - nResType = DICTOOL - > getIntValue_json ( fileData , " resourceType " , - 1 ) ; <nl> - const char * plistFile = DICTOOL - > getStringValue_json ( fileData , " plistFile " ) ; <nl> - if ( file ! = nullptr ) <nl> - { <nl> - pPath . assign ( cocos2d : : FileUtils : : getInstance ( ) - > fullPathForFilename ( file ) ) ; <nl> - } <nl> - <nl> - if ( plistFile ! = nullptr ) <nl> - { <nl> - pPlistFile . assign ( cocos2d : : FileUtils : : getInstance ( ) - > fullPathForFilename ( plistFile ) ) ; <nl> - } <nl> - <nl> - if ( file = = nullptr & & plistFile = = nullptr ) <nl> - { <nl> - continue ; <nl> - } <nl> - } <nl> - else <nl> - { <nl> - continue ; <nl> - } <nl> - <nl> - if ( comName ! = nullptr & & strcmp ( comName , " CCSprite " ) = = 0 ) <nl> - { <nl> - cocos2d : : Sprite * pSprite = nullptr ; <nl> - <nl> - if ( nResType = = 0 ) <nl> - { <nl> - if ( pPath . find ( " . png " ) = = pPath . npos ) <nl> - { <nl> - continue ; <nl> - } <nl> - pSprite = Sprite : : create ( pPath . c_str ( ) ) ; <nl> - } <nl> - else if ( nResType = = 1 ) <nl> - { <nl> - std : : string pngFile = pPlistFile ; <nl> - std : : string : : size_type pos = pngFile . find ( " . plist " ) ; <nl> - if ( pos = = pPath . npos ) <nl> - { <nl> - continue ; <nl> - } <nl> - pngFile . replace ( pos , pngFile . length ( ) , " . png " ) ; <nl> - CCSpriteFrameCache : : getInstance ( ) - > addSpriteFramesWithFile ( pPlistFile . c_str ( ) , pngFile . c_str ( ) ) ; <nl> - pSprite = Sprite : : createWithSpriteFrameName ( pPath . c_str ( ) ) ; <nl> - } <nl> - else <nl> - { <nl> - continue ; <nl> - } <nl> - <nl> - ComRender * pRender = ComRender : : create ( pSprite , " CCSprite " ) ; <nl> - if ( pComName ! = nullptr ) <nl> - { <nl> - pRender - > setName ( pComName ) ; <nl> - } <nl> - <nl> - gb - > addComponent ( pRender ) ; <nl> - if ( _fnSelector ! 
= nullptr ) <nl> - { <nl> - _fnSelector ( pSprite , ( void * ) ( & subDict ) ) ; <nl> - } <nl> - } <nl> - else if ( comName ! = nullptr & & strcmp ( comName , " CCTMXTiledMap " ) = = 0 ) <nl> - { <nl> - cocos2d : : TMXTiledMap * pTmx = nullptr ; <nl> - if ( nResType = = 0 ) <nl> - { <nl> - if ( pPath . find ( " . tmx " ) = = pPath . npos ) <nl> - { <nl> - continue ; <nl> - } <nl> - pTmx = TMXTiledMap : : create ( pPath . c_str ( ) ) ; <nl> - } <nl> - else <nl> - { <nl> - continue ; <nl> - } <nl> - <nl> - ComRender * pRender = ComRender : : create ( pTmx , " CCTMXTiledMap " ) ; <nl> - if ( pComName ! = nullptr ) <nl> - { <nl> - pRender - > setName ( pComName ) ; <nl> - } <nl> - gb - > addComponent ( pRender ) ; <nl> - if ( _fnSelector ! = nullptr ) <nl> - { <nl> - _fnSelector ( pTmx , ( void * ) ( & subDict ) ) ; <nl> - } <nl> - } <nl> - else if ( comName ! = nullptr & & strcmp ( comName , " CCParticleSystemQuad " ) = = 0 ) <nl> - { <nl> - std : : string : : size_type pos = pPath . find ( " . plist " ) ; <nl> - if ( pos = = pPath . npos ) <nl> - { <nl> - continue ; <nl> - } <nl> - <nl> - cocos2d : : ParticleSystemQuad * pParticle = nullptr ; <nl> - if ( nResType = = 0 ) <nl> - { <nl> - pParticle = ParticleSystemQuad : : create ( pPath . c_str ( ) ) ; <nl> - } <nl> - else <nl> - { <nl> - CCLOG ( " unknown resourcetype on CCParticleSystemQuad ! " ) ; <nl> - } <nl> - <nl> - pParticle - > setPosition ( 0 , 0 ) ; <nl> - ComRender * pRender = ComRender : : create ( pParticle , " CCParticleSystemQuad " ) ; <nl> - if ( pComName ! = nullptr ) <nl> - { <nl> - pRender - > setName ( pComName ) ; <nl> - } <nl> - gb - > addComponent ( pRender ) ; <nl> - if ( _fnSelector ! = nullptr ) <nl> - { <nl> - _fnSelector ( pParticle , ( void * ) ( & subDict ) ) ; <nl> - } <nl> - } <nl> - else if ( comName ! = nullptr & & strcmp ( comName , " CCArmature " ) = = 0 ) <nl> - { <nl> - if ( nResType ! = 0 ) <nl> - { <nl> - continue ; <nl> - } <nl> - std : : string reDir = pPath ; <nl> - std : : string file_path = " " ; <nl> - size_t pos = reDir . find_last_of ( ' / ' ) ; <nl> - if ( pos ! = std : : string : : npos ) <nl> - { <nl> - file_path = reDir . substr ( 0 , pos + 1 ) ; <nl> - } <nl> - <nl> - rapidjson : : Document jsonDict ; <nl> - if ( ! readJson ( pPath . c_str ( ) , jsonDict ) ) <nl> - { <nl> - log ( " read json file [ % s ] error ! \ n " , pPath . c_str ( ) ) ; <nl> - continue ; <nl> - } <nl> - <nl> - const rapidjson : : Value & subData = DICTOOL - > getDictionaryFromArray_json ( jsonDict , " armature_data " , 0 ) ; <nl> - const char * name = DICTOOL - > getStringValue_json ( subData , " name " ) ; <nl> - <nl> - ArmatureDataManager : : getInstance ( ) - > addArmatureFileInfo ( pPath . c_str ( ) ) ; <nl> - <nl> - Armature * pAr = Armature : : create ( name ) ; <nl> - ComRender * pRender = ComRender : : create ( pAr , " CCArmature " ) ; <nl> - if ( pComName ! = nullptr ) <nl> - { <nl> - pRender - > setName ( pComName ) ; <nl> - } <nl> - gb - > addComponent ( pRender ) ; <nl> - <nl> - const char * actionName = DICTOOL - > getStringValue_json ( subDict , " selectedactionname " ) ; <nl> - if ( actionName ! = nullptr & & pAr - > getAnimation ( ) ! = nullptr ) <nl> - { <nl> - pAr - > getAnimation ( ) - > play ( actionName ) ; <nl> - } <nl> - if ( _fnSelector ! = nullptr ) <nl> - { <nl> - _fnSelector ( pAr , ( void * ) ( & subDict ) ) ; <nl> - } <nl> - } <nl> - else if ( comName ! 
= nullptr & & strcmp ( comName , " CCComAudio " ) = = 0 ) <nl> - { <nl> - ComAudio * pAudio = nullptr ; <nl> - if ( nResType = = 0 ) <nl> - { <nl> - pAudio = ComAudio : : create ( ) ; <nl> - } <nl> - else <nl> - { <nl> - continue ; <nl> - } <nl> - pAudio - > preloadEffect ( pPath . c_str ( ) ) ; <nl> - if ( pComName ! = nullptr ) <nl> - { <nl> - pAudio - > setName ( pComName ) ; <nl> - } <nl> - gb - > addComponent ( pAudio ) ; <nl> - if ( _fnSelector ! = nullptr ) <nl> - { <nl> - _fnSelector ( pAudio , ( void * ) ( & subDict ) ) ; <nl> - } <nl> - } <nl> - else if ( comName ! = nullptr & & strcmp ( comName , " CCComAttribute " ) = = 0 ) <nl> - { <nl> - ComAttribute * pAttribute = nullptr ; <nl> - if ( nResType = = 0 ) <nl> - { <nl> - pAttribute = ComAttribute : : create ( ) ; <nl> - } <nl> - else <nl> - { <nl> - CCLOG ( " unknown resourcetype on CCComAttribute ! " ) ; <nl> - continue ; <nl> - } <nl> - pAttribute - > parse ( pPath ) ; <nl> - gb - > addComponent ( pAttribute ) ; <nl> - if ( _fnSelector ! = nullptr ) <nl> - { <nl> - _fnSelector ( pAttribute , ( void * ) ( & subDict ) ) ; <nl> - } <nl> - } <nl> - else if ( comName ! = nullptr & & strcmp ( comName , " CCBackgroundAudio " ) = = 0 ) <nl> - { <nl> - ComAudio * pAudio = nullptr ; <nl> - if ( nResType = = 0 ) <nl> - { <nl> - pAudio = ComAudio : : create ( ) ; <nl> - } <nl> - else <nl> - { <nl> - continue ; <nl> - } <nl> - pAudio - > preloadBackgroundMusic ( pPath . c_str ( ) ) ; <nl> - pAudio - > setFile ( pPath . c_str ( ) ) ; <nl> - const bool bLoop = ( DICTOOL - > getIntValue_json ( subDict , " loop " ) ! = 0 ) ; <nl> - pAudio - > setLoop ( bLoop ) ; <nl> - if ( pComName ! = nullptr ) <nl> - { <nl> - pAudio - > setName ( pComName ) ; <nl> - } <nl> - gb - > addComponent ( pAudio ) ; <nl> - if ( pComName ! = nullptr ) <nl> - { <nl> - pAudio - > setName ( pComName ) ; <nl> - } <nl> - pAudio - > playBackgroundMusic ( pPath . c_str ( ) , bLoop ) ; <nl> - } <nl> - else if ( comName ! = nullptr & & strcmp ( comName , " GUIComponent " ) = = 0 ) <nl> - { <nl> - Widget * widget = GUIReader : : getInstance ( ) - > widgetFromJsonFile ( pPath . c_str ( ) ) ; <nl> - ComRender * pRender = ComRender : : create ( widget , " GUIComponent " ) ; <nl> - if ( pComName ! = nullptr ) <nl> - { <nl> - pRender - > setName ( pComName ) ; <nl> - } <nl> - gb - > addComponent ( pRender ) ; <nl> - if ( _fnSelector ! = nullptr ) <nl> - { <nl> - _fnSelector ( widget , ( void * ) ( & subDict ) ) ; <nl> - } <nl> + _fnSelector ( com , ( void * ) ( & subDict ) ) ; <nl> } <nl> } <nl> <nl> mmm a / cocos / editor - support / cocostudio / ObjectFactory . cpp <nl> ppp b / cocos / editor - support / cocostudio / ObjectFactory . 
cpp <nl> void ObjectFactory : : destroyInstance ( ) <nl> CC_SAFE_DELETE ( _sharedFactory ) ; <nl> } <nl> <nl> - Object * ObjectFactory : : createObject ( const char * name ) <nl> + Object * ObjectFactory : : createObject ( const std : : string & name ) <nl> { <nl> Object * o = nullptr ; <nl> do <nl> Object * ObjectFactory : : createObject ( const char * name ) <nl> return o ; <nl> } <nl> <nl> + Component * ObjectFactory : : createComponent ( std : : string name ) <nl> + { <nl> + if ( name = = " CCSprite " | | name = = " CCTMXTiledMap " | | name = = " CCParticleSystemQuad " | | name = = " CCArmature " | | name = = " GUIComponent " ) <nl> + { <nl> + name = " ComRender " ; <nl> + } <nl> + else if ( name = = " CCComAudio " | | name = = " CCBackgroundAudio " ) <nl> + { <nl> + name = " ComAudio " ; <nl> + } <nl> + else if ( name = = " CCComController " ) <nl> + { <nl> + name = " ComController " ; <nl> + } <nl> + else if ( name = = " CCComAttribute " ) <nl> + { <nl> + name = " ComAttribute " ; <nl> + } <nl> + else if ( name = = " CCScene " ) <nl> + { <nl> + name = " CCScene " ; <nl> + } <nl> + else <nl> + { <nl> + CCASSERT ( false , " Unregistered Component ! " ) ; <nl> + } <nl> + Object * o = NULL ; <nl> + do <nl> + { <nl> + const TInfo t = _typeMap [ name ] ; <nl> + CC_BREAK_IF ( t . _fun = = NULL ) ; <nl> + o = t . _fun ( ) ; <nl> + } while ( 0 ) ; <nl> + <nl> + return ( Component * ) o ; <nl> + <nl> + } <nl> + <nl> void ObjectFactory : : registerType ( const TInfo & t ) <nl> { <nl> _typeMap . insert ( std : : make_pair ( t . _class , t ) ) ; <nl> mmm a / cocos / editor - support / cocostudio / ObjectFactory . h <nl> ppp b / cocos / editor - support / cocostudio / ObjectFactory . h <nl> THE SOFTWARE . <nl> # define __TRIGGERFACTORY_H__ <nl> <nl> # include " cocos2d . h " <nl> - # include " CocoStudio . h " <nl> # include < string > <nl> # include < unordered_map > <nl> <nl> class ObjectFactory <nl> <nl> static ObjectFactory * getInstance ( ) ; <nl> static void destroyInstance ( ) ; <nl> - cocos2d : : Object * createObject ( const char * name ) ; <nl> + cocos2d : : Object * createObject ( const std : : string & name ) ; <nl> + cocos2d : : Component * createComponent ( std : : string name ) ; <nl> void registerType ( const TInfo & t ) ; <nl> void removeAll ( ) ; <nl> <nl> mmm a / cocos / editor - support / cocostudio / TriggerBase . h <nl> ppp b / cocos / editor - support / cocostudio / TriggerBase . h <nl> THE SOFTWARE . <nl> <nl> # include " cocos2d . h " <nl> # include " cocostudio / CocoStudio . h " <nl> - # include " TriggerObj . h " <nl> # include " ObjectFactory . h " <nl> + # include " TriggerObj . h " <nl> # include " TriggerMng . h " <nl> <nl> <nl> mmm a / samples / Cpp / TestCpp / Classes / ExtensionsTest / CocoStudioSceneTest / SceneEditorTest . cpp <nl> ppp b / samples / Cpp / TestCpp / Classes / ExtensionsTest / CocoStudioSceneTest / SceneEditorTest . cpp <nl> void UIComponentTest : : touchEvent ( Object * pSender , TouchEventType type ) <nl> { <nl> case TOUCH_EVENT_BEGAN : <nl> { <nl> - ComRender * pBlowFish = static_cast < ComRender * > ( _node - > getChildByTag ( 10010 ) - > getComponent ( " Armature " ) ) ; <nl> + ComRender * pBlowFish = static_cast < ComRender * > ( _node - > getChildByTag ( 10010 ) - > getComponent ( " CCArmature " ) ) ; <nl> pBlowFish - > getNode ( ) - > runAction ( CCMoveBy : : create ( 10 . 0f , Point ( - 1000 . 
0f , 0 ) ) ) ; <nl> <nl> - ComRender * pButterflyfish = static_cast < ComRender * > ( _node - > getChildByTag ( 10011 ) - > getComponent ( " Armature " ) ) ; <nl> + ComRender * pButterflyfish = static_cast < ComRender * > ( _node - > getChildByTag ( 10011 ) - > getComponent ( " CCArmature " ) ) ; <nl> pButterflyfish - > getNode ( ) - > runAction ( CCMoveBy : : create ( 10 . 0f , Point ( - 1000 . 0f , 0 ) ) ) ; <nl> } <nl> break ; <nl>
fixed : SceneReader now creates components through ObjectFactory and per - component serialize ( ) instead of a long if / else chain
cocos2d/cocos2d-x
bac600ab9f328e616b14135a5cfba4b77fe5ee15
2014-01-04T18:22:32Z
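The cocos2d-x commit above replaces SceneReader's long per-class if / else chain with an ObjectFactory keyed by class name, plus DECLARE / IMPLEMENT macros so each component supplies its own creator. Below is a minimal, self-contained C++ sketch of that registration pattern, not the engine's actual API; Factory, Creator, and the macro names are illustrative stand-ins.

    #include <functional>
    #include <iostream>
    #include <map>
    #include <string>

    // A tiny object factory: maps a class name to a creator function.
    struct Factory {
        using Creator = std::function<void*()>;
        static Factory& instance() { static Factory f; return f; }
        void registerType(const std::string& name, Creator c) { creators_[name] = std::move(c); }
        void* create(const std::string& name) {
            auto it = creators_.find(name);
            return it == creators_.end() ? nullptr : it->second();
        }
      private:
        std::map<std::string, Creator> creators_;
    };

    // Macro pair mirroring the DECLARE/IMPLEMENT_CLASS_COMPONENT_INFO idea.
    #define DECLARE_COMPONENT_INFO() static void* createInstance();
    #define IMPLEMENT_COMPONENT_INFO(cls) \
        void* cls::createInstance() { return new cls(); }

    struct ComAudio { DECLARE_COMPONENT_INFO() };
    IMPLEMENT_COMPONENT_INFO(ComAudio)

    int main() {
        Factory::instance().registerType("ComAudio", &ComAudio::createInstance);
        void* c = Factory::instance().create("ComAudio");  // replaces the if/else chain
        std::cout << (c != nullptr) << "\n";               // prints 1
        delete static_cast<ComAudio*>(c);
    }

The payoff of the pattern is that adding a new component type touches only the component's own files plus one registerType call, instead of growing a central switch.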
mmm a / src / compiler / pipeline . cc <nl> ppp b / src / compiler / pipeline . cc <nl> PipelineCompilationJob : : Status PipelineCompilationJob : : PrepareJobImpl ( ) { <nl> } <nl> if ( ! info ( ) - > shared_info ( ) - > asm_function ( ) | | FLAG_turbo_asm_deoptimization ) { <nl> info ( ) - > MarkAsDeoptimizationEnabled ( ) ; <nl> - } <nl> - if ( ! info ( ) - > is_optimizing_from_bytecode ( ) ) { <nl> if ( FLAG_inline_accessors ) { <nl> info ( ) - > MarkAsAccessorInliningEnabled ( ) ; <nl> } <nl> + } <nl> + if ( ! info ( ) - > is_optimizing_from_bytecode ( ) ) { <nl> if ( info ( ) - > is_deoptimization_enabled ( ) & & FLAG_turbo_type_feedback ) { <nl> info ( ) - > MarkAsTypeFeedbackEnabled ( ) ; <nl> } <nl> mmm a / test / cctest / cctest . status <nl> ppp b / test / cctest / cctest . status <nl> <nl> ' test - cpu - profiler / DeoptAtFirstLevelInlinedSource ' : [ FAIL ] , <nl> ' test - cpu - profiler / DeoptAtSecondLevelInlinedSource ' : [ FAIL ] , <nl> <nl> + # TODO ( vogelheim , 5548 ) : Turbofan does support cached accessors . <nl> + ' test - api - accessors / CachedAccessorCrankshaft ' : [ FAIL ] , <nl> + <nl> # BUG ( 5193 ) : Flaky . <nl> ' test - cpu - profiler / FunctionApplySample ' : [ PASS , [ ' system = = windows ' , SKIP ] ] , <nl> } ] , # variant = = turbofan or variant = = ignition_turbofan <nl>
[ turbofan ] Enable accessor inlining when compiling from bytecode .
v8/v8
12af4128d17015ff2241a9cd654134805634b8b4
2016-11-09T14:42:57Z
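The v8 change above moves the FLAG_inline_accessors check under the branch that also marks deoptimization as enabled, so accessor inlining is only switched on when deopts are available, including when compiling from bytecode. A hedged sketch of that dependent-flag gating follows; CompilationInfo and its field names here are invented for illustration, not v8's real structures.

    #include <iostream>

    struct CompilationInfo {
        bool deopt_enabled = false;
        bool accessor_inlining = false;
    };

    // Accessor inlining relies on being able to deoptimize, so it is only
    // enabled together with deoptimization support, never on its own.
    void ConfigureFlags(CompilationInfo& info, bool supports_deopt,
                        bool inline_accessors_flag) {
        if (supports_deopt) {
            info.deopt_enabled = true;
            if (inline_accessors_flag) info.accessor_inlining = true;  // gated
        }
    }

    int main() {
        CompilationInfo info;
        ConfigureFlags(info, /*supports_deopt=*/true, /*inline_accessors_flag=*/true);
        std::cout << info.accessor_inlining << "\n";  // prints 1
    }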
mmm a / js / common / modules / org / arangodb / general - graph . js <nl> ppp b / js / common / modules / org / arangodb / general - graph . js <nl> var checkIfMayBeDropped = function ( colName , graphName , graphs ) { <nl> var from = edgeDefinition . from ; <nl> var to = edgeDefinition . to ; <nl> var collection = edgeDefinition . collection ; <nl> - if ( collection = = = colName | | from . indexOf ( colName ) ! = = - 1 | | to . indexOf ( colName ) ! = = - 1 ) { <nl> + if ( collection = = = colName <nl> + | | from . indexOf ( colName ) ! = = - 1 <nl> + | | to . indexOf ( colName ) ! = = - 1 <nl> + ) { <nl> result = false ; <nl> } <nl> } <nl>
jslint fix
arangodb/arangodb
c63c8c465e38e51cb3b6048e8ad9df65f3272046
2014-05-22T10:34:08Z
mmm a / Marlin / gcode . h <nl> ppp b / Marlin / gcode . h <nl> <nl> / / # define DEBUG_GCODE_PARSER <nl> <nl> # if ENABLED ( DEBUG_GCODE_PARSER ) <nl> - # include " hex_print_routines . h " <nl> + # if ENABLED ( AUTO_BED_LEVELING_UBL ) <nl> + extern char * hex_address ( const void * const w ) ; <nl> + # else <nl> + # include " hex_print_routines . h " <nl> + # endif <nl> # include " serial . h " <nl> # endif <nl> <nl> class GCodeParser { <nl> const uint8_t ind = LETTER_OFF ( c ) ; <nl> if ( ind > = COUNT ( param ) ) return false ; / / Only A - Z <nl> const bool b = TEST ( codebits [ PARAM_IND ( ind ) ] , PARAM_BIT ( ind ) ) ; <nl> - if ( b ) value_ptr = command_ptr + param [ ind ] ; <nl> + if ( b ) value_ptr = param [ ind ] ? command_ptr + param [ ind ] : ( char * ) NULL ; <nl> return b ; <nl> } <nl> <nl>
correct value_bool ( ) when FASTER_GCODE_PARSER is enabled
MarlinFirmware/Marlin
bfbf5f820063a4cb64533036d1bca61873187c35
2017-06-29T00:51:08Z
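The Marlin fix above makes value_ptr come out null when a G-code letter is present but carries no value (stored offset 0), rather than pointing at the start of the command. Here is a simplified, self-contained sketch of the seen-bit plus value-offset scheme; the Parser layout is illustrative, not Marlin's real class.

    #include <cstdint>
    #include <iostream>

    struct Parser {
        const char* command_ptr = nullptr;
        uint32_t codebits = 0;      // one "seen" bit per letter A..Z
        uint8_t  param[26] = {0};   // offset of the value text, 0 = no value
        const char* value_ptr = nullptr;

        bool seen(char c) {
            const uint8_t ind = c - 'A';
            if (ind >= 26) return false;
            const bool b = (codebits >> ind) & 1u;
            // Key fix from the commit: offset 0 means "present, but no value".
            if (b) value_ptr = param[ind] ? command_ptr + param[ind] : nullptr;
            return b;
        }
    };

    int main() {
        Parser p;
        p.command_ptr = "G1 X12.5 E";
        p.codebits = (1u << ('X' - 'A')) | (1u << ('E' - 'A'));
        p.param['X' - 'A'] = 4;  // "12.5" starts at offset 4
        p.param['E' - 'A'] = 0;  // E present with no value
        bool sx = p.seen('X');
        std::cout << sx << " " << (p.value_ptr != nullptr) << "\n";  // 1 1
        bool se = p.seen('E');
        std::cout << se << " " << (p.value_ptr != nullptr) << "\n";  // 1 0
    }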
mmm a / table / iterator . cc <nl> ppp b / table / iterator . cc <nl> class EmptyInternalIterator : public InternalIteratorBase < TValue > { <nl> } ; <nl> } / / namespace <nl> <nl> + Iterator * NewEmptyIterator ( ) { return new EmptyIterator ( Status : : OK ( ) ) ; } <nl> + <nl> Iterator * NewErrorIterator ( const Status & status ) { <nl> return new EmptyIterator ( status ) ; <nl> } <nl>
Add back NewEmptyIterator ( )
facebook/rocksdb
d9280ff2d289c3ef371f25d96cbcd988eb0a7678
2019-04-17T03:28:05Z
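NewEmptyIterator() above is a factory for an always-invalid iterator, i.e. the Null Object pattern: callers can hold and step a "no data" iterator without null checks. A reduced sketch follows, with the interface trimmed to the relevant parts; the real rocksdb Iterator has many more methods and returns a Status object rather than a string.

    #include <iostream>
    #include <string>

    struct Iterator {
        virtual ~Iterator() = default;
        virtual bool Valid() const = 0;
        virtual void SeekToFirst() = 0;
        virtual std::string status() const = 0;
    };

    // Null Object: safe to use everywhere a real iterator is expected.
    struct EmptyIterator : Iterator {
        explicit EmptyIterator(std::string s) : status_(std::move(s)) {}
        bool Valid() const override { return false; }
        void SeekToFirst() override {}
        std::string status() const override { return status_; }
        std::string status_;
    };

    Iterator* NewEmptyIterator() { return new EmptyIterator("OK"); }
    Iterator* NewErrorIterator(std::string s) { return new EmptyIterator(std::move(s)); }

    int main() {
        Iterator* it = NewEmptyIterator();
        it->SeekToFirst();
        std::cout << it->Valid() << " " << it->status() << "\n";  // 0 OK
        delete it;
    }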
mmm a / arangod / VocBase / collection . cpp <nl> ppp b / arangod / VocBase / collection . cpp <nl> TRI_collection_t * TRI_OpenCollection ( TRI_vocbase_t * vocbase , <nl> <nl> TRI_FreeCollectionInfoOptions ( & info ) ; <nl> <nl> + double start = TRI_microtime ( ) ; <nl> + <nl> + LOG_ACTION ( " open - collection { collection : % s / % s } " , <nl> + vocbase - > _name , <nl> + collection - > _info . _name ) ; <nl> + <nl> / / check for journals and datafiles <nl> bool ok = CheckCollection ( collection , ignoreErrors ) ; <nl> <nl> TRI_collection_t * TRI_OpenCollection ( TRI_vocbase_t * vocbase , <nl> <nl> return nullptr ; <nl> } <nl> + <nl> + LOG_TIMER ( ( TRI_microtime ( ) - start ) , <nl> + " open - collection { collection : % s / % s } " , <nl> + vocbase - > _name , <nl> + collection - > _info . _name ) ; <nl> <nl> return collection ; <nl> } <nl>
separate measure for CheckCollection
arangodb/arangodb
9d048fd1e04f8bbc4b114ce2a476b93a403cb964
2015-07-16T10:59:59Z
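The arangodb change brackets CheckCollection with a start timestamp (TRI_microtime) and logs the elapsed time afterwards. The same measurement can be packaged as an RAII scope timer, so the stop-and-log half cannot be forgotten on early returns; this is a sketch, not the ArangoDB logging macros.

    #include <chrono>
    #include <cstdio>
    #include <string>

    // Logs the wall-clock duration of a scope on destruction.
    struct ScopeTimer {
        explicit ScopeTimer(std::string label)
            : label_(std::move(label)), start_(std::chrono::steady_clock::now()) {}
        ~ScopeTimer() {
            auto us = std::chrono::duration_cast<std::chrono::microseconds>(
                std::chrono::steady_clock::now() - start_).count();
            std::printf("%s took %lld us\n", label_.c_str(),
                        static_cast<long long>(us));
        }
        std::string label_;
        std::chrono::steady_clock::time_point start_;
    };

    bool CheckCollection() { /* ... expensive datafile scan ... */ return true; }

    int main() {
        ScopeTimer t("open-collection {collection: db/test}");
        return CheckCollection() ? 0 : 1;  // timer logs when t goes out of scope
    }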
mmm a / tensorflow / contrib / makefile / Makefile <nl> ppp b / tensorflow / contrib / makefile / Makefile <nl> ifdef HEXAGON_LIBS <nl> endif <nl> endif # HEXAGON_LIBS <nl> <nl> + # If ANDROID_TYPES is not set assume __ANDROID_TYPES_SLIM__ <nl> + ifeq ( $ ( ANDROID_TYPES ) , ) <nl> + ANDROID_TYPES : = - D__ANDROID_TYPES_SLIM__ <nl> + endif <nl> + <nl> # Try to figure out the host system <nl> HOST_OS : = <nl> ifeq ( $ ( OS ) , Windows_NT ) <nl> ifeq ( $ ( TARGET ) , LINUX ) <nl> endif <nl> # If we ' re cross - compiling for the Raspberry Pi , use the right gcc . <nl> ifeq ( $ ( TARGET ) , PI ) <nl> - CXXFLAGS + = - D__ANDROID_TYPES_SLIM__ - DRASPBERRY_PI <nl> + CXXFLAGS + = $ ( ANDROID_TYPES ) - DRASPBERRY_PI <nl> LDFLAGS : = - Wl , - - no - whole - archive <nl> LIBS + = - ldl - lpthread <nl> LIBFLAGS + = - Wl , - - allow - multiple - definition - Wl , - - whole - archive <nl> ifeq ( $ ( TARGET ) , IOS ) <nl> - Wno - c + + 11 - narrowing \ <nl> - mno - thumb \ <nl> - DTF_LEAN_BINARY \ <nl> - - D__ANDROID_TYPES_SLIM__ \ <nl> + $ ( ANDROID_TYPES ) \ <nl> - fno - exceptions \ <nl> - isysroot \ <nl> $ { IPHONEOS_SYSROOT } <nl> ifeq ( $ ( TARGET ) , IOS ) <nl> - Wno - c + + 11 - narrowing \ <nl> - mno - thumb \ <nl> - DTF_LEAN_BINARY \ <nl> - - D__ANDROID_TYPES_SLIM__ \ <nl> + $ ( ANDROID_TYPES ) \ <nl> - fno - exceptions \ <nl> - isysroot \ <nl> $ { IPHONEOS_SYSROOT } <nl> ifeq ( $ ( TARGET ) , IOS ) <nl> - DUSE_GEMM_FOR_CONV \ <nl> - Wno - c + + 11 - narrowing \ <nl> - DTF_LEAN_BINARY \ <nl> - - D__ANDROID_TYPES_SLIM__ \ <nl> + $ ( ANDROID_TYPES ) \ <nl> - fno - exceptions \ <nl> - isysroot \ <nl> $ { IPHONEOS_SYSROOT } <nl> ifeq ( $ ( TARGET ) , IOS ) <nl> - DUSE_GEMM_FOR_CONV \ <nl> - Wno - c + + 11 - narrowing \ <nl> - DTF_LEAN_BINARY \ <nl> - - D__ANDROID_TYPES_SLIM__ \ <nl> + $ ( ANDROID_TYPES ) \ <nl> - fno - exceptions \ <nl> - isysroot \ <nl> $ { IPHONESIMULATOR_SYSROOT } <nl> ifeq ( $ ( TARGET ) , IOS ) <nl> - DUSE_GEMM_FOR_CONV \ <nl> - Wno - c + + 11 - narrowing \ <nl> - DTF_LEAN_BINARY \ <nl> - - D__ANDROID_TYPES_SLIM__ \ <nl> + $ ( ANDROID_TYPES ) \ <nl> - fno - exceptions \ <nl> - isysroot \ <nl> $ { IPHONESIMULATOR_SYSROOT } <nl>
iOS / RPi Add the ability to choose ANDROID_TYPES_FULL
tensorflow/tensorflow
029ce4ed70a1d8644a8e1c11345bcae2416a2a6d
2017-10-26T22:37:20Z
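The Makefile edit gives ANDROID_TYPES a default of -D__ANDROID_TYPES_SLIM__ when the caller leaves it unset, then substitutes the variable wherever the literal flag was hard-coded. The same "use the override if provided, else a default" idiom, transplanted to C++ with getenv standing in for the make variable; this is an analogy for illustration, not part of the TensorFlow build.

    #include <cstdlib>
    #include <iostream>
    #include <string>

    // Honor an exported ANDROID_TYPES if present; otherwise fall back to the
    // slim type set, mirroring `ifeq ($(ANDROID_TYPES),)` in make.
    std::string AndroidTypesFlag() {
        const char* v = std::getenv("ANDROID_TYPES");
        return (v && *v) ? v : "-D__ANDROID_TYPES_SLIM__";
    }

    int main() {
        std::cout << "CXXFLAGS += " << AndroidTypesFlag() << "\n";
    }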
mmm a / ChangeLog <nl> ppp b / ChangeLog <nl> <nl> 2007 - 06 - 04 Tatsuhiro Tsujikawa < tujikawa at rednoah dot com > <nl> - <nl> + <nl> Accept incoming connections if download rate is low . <nl> * src / PeerListenCommand . h , src / PeerListenCommand . cc : <nl> ( _lowestSpeedLimit ) : New variable . <nl> <nl> ( execute ) : Accept incoming connections if download rate is low . <nl> MAX_PEERS is ignored in this case . <nl> <nl> + Disable PREF_OUT in multiple concurrent download : <nl> + * src / RequestGroup . h , src / RequestGroup . cc <nl> + ( setUserDefinedFilename ) : New function . <nl> + * src / DownloadEngineFactory . cc <nl> + ( newConsoleEngine ) : Do not set PREF_OUT to requestGroup in multiple <nl> + concurrent download . <nl> + * src / DefaultSegmentManFactory . cc <nl> + ( createNewInstance ) : Comment out the line : segmentMan - > ufilename = . . . <nl> + <nl> 2007 - 06 - 03 Tatsuhiro Tsujikawa < tujikawa at rednoah dot com > <nl> <nl> RequestGroup : : getNextCommand ( ) was renamed to createNextCommand ( ) . <nl> mmm a / TODO <nl> ppp b / TODO <nl> <nl> 100K / 300M ( 10 % ) ( 3cn ) ( 3more ) 100KB / s [ FileAlloc : 35MB / 40MB ( 90 % ) ] [ Checksum : 10MB / 20MB ( 50 % ) ] <nl> * exit status : all downloads have been successful - > EXIT_SUCCESS , <nl> some of downloads have been failed - > EXIT_FAILURE <nl> - * Do not use ufilename in multi - simultaneous download mode . <nl> * Create download command directly when 1connection download . <nl> Consider timeout when file allocation / check integrity is enabled . <nl> - * Accept incoming connections if download rate is low . <nl> mmm a / src / DefaultSegmentManFactory . cc <nl> ppp b / src / DefaultSegmentManFactory . cc <nl> SegmentManHandle DefaultSegmentManFactory : : createNewInstance ( ) <nl> segmentMan - > diskWriter = new DefaultDiskWriter ( ) ; <nl> segmentMan - > dir = _option - > get ( PREF_DIR ) ; <nl> / / TODO disable this in multi - simultaneous download mode . <nl> - segmentMan - > ufilename = _option - > get ( PREF_OUT ) ; <nl> + / / segmentMan - > ufilename = _option - > get ( PREF_OUT ) ; <nl> segmentMan - > option = _option ; <nl> return segmentMan ; <nl> } <nl> mmm a / src / DownloadEngineFactory . cc <nl> ppp b / src / DownloadEngineFactory . cc <nl> ConsoleDownloadEngine * <nl> DownloadEngineFactory : : newConsoleEngine ( const Option * op , <nl> const RequestGroups & requestGroups ) <nl> { <nl> + / / set PREF_OUT parameter to requestGroup in non - multi download mode . <nl> + if ( requestGroups . size ( ) = = 1 ) { <nl> + requestGroups . front ( ) - > setUserDefinedFilename ( op - > get ( PREF_OUT ) ) ; <nl> + } <nl> RequestGroups workingSet ; <nl> RequestGroups reservedSet ; <nl> if ( op - > getAsInt ( PREF_MAX_SIMULTANEOUS_DOWNLOADS ) < ( int32_t ) requestGroups . size ( ) ) { <nl> mmm a / src / RequestGroup . cc <nl> ppp b / src / RequestGroup . cc <nl> <nl> SegmentManHandle RequestGroup : : initSegmentMan ( ) <nl> { <nl> _segmentMan = _segmentManFactory - > createNewInstance ( ) ; <nl> - / * <nl> - _segmentMan = new SegmentMan ( ) ; <nl> - _segmentMan - > diskWriter = new DefaultDiskWriter ( ) ; / / DefaultDiskWriter : : createNewDiskWriter ( _option ) ; <nl> - _segmentMan - > dir = _option - > get ( PREF_DIR ) ; <nl> - / / TODO disable this in multi - simultaneous download mode . 
<nl> - _segmentMan - > ufilename = _option - > get ( PREF_OUT ) ; <nl> - _segmentMan - > option = _option ; <nl> - * / <nl> return _segmentMan ; <nl> } <nl> <nl> void RequestGroup : : validateTotalLengthByHint ( int64_t actualTotalLength ) const <nl> { <nl> validateTotalLength ( _hintTotalLength , actualTotalLength ) ; <nl> } <nl> + <nl> + void RequestGroup : : setUserDefinedFilename ( const string & filename ) <nl> + { <nl> + if ( _segmentMan . isNull ( ) ) { <nl> + throw new FatalException ( " SegmentMan is not initialized yet . Call initSegmentMan ( ) before calling this function . " ) ; <nl> + } <nl> + _segmentMan - > ufilename = filename ; <nl> + } <nl> mmm a / src / RequestGroup . h <nl> ppp b / src / RequestGroup . h <nl> class RequestGroup { <nl> { <nl> _numConcurrentCommand = num ; <nl> } <nl> + <nl> + void setUserDefinedFilename ( const string & filename ) ; <nl> } ; <nl> <nl> typedef SharedHandle < RequestGroup > RequestGroupHandle ; <nl>
2007 - 06 - 04 Tatsuhiro Tsujikawa < tujikawa at rednoah dot com >
aria2/aria2
3dd06aacfbd6c8a1a027532e34dd21c04731b309
2007-06-04T12:52:57Z
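setUserDefinedFilename in the aria2 commit throws when initSegmentMan() has not run yet, instead of silently writing through a null handle. A compact sketch of that precondition-checked setter follows, with the types reduced to what the check needs; SegmentMan here is a stub, not aria2's class.

    #include <memory>
    #include <stdexcept>
    #include <string>

    struct SegmentMan { std::string ufilename; };

    struct RequestGroup {
        std::shared_ptr<SegmentMan> segmentMan_;
        void initSegmentMan() { segmentMan_ = std::make_shared<SegmentMan>(); }
        // Fail loudly if called before initSegmentMan(), as in the commit.
        void setUserDefinedFilename(const std::string& filename) {
            if (!segmentMan_) {
                throw std::runtime_error(
                    "SegmentMan is not initialized yet. Call initSegmentMan() first.");
            }
            segmentMan_->ufilename = filename;
        }
    };

    int main() {
        RequestGroup g;
        try { g.setUserDefinedFilename("out.bin"); }  // throws: not initialized
        catch (const std::exception&) {}
        g.initSegmentMan();
        g.setUserDefinedFilename("out.bin");          // ok
    }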
mmm a / test / attr / attr_autoclosure . swift <nl> ppp b / test / attr / attr_autoclosure . swift <nl> func passNonThrowingToThrowingAC ( _ fn : @ autoclosure ( ) - > Int ) { <nl> func passThrowingToThrowingAC ( _ fn : @ autoclosure ( ) throws - > Int ) { <nl> takesThrowingAutoclosure ( fn ) <nl> } <nl> + <nl> + / / rdar : / / problem / 20591571 - Various type inference problems with @ autoclosure <nl> + func rdar_20591571 ( ) { <nl> + func foo ( _ g : @ autoclosure ( ) - > Int ) { <nl> + typealias G = ( ) - > Int <nl> + let _ = unsafeBitCast ( g , to : G . self ) / / expected - error { { converting non - escaping value to ' T ' may allow it to escape } } <nl> + } <nl> + <nl> + func id < T > ( _ : T ) - > T { } <nl> + func same < T > ( _ : T , _ : T ) { } <nl> + <nl> + func takesAnAutoclosure ( _ fn : @ autoclosure ( ) - > Int , _ efn : @ escaping @ autoclosure ( ) - > Int ) { <nl> + / / expected - note @ - 1 2 { { parameter ' fn ' is implicitly non - escaping } } <nl> + <nl> + var _ = fn / / expected - error { { non - escaping parameter ' fn ' may only be called } } <nl> + let _ = fn / / expected - error { { non - escaping parameter ' fn ' may only be called } } <nl> + <nl> + var _ = efn <nl> + let _ = efn <nl> + <nl> + _ = id ( fn ) / / expected - error { { converting non - escaping value to ' T ' may allow it to escape } } <nl> + _ = same ( fn , { 3 } ) / / expected - error { { converting non - escaping value to ' T ' may allow it to escape } } <nl> + _ = same ( { 3 } , fn ) / / expected - error { { converting non - escaping value to ' T ' may allow it to escape } } <nl> + <nl> + withoutActuallyEscaping ( fn ) { _ in } / / Ok <nl> + withoutActuallyEscaping ( fn ) { ( _ : ( ) - > Int ) in } / / Ok <nl> + } <nl> + } <nl> + <nl> + / / rdar : / / problem / 30906031 - [ SR - 4188 ] : withoutActuallyEscaping doesn ' t accept an @ autoclosure argument <nl> + func rdar_30906031 ( in arr : [ Int ] , fn : @ autoclosure ( ) - > Int ) - > Bool { <nl> + return withoutActuallyEscaping ( fn ) { escapableF in / / Ok <nl> + arr . lazy . filter { $ 0 > = escapableF ( ) } . isEmpty <nl> + } <nl> + } <nl>
Merge remote - tracking branch ' origin / master ' into master - next
apple/swift
90f90c9ac771fef4e7687daab07942dc82c141bb
2018-11-11T09:09:27Z
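The Swift tests above pin down the rule that a non-escaping @autoclosure parameter may only be called, never stored or passed where it could escape. C++ has no compiler-enforced counterpart, but the hazard the rule prevents looks like the sketch below; this is an analogy in C++, not a translation of the Swift semantics.

    #include <functional>
    #include <iostream>

    std::function<int()> escaped;  // storage that outlives the call site

    void takesClosure(const std::function<int()>& fn) {
        fn();           // calling is always fine: the "non-escaping" use
        escaped = fn;   // letting it escape compiles in C++, and dangles if
                        // fn captured locals by reference
    }

    int main() {
        {
            int x = 42;
            takesClosure([&x] { return x; });  // captures x by reference
        }
        // `escaped` now refers to the dead `x`; invoking it would be
        // undefined behavior, which is what Swift rejects at compile time.
        std::cout << "stored closure would dangle if called\n";
    }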
mmm a / cmake / tests . cmake <nl> ppp b / cmake / tests . cmake <nl> endif ( ) <nl> <nl> if ( MINGW ) <nl> set_source_files_properties ( $ { tests_files } PROPERTIES COMPILE_FLAGS " - Wno - narrowing " ) <nl> + <nl> + # required for tests on MinGW Win64 <nl> + if ( CMAKE_SIZEOF_VOID_P EQUAL 8 ) <nl> + set ( CMAKE_EXE_LINKER_FLAGS " $ { CMAKE_EXE_LINKER_FLAGS } - Wl , - - stack , 16777216 " ) <nl> + set ( CMAKE_CXX_FLAGS " $ { CMAKE_CXX_FLAGS } - Wa , - mbig - obj " ) <nl> + endif ( ) <nl> + <nl> endif ( ) <nl> <nl> add_executable ( tests $ { tests_files } $ { common_test_files } $ { tests_proto_files } $ { lite_test_proto_files } ) <nl>
tests : fix link failure and stack overflow on Mingw w64
protocolbuffers/protobuf
a69dfe63bc26c12fd2786aec9239076997110315
2018-07-31T04:55:44Z
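The protobuf change links the 64-bit MinGW test binary with a 16 MiB stack (-Wl,--stack,16777216) because the default Windows stack is too small for the tests. Code shaped like the sketch below is the kind of thing that needs that headroom: modest per-frame buffers multiplied by deep recursion. The exact failing test is not shown in the diff, so this is only representative.

    #include <iostream>

    // Each frame carries a sizeable local buffer, so depth times frame size
    // quickly exceeds a small default stack.
    int deep(int n) {
        volatile char frame[4096];      // keeps the frame from being elided
        frame[0] = static_cast<char>(n);
        if (n == 0) return frame[0];
        int r = deep(n - 1);
        // Reading frame after the recursive call also defeats tail-call
        // optimization, so the frames really do stack up.
        return r + frame[0] - static_cast<char>(n);
    }

    int main() {
        // Roughly 2000 frames * 4 KiB is about 8 MiB of stack: fine with the
        // 16 MiB link option, a crash with a ~1 MiB default.
        std::cout << deep(2000) << "\n";  // prints 0
    }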
mmm a / scripts / test . sh <nl> ppp b / scripts / test . sh <nl> function testFontSpacing ( ) { <nl> echo " < html > < body style = \ " font - size : 7pt ; font - family : Arial ; \ " > vii vaa vuu vvv vee < / body > < / html > " > tmp . html <nl> wk tmp . html tmp . pdf 2 > $ LEVEL2 > $ LEVEL1 <nl> ( [ - f tmp . pdf ] & & <nl> - pdftotext tmp . pdf / dev / stdout | grep - q " vii vaa vuu vvv vee " ) & & good $ 1 | | bad $ 1 <nl> + pdftotext tmp . pdf / dev / stdout | grep - q " vii vaa vuu vvv vee " ) & & good $ 1 | | bad $ 1 false <nl> <nl> } <nl> <nl>
Issue 72 is not yet fixed when not using X11
wkhtmltopdf/wkhtmltopdf
13252dcdedc4af31e6f38bda4844c7d5aa6bc4bd
2010-06-26T18:39:06Z
mmm a / src / mongo / db / repl / repl_set_config . cpp <nl> ppp b / src / mongo / db / repl / repl_set_config . cpp <nl> Status ReplSetConfig : : validate ( ) const { <nl> <nl> auto extractHorizonMembers = [ ] ( const auto & replMember ) { <nl> std : : vector < std : : string > rv ; <nl> - std : : transform ( begin ( replMember . getHorizonMappings ( ) ) , <nl> - end ( replMember . getHorizonMappings ( ) ) , <nl> + std : : transform ( replMember . getHorizonMappings ( ) . begin ( ) , <nl> + replMember . getHorizonMappings ( ) . end ( ) , <nl> back_inserter ( rv ) , <nl> [ ] ( auto & & mapping ) { return mapping . first ; } ) ; <nl> std : : sort ( begin ( rv ) , end ( rv ) ) ; <nl> mmm a / src / mongo / db / repl / split_horizon . h <nl> ppp b / src / mongo / db / repl / split_horizon . h <nl> class SplitHorizon { <nl> invariant ( ! _forwardMapping . empty ( ) ) ; <nl> invariant ( ! horizon . empty ( ) ) ; <nl> auto found = _forwardMapping . find ( horizon ) ; <nl> - if ( found = = end ( _forwardMapping ) ) <nl> + if ( found = = _forwardMapping . end ( ) ) <nl> uasserted ( ErrorCodes : : NoSuchKey , str : : stream ( ) < < " No horizon named " < < horizon ) ; <nl> return found - > second ; <nl> } <nl>
SERVER - 43048 repl_set_config ADL
mongodb/mongo
ec82ec3e9f613ee65dd15d0563c109f5e6efcf15
2019-08-27T14:29:12Z
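The mongodb fix swaps unqualified begin(x) / end(x) for the member calls x.begin() / x.end(), taking argument-dependent lookup out of the picture. The sketch below shows how ADL can silently route an unqualified begin to an overload in the argument's namespace; the repl::Mapping type and its begin overload are invented to demonstrate the mechanism.

    #include <iterator>
    #include <vector>

    namespace repl {
    struct Mapping : std::vector<int> {};
    // Found via ADL whenever begin(m) is written unqualified with a Mapping:
    inline int* begin(Mapping&) { return nullptr; }
    }  // namespace repl

    int main() {
        repl::Mapping m;
        // Unqualified call: ADL pulls in repl::begin (from Mapping's
        // namespace) and std::begin (via the std::vector base class); the
        // non-template repl::begin wins and returns nullptr instead of a
        // real iterator.
        auto* p = begin(m);
        // Member call: always the container's own begin(), no ADL involved.
        auto it = m.begin();
        (void)p; (void)it;
    }

Member calls never participate in ADL, which is why qualifying them is the robust fix.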
mmm a / Documentation / Books / Manual / SUMMARY . md <nl> ppp b / Documentation / Books / Manual / SUMMARY . md <nl> <nl> * [ Coming from SQL ] ( GettingStarted / ComingFromSql . md ) <nl> # * [ Coming from MongoDB ] ( GettingStarted / ComingFromMongoDb . md ) # TODO <nl> # <nl> - * [ StorageEngines ] ( StorageEngines / README . md ) <nl> + * [ Storage Engines ] ( StorageEngines / README . md ) <nl> # <nl> * [ Scalability ] ( Scalability / README . md ) <nl> * [ Architecture ] ( Scalability / Architecture . md ) <nl>
fixed typo
arangodb/arangodb
a9bad6de19b561ae8b21c0374db1319327dc03b4
2017-05-14T19:23:43Z
mmm a / emscripten . py <nl> ppp b / emscripten . py <nl> def compile_malloc ( ) : <nl> " " " <nl> src = path_from_root ( ' src ' , ' dlmalloc . c ' ) <nl> includes = ' - I ' + path_from_root ( ' src ' , ' include ' ) <nl> - command = [ shared . CLANG , ' - c ' , ' - g ' , ' - emit - llvm ' , ' - m32 ' , ' - o - ' , includes , src ] <nl> + command = [ shared . CLANG , ' - c ' , ' - g ' , ' - emit - llvm ' , ' - m32 ' ] + shared . COMPILER_OPTS + [ ' - o - ' , includes , src ] <nl> with get_temp_file ( ' . bc ' ) as out : ret = subprocess . call ( command , stdout = out ) <nl> if ret ! = 0 : raise RuntimeError ( ' Could not compile dlmalloc . ' ) <nl> return out . name <nl> mmm a / settings . py <nl> ppp b / settings . py <nl> <nl> <nl> LLVM_ROOT = os . path . expanduser ( ' ~ / Dev / llvm - 2 . 9 / cbuild / bin ' ) <nl> <nl> - COMPILER_OPTS = [ ' - m32 ' ] # Need to build as 32bit arch , for now - <nl> - # various errors on 64bit compilation <nl> - # WARNING : ' - g ' here will generate llvm bitcode that lli will crash on ! <nl> + COMPILER_OPTS = [ ] <nl> <nl> SPIDERMONKEY_ENGINE = [ os . path . expanduser ( ' ~ / Dev / mozilla - central / js / src / js ' ) , ' - m ' , ' - n ' ] <nl> V8_ENGINE = [ os . path . expanduser ( ' ~ / Dev / v8 / d8 ' ) ] <nl> mmm a / tests / runner . py <nl> ppp b / tests / runner . py <nl> def build ( self , src , dirname , filename , output_processor = None , main_file = None , a <nl> os . remove ( f + ' . o . ll ' ) <nl> except : <nl> pass <nl> - output = Popen ( [ COMPILER , ' - DEMSCRIPTEN ' , ' - emit - llvm ' ] + COMPILER_OPTS + COMPILER_TEST_OPTS + <nl> + output = Popen ( [ COMPILER , ' - DEMSCRIPTEN ' , ' - emit - llvm ' , ' - m32 ' ] + COMPILER_OPTS + COMPILER_TEST_OPTS + <nl> [ ' - I ' , dirname , ' - I ' , os . path . join ( dirname , ' include ' ) ] + <nl> map ( lambda include : ' - I ' + include , includes ) + <nl> [ ' - c ' , f , ' - o ' , f + ' . o ' ] , <nl>
harmonize use of COMPILER_OPTS
emscripten-core/emscripten
8643df88d3b7484156caa9ab9378affbfeb895dc
2011-09-24T00:23:22Z
mmm a / tensorflow / BUILD <nl> ppp b / tensorflow / BUILD <nl> filegroup ( <nl> " / / tensorflow / contrib / tensor_forest / kernels / v4 : all_files " , <nl> " / / tensorflow / contrib / tensor_forest / proto : all_files " , <nl> " / / tensorflow / contrib / tensorboard : all_files " , <nl> + " / / tensorflow / contrib / tensorboard / db : all_files " , <nl> " / / tensorflow / contrib / testing : all_files " , <nl> " / / tensorflow / contrib / text : all_files " , <nl> " / / tensorflow / contrib / tfprof : all_files " , <nl> mmm a / tensorflow / compiler / jit / mark_for_compilation_pass_test . cc <nl> ppp b / tensorflow / compiler / jit / mark_for_compilation_pass_test . cc <nl> TEST ( XlaCompilationTest , UnsupportedTypes ) { <nl> Node * a = ops : : SourceOp ( <nl> " Const " , builder . opts ( ) <nl> . WithName ( " A " ) <nl> - . WithAttr ( " dtype " , DT_COMPLEX64 ) <nl> - . WithAttr ( " value " , Tensor ( DT_COMPLEX64 , TensorShape ( ) ) ) ) ; <nl> + . WithAttr ( " dtype " , DT_COMPLEX128 ) <nl> + . WithAttr ( " value " , Tensor ( DT_COMPLEX128 , TensorShape ( ) ) ) ) ; <nl> Node * b = ops : : UnaryOp ( " Neg " , a , builder . opts ( ) . WithName ( " B " ) ) ; <nl> ops : : BinaryOp ( " MatMul " , a , b , builder . opts ( ) . WithName ( " C " ) ) ; <nl> TF_EXPECT_OK ( builder . ToGraph ( graph . get ( ) ) ) ; <nl> mmm a / tensorflow / compiler / jit / xla_cpu_device . cc <nl> ppp b / tensorflow / compiler / jit / xla_cpu_device . cc <nl> REGISTER_LOCAL_DEVICE_FACTORY ( DEVICE_XLA_CPU , XlaCpuDeviceFactory ) ; <nl> <nl> / / Kernel registrations <nl> <nl> - constexpr std : : array < DataType , 5 > kAllXlaCpuTypes = { <nl> - { DT_INT32 , DT_INT64 , DT_FLOAT , DT_DOUBLE , DT_BOOL } } ; <nl> + constexpr std : : array < DataType , 6 > kAllXlaCpuTypes = { <nl> + { DT_INT32 , DT_INT64 , DT_FLOAT , DT_DOUBLE , DT_COMPLEX64 , DT_BOOL } } ; <nl> <nl> REGISTER_XLA_LAUNCH_KERNEL ( DEVICE_XLA_CPU , XlaLocalLaunchOp , kAllXlaCpuTypes ) ; <nl> REGISTER_XLA_DEVICE_KERNELS ( DEVICE_XLA_CPU , kAllXlaCpuTypes ) ; <nl> mmm a / tensorflow / compiler / jit / xla_gpu_device . cc <nl> ppp b / tensorflow / compiler / jit / xla_gpu_device . cc <nl> REGISTER_LOCAL_DEVICE_FACTORY ( DEVICE_XLA_GPU , XlaGpuDeviceFactory ) ; <nl> <nl> / / Kernel registrations <nl> <nl> - constexpr std : : array < DataType , 5 > kAllXlaGpuTypes = { <nl> - { DT_INT32 , DT_INT64 , DT_FLOAT , DT_DOUBLE , DT_BOOL } } ; <nl> + constexpr std : : array < DataType , 6 > kAllXlaGpuTypes = { <nl> + { DT_INT32 , DT_INT64 , DT_FLOAT , DT_DOUBLE , DT_COMPLEX64 , DT_BOOL } } ; <nl> <nl> REGISTER_XLA_LAUNCH_KERNEL ( DEVICE_XLA_GPU , XlaLocalLaunchOp , kAllXlaGpuTypes ) ; <nl> REGISTER_XLA_DEVICE_KERNELS ( DEVICE_XLA_GPU , kAllXlaGpuTypes ) ; <nl> mmm a / tensorflow / compiler / tests / BUILD <nl> ppp b / tensorflow / compiler / tests / BUILD <nl> load ( " / / tensorflow : tensorflow . bzl " , " cuda_py_test " ) <nl> load ( " / / tensorflow / compiler / aot : tfcompile . bzl " , " tf_library " ) <nl> load ( " / / tensorflow / compiler / tests : build_defs . bzl " , " tf_xla_py_test " ) <nl> load ( " / / tensorflow / compiler / tests : build_defs . bzl " , " generate_backend_suites " ) <nl> + load ( <nl> + " / / tensorflow / core : platform / default / build_config_root . bzl " , <nl> + " tf_cuda_tests_tags " , <nl> + ) <nl> <nl> generate_backend_suites ( ) <nl> <nl> cc_library ( <nl> <nl> tf_cuda_cc_test ( <nl> name = " randomized_tests " , <nl> + size = " large " , <nl> # This test is randomized , so only run it if explicitly requested . 
<nl> tags = [ <nl> " manual " , <nl> " notap " , <nl> - ] , <nl> + ] + tf_cuda_tests_tags ( ) , <nl> deps = [ " : randomized_tests_library " ] , <nl> ) <nl> <nl> mmm a / tensorflow / compiler / tests / argminmax_test . py <nl> ppp b / tensorflow / compiler / tests / argminmax_test . py <nl> def _assertOpOutputMatchesExpected ( self , op , inp , expected ) : <nl> self . assertAllEqual ( result , expected ) <nl> <nl> def testArgMinMax ( self ) : <nl> - for dtype in self . numeric_types : <nl> + # Complex numbers do not support argmin / argmax . <nl> + minmax_types = set ( self . numeric_types ) - set ( self . complex_types ) <nl> + for dtype in minmax_types : <nl> self . _assertOpOutputMatchesExpected ( <nl> lambda x : math_ops . argmax ( x , axis = 0 , output_type = dtypes . int32 ) , <nl> np . array ( [ 1 , 10 , 27 , 3 , 3 , 4 ] , dtype = dtype ) , <nl> mmm a / tensorflow / compiler / tests / binary_ops_test . py <nl> ppp b / tensorflow / compiler / tests / binary_ops_test . py <nl> def testFloatOps ( self ) : <nl> dtype ( 4 ) , <nl> expected = np . array ( [ [ 16 ] , [ 81 ] ] , dtype = dtype ) ) <nl> <nl> + atan2_supported = self . device = = " XLA_GPU " <nl> + if atan2_supported : <nl> + self . _testBinary ( <nl> + math_ops . atan2 , <nl> + np . array ( [ 0 , np . sqrt ( 2 ) , 1 , np . sqrt ( 2 ) , 0 ] , dtype ) , <nl> + np . array ( [ 1 , np . sqrt ( 2 ) , 0 , - np . sqrt ( 2 ) , - 1 ] , dtype ) , <nl> + expected = np . array ( <nl> + [ 0 , np . pi / 4 , np . pi / 2 , np . pi * 3 / 4 , np . pi ] , dtype = dtype ) ) <nl> + <nl> self . _testBinary ( <nl> gen_math_ops . _reciprocal_grad , <nl> np . array ( [ 4 , - 3 , - 2 , 1 ] , dtype = dtype ) , <nl> def testNumericOps ( self ) : <nl> dtype ( 7 ) , <nl> expected = np . array ( [ [ - 6 ] , [ - 5 ] ] , dtype = dtype ) ) <nl> <nl> - self . _testBinary ( <nl> - math_ops . maximum , <nl> - np . array ( [ 1 , 2 ] , dtype = dtype ) , <nl> - np . array ( [ 10 , 20 ] , dtype = dtype ) , <nl> - expected = np . array ( [ 10 , 20 ] , dtype = dtype ) ) <nl> - self . _testBinary ( <nl> - math_ops . maximum , <nl> - dtype ( 5 ) , <nl> - np . array ( [ 1 , 20 ] , dtype = dtype ) , <nl> - expected = np . array ( [ 5 , 20 ] , dtype = dtype ) ) <nl> - self . _testBinary ( <nl> - math_ops . maximum , <nl> - np . array ( [ [ 10 ] , [ 2 ] ] , dtype = dtype ) , <nl> - dtype ( 7 ) , <nl> - expected = np . array ( [ [ 10 ] , [ 7 ] ] , dtype = dtype ) ) <nl> + if dtype not in self . complex_types : # min / max not supported for complex <nl> + self . _testBinary ( <nl> + math_ops . maximum , <nl> + np . array ( [ 1 , 2 ] , dtype = dtype ) , <nl> + np . array ( [ 10 , 20 ] , dtype = dtype ) , <nl> + expected = np . array ( [ 10 , 20 ] , dtype = dtype ) ) <nl> + self . _testBinary ( <nl> + math_ops . maximum , <nl> + dtype ( 5 ) , <nl> + np . array ( [ 1 , 20 ] , dtype = dtype ) , <nl> + expected = np . array ( [ 5 , 20 ] , dtype = dtype ) ) <nl> + self . _testBinary ( <nl> + math_ops . maximum , <nl> + np . array ( [ [ 10 ] , [ 2 ] ] , dtype = dtype ) , <nl> + dtype ( 7 ) , <nl> + expected = np . array ( [ [ 10 ] , [ 7 ] ] , dtype = dtype ) ) <nl> <nl> - self . _testBinary ( <nl> - math_ops . minimum , <nl> - np . array ( [ 1 , 20 ] , dtype = dtype ) , <nl> - np . array ( [ 10 , 2 ] , dtype = dtype ) , <nl> - expected = np . array ( [ 1 , 2 ] , dtype = dtype ) ) <nl> - self . _testBinary ( <nl> - math_ops . minimum , <nl> - dtype ( 5 ) , <nl> - np . array ( [ 1 , 20 ] , dtype = dtype ) , <nl> - expected = np . array ( [ 1 , 5 ] , dtype = dtype ) ) <nl> - self . 
_testBinary ( <nl> - math_ops . minimum , <nl> - np . array ( [ [ 10 ] , [ 2 ] ] , dtype = dtype ) , <nl> - dtype ( 7 ) , <nl> - expected = np . array ( [ [ 7 ] , [ 2 ] ] , dtype = dtype ) ) <nl> + self . _testBinary ( <nl> + math_ops . minimum , <nl> + np . array ( [ 1 , 20 ] , dtype = dtype ) , <nl> + np . array ( [ 10 , 2 ] , dtype = dtype ) , <nl> + expected = np . array ( [ 1 , 2 ] , dtype = dtype ) ) <nl> + self . _testBinary ( <nl> + math_ops . minimum , <nl> + dtype ( 5 ) , <nl> + np . array ( [ 1 , 20 ] , dtype = dtype ) , <nl> + expected = np . array ( [ 1 , 5 ] , dtype = dtype ) ) <nl> + self . _testBinary ( <nl> + math_ops . minimum , <nl> + np . array ( [ [ 10 ] , [ 2 ] ] , dtype = dtype ) , <nl> + dtype ( 7 ) , <nl> + expected = np . array ( [ [ 7 ] , [ 2 ] ] , dtype = dtype ) ) <nl> <nl> self . _testBinary ( <nl> math_ops . multiply , <nl> def testNumericOps ( self ) : <nl> dtype ( 7 ) , <nl> expected = np . array ( [ [ 70 ] , [ 14 ] ] , dtype = dtype ) ) <nl> <nl> - self . _testBinary ( <nl> - math_ops . squared_difference , <nl> - np . array ( [ 1 , 2 ] , dtype = dtype ) , <nl> - np . array ( [ 10 , 20 ] , dtype = dtype ) , <nl> - expected = np . array ( [ 81 , 324 ] , dtype = dtype ) ) <nl> - self . _testBinary ( <nl> - math_ops . squared_difference , <nl> - dtype ( 5 ) , <nl> - np . array ( [ 1 , 2 ] , dtype = dtype ) , <nl> - expected = np . array ( [ 16 , 9 ] , dtype = dtype ) ) <nl> - self . _testBinary ( <nl> - math_ops . squared_difference , <nl> - np . array ( [ [ 1 ] , [ 2 ] ] , dtype = dtype ) , <nl> - dtype ( 7 ) , <nl> - expected = np . array ( [ [ 36 ] , [ 25 ] ] , dtype = dtype ) ) <nl> + # Complex support for squared_difference is incidental , see b / 68205550 <nl> + if dtype not in self . complex_types : <nl> + self . _testBinary ( <nl> + math_ops . squared_difference , <nl> + np . array ( [ 1 , 2 ] , dtype = dtype ) , <nl> + np . array ( [ 10 , 20 ] , dtype = dtype ) , <nl> + expected = np . array ( [ 81 , 324 ] , dtype = dtype ) ) <nl> + self . _testBinary ( <nl> + math_ops . squared_difference , <nl> + dtype ( 5 ) , <nl> + np . array ( [ 1 , 2 ] , dtype = dtype ) , <nl> + expected = np . array ( [ 16 , 9 ] , dtype = dtype ) ) <nl> + self . _testBinary ( <nl> + math_ops . squared_difference , <nl> + np . array ( [ [ 1 ] , [ 2 ] ] , dtype = dtype ) , <nl> + dtype ( 7 ) , <nl> + expected = np . array ( [ [ 36 ] , [ 25 ] ] , dtype = dtype ) ) <nl> <nl> self . _testBinary ( <nl> nn_ops . bias_add , <nl> def testNumericOps ( self ) : <nl> np . array ( [ 2 , - 1 ] , dtype = dtype ) , <nl> expected = np . array ( [ [ [ [ 3 , 1 ] , [ 5 , 3 ] ] ] ] , dtype = dtype ) ) <nl> <nl> + def testComplexOps ( self ) : <nl> + for dtype in self . complex_types : <nl> + ctypes = { np . complex64 : np . float32 } <nl> + self . _testBinary ( <nl> + math_ops . complex , <nl> + np . array ( [ [ [ [ - 1 , 2 ] , [ 2 , 0 ] ] ] ] , dtype = ctypes [ dtype ] ) , <nl> + np . array ( [ [ [ [ 2 , - 3 ] , [ 0 , 4 ] ] ] ] , dtype = ctypes [ dtype ] ) , <nl> + expected = np . array ( [ [ [ [ - 1 + 2j , 2 - 3j ] , [ 2 , 4j ] ] ] ] , dtype = dtype ) ) <nl> + <nl> + self . _testBinary ( <nl> + lambda x , y : math_ops . approximate_equal ( x , y , tolerance = 0 . 0001 ) , <nl> + np . array ( <nl> + [ [ [ [ - 1 + 2j , 2 . 00009999 - 3j ] , [ 2 - 3j , 3 + 4 . 01j ] ] ] ] , <nl> + dtype = dtype ) , <nl> + np . array ( <nl> + [ [ [ [ - 1 . 001 + 2j , 2 - 3j ] , [ 2 - 3 . 00009j , 3 + 4j ] ] ] ] , dtype = dtype ) , <nl> + expected = np . 
array ( [ [ [ [ False , True ] , [ True , False ] ] ] ] , dtype = dtype ) ) <nl> + <nl> + self . _testBinary ( <nl> + gen_math_ops . _real_div , <nl> + np . array ( [ 3 , 3j , - 1 . 5j , - 8 , 2 + 3j , 2 + 4j , 44 + 3j ] , dtype = dtype ) , <nl> + np . array ( [ 2 , - 2 , 7j , - 4j , 4 - 6j , 1 + 2j , 0 ] , dtype = dtype ) , <nl> + expected = np . array ( <nl> + [ <nl> + 1 . 5 , - 1 . 5j , - 0 . 2142857 , - 2j , ( 2 + 3j ) / ( 4 - 6j ) , 2 , <nl> + float ( " inf " ) <nl> + ] , <nl> + dtype = dtype ) ) <nl> + <nl> + # TODO ( b / 65408531 ) : support + test pow for cplx <nl> + <nl> + lhs = np . array ( [ 4 + 2j , - 3 - 1j , 2j , 1 ] , dtype = dtype ) <nl> + rhs = np . array ( [ 5 , - 6j , 7 - 3j , - 8j ] , dtype = dtype ) <nl> + self . _testBinary ( <nl> + gen_math_ops . _reciprocal_grad , lhs , rhs , expected = - rhs * lhs * lhs ) <nl> + <nl> + self . _testBinary ( <nl> + gen_math_ops . _sigmoid_grad , lhs , rhs , expected = rhs * lhs * ( 1 - lhs ) ) <nl> + <nl> + # TODO ( b / 65408531 ) : support + test _rsqrt_grad for cplx ( needs pow ) <nl> + <nl> + self . _testBinary ( <nl> + gen_math_ops . _sqrt_grad , lhs , rhs , expected = rhs / ( 2 * lhs ) ) <nl> + <nl> + self . _testBinary ( <nl> + gen_math_ops . _tanh_grad , lhs , rhs , expected = rhs * ( 1 - lhs * lhs ) ) <nl> + <nl> + def testComplexMath ( self ) : <nl> + for dtype in self . complex_types : <nl> + self . _testBinary ( <nl> + math_ops . add , <nl> + np . array ( [ 1 + 3j , 2 + 7j ] , dtype = dtype ) , <nl> + np . array ( [ 10 - 4j , 20 + 17j ] , dtype = dtype ) , <nl> + expected = np . array ( [ 11 - 1j , 22 + 24j ] , dtype = dtype ) ) <nl> + self . _testBinary ( <nl> + math_ops . add , <nl> + dtype ( 5 - 7j ) , <nl> + np . array ( [ 1 + 2j , 2 + 4j ] , dtype = dtype ) , <nl> + expected = np . array ( [ 6 - 5j , 7 - 3j ] , dtype = dtype ) ) <nl> + self . _testBinary ( <nl> + math_ops . add , <nl> + np . array ( [ [ 1 - 2j ] , [ 2 + 1j ] ] , dtype = dtype ) , <nl> + dtype ( 7 + 5j ) , <nl> + expected = np . array ( [ [ 8 + 3j ] , [ 9 + 6j ] ] , dtype = dtype ) ) <nl> + <nl> + self . _testBinary ( <nl> + math_ops . subtract , <nl> + np . array ( [ 1 + 3j , 2 + 7j ] , dtype = dtype ) , <nl> + np . array ( [ 10 - 4j , 20 + 17j ] , dtype = dtype ) , <nl> + expected = np . array ( [ - 9 + 7j , - 18 - 10j ] , dtype = dtype ) ) <nl> + self . _testBinary ( <nl> + math_ops . subtract , <nl> + dtype ( 5 - 7j ) , <nl> + np . array ( [ 1 + 2j , 2 + 4j ] , dtype = dtype ) , <nl> + expected = np . array ( [ 4 - 9j , 3 - 11j ] , dtype = dtype ) ) <nl> + self . _testBinary ( <nl> + math_ops . subtract , <nl> + np . array ( [ [ 1 - 2j ] , [ 2 + 1j ] ] , dtype = dtype ) , <nl> + dtype ( 7 + 5j ) , <nl> + expected = np . array ( [ [ - 6 - 7j ] , [ - 5 - 4j ] ] , dtype = dtype ) ) <nl> + <nl> + self . _testBinary ( <nl> + math_ops . multiply , <nl> + np . array ( [ 1 + 3j , 2 + 7j ] , dtype = dtype ) , <nl> + np . array ( [ 10 - 4j , 20 + 17j ] , dtype = dtype ) , <nl> + expected = np . array ( <nl> + [ ( 1 + 3j ) * ( 10 - 4j ) , ( 2 + 7j ) * ( 20 + 17j ) ] , dtype = dtype ) ) <nl> + self . _testBinary ( <nl> + math_ops . multiply , <nl> + dtype ( 5 - 7j ) , <nl> + np . array ( [ 1 + 2j , 2 + 4j ] , dtype = dtype ) , <nl> + expected = np . array ( <nl> + [ ( 5 - 7j ) * ( 1 + 2j ) , ( 5 - 7j ) * ( 2 + 4j ) ] , dtype = dtype ) ) <nl> + self . _testBinary ( <nl> + math_ops . multiply , <nl> + np . array ( [ [ 1 - 2j ] , [ 2 + 1j ] ] , dtype = dtype ) , <nl> + dtype ( 7 + 5j ) , <nl> + expected = np . 
array ( <nl> + [ [ ( 7 + 5j ) * ( 1 - 2j ) ] , [ ( 7 + 5j ) * ( 2 + 1j ) ] ] , dtype = dtype ) ) <nl> + <nl> + self . _testBinary ( <nl> + math_ops . div , <nl> + np . array ( [ 8 - 1j , 2 + 16j ] , dtype = dtype ) , <nl> + np . array ( [ 2 + 4j , 4 - 8j ] , dtype = dtype ) , <nl> + expected = np . array ( <nl> + [ ( 8 - 1j ) / ( 2 + 4j ) , ( 2 + 16j ) / ( 4 - 8j ) ] , dtype = dtype ) ) <nl> + self . _testBinary ( <nl> + math_ops . div , <nl> + dtype ( 1 + 2j ) , <nl> + np . array ( [ 2 + 4j , 4 - 8j ] , dtype = dtype ) , <nl> + expected = np . array ( <nl> + [ ( 1 + 2j ) / ( 2 + 4j ) , ( 1 + 2j ) / ( 4 - 8j ) ] , dtype = dtype ) ) <nl> + self . _testBinary ( <nl> + math_ops . div , <nl> + np . array ( [ 2 + 4j , 4 - 8j ] , dtype = dtype ) , <nl> + dtype ( 1 + 2j ) , <nl> + expected = np . array ( <nl> + [ ( 2 + 4j ) / ( 1 + 2j ) , ( 4 - 8j ) / ( 1 + 2j ) ] , dtype = dtype ) ) <nl> + <nl> + # TODO ( b / 68205550 ) : math_ops . squared_difference shouldn ' t be supported . <nl> + <nl> + self . _testBinary ( <nl> + nn_ops . bias_add , <nl> + np . array ( [ [ 1 + 2j , 2 + 7j ] , [ 3 - 5j , 4 + 2j ] ] , dtype = dtype ) , <nl> + np . array ( [ 2 + 6j , - 1 - 3j ] , dtype = dtype ) , <nl> + expected = np . array ( [ [ 3 + 8j , 1 + 4j ] , [ 5 + 1j , 3 - 1j ] ] , dtype = dtype ) ) <nl> + self . _testBinary ( <nl> + nn_ops . bias_add , <nl> + np . array ( [ [ [ [ 1 + 4j , 2 - 1j ] , [ 3 + 7j , 4 ] ] ] ] , dtype = dtype ) , <nl> + np . array ( [ 2 + 1j , - 1 + 2j ] , dtype = dtype ) , <nl> + expected = np . array ( <nl> + [ [ [ [ 3 + 5j , 1 + 1j ] , [ 5 + 8j , 3 + 2j ] ] ] ] , dtype = dtype ) ) <nl> + <nl> def _testDivision ( self , dtype ) : <nl> " " " Test cases for division operators . " " " <nl> self . _testBinary ( <nl> def _testDivision ( self , dtype ) : <nl> dtype ( 2 ) , <nl> expected = np . array ( [ [ 5 ] , [ 2 ] ] , dtype = dtype ) ) <nl> <nl> - self . _testBinary ( <nl> - gen_math_ops . _floor_div , <nl> - np . array ( [ 3 , 3 , - 1 , - 9 , - 8 ] , dtype = dtype ) , <nl> - np . array ( [ 2 , - 2 , 7 , 2 , - 4 ] , dtype = dtype ) , <nl> - expected = np . array ( [ 1 , - 2 , - 1 , - 5 , 2 ] , dtype = dtype ) ) <nl> + if dtype not in self . complex_types : # floordiv unsupported for complex . <nl> + self . _testBinary ( <nl> + gen_math_ops . _floor_div , <nl> + np . array ( [ 3 , 3 , - 1 , - 9 , - 8 ] , dtype = dtype ) , <nl> + np . array ( [ 2 , - 2 , 7 , 2 , - 4 ] , dtype = dtype ) , <nl> + expected = np . array ( [ 1 , - 2 , - 1 , - 5 , 2 ] , dtype = dtype ) ) <nl> <nl> def testIntDivision ( self ) : <nl> for dtype in self . int_types : <nl> self . _testDivision ( dtype ) <nl> <nl> def testFloatDivision ( self ) : <nl> - for dtype in self . float_types : <nl> + for dtype in self . float_types + self . complex_types : <nl> self . _testDivision ( dtype ) <nl> <nl> def _testRemainder ( self , dtype ) : <nl> mmm a / tensorflow / compiler / tests / build_defs . bzl <nl> ppp b / tensorflow / compiler / tests / build_defs . 
bzl <nl> def tf_xla_py_test ( name , srcs = [ ] , deps = [ ] , tags = [ ] , data = [ ] , main = None , <nl> backend_deps = [ ] <nl> backend_data = [ ] <nl> if backend = = " cpu " : <nl> - backend_args + = [ " - - test_device = XLA_CPU " , <nl> - " - - types = DT_FLOAT , DT_DOUBLE , DT_INT32 , DT_INT64 , DT_BOOL " ] <nl> + backend_args + = [ <nl> + " - - test_device = XLA_CPU " , <nl> + " - - types = DT_FLOAT , DT_DOUBLE , DT_INT32 , DT_INT64 , DT_BOOL , DT_COMPLEX64 " <nl> + ] <nl> elif backend = = " gpu " : <nl> - backend_args + = [ " - - test_device = XLA_GPU " , <nl> - " - - types = DT_FLOAT , DT_DOUBLE , DT_INT32 , DT_INT64 , DT_BOOL " ] <nl> + backend_args + = [ <nl> + " - - test_device = XLA_GPU " , <nl> + " - - types = DT_FLOAT , DT_DOUBLE , DT_INT32 , DT_INT64 , DT_BOOL , DT_COMPLEX64 " <nl> + ] <nl> backend_tags + = [ " requires - gpu - sm35 " ] <nl> elif backend in plugins : <nl> backend_args + = [ " - - test_device = " + plugins [ backend ] [ " device " ] , <nl> mmm a / tensorflow / compiler / tests / gather_test . py <nl> ppp b / tensorflow / compiler / tests / gather_test . py <nl> <nl> <nl> FLAGS = flags . FLAGS <nl> <nl> - _TEST_TYPES = [ dtypes . float32 ] <nl> - <nl> <nl> class GatherTest ( xla_test . XLATestCase ) : <nl> <nl> def _buildParams ( self , data , dtype ) : <nl> def testScalar1D ( self ) : <nl> with self . test_session ( ) as session , self . test_scope ( ) : <nl> data = np . array ( [ 0 , 1 , 2 , 3 , 7 , 5 ] ) <nl> - for dtype in _TEST_TYPES : <nl> + for dtype in self . all_tf_types : <nl> for indices in 4 , [ 1 , 2 , 2 , 4 , 5 ] : <nl> params_np = self . _buildParams ( data , dtype ) <nl> params = array_ops . placeholder ( dtype = dtype ) <nl> def testScalar2D ( self ) : <nl> with self . test_session ( ) as session , self . test_scope ( ) : <nl> data = np . array ( [ [ 0 , 1 , 2 ] , [ 3 , 4 , 5 ] , [ 6 , 7 , 8 ] , [ 9 , 10 , 11 ] , <nl> [ 12 , 13 , 14 ] ] ) <nl> - for dtype in _TEST_TYPES : <nl> + for dtype in self . all_tf_types : <nl> for axis in 0 , 1 , - 1 : <nl> params_np = self . _buildParams ( data , dtype ) <nl> params = array_ops . placeholder ( dtype = dtype ) <nl> def testSimpleTwoD32 ( self ) : <nl> with self . test_session ( ) as session , self . test_scope ( ) : <nl> data = np . array ( [ [ 0 , 1 , 2 ] , [ 3 , 4 , 5 ] , [ 6 , 7 , 8 ] , [ 9 , 10 , 11 ] , <nl> [ 12 , 13 , 14 ] ] ) <nl> - for dtype in _TEST_TYPES : <nl> + for dtype in self . all_tf_types : <nl> for axis in 0 , 1 , - 1 : <nl> params_np = self . _buildParams ( data , dtype ) <nl> params = array_ops . placeholder ( dtype = dtype ) <nl> def testSimpleTwoD32_Int64Indices ( self ) : <nl> [ 12 , 13 , 14 ] ] ) <nl> # The indices must be in bounds for any axis . <nl> indices_np = np . array ( [ 0 , 1 , 0 , 2 ] ) <nl> - for dtype in _TEST_TYPES : <nl> + for dtype in self . all_tf_types : <nl> for axis in 0 , 1 , - 1 : <nl> params_np = self . _buildParams ( data , dtype ) <nl> params = array_ops . placeholder ( dtype = dtype ) <nl> def testHigherRank ( self ) : <nl> " " " Check that scalar and empty indices shapes work as well . " " " <nl> shape = ( 2 , 1 , 3 , 2 ) <nl> for indices_shape in ( ) , ( 0 , ) , ( 2 , 0 ) , ( 2 , 3 ) : <nl> - for dtype in _TEST_TYPES : <nl> + for dtype in self . all_tf_types : <nl> for axis in 0 , 1 , 2 , 3 , - 1 , - 2 : <nl> params = self . _buildParams ( np . random . randn ( * shape ) , dtype ) <nl> indices = np . random . randint ( shape [ axis ] , size = indices_shape ) <nl> mmm a / tensorflow / compiler / tests / nary_ops_test . 
py <nl> ppp b / tensorflow / compiler / tests / nary_ops_test . py <nl> def testFloat ( self ) : <nl> np . array ( [ 42 ] , dtype = np . float32 ) ] , <nl> expected = np . array ( [ 48 ] , dtype = np . float32 ) ) <nl> <nl> + def testComplex ( self ) : <nl> + for dtype in self . complex_types : <nl> + self . _testNAry ( <nl> + math_ops . add_n , [ np . array ( [ [ 1 + 2j , 2 - 3j , 3 + 4j ] ] , dtype = dtype ) ] , <nl> + expected = np . array ( [ [ 1 + 2j , 2 - 3j , 3 + 4j ] ] , dtype = dtype ) ) <nl> + <nl> + self . _testNAry ( <nl> + math_ops . add_n , [ <nl> + np . array ( [ 1 + 2j , 2 - 3j ] , dtype = dtype ) , <nl> + np . array ( [ 10j , 20 ] , dtype = dtype ) <nl> + ] , <nl> + expected = np . array ( [ 1 + 12j , 22 - 3j ] , dtype = dtype ) ) <nl> + self . _testNAry ( <nl> + math_ops . add_n , [ <nl> + np . array ( [ - 4 , 5j ] , dtype = dtype ) , <nl> + np . array ( [ 2 + 10j , - 2 ] , dtype = dtype ) , <nl> + np . array ( [ 42j , 3 + 3j ] , dtype = dtype ) <nl> + ] , <nl> + expected = np . array ( [ - 2 + 52j , 1 + 8j ] , dtype = dtype ) ) <nl> + <nl> @ unittest . skip ( " IdentityN is temporarily CompilationOnly as workaround " ) <nl> def testIdentityN ( self ) : <nl> self . _testNAryLists ( array_ops . identity_n , <nl> mmm a / tensorflow / compiler / tests / random_ops_test . py <nl> ppp b / tensorflow / compiler / tests / random_ops_test . py <nl> <nl> class RandomOpsTest ( XLATestCase ) : <nl> " " " Test cases for random - number generating operators . " " " <nl> <nl> + def _random_types ( self ) : <nl> + return set ( self . numeric_types ) - set ( self . complex_types ) <nl> + <nl> def _testRngIsNotConstant ( self , rng , dtype ) : <nl> # Tests that ' rng ' does not always return the same value . <nl> with self . test_session ( ) as sess : <nl> def testRandomUniformIsNotConstant ( self ) : <nl> def rng ( dtype ) : <nl> return random_ops . random_uniform ( shape = [ 2 ] , dtype = dtype , <nl> maxval = 1000000 ) <nl> - for dtype in self . numeric_types : <nl> + <nl> + for dtype in self . _random_types ( ) : <nl> self . _testRngIsNotConstant ( rng , dtype ) <nl> <nl> def testRandomNormalIsNotConstant ( self ) : <nl> def rng ( dtype ) : <nl> self . _testRngIsNotConstant ( rng , dtype ) <nl> <nl> def testRandomUniformIsInRange ( self ) : <nl> - for dtype in self . numeric_types : <nl> + for dtype in self . _random_types ( ) : <nl> with self . test_session ( ) as sess : <nl> with self . test_scope ( ) : <nl> x = random_ops . random_uniform ( shape = [ 1000 ] , dtype = dtype , minval = - 2 , <nl> mmm a / tensorflow / compiler / tests / randomized_tests . cc <nl> ppp b / tensorflow / compiler / tests / randomized_tests . cc <nl> namespace { <nl> / / Command line flags : see main ( ) below . 
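<nl> / / [ Editorial note , not part of the original file ] : with the defaults <nl> / / below , each op test runs 20 repetitions , and this change lowers the <nl> / / random - tensor size cap from 100000 to 10000 elements - presumably to <nl> / / keep memory in check now that 8 - byte DT_COMPLEX64 inputs are generated .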
<nl> int64 tf_xla_random_seed = 0 ; <nl> int32 tf_xla_test_repetitions = 20 ; <nl> - int64 tf_xla_max_tensor_size = 100000LL ; <nl> + int64 tf_xla_max_tensor_size = 10000LL ; <nl> string * tf_xla_test_device_ptr ; / / initial value set in main ( ) <nl> bool tf_xla_test_use_jit = true ; <nl> <nl> string LocalDeviceToFullDeviceName ( const string & device ) { <nl> return strings : : StrCat ( " / job : localhost / replica : 0 / task : 0 / device : " , device ) ; <nl> } <nl> <nl> - constexpr std : : array < DataType , 3 > kAllXlaTypes = { <nl> - { DT_INT32 , DT_FLOAT , DT_BOOL } } ; <nl> + constexpr std : : array < DataType , 4 > kAllXlaTypes = { <nl> + { DT_INT32 , DT_FLOAT , DT_BOOL , DT_COMPLEX64 } } ; <nl> <nl> / / An OpTestBuilder is a graph builder class that takes as input an operator to <nl> / / test , its inputs and attributes , and builds a graph that executes the <nl> OpTest : : OpTest ( ) { <nl> void OpTest : : Repeatedly ( const std : : function < TestResult ( void ) > & fn ) { <nl> int const max_repetitions = tf_xla_test_repetitions ; <nl> int valid_test_runs = 0 ; <nl> - / / We run up to 20 * max_repetitions times ; the idea is that if we roll the <nl> + / / We run up to 100 * max_repetitions times ; the idea is that if we roll the <nl> / / dice enough times we will find some valid parameters . We want to put an <nl> / / upper limit on the number iterations just in case the probability of <nl> / / finding feasible parameters is very low . <nl> - for ( int i = 0 ; ! HasFailure ( ) & & i < max_repetitions * 20 & & <nl> + for ( int i = 0 ; ! HasFailure ( ) & & i < max_repetitions * 100 & & <nl> valid_test_runs < max_repetitions ; <nl> + + i ) { <nl> TestResult result = fn ( ) ; <nl> Tensor OpTest : : RandomTensor ( DataType dtype , gtl : : ArraySlice < int64 > shape ) { <nl> } ) ; <nl> break ; <nl> } <nl> + case DT_COMPLEX64 : { <nl> + std : : uniform_real_distribution < float > distribution ( - 1 . 0f , 1 . 0f ) ; <nl> + test : : FillFn < complex64 > ( & tensor , [ this , & distribution ] ( int i ) { <nl> + return complex64 ( distribution ( generator ( ) ) , distribution ( generator ( ) ) ) ; <nl> + } ) ; <nl> + break ; <nl> + } <nl> case DT_INT32 : { <nl> std : : uniform_int_distribution < int32 > distribution ( - ( 1 < < 20 ) , 1 < < 20 ) ; <nl> test : : FillFn < int32 > ( & tensor , [ this , & distribution ] ( int i ) - > int32 { <nl> std : : vector < int32 > OpTest : : AsInt32s ( const std : : vector < int64 > & int64s ) { <nl> <nl> / / Functions for comparing tensors . <nl> <nl> + template < typename T > <nl> + double Abs ( T x ) { <nl> + return std : : fabs ( x ) ; <nl> + } <nl> + <nl> + template < > <nl> + double Abs < complex64 > ( complex64 x ) { <nl> + return std : : abs ( x ) ; <nl> + } <nl> + <nl> template < typename T > <nl> bool IsClose ( const T & x , const T & y , double atol , double rtol ) { <nl> if ( std : : isnan ( x ) & & std : : isnan ( y ) ) return true ; <nl> if ( x = = y ) return true ; / / Allow inf = = inf . <nl> - return fabs ( x - y ) < atol + rtol * fabs ( x ) ; <nl> + return Abs ( x - y ) < atol + rtol * Abs ( x ) ; <nl> + } <nl> + <nl> + template < > <nl> + bool IsClose < complex64 > ( const complex64 & x , const complex64 & y , double atol , <nl> + double rtol ) { <nl> + if ( std : : isnan ( x . real ( ) ) & & std : : isnan ( y . real ( ) ) ) { <nl> + if ( std : : isnan ( x . imag ( ) ) & & std : : isnan ( y . imag ( ) ) ) { <nl> + return true ; <nl> + } <nl> + if ( x . imag ( ) = = y . imag ( ) ) return true ; / / Allow inf = = inf . 
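<nl> + / / [ Editorial note : these comment lines are not part of the original <nl> + / / commit ] both real parts are NaN in this branch , so closeness is decided <nl> + / / by the imaginary parts alone ; e . g . with atol = 0 . 01 and rtol = 0 , <nl> + / / IsClose ( complex64 ( NAN , 1 . 0f ) , complex64 ( NAN , 1 . 001f ) , 0 . 01 , 0 ) is true .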
<nl> + return Abs ( x . imag ( ) - y . imag ( ) ) < atol + rtol * Abs ( x . imag ( ) ) ; <nl> + } else if ( std : : isnan ( x . imag ( ) ) & & std : : isnan ( y . imag ( ) ) ) { <nl> + if ( x . real ( ) = = y . real ( ) ) return true ; / / Allow inf = = inf . <nl> + return Abs ( x . real ( ) - y . real ( ) ) < atol + rtol * Abs ( x . real ( ) ) ; <nl> + } <nl> + if ( x = = y ) return true ; / / Allow inf = = inf . <nl> + return Abs ( x - y ) < atol + rtol * Abs ( x ) ; <nl> + } <nl> + <nl> + template < typename T > <nl> + string Str ( T x ) { <nl> + return strings : : StrCat ( x ) ; <nl> + } <nl> + template < > <nl> + string Str < complex64 > ( complex64 x ) { <nl> + return strings : : StrCat ( " ( " , x . real ( ) , " , " , x . imag ( ) , " ) " ) ; <nl> } <nl> <nl> template < typename T > <nl> Status TensorsAreCloseImpl ( const Tensor & x , const Tensor & y , double atol , <nl> for ( int i = 0 ; i < Tx . size ( ) ; + + i ) { <nl> if ( ! IsClose ( Tx ( i ) , Ty ( i ) , atol , rtol ) ) { <nl> return errors : : InvalidArgument ( strings : : StrCat ( <nl> - i , " - th tensor element isn ' t close : " , Tx ( i ) , " vs . " , Ty ( i ) , <nl> - " . x = " , x . DebugString ( ) , " y = " , y . DebugString ( ) , " atol = " , atol , <nl> - " rtol = " , rtol , " tol = " , atol + rtol * std : : fabs ( Tx ( i ) ) ) ) ; <nl> + i , " - th tensor element isn ' t close : " , Str ( Tx ( i ) ) , " vs . " , <nl> + Str ( Ty ( i ) ) , " . x = " , x . DebugString ( ) , " y = " , y . DebugString ( ) , <nl> + " atol = " , atol , " rtol = " , rtol , <nl> + " tol = " , atol + rtol * Abs ( Tx ( i ) ) ) ) ; <nl> } <nl> } <nl> return Status : : OK ( ) ; <nl> Status TensorsAreClose ( const Tensor & a , const Tensor & b , double atol , <nl> return TensorsAreCloseImpl < float > ( a , b , atol , rtol ) ; <nl> case DT_DOUBLE : <nl> return TensorsAreCloseImpl < double > ( a , b , atol , rtol ) ; <nl> + case DT_COMPLEX64 : <nl> + return TensorsAreCloseImpl < complex64 > ( a , b , atol , rtol ) ; <nl> case DT_INT32 : <nl> return TensorsAreEqualImpl < int32 > ( a , b ) ; <nl> case DT_INT64 : <nl> Tensor AsIntTensor ( DataType dtype , const std : : vector < int64 > & values ) { <nl> <nl> TEST_F ( OpTest , Abs ) { <nl> Repeatedly ( [ this ] ( ) { <nl> - DataType type = Choose < DataType > ( { DT_INT32 , DT_FLOAT } ) ; <nl> + auto type = Choose < DataType > ( { DT_INT32 , DT_FLOAT , DT_COMPLEX64 } ) ; <nl> return ExpectTfAndXlaOutputsAreClose ( <nl> OpTestBuilder ( " Abs " ) . RandomInput ( type ) . Attr ( " T " , type ) ) ; <nl> } ) ; <nl> TEST_F ( OpTest , Acosh ) { <nl> <nl> TEST_F ( OpTest , Add ) { <nl> Repeatedly ( [ this ] ( ) { <nl> - DataType type = Choose < DataType > ( { DT_INT32 , DT_FLOAT } ) ; <nl> + auto type = Choose < DataType > ( { DT_INT32 , DT_FLOAT , DT_COMPLEX64 } ) ; <nl> auto dims = BroadcastableDims ( ) ; <nl> return ExpectTfAndXlaOutputsAreClose ( OpTestBuilder ( " Add " ) <nl> . RandomInput ( type , dims . first ) <nl> TEST_F ( OpTest , Add ) { <nl> <nl> TEST_F ( OpTest , AddN ) { <nl> Repeatedly ( [ this ] ( ) { <nl> - DataType type = Choose < DataType > ( { DT_INT32 , DT_FLOAT } ) ; <nl> + auto type = Choose < DataType > ( { DT_INT32 , DT_FLOAT , DT_COMPLEX64 } ) ; <nl> int n = std : : uniform_int_distribution < int > ( 1 , 5 ) ( generator ( ) ) ; <nl> <nl> auto shape = RandomDims ( ) ; <nl> TEST_F ( OpTest , All ) { <nl> } ) ; <nl> } <nl> <nl> + TEST_F ( OpTest , Angle ) { <nl> + Repeatedly ( [ this ] ( ) { <nl> + return ExpectTfAndXlaOutputsAreClose ( OpTestBuilder ( " Angle " ) <nl> + . 
RandomInput ( DT_COMPLEX64 ) <nl> + . Attr ( " T " , DT_COMPLEX64 ) ) ; <nl> + } ) ; <nl> + } <nl> + <nl> TEST_F ( OpTest , Any ) { <nl> Repeatedly ( [ this ] ( ) { <nl> std : : vector < int64 > data_dims = RandomDims ( ) ; <nl> TEST_F ( OpTest , Any ) { <nl> <nl> TEST_F ( OpTest , ApproximateEqual ) { <nl> Repeatedly ( [ this ] ( ) { <nl> - auto dims = RandomDims ( ) ; <nl> + auto dims = BroadcastableDims ( ) ; <nl> + auto type = Choose < DataType > ( { DT_FLOAT , DT_COMPLEX64 } ) ; <nl> return ExpectTfAndXlaOutputsAreClose ( OpTestBuilder ( " ApproximateEqual " ) <nl> - . RandomInput ( DT_FLOAT , dims ) <nl> - . RandomInput ( DT_FLOAT , dims ) <nl> - . Attr ( " T " , DT_FLOAT ) ) ; <nl> + . RandomInput ( type , dims . first ) <nl> + . RandomInput ( type , dims . second ) <nl> + . Attr ( " T " , type ) ) ; <nl> } ) ; <nl> } <nl> TEST_F ( OpTest , Atanh ) { <nl> } ) ; <nl> } <nl> <nl> + TEST_F ( OpTest , Atan2 ) { <nl> + Repeatedly ( [ this ] ( ) { <nl> + auto dims = BroadcastableDims ( ) ; <nl> + return ExpectTfAndXlaOutputsAreClose ( OpTestBuilder ( " Atan2 " ) <nl> + . RandomInput ( DT_FLOAT , dims . first ) <nl> + . RandomInput ( DT_FLOAT , dims . second ) <nl> + . Attr ( " T " , DT_FLOAT ) ) ; <nl> + } ) ; <nl> + } <nl> + <nl> TEST_F ( OpTest , AvgPool ) { <nl> Repeatedly ( [ this ] ( ) { <nl> std : : uniform_int_distribution < int > random_int ( 1 , 5 ) ; <nl> TEST_F ( OpTest , AvgPool3DGrad ) { <nl> <nl> TEST_F ( OpTest , BatchMatMul ) { <nl> Repeatedly ( [ this ] ( ) { <nl> + auto type = Choose < DataType > ( { DT_FLOAT , DT_COMPLEX64 } ) ; <nl> std : : vector < int64 > output_dims = RandomDims ( 2 , 5 , 0 , 7 ) ; <nl> int64 ndims = output_dims . size ( ) ; <nl> int64 inner_dim = RandomDim ( ) ; <nl> TEST_F ( OpTest , BatchMatMul ) { <nl> } <nl> <nl> return ExpectTfAndXlaOutputsAreClose ( OpTestBuilder ( " BatchMatMul " ) <nl> - . RandomInput ( DT_FLOAT , x_dims ) <nl> - . RandomInput ( DT_FLOAT , y_dims ) <nl> - . Attr ( " T " , DT_FLOAT ) <nl> + . RandomInput ( type , x_dims ) <nl> + . RandomInput ( type , y_dims ) <nl> + . Attr ( " T " , type ) <nl> . Attr ( " adj_x " , adj_x ) <nl> . Attr ( " adj_y " , adj_y ) ) ; <nl> } ) ; <nl> TEST_F ( OpTest , BatchToSpace ) { <nl> CHECK ( crops . CopyFrom ( AsIntTensor ( DT_INT32 , crop_vals ) , <nl> TensorShape ( { num_block_dims , 2 } ) ) ) ; <nl> <nl> + auto type = Choose < DataType > ( { DT_INT32 , DT_FLOAT , DT_COMPLEX64 } ) ; <nl> return ExpectTfAndXlaOutputsAreClose ( OpTestBuilder ( " BatchToSpace " ) <nl> - . RandomInput ( DT_FLOAT , input_dims ) <nl> + . RandomInput ( type , input_dims ) <nl> . Input ( crops ) <nl> - . Attr ( " T " , DT_FLOAT ) <nl> + . Attr ( " T " , type ) <nl> . Attr ( " block_size " , block_size ) ) ; <nl> } ) ; <nl> } <nl> TEST_F ( OpTest , BatchToSpaceND ) { <nl> CHECK ( crops . CopyFrom ( AsIntTensor ( DT_INT32 , crop_vals ) , <nl> TensorShape ( { num_block_dims , 2 } ) ) ) ; <nl> <nl> + auto type = Choose < DataType > ( { DT_INT32 , DT_FLOAT , DT_COMPLEX64 } ) ; <nl> return ExpectTfAndXlaOutputsAreClose ( <nl> OpTestBuilder ( " BatchToSpaceND " ) <nl> - . RandomInput ( DT_FLOAT , input_dims ) <nl> + . RandomInput ( type , input_dims ) <nl> . Input ( test : : AsTensor < int32 > ( <nl> std : : vector < int32 > ( block_dims . begin ( ) , block_dims . end ( ) ) ) ) <nl> . Input ( crops ) <nl> - . Attr ( " T " , DT_FLOAT ) ) ; <nl> + . Attr ( " T " , type ) ) ; <nl> } ) ; <nl> } <nl> <nl> TEST_F ( OpTest , BiasAdd ) { <nl> auto x_dims = RandomDims ( 2 , kDefaultMaxRank ) ; <nl> auto y_dims = { x_dims [ x_dims .
size ( ) - 1 ] } ; <nl> / / TODO ( phawkins ) : test both data formats . <nl> + auto type = Choose < DataType > ( { DT_INT32 , DT_FLOAT , DT_COMPLEX64 } ) ; <nl> return ExpectTfAndXlaOutputsAreClose ( OpTestBuilder ( " BiasAdd " ) <nl> - . RandomInput ( DT_FLOAT , x_dims ) <nl> - . RandomInput ( DT_FLOAT , y_dims ) <nl> - . Attr ( " T " , DT_FLOAT ) ) ; <nl> + . RandomInput ( type , x_dims ) <nl> + . RandomInput ( type , y_dims ) <nl> + . Attr ( " T " , type ) ) ; <nl> } ) ; <nl> } <nl> <nl> TEST_F ( OpTest , BiasAddGrad ) { <nl> Repeatedly ( [ this ] ( ) { <nl> / / TODO ( phawkins ) : test both data formats . <nl> + auto type = Choose < DataType > ( { DT_INT32 , DT_FLOAT , DT_COMPLEX64 } ) ; <nl> return ExpectTfAndXlaOutputsAreClose ( <nl> - OpTestBuilder ( " BiasAddGrad " ) . RandomInput ( DT_FLOAT ) . Attr ( " T " , DT_FLOAT ) ) ; <nl> + OpTestBuilder ( " BiasAddGrad " ) . RandomInput ( type ) . Attr ( " T " , type ) ) ; <nl> } ) ; <nl> } <nl> <nl> TEST_F ( OpTest , BiasAddV1 ) { <nl> Repeatedly ( [ this ] ( ) { <nl> auto x_dims = RandomDims ( 2 , kDefaultMaxRank ) ; <nl> auto y_dims = { x_dims [ x_dims . size ( ) - 1 ] } ; <nl> + auto type = Choose < DataType > ( { DT_INT32 , DT_FLOAT , DT_COMPLEX64 } ) ; <nl> return ExpectTfAndXlaOutputsAreClose ( OpTestBuilder ( " BiasAddV1 " ) <nl> - . RandomInput ( DT_FLOAT , x_dims ) <nl> - . RandomInput ( DT_FLOAT , y_dims ) <nl> - . Attr ( " T " , DT_FLOAT ) ) ; <nl> + . RandomInput ( type , x_dims ) <nl> + . RandomInput ( type , y_dims ) <nl> + . Attr ( " T " , type ) ) ; <nl> } ) ; <nl> } <nl> <nl> TEST_F ( OpTest , BitwiseOr ) { <nl> TEST_F ( OpTest , BroadcastArgs ) { <nl> Repeatedly ( [ this ] ( ) { <nl> / / TODO ( phawkins ) : only int32 seems to be implemented in Tensorflow . <nl> - / / DataType type = Choose < DataType > ( { DT_INT32 , DT_INT64 } ) ; <nl> + / / auto type = Choose < DataType > ( { DT_INT32 , DT_INT64 } ) ; <nl> DataType type = DT_INT32 ; <nl> auto dims = BroadcastableDims ( ) ; <nl> return ExpectTfAndXlaOutputsAreClose ( <nl> TEST_F ( OpTest , BroadcastArgs ) { <nl> TEST_F ( OpTest , BroadcastGradientArgs ) { <nl> Repeatedly ( [ this ] ( ) { <nl> / / TODO ( phawkins ) : only int32 seems to be implemented in Tensorflow . <nl> - / / DataType type = Choose < DataType > ( { DT_INT32 , DT_INT64 } ) ; <nl> + / / auto type = Choose < DataType > ( { DT_INT32 , DT_INT64 } ) ; <nl> DataType type = DT_INT32 ; <nl> auto dims = BroadcastableDims ( ) ; <nl> return ExpectTfAndXlaOutputsAreClose ( <nl> TEST_F ( OpTest , BroadcastGradientArgs ) { <nl> TEST_F ( OpTest , Cast ) { <nl> Repeatedly ( [ this ] ( ) { <nl> DataType src_type , dst_type ; <nl> - src_type = Choose < DataType > ( { DT_INT32 , DT_FLOAT , DT_BOOL } ) ; <nl> - dst_type = Choose < DataType > ( { DT_INT32 , DT_FLOAT , DT_BOOL } ) ; <nl> + src_type = Choose < DataType > ( { DT_INT32 , DT_FLOAT , DT_BOOL , DT_COMPLEX64 } ) ; <nl> + dst_type = Choose < DataType > ( { DT_INT32 , DT_FLOAT , DT_BOOL , DT_COMPLEX64 } ) ; <nl> return ExpectTfAndXlaOutputsAreClose ( OpTestBuilder ( " Cast " ) <nl> . RandomInput ( src_type ) <nl> . Attr ( " SrcT " , src_type ) <nl> TEST_F ( OpTest , Ceil ) { <nl> } ) ; <nl> } <nl> <nl> + TEST_F ( OpTest , Complex ) { <nl> + Repeatedly ( [ this ] ( ) { <nl> + auto dims = BroadcastableDims ( ) ; <nl> + return ExpectTfAndXlaOutputsAreClose ( OpTestBuilder ( " Complex " ) <nl> + . RandomInput ( DT_FLOAT , dims . first ) <nl> + . RandomInput ( DT_FLOAT , dims . second ) <nl> + . 
Attr ( " T " , DT_FLOAT ) ) ; <nl> + } ) ; <nl> + } <nl> + <nl> TEST_F ( OpTest , Concat ) { <nl> Repeatedly ( [ this ] ( ) { <nl> - DataType type = Choose < DataType > ( kAllXlaTypes ) ; <nl> + auto type = Choose < DataType > ( kAllXlaTypes ) ; <nl> int n = std : : uniform_int_distribution < int > ( 2 , 5 ) ( generator ( ) ) ; <nl> <nl> std : : vector < int64 > dims = RandomDims ( 1 ) ; <nl> TEST_F ( OpTest , ConcatOffset ) { <nl> } ) ; <nl> } <nl> <nl> + TEST_F ( OpTest , Conj ) { <nl> + Repeatedly ( [ this ] ( ) { <nl> + return ExpectTfAndXlaOutputsAreClose ( OpTestBuilder ( " Conj " ) <nl> + . RandomInput ( DT_COMPLEX64 ) <nl> + . Attr ( " T " , DT_COMPLEX64 ) ) ; <nl> + } ) ; <nl> + } <nl> + <nl> TEST_F ( OpTest , Conv2D ) { <nl> Repeatedly ( [ this ] ( ) { <nl> WindowedSpatialDims d = ChooseWindowedSpatialDims ( 2 ) ; <nl> TEST_F ( OpTest , Conv2D ) { <nl> <nl> std : : vector < int64 > kernel_dims = { d . kernel_dims [ 0 ] , d . kernel_dims [ 1 ] , <nl> features_in , features_out } ; <nl> + DataType type = DT_FLOAT ; / / TODO ( b / 65408531 ) : COMPLEX_64 support <nl> return ExpectTfAndXlaOutputsAreClose ( <nl> OpTestBuilder ( " Conv2D " ) <nl> - . RandomInput ( DT_FLOAT , data_dims ) <nl> - . RandomInput ( DT_FLOAT , kernel_dims ) <nl> - . Attr ( " T " , DT_FLOAT ) <nl> + . RandomInput ( type , data_dims ) <nl> + . RandomInput ( type , kernel_dims ) <nl> + . Attr ( " T " , type ) <nl> . Attr ( " strides " , ImageDims ( FORMAT_NHWC , 1 , 1 , d . stride_dims ) ) <nl> . Attr ( " padding " , d . padding = = SAME ? " SAME " : " VALID " ) <nl> . Attr ( " data_format " , " NHWC " ) ) ; <nl> TEST_F ( OpTest , Conv2DBackpropFilter ) { <nl> ImageDims ( FORMAT_NHWC , batch , features_out , d . output_dims ) ; <nl> Tensor kernel_shape = test : : AsTensor < int32 > ( AsInt32s ( <nl> { d . kernel_dims [ 0 ] , d . kernel_dims [ 1 ] , features_in , features_out } ) ) ; <nl> + DataType type = DT_FLOAT ; / / TODO ( b / 65408531 ) : COMPLEX_64 support <nl> return ExpectTfAndXlaOutputsAreClose ( <nl> OpTestBuilder ( " Conv2DBackpropFilter " ) <nl> - . RandomInput ( DT_FLOAT , activations ) <nl> + . RandomInput ( type , activations ) <nl> . Input ( kernel_shape ) <nl> - . RandomInput ( DT_FLOAT , backprop ) <nl> - . Attr ( " T " , DT_FLOAT ) <nl> + . RandomInput ( type , backprop ) <nl> + . Attr ( " T " , type ) <nl> . Attr ( " strides " , ImageDims ( FORMAT_NHWC , 1 , 1 , d . stride_dims ) ) <nl> . Attr ( " padding " , d . padding = = SAME ? " SAME " : " VALID " ) <nl> . Attr ( " data_format " , " NHWC " ) ) ; <nl> TEST_F ( OpTest , Conv2DBackpropInput ) { <nl> ImageDims ( FORMAT_NHWC , batch , features_out , d . output_dims ) ; <nl> std : : vector < int64 > kernel = { d . kernel_dims [ 0 ] , d . kernel_dims [ 1 ] , <nl> features_in , features_out } ; <nl> + DataType type = DT_FLOAT ; / / TODO ( b / 65408531 ) : COMPLEX_64 support <nl> return ExpectTfAndXlaOutputsAreClose ( <nl> OpTestBuilder ( " Conv2DBackpropInput " ) <nl> . Input ( in_shape ) <nl> - . RandomInput ( DT_FLOAT , kernel ) <nl> - . RandomInput ( DT_FLOAT , backprop ) <nl> - . Attr ( " T " , DT_FLOAT ) <nl> + . RandomInput ( type , kernel ) <nl> + . RandomInput ( type , backprop ) <nl> + . Attr ( " T " , type ) <nl> . Attr ( " strides " , ImageDims ( FORMAT_NHWC , 1 , 1 , d . stride_dims ) ) <nl> . Attr ( " padding " , d . padding = = SAME ? " SAME " : " VALID " ) <nl> . Attr ( " data_format " , " NHWC " ) ) ; <nl> TEST_F ( OpTest , Conv3D ) { <nl> <nl> std : : vector < int64 > kernel = { d . kernel_dims [ 0 ] , d . 
kernel_dims [ 1 ] , <nl> d . kernel_dims [ 2 ] , features_in , features_out } ; <nl> + DataType type = DT_FLOAT ; / / TODO ( b / 65408531 ) : COMPLEX_64 support <nl> return ExpectTfAndXlaOutputsAreClose ( <nl> OpTestBuilder ( " Conv3D " ) <nl> - . RandomInput ( DT_FLOAT , data ) <nl> - . RandomInput ( DT_FLOAT , kernel ) <nl> - . Attr ( " T " , DT_FLOAT ) <nl> + . RandomInput ( type , data ) <nl> + . RandomInput ( type , kernel ) <nl> + . Attr ( " T " , type ) <nl> . Attr ( " strides " , ImageDims ( FORMAT_NHWC , 1 , 1 , d . stride_dims ) ) <nl> . Attr ( " padding " , d . padding = = SAME ? " SAME " : " VALID " ) ) ; <nl> } ) ; <nl> TEST_F ( OpTest , Conv3DBackpropFilter ) { <nl> Tensor kernel_shape = test : : AsTensor < int32 > ( <nl> AsInt32s ( { d . kernel_dims [ 0 ] , d . kernel_dims [ 1 ] , d . kernel_dims [ 2 ] , <nl> features_in , features_out } ) ) ; <nl> + DataType type = DT_FLOAT ; / / TODO ( b / 65408531 ) : COMPLEX_64 support <nl> return ExpectTfAndXlaOutputsAreClose ( <nl> OpTestBuilder ( " Conv3DBackpropFilterV2 " ) <nl> - . RandomInput ( DT_FLOAT , activations ) <nl> + . RandomInput ( type , activations ) <nl> . Input ( kernel_shape ) <nl> - . RandomInput ( DT_FLOAT , backprop ) <nl> - . Attr ( " T " , DT_FLOAT ) <nl> + . RandomInput ( type , backprop ) <nl> + . Attr ( " T " , type ) <nl> . Attr ( " strides " , ImageDims ( FORMAT_NHWC , 1 , 1 , d . stride_dims ) ) <nl> . Attr ( " padding " , d . padding = = SAME ? " SAME " : " VALID " ) ) ; <nl> } ) ; <nl> TEST_F ( OpTest , Conv3DBackpropInput ) { <nl> ImageDims ( FORMAT_NHWC , batch , features_out , d . output_dims ) ; <nl> std : : vector < int64 > kernel = { d . kernel_dims [ 0 ] , d . kernel_dims [ 1 ] , <nl> d . kernel_dims [ 2 ] , features_in , features_out } ; <nl> + auto type = Choose < DataType > ( { DT_FLOAT , DT_COMPLEX64 } ) ; <nl> return ExpectTfAndXlaOutputsAreClose ( <nl> OpTestBuilder ( " Conv3DBackpropInputV2 " ) <nl> . Input ( in_shape ) <nl> - . RandomInput ( DT_FLOAT , kernel ) <nl> - . RandomInput ( DT_FLOAT , backprop ) <nl> - . Attr ( " T " , DT_FLOAT ) <nl> + . RandomInput ( type , kernel ) <nl> + . RandomInput ( type , backprop ) <nl> + . Attr ( " T " , type ) <nl> . Attr ( " strides " , ImageDims ( FORMAT_NHWC , 1 , 1 , d . stride_dims ) ) <nl> . Attr ( " padding " , d . padding = = SAME ? " SAME " : " VALID " ) ) ; <nl> } ) ; <nl> } <nl> <nl> + TEST_F ( OpTest , Cos ) { <nl> + Repeatedly ( [ this ] ( ) { <nl> + auto type = Choose < DataType > ( { DT_FLOAT , DT_COMPLEX64 } ) ; <nl> + return ExpectTfAndXlaOutputsAreClose ( <nl> + OpTestBuilder ( " Cos " ) . RandomInput ( type ) . Attr ( " T " , type ) ) ; <nl> + } ) ; <nl> + } <nl> + <nl> + TEST_F ( OpTest , Cosh ) { <nl> + Repeatedly ( [ this ] ( ) { <nl> + auto type = Choose < DataType > ( { DT_FLOAT , DT_COMPLEX64 } ) ; <nl> + return ExpectTfAndXlaOutputsAreClose ( <nl> + OpTestBuilder ( " Cosh " ) . RandomInput ( type ) . Attr ( " T " , type ) ) ; <nl> + } ) ; <nl> + } <nl> + <nl> TEST_F ( OpTest , DepthToSpace ) { <nl> Repeatedly ( [ this ] ( ) { <nl> int64 block = RandomDim ( 2 , 5 ) ; <nl> TEST_F ( OpTest , DepthToSpace ) { <nl> input_dims [ 1 ] = ( input_dims [ 1 ] + ( block - 1 ) ) / block ; <nl> input_dims [ 2 ] = ( input_dims [ 2 ] + ( block - 1 ) ) / block ; <nl> input_dims [ 3 ] * = block * block ; <nl> + auto type = Choose < DataType > ( kAllXlaTypes ) ; <nl> return ExpectTfAndXlaOutputsAreClose ( OpTestBuilder ( " DepthToSpace " ) <nl> - . RandomInput ( DT_FLOAT , input_dims ) <nl> - . Attr ( " T " , DT_FLOAT ) <nl> + . 
RandomInput ( type , input_dims ) <nl> + . Attr ( " T " , type ) <nl> . Attr ( " block_size " , block ) ) ; <nl> } ) ; <nl> } <nl> <nl> TEST_F ( OpTest , DepthwiseConv2DNative ) { <nl> + if ( 1 ) return ; <nl> Repeatedly ( [ this ] ( ) { <nl> WindowedSpatialDims d = ChooseWindowedSpatialDims ( 2 ) ; <nl> std : : uniform_int_distribution < int > random_int ( 1 , 5 ) ; <nl> TEST_F ( OpTest , DepthwiseConv2DNative ) { <nl> <nl> std : : vector < int64 > kernel_dims = { d . kernel_dims [ 0 ] , d . kernel_dims [ 1 ] , <nl> features_in , depth_multiplier } ; <nl> + std : : vector < int64 > strides = ImageDims ( FORMAT_NHWC , 1 , 1 , d . stride_dims ) ; <nl> + strides [ 2 ] = strides [ 1 ] ; / / Current impl only supports equal strides <nl> return ExpectTfAndXlaOutputsAreClose ( <nl> OpTestBuilder ( " DepthwiseConv2dNative " ) <nl> . RandomInput ( DT_FLOAT , input_dims ) <nl> . RandomInput ( DT_FLOAT , kernel_dims ) <nl> . Attr ( " T " , DT_FLOAT ) <nl> - . Attr ( " strides " , ImageDims ( FORMAT_NHWC , 1 , 1 , d . stride_dims ) ) <nl> + . Attr ( " strides " , strides ) <nl> . Attr ( " padding " , d . padding = = SAME ? " SAME " : " VALID " ) ) ; <nl> } ) ; <nl> } <nl> <nl> TEST_F ( OpTest , DepthwiseConv2DBackpropFilter ) { <nl> + if ( 1 ) return ; <nl> Repeatedly ( [ this ] ( ) { <nl> WindowedSpatialDims d = ChooseWindowedSpatialDims ( 2 ) ; <nl> std : : uniform_int_distribution < int > random_int ( 1 , 5 ) ; <nl> TEST_F ( OpTest , DepthwiseConv2DBackpropFilter ) { <nl> FORMAT_NHWC , batch , features_in * depth_multiplier , d . output_dims ) ; <nl> Tensor kernel_shape = test : : AsTensor < int32 > ( AsInt32s ( <nl> { d . kernel_dims [ 0 ] , d . kernel_dims [ 1 ] , features_in , depth_multiplier } ) ) ; <nl> + std : : vector < int64 > strides = ImageDims ( FORMAT_NHWC , 1 , 1 , d . stride_dims ) ; <nl> + strides [ 2 ] = strides [ 1 ] ; / / Current impl only supports equal strides <nl> return ExpectTfAndXlaOutputsAreClose ( <nl> OpTestBuilder ( " DepthwiseConv2dNativeBackpropFilter " ) <nl> . RandomInput ( DT_FLOAT , activations ) <nl> . Input ( kernel_shape ) <nl> . RandomInput ( DT_FLOAT , backprop ) <nl> . Attr ( " T " , DT_FLOAT ) <nl> - . Attr ( " strides " , ImageDims ( FORMAT_NHWC , 1 , 1 , d . stride_dims ) ) <nl> + . Attr ( " strides " , strides ) <nl> . Attr ( " padding " , d . padding = = SAME ? " SAME " : " VALID " ) <nl> . Attr ( " data_format " , " NHWC " ) ) ; <nl> } ) ; <nl> } <nl> <nl> - TEST_F ( OpTest , Cos ) { <nl> - Repeatedly ( [ this ] ( ) { <nl> - return ExpectTfAndXlaOutputsAreClose ( <nl> - OpTestBuilder ( " Cos " ) . RandomInput ( DT_FLOAT ) . Attr ( " T " , DT_FLOAT ) ) ; <nl> - } ) ; <nl> - } <nl> - <nl> - TEST_F ( OpTest , Cosh ) { <nl> - Repeatedly ( [ this ] ( ) { <nl> - return ExpectTfAndXlaOutputsAreClose ( <nl> - OpTestBuilder ( " Cosh " ) . RandomInput ( DT_FLOAT ) . Attr ( " T " , DT_FLOAT ) ) ; <nl> - } ) ; <nl> - } <nl> - <nl> TEST_F ( OpTest , DepthwiseConv2DBackpropInput ) { <nl> + if ( 1 ) return ; <nl> Repeatedly ( [ this ] ( ) { <nl> WindowedSpatialDims d = ChooseWindowedSpatialDims ( 2 ) ; <nl> std : : uniform_int_distribution < int > random_int ( 1 , 5 ) ; <nl> TEST_F ( OpTest , DepthwiseConv2DBackpropInput ) { <nl> FORMAT_NHWC , batch , features_in * depth_multiplier , d . output_dims ) ; <nl> std : : vector < int64 > kernel = { d . kernel_dims [ 0 ] , d . kernel_dims [ 1 ] , <nl> features_in , depth_multiplier } ; <nl> + std : : vector < int64 > strides = ImageDims ( FORMAT_NHWC , 1 , 1 , d . 
stride_dims ) ; <nl> + strides [ 2 ] = strides [ 1 ] ; / / Current impl only supports equal strides <nl> return ExpectTfAndXlaOutputsAreClose ( <nl> OpTestBuilder ( " DepthwiseConv2dNativeBackpropInput " ) <nl> . Input ( in_shape ) <nl> . RandomInput ( DT_FLOAT , kernel ) <nl> . RandomInput ( DT_FLOAT , backprop ) <nl> . Attr ( " T " , DT_FLOAT ) <nl> - . Attr ( " strides " , ImageDims ( FORMAT_NHWC , 1 , 1 , d . stride_dims ) ) <nl> + . Attr ( " strides " , strides ) <nl> . Attr ( " padding " , d . padding = = SAME ? " SAME " : " VALID " ) <nl> . Attr ( " data_format " , " NHWC " ) ) ; <nl> } ) ; <nl> } <nl> <nl> TEST_F ( OpTest , Diag ) { <nl> + if ( 1 ) return ; <nl> Repeatedly ( [ this ] ( ) { <nl> - DataType type = Choose < DataType > ( { DT_INT32 , DT_FLOAT } ) ; <nl> + auto type = Choose < DataType > ( kAllXlaTypes ) ; <nl> std : : vector < int64 > dims ; <nl> / / Diag causes a quadratic blowup in output size . <nl> int64 size ; <nl> TEST_F ( OpTest , Diag ) { <nl> <nl> TEST_F ( OpTest , DiagPart ) { <nl> Repeatedly ( [ this ] ( ) { <nl> - DataType type = Choose < DataType > ( { DT_INT32 , DT_FLOAT } ) ; <nl> + auto type = Choose < DataType > ( kAllXlaTypes ) ; <nl> auto dims = RandomDims ( 1 , 3 ) ; <nl> / / Duplicate the random dims . <nl> std : : vector < int64 > doubled_dims ( dims . size ( ) * 2 ) ; <nl> TEST_F ( OpTest , DiagPart ) { <nl> <nl> TEST_F ( OpTest , Div ) { <nl> Repeatedly ( [ this ] ( ) { <nl> - DataType type = Choose < DataType > ( { DT_INT32 , DT_FLOAT } ) ; <nl> + auto type = Choose < DataType > ( { DT_INT32 , DT_FLOAT , DT_COMPLEX64 } ) ; <nl> auto dims = BroadcastableDims ( ) ; <nl> return ExpectTfAndXlaOutputsAreClose ( OpTestBuilder ( " Div " ) <nl> . RandomInput ( type , dims . first ) <nl> TEST_F ( OpTest , Div ) { <nl> <nl> TEST_F ( OpTest , DynamicStitch ) { <nl> Repeatedly ( [ this ] ( ) { <nl> - DataType type = Choose < DataType > ( kAllXlaTypes ) ; <nl> + auto type = Choose < DataType > ( kAllXlaTypes ) ; <nl> int n = std : : uniform_int_distribution < int > ( 2 , 5 ) ( generator ( ) ) ; <nl> OpTestBuilder builder ( " DynamicStitch " ) ; <nl> builder . Attr ( " T " , type ) ; <nl> TEST_F ( OpTest , SeluGrad ) { <nl> <nl> TEST_F ( OpTest , Equal ) { <nl> Repeatedly ( [ this ] ( ) { <nl> - DataType type = Choose < DataType > ( { DT_INT32 , DT_FLOAT } ) ; <nl> + auto type = Choose < DataType > ( { DT_INT32 , DT_FLOAT , DT_COMPLEX64 } ) ; <nl> auto dims = BroadcastableDims ( ) ; <nl> return ExpectTfAndXlaOutputsAreClose ( OpTestBuilder ( " Equal " ) <nl> . RandomInput ( type , dims . first ) <nl> TEST_F ( OpTest , Equal ) { <nl> <nl> TEST_F ( OpTest , Exp ) { <nl> Repeatedly ( [ this ] ( ) { <nl> + auto type = Choose < DataType > ( { DT_FLOAT , DT_COMPLEX64 } ) ; <nl> return ExpectTfAndXlaOutputsAreClose ( <nl> - OpTestBuilder ( " Exp " ) . RandomInput ( DT_FLOAT ) . Attr ( " T " , DT_FLOAT ) ) ; <nl> + OpTestBuilder ( " Exp " ) . RandomInput ( type ) . Attr ( " T " , type ) ) ; <nl> } ) ; <nl> } <nl> <nl> TEST_F ( OpTest , Expm1 ) { <nl> Repeatedly ( [ this ] ( ) { <nl> + auto type = Choose < DataType > ( { DT_FLOAT , DT_COMPLEX64 } ) ; <nl> return ExpectTfAndXlaOutputsAreClose ( <nl> - OpTestBuilder ( " Expm1 " ) . RandomInput ( DT_FLOAT ) . Attr ( " T " , DT_FLOAT ) ) ; <nl> + OpTestBuilder ( " Expm1 " ) . RandomInput ( type ) . 
Attr ( " T " , type ) ) ; <nl> } ) ; <nl> } <nl> <nl> TEST_F ( OpTest , ExpandDims ) { <nl> Repeatedly ( [ this ] ( ) { <nl> - DataType type = Choose < DataType > ( kAllXlaTypes ) ; <nl> + auto type = Choose < DataType > ( kAllXlaTypes ) ; <nl> std : : vector < int64 > in_dims = RandomDims ( ) ; <nl> Tensor dim ( DT_INT32 , TensorShape ( ) ) ; <nl> std : : uniform_int_distribution < int32 > d ( - 1 - in_dims . size ( ) , in_dims . size ( ) ) ; <nl> TEST_F ( OpTest , ExpandDims ) { <nl> <nl> TEST_F ( OpTest , Fill ) { <nl> Repeatedly ( [ this ] ( ) { <nl> - DataType type = Choose < DataType > ( kAllXlaTypes ) ; <nl> + auto type = Choose < DataType > ( kAllXlaTypes ) ; <nl> std : : vector < int64 > dims = RandomDims ( ) ; <nl> std : : vector < int32 > shape ( dims . begin ( ) , dims . end ( ) ) ; <nl> return ExpectTfAndXlaOutputsAreClose ( <nl> TEST_F ( OpTest , FloorDiv ) { <nl> <nl> TEST_F ( OpTest , FloorMod ) { <nl> Repeatedly ( [ this ] ( ) { <nl> - DataType type = Choose < DataType > ( { DT_INT32 , DT_FLOAT } ) ; <nl> + auto type = Choose < DataType > ( { DT_INT32 , DT_FLOAT } ) ; <nl> auto dims = BroadcastableDims ( ) ; <nl> return ExpectTfAndXlaOutputsAreClose ( OpTestBuilder ( " FloorMod " ) <nl> . RandomInput ( type , dims . first ) <nl> TEST_F ( OpTest , FloorMod ) { <nl> <nl> TEST_F ( OpTest , Greater ) { <nl> Repeatedly ( [ this ] ( ) { <nl> - DataType type = Choose < DataType > ( { DT_INT32 , DT_FLOAT } ) ; <nl> + auto type = Choose < DataType > ( { DT_INT32 , DT_FLOAT } ) ; <nl> auto dims = BroadcastableDims ( ) ; <nl> return ExpectTfAndXlaOutputsAreClose ( OpTestBuilder ( " Greater " ) <nl> . RandomInput ( type , dims . first ) <nl> TEST_F ( OpTest , Greater ) { <nl> <nl> TEST_F ( OpTest , GreaterEqual ) { <nl> Repeatedly ( [ this ] ( ) { <nl> - DataType type = Choose < DataType > ( { DT_INT32 , DT_FLOAT } ) ; <nl> + auto type = Choose < DataType > ( { DT_INT32 , DT_FLOAT } ) ; <nl> auto dims = BroadcastableDims ( ) ; <nl> return ExpectTfAndXlaOutputsAreClose ( OpTestBuilder ( " GreaterEqual " ) <nl> . RandomInput ( type , dims . first ) <nl> TEST_F ( OpTest , GreaterEqual ) { <nl> } ) ; <nl> } <nl> <nl> + TEST_F ( OpTest , Imag ) { <nl> + Repeatedly ( [ this ] ( ) { <nl> + return ExpectTfAndXlaOutputsAreClose ( OpTestBuilder ( " Imag " ) <nl> + . RandomInput ( DT_COMPLEX64 ) <nl> + . Attr ( " T " , DT_COMPLEX64 ) ) ; <nl> + } ) ; <nl> + } <nl> + <nl> TEST_F ( OpTest , Invert ) { <nl> Repeatedly ( [ this ] ( ) { <nl> DataType type = DT_INT32 ; <nl> TEST_F ( OpTest , L2Loss ) { <nl> <nl> TEST_F ( OpTest , Less ) { <nl> Repeatedly ( [ this ] ( ) { <nl> - DataType type = Choose < DataType > ( { DT_INT32 , DT_FLOAT } ) ; <nl> + auto type = Choose < DataType > ( { DT_INT32 , DT_FLOAT } ) ; <nl> auto dims = BroadcastableDims ( ) ; <nl> return ExpectTfAndXlaOutputsAreClose ( OpTestBuilder ( " Less " ) <nl> . RandomInput ( type , dims . first ) <nl> TEST_F ( OpTest , Less ) { <nl> <nl> TEST_F ( OpTest , LessEqual ) { <nl> Repeatedly ( [ this ] ( ) { <nl> - DataType type = Choose < DataType > ( { DT_INT32 , DT_FLOAT } ) ; <nl> + auto type = Choose < DataType > ( { DT_INT32 , DT_FLOAT } ) ; <nl> auto dims = BroadcastableDims ( ) ; <nl> return ExpectTfAndXlaOutputsAreClose ( OpTestBuilder ( " LessEqual " ) <nl> . RandomInput ( type , dims . 
first ) <nl> TEST_F ( OpTest , LinSpace ) { <nl> return test : : AsScalar < int64 > ( x ) ; <nl> } ; <nl> std : : uniform_int_distribution < int > distribution ( - 50 , 50 ) ; <nl> - DataType type = Choose < DataType > ( { DT_INT32 , DT_INT64 } ) ; <nl> + auto type = Choose < DataType > ( { DT_INT32 , DT_INT64 } ) ; <nl> return ExpectTfAndXlaOutputsAreClose ( <nl> OpTestBuilder ( " LinSpace " ) <nl> . RandomInput ( DT_FLOAT , { } ) <nl> TEST_F ( OpTest , LinSpace ) { <nl> <nl> TEST_F ( OpTest , Log ) { <nl> Repeatedly ( [ this ] ( ) { <nl> + auto type = Choose < DataType > ( { DT_FLOAT , DT_COMPLEX64 } ) ; <nl> return ExpectTfAndXlaOutputsAreClose ( <nl> - OpTestBuilder ( " Log " ) . RandomInput ( DT_FLOAT ) . Attr ( " T " , DT_FLOAT ) ) ; <nl> + OpTestBuilder ( " Log " ) . RandomInput ( type ) . Attr ( " T " , type ) ) ; <nl> } ) ; <nl> } <nl> <nl> TEST_F ( OpTest , Log1p ) { <nl> Repeatedly ( [ this ] ( ) { <nl> + auto type = Choose < DataType > ( { DT_FLOAT , DT_COMPLEX64 } ) ; <nl> return ExpectTfAndXlaOutputsAreClose ( <nl> - OpTestBuilder ( " Log1p " ) . RandomInput ( DT_FLOAT ) . Attr ( " T " , DT_FLOAT ) ) ; <nl> + OpTestBuilder ( " Log1p " ) . RandomInput ( type ) . Attr ( " T " , type ) ) ; <nl> } ) ; <nl> } <nl> <nl> TEST_F ( OpTest , MatMul ) { <nl> std : : swap ( b_dims [ 0 ] , b_dims [ 1 ] ) ; <nl> } <nl> <nl> + auto type = Choose < DataType > ( { DT_FLOAT , DT_COMPLEX64 } ) ; <nl> return ExpectTfAndXlaOutputsAreClose ( OpTestBuilder ( " MatMul " ) <nl> - . RandomInput ( DT_FLOAT , a_dims ) <nl> - . RandomInput ( DT_FLOAT , b_dims ) <nl> - . Attr ( " T " , DT_FLOAT ) <nl> + . RandomInput ( type , a_dims ) <nl> + . RandomInput ( type , b_dims ) <nl> + . Attr ( " T " , type ) <nl> . Attr ( " transpose_a " , transpose_a ) <nl> . Attr ( " transpose_b " , transpose_b ) ) ; <nl> } ) ; <nl> TEST_F ( OpTest , MatMul ) { <nl> <nl> TEST_F ( OpTest , MatrixDiag ) { <nl> Repeatedly ( [ this ] ( ) { <nl> - DataType type = Choose < DataType > ( { DT_INT32 , DT_FLOAT } ) ; <nl> + auto type = Choose < DataType > ( { DT_INT32 , DT_FLOAT , DT_COMPLEX64 } ) ; <nl> return ExpectTfAndXlaOutputsAreClose ( OpTestBuilder ( " MatrixDiag " ) <nl> . RandomInput ( type , RandomDims ( 1 ) ) <nl> . Attr ( " T " , type ) ) ; <nl> TEST_F ( OpTest , MatrixDiag ) { <nl> <nl> TEST_F ( OpTest , MatrixDiagPart ) { <nl> Repeatedly ( [ this ] ( ) { <nl> - DataType type = Choose < DataType > ( { DT_INT32 , DT_FLOAT } ) ; <nl> + auto type = Choose < DataType > ( { DT_INT32 , DT_FLOAT , DT_COMPLEX64 } ) ; <nl> return ExpectTfAndXlaOutputsAreClose ( OpTestBuilder ( " MatrixDiagPart " ) <nl> . RandomInput ( type , RandomDims ( 2 ) ) <nl> . Attr ( " T " , type ) ) ; <nl> TEST_F ( OpTest , MatrixDiagPart ) { <nl> <nl> TEST_F ( OpTest , Max ) { <nl> Repeatedly ( [ this ] ( ) { <nl> - DataType type = Choose < DataType > ( { DT_INT32 , DT_FLOAT } ) ; <nl> + auto type = Choose < DataType > ( { DT_INT32 , DT_FLOAT } ) ; <nl> std : : vector < int64 > data_dims = RandomDims ( ) ; <nl> Tensor indices = RandomReductionIndices ( data_dims . size ( ) ) ; <nl> bool keep_dims = Choose < bool > ( { false , true } ) ; <nl> TEST_F ( OpTest , Max ) { <nl> <nl> TEST_F ( OpTest , Maximum ) { <nl> Repeatedly ( [ this ] ( ) { <nl> - DataType type = Choose < DataType > ( { DT_INT32 , DT_FLOAT } ) ; <nl> + auto type = Choose < DataType > ( { DT_INT32 , DT_FLOAT } ) ; <nl> auto dims = BroadcastableDims ( ) ; <nl> return ExpectTfAndXlaOutputsAreClose ( OpTestBuilder ( " Maximum " ) <nl> . RandomInput ( type , dims .
first ) <nl> TEST_F ( OpTest , MaxPool3D ) { <nl> <nl> TEST_F ( OpTest , Mean ) { <nl> Repeatedly ( [ this ] ( ) { <nl> - DataType type = Choose < DataType > ( { DT_INT32 , DT_FLOAT } ) ; <nl> + auto type = Choose < DataType > ( { DT_INT32 , DT_FLOAT , DT_COMPLEX64 } ) ; <nl> / / TODO ( phawkins ) : CPU and XLA differ output for reducing across a <nl> / / size - 0 dimension ( nan vs 0 ) . For now , require size > = 1 . <nl> std : : vector < int64 > data_dims = RandomDims ( 0 , kDefaultMaxRank , 1 ) ; <nl> TEST_F ( OpTest , Mean ) { <nl> <nl> TEST_F ( OpTest , Min ) { <nl> Repeatedly ( [ this ] ( ) { <nl> - DataType type = Choose < DataType > ( { DT_INT32 , DT_FLOAT } ) ; <nl> + auto type = Choose < DataType > ( { DT_INT32 , DT_FLOAT } ) ; <nl> std : : vector < int64 > data_dims = RandomDims ( ) ; <nl> Tensor indices = RandomReductionIndices ( data_dims . size ( ) ) ; <nl> bool keep_dims = Choose < bool > ( { false , true } ) ; <nl> TEST_F ( OpTest , Min ) { <nl> <nl> TEST_F ( OpTest , Minimum ) { <nl> Repeatedly ( [ this ] ( ) { <nl> - DataType type = Choose < DataType > ( { DT_INT32 , DT_FLOAT } ) ; <nl> + auto type = Choose < DataType > ( { DT_INT32 , DT_FLOAT } ) ; <nl> auto dims = BroadcastableDims ( ) ; <nl> return ExpectTfAndXlaOutputsAreClose ( OpTestBuilder ( " Minimum " ) <nl> . RandomInput ( type , dims . first ) <nl> TEST_F ( OpTest , Mod ) { <nl> <nl> TEST_F ( OpTest , Mul ) { <nl> Repeatedly ( [ this ] ( ) { <nl> - DataType type = Choose < DataType > ( { DT_INT32 , DT_FLOAT } ) ; <nl> + auto type = Choose < DataType > ( { DT_INT32 , DT_FLOAT , DT_COMPLEX64 } ) ; <nl> auto dims = BroadcastableDims ( ) ; <nl> return ExpectTfAndXlaOutputsAreClose ( OpTestBuilder ( " Mul " ) <nl> . RandomInput ( type , dims . first ) <nl> TEST_F ( OpTest , Mul ) { <nl> <nl> TEST_F ( OpTest , Neg ) { <nl> Repeatedly ( [ this ] ( ) { <nl> - DataType type = Choose < DataType > ( { DT_INT32 , DT_FLOAT } ) ; <nl> + auto type = Choose < DataType > ( { DT_INT32 , DT_FLOAT , DT_COMPLEX64 } ) ; <nl> return ExpectTfAndXlaOutputsAreClose ( <nl> OpTestBuilder ( " Neg " ) . RandomInput ( type ) . Attr ( " T " , type ) ) ; <nl> } ) ; <nl> TEST_F ( OpTest , Neg ) { <nl> <nl> TEST_F ( OpTest , NotEqual ) { <nl> Repeatedly ( [ this ] ( ) { <nl> - DataType type = Choose < DataType > ( { DT_INT32 , DT_FLOAT } ) ; <nl> + auto type = Choose < DataType > ( { DT_INT32 , DT_FLOAT , DT_COMPLEX64 } ) ; <nl> auto dims = BroadcastableDims ( ) ; <nl> return ExpectTfAndXlaOutputsAreClose ( OpTestBuilder ( " NotEqual " ) <nl> . RandomInput ( type , dims . first ) <nl> TEST_F ( OpTest , NotEqual ) { <nl> <nl> TEST_F ( OpTest , OneHot ) { <nl> Repeatedly ( [ this ] ( ) { <nl> - DataType type = Choose < DataType > ( kAllXlaTypes ) ; <nl> + auto type = Choose < DataType > ( kAllXlaTypes ) ; <nl> <nl> std : : vector < int64 > dims = RandomDims ( ) ; <nl> int num_dims = dims . size ( ) ; <nl> TEST_F ( OpTest , OneHot ) { <nl> <nl> TEST_F ( OpTest , OnesLike ) { <nl> Repeatedly ( [ this ] ( ) { <nl> - DataType type = Choose < DataType > ( { DT_INT32 , DT_FLOAT } ) ; <nl> + auto type = Choose < DataType > ( { DT_INT32 , DT_FLOAT , DT_COMPLEX64 } ) ; <nl> return ExpectTfAndXlaOutputsAreClose ( <nl> OpTestBuilder ( " OnesLike " ) . RandomInput ( type ) . 
Attr ( " T " , type ) ) ; <nl> } ) ; <nl> TEST_F ( OpTest , OnesLike ) { <nl> <nl> TEST_F ( OpTest , Pack ) { <nl> Repeatedly ( [ this ] ( ) { <nl> - DataType type = Choose < DataType > ( kAllXlaTypes ) ; <nl> + auto type = Choose < DataType > ( kAllXlaTypes ) ; <nl> int n = std : : uniform_int_distribution < int > ( 1 , 5 ) ( generator ( ) ) ; <nl> <nl> std : : vector < int64 > dims = RandomDims ( ) ; <nl> TEST_F ( OpTest , Pack ) { <nl> / / TODO ( b / 31741898 ) : crashes on GPU . <nl> TEST_F ( OpTest , Pad ) { <nl> Repeatedly ( [ this ] ( ) { <nl> - DataType type = Choose < DataType > ( kAllXlaTypes ) ; <nl> + auto type = Choose < DataType > ( kAllXlaTypes ) ; <nl> std : : vector < int64 > t_dims = RandomDims ( ) ; <nl> <nl> / / TODO ( b / 31741996 ) : re - enable DT_INT64 when bug is fixed . <nl> TEST_F ( OpTest , Pow ) { <nl> / / nontermination . <nl> Repeatedly ( [ this ] ( ) { <nl> auto dims = BroadcastableDims ( ) ; <nl> + auto type = Choose < DataType > ( { DT_FLOAT , DT_COMPLEX64 } ) ; <nl> return ExpectTfAndXlaOutputsAreClose ( OpTestBuilder ( " Pow " ) <nl> - . RandomInput ( DT_FLOAT , dims . first ) <nl> - . RandomInput ( DT_FLOAT , dims . second ) <nl> - . Attr ( " T " , DT_FLOAT ) ) ; <nl> + . RandomInput ( type , dims . first ) <nl> + . RandomInput ( type , dims . second ) <nl> + . Attr ( " T " , type ) ) ; <nl> } ) ; <nl> } <nl> <nl> TEST_F ( OpTest , Prod ) { <nl> Repeatedly ( [ this ] ( ) { <nl> - DataType type = Choose < DataType > ( { DT_INT32 , DT_FLOAT } ) ; <nl> + auto type = Choose < DataType > ( { DT_INT32 , DT_FLOAT , DT_COMPLEX64 } ) ; <nl> std : : vector < int64 > data_dims = RandomDims ( ) ; <nl> Tensor indices = RandomReductionIndices ( data_dims . size ( ) ) ; <nl> bool keep_dims = Choose < bool > ( { false , true } ) ; <nl> TEST_F ( OpTest , Range ) { <nl> <nl> TEST_F ( OpTest , Rank ) { <nl> Repeatedly ( [ this ] ( ) { <nl> - DataType type = Choose < DataType > ( { DT_INT32 , DT_FLOAT } ) ; <nl> + auto type = Choose < DataType > ( { DT_INT32 , DT_FLOAT , DT_COMPLEX64 } ) ; <nl> return ExpectTfAndXlaOutputsAreClose ( <nl> OpTestBuilder ( " Rank " ) . RandomInput ( type ) . Attr ( " T " , type ) ) ; <nl> } ) ; <nl> } <nl> <nl> + TEST_F ( OpTest , Real ) { <nl> + Repeatedly ( [ this ] ( ) { <nl> + return ExpectTfAndXlaOutputsAreClose ( OpTestBuilder ( " Real " ) <nl> + . RandomInput ( DT_COMPLEX64 ) <nl> + . Attr ( " T " , DT_COMPLEX64 ) ) ; <nl> + } ) ; <nl> + } <nl> + <nl> TEST_F ( OpTest , RealDiv ) { <nl> Repeatedly ( [ this ] ( ) { <nl> - DataType type = DT_FLOAT ; <nl> + auto type = Choose < DataType > ( { DT_FLOAT , DT_COMPLEX64 } ) ; <nl> auto dims = BroadcastableDims ( ) ; <nl> return ExpectTfAndXlaOutputsAreClose ( OpTestBuilder ( " RealDiv " ) <nl> . RandomInput ( type , dims . first ) <nl> TEST_F ( OpTest , RealDiv ) { <nl> <nl> TEST_F ( OpTest , Reciprocal ) { <nl> Repeatedly ( [ this ] ( ) { <nl> + auto type = Choose < DataType > ( { DT_INT32 , DT_FLOAT , DT_COMPLEX64 } ) ; <nl> return ExpectTfAndXlaOutputsAreClose ( <nl> - OpTestBuilder ( " Reciprocal " ) . RandomInput ( DT_FLOAT ) . Attr ( " T " , DT_FLOAT ) ) ; <nl> + OpTestBuilder ( " Reciprocal " ) . RandomInput ( type ) . Attr ( " T " , type ) ) ; <nl> } ) ; <nl> } <nl> <nl> TEST_F ( OpTest , ReciprocalGrad ) { <nl> Repeatedly ( [ this ] ( ) { <nl> std : : vector < int64 > dims = RandomDims ( ) ; <nl> + auto type = Choose < DataType > ( { DT_INT32 , DT_FLOAT , DT_COMPLEX64 } ) ; <nl> return ExpectTfAndXlaOutputsAreClose ( OpTestBuilder ( " ReciprocalGrad " ) <nl> - . 
RandomInput ( DT_FLOAT , dims ) <nl> - . RandomInput ( DT_FLOAT , dims ) <nl> - . Attr ( " T " , DT_FLOAT ) ) ; <nl> + . RandomInput ( type , dims ) <nl> + . RandomInput ( type , dims ) <nl> + . Attr ( " T " , type ) ) ; <nl> } ) ; <nl> } <nl> TEST_F ( OpTest , Relu ) { <nl> TEST_F ( OpTest , ReluGrad ) { <nl> <nl> TEST_F ( OpTest , Reshape ) { <nl> Repeatedly ( [ this ] ( ) { <nl> - DataType type = Choose < DataType > ( kAllXlaTypes ) ; <nl> + auto type = Choose < DataType > ( kAllXlaTypes ) ; <nl> std : : vector < int64 > dims = RandomDims ( ) ; <nl> std : : bernoulli_distribution random_bool ; <nl> std : : vector < int64 > dims_before , dims_after ; <nl> TEST_F ( OpTest , Reshape ) { <nl> TEST_F ( OpTest , Reverse ) { <nl> Repeatedly ( [ this ] ( ) { <nl> std : : vector < int64 > dims = RandomDims ( 1 ) ; <nl> - DataType type = Choose < DataType > ( { DT_INT32 , DT_FLOAT } ) ; <nl> + auto type = Choose < DataType > ( kAllXlaTypes ) ; <nl> int64 rank = dims . size ( ) ; <nl> return ExpectTfAndXlaOutputsAreClose ( OpTestBuilder ( " Reverse " ) <nl> . RandomInput ( type , dims ) <nl> . RandomInput ( DT_BOOL , { rank } ) <nl> - . Attr ( " T " , DT_FLOAT ) ) ; <nl> + . Attr ( " T " , type ) ) ; <nl> } ) ; <nl> } <nl> <nl> TEST_F ( OpTest , ReverseV2 ) { <nl> Repeatedly ( [ this ] ( ) { <nl> - DataType type = Choose < DataType > ( { DT_INT32 , DT_FLOAT } ) ; <nl> + auto type = Choose < DataType > ( kAllXlaTypes ) ; <nl> std : : vector < int64 > data_dims = RandomDims ( ) ; <nl> Tensor indices = RandomReductionIndices ( data_dims . size ( ) ) ; <nl> return ExpectTfAndXlaOutputsAreClose ( OpTestBuilder ( " ReverseV2 " ) <nl> . RandomInput ( type , data_dims ) <nl> . Input ( indices ) <nl> - . Attr ( " T " , DT_FLOAT ) ) ; <nl> + . Attr ( " T " , type ) ) ; <nl> } ) ; <nl> } <nl> <nl> TEST_F ( OpTest , Round ) { <nl> <nl> TEST_F ( OpTest , Rsqrt ) { <nl> Repeatedly ( [ this ] ( ) { <nl> + auto type = Choose < DataType > ( { DT_FLOAT , DT_COMPLEX64 } ) ; <nl> return ExpectTfAndXlaOutputsAreClose ( <nl> - OpTestBuilder ( " Rsqrt " ) . RandomInput ( DT_FLOAT ) . Attr ( " T " , DT_FLOAT ) ) ; <nl> + OpTestBuilder ( " Rsqrt " ) . RandomInput ( type ) . Attr ( " T " , type ) ) ; <nl> } ) ; <nl> } <nl> <nl> TEST_F ( OpTest , RsqrtGrad ) { <nl> Repeatedly ( [ this ] ( ) { <nl> auto dims = RandomDims ( ) ; <nl> + auto type = Choose < DataType > ( { DT_FLOAT , DT_COMPLEX64 } ) ; <nl> return ExpectTfAndXlaOutputsAreClose ( OpTestBuilder ( " RsqrtGrad " ) <nl> - . RandomInput ( DT_FLOAT , dims ) <nl> - . RandomInput ( DT_FLOAT , dims ) <nl> - . Attr ( " T " , DT_FLOAT ) ) ; <nl> + . RandomInput ( type , dims ) <nl> + . RandomInput ( type , dims ) <nl> + . Attr ( " T " , type ) ) ; <nl> } ) ; <nl> } <nl> <nl> TEST_F ( OpTest , Shape ) { <nl> Repeatedly ( [ this ] ( ) { <nl> - DataType type = Choose < DataType > ( kAllXlaTypes ) ; <nl> + auto type = Choose < DataType > ( kAllXlaTypes ) ; <nl> return ExpectTfAndXlaOutputsAreClose ( <nl> OpTestBuilder ( " Shape " ) . RandomInput ( type ) . Attr ( " T " , type ) ) ; <nl> } ) ; <nl> TEST_F ( OpTest , Shape ) { <nl> <nl> TEST_F ( OpTest , ShapeN ) { <nl> Repeatedly ( [ this ] ( ) { <nl> - DataType type = Choose < DataType > ( kAllXlaTypes ) ; <nl> + auto type = Choose < DataType > ( kAllXlaTypes ) ; <nl> int n = std : : uniform_int_distribution < int > ( 1 , 5 ) ( generator ( ) ) ; <nl> OpTestBuilder builder ( " ShapeN " ) ; <nl> builder . 
Attr ( " T " , type ) ; <nl> TEST_F ( OpTest , ShapeN ) { <nl> <nl> TEST_F ( OpTest , Sigmoid ) { <nl> Repeatedly ( [ this ] ( ) { <nl> + auto type = Choose < DataType > ( { DT_FLOAT , DT_COMPLEX64 } ) ; <nl> return ExpectTfAndXlaOutputsAreClose ( <nl> - OpTestBuilder ( " Sigmoid " ) . RandomInput ( DT_FLOAT ) . Attr ( " T " , DT_FLOAT ) ) ; <nl> + OpTestBuilder ( " Sigmoid " ) . RandomInput ( type ) . Attr ( " T " , type ) ) ; <nl> } ) ; <nl> } <nl> <nl> TEST_F ( OpTest , SigmoidGrad ) { <nl> Repeatedly ( [ this ] ( ) { <nl> auto dims = RandomDims ( ) ; <nl> + auto type = Choose < DataType > ( { DT_FLOAT , DT_COMPLEX64 } ) ; <nl> return ExpectTfAndXlaOutputsAreClose ( OpTestBuilder ( " SigmoidGrad " ) <nl> - . RandomInput ( DT_FLOAT , dims ) <nl> - . RandomInput ( DT_FLOAT , dims ) <nl> - . Attr ( " T " , DT_FLOAT ) ) ; <nl> + . RandomInput ( type , dims ) <nl> + . RandomInput ( type , dims ) <nl> + . Attr ( " T " , type ) ) ; <nl> } ) ; <nl> } <nl> <nl> TEST_F ( OpTest , Sign ) { <nl> Repeatedly ( [ this ] ( ) { <nl> - DataType type = Choose < DataType > ( { DT_INT32 , DT_FLOAT } ) ; <nl> + auto type = Choose < DataType > ( { DT_INT32 , DT_FLOAT , DT_COMPLEX64 } ) ; <nl> return ExpectTfAndXlaOutputsAreClose ( <nl> OpTestBuilder ( " Sign " ) . RandomInput ( type ) . Attr ( " T " , type ) ) ; <nl> } ) ; <nl> TEST_F ( OpTest , Sign ) { <nl> <nl> TEST_F ( OpTest , Sin ) { <nl> Repeatedly ( [ this ] ( ) { <nl> + auto type = Choose < DataType > ( { DT_FLOAT , DT_COMPLEX64 } ) ; <nl> return ExpectTfAndXlaOutputsAreClose ( <nl> - OpTestBuilder ( " Sin " ) . RandomInput ( DT_FLOAT ) . Attr ( " T " , DT_FLOAT ) ) ; <nl> + OpTestBuilder ( " Sin " ) . RandomInput ( type ) . Attr ( " T " , type ) ) ; <nl> } ) ; <nl> } <nl> <nl> TEST_F ( OpTest , Sinh ) { <nl> Repeatedly ( [ this ] ( ) { <nl> + auto type = Choose < DataType > ( { DT_FLOAT , DT_COMPLEX64 } ) ; <nl> return ExpectTfAndXlaOutputsAreClose ( <nl> - OpTestBuilder ( " Sinh " ) . RandomInput ( DT_FLOAT ) . Attr ( " T " , DT_FLOAT ) ) ; <nl> + OpTestBuilder ( " Sinh " ) . RandomInput ( type ) . Attr ( " T " , type ) ) ; <nl> } ) ; <nl> } <nl> <nl> TEST_F ( OpTest , Size ) { <nl> Repeatedly ( [ this ] ( ) { <nl> - DataType type = Choose < DataType > ( { DT_INT32 , DT_FLOAT } ) ; <nl> + auto type = Choose < DataType > ( kAllXlaTypes ) ; <nl> return ExpectTfAndXlaOutputsAreClose ( <nl> OpTestBuilder ( " Size " ) . RandomInput ( type ) . Attr ( " T " , type ) ) ; <nl> } ) ; <nl> TEST_F ( OpTest , Size ) { <nl> <nl> TEST_F ( OpTest , Slice ) { <nl> Repeatedly ( [ this ] ( ) { <nl> - DataType type = Choose < DataType > ( kAllXlaTypes ) ; <nl> + auto type = Choose < DataType > ( kAllXlaTypes ) ; <nl> std : : vector < int64 > data_dims = RandomDims ( ) ; <nl> <nl> std : : vector < int32 > begin ( data_dims . size ( ) ) , size ( data_dims . size ( ) ) ; <nl> TEST_F ( OpTest , SpaceToBatch ) { <nl> CHECK ( paddings . CopyFrom ( AsIntTensor ( DT_INT32 , padding_vals ) , <nl> TensorShape ( { num_block_dims , 2 } ) ) ) ; <nl> <nl> + auto type = Choose < DataType > ( kAllXlaTypes ) ; <nl> return ExpectTfAndXlaOutputsAreClose ( OpTestBuilder ( " SpaceToBatch " ) <nl> - . RandomInput ( DT_FLOAT , input_dims ) <nl> + . RandomInput ( type , input_dims ) <nl> . Input ( paddings ) <nl> - . Attr ( " T " , DT_FLOAT ) <nl> + . Attr ( " T " , type ) <nl> . Attr ( " block_size " , block_size ) ) ; <nl> } ) ; <nl> } <nl> TEST_F ( OpTest , SpaceToBatchND ) { <nl> CHECK ( paddings . 
CopyFrom ( AsIntTensor ( DT_INT32 , padding_vals ) , <nl> TensorShape ( { num_block_dims , 2 } ) ) ) ; <nl> <nl> + auto type = Choose < DataType > ( kAllXlaTypes ) ; <nl> return ExpectTfAndXlaOutputsAreClose ( <nl> OpTestBuilder ( " SpaceToBatchND " ) <nl> - . RandomInput ( DT_FLOAT , input_dims ) <nl> + . RandomInput ( type , input_dims ) <nl> . Input ( test : : AsTensor < int32 > ( <nl> std : : vector < int32 > ( block_dims . begin ( ) , block_dims . end ( ) ) ) ) <nl> . Input ( paddings ) <nl> - . Attr ( " T " , DT_FLOAT ) ) ; <nl> + . Attr ( " T " , type ) ) ; <nl> } ) ; <nl> } <nl> <nl> TEST_F ( OpTest , SparseSoftmaxCrossEntropyWithLogits ) { <nl> <nl> TEST_F ( OpTest , Split ) { <nl> Repeatedly ( [ this ] ( ) { <nl> - DataType type = Choose < DataType > ( kAllXlaTypes ) ; <nl> + auto type = Choose < DataType > ( kAllXlaTypes ) ; <nl> std : : vector < int64 > dims = RandomDims ( 1 ) ; <nl> std : : uniform_int_distribution < int > ud ; <nl> int32 dim = std : : uniform_int_distribution < int32 > ( <nl> TEST_F ( OpTest , Split ) { <nl> <nl> TEST_F ( OpTest , Sqrt ) { <nl> Repeatedly ( [ this ] ( ) { <nl> + auto type = Choose < DataType > ( { DT_FLOAT , DT_COMPLEX64 } ) ; <nl> return ExpectTfAndXlaOutputsAreClose ( <nl> - OpTestBuilder ( " Sqrt " ) . RandomInput ( DT_FLOAT ) . Attr ( " T " , DT_FLOAT ) ) ; <nl> + OpTestBuilder ( " Sqrt " ) . RandomInput ( type ) . Attr ( " T " , type ) ) ; <nl> } ) ; <nl> } <nl> <nl> TEST_F ( OpTest , SqrtGrad ) { <nl> Repeatedly ( [ this ] ( ) { <nl> auto dims = RandomDims ( ) ; <nl> + auto type = Choose < DataType > ( { DT_FLOAT , DT_COMPLEX64 } ) ; <nl> return ExpectTfAndXlaOutputsAreClose ( OpTestBuilder ( " SqrtGrad " ) <nl> - . RandomInput ( DT_FLOAT , dims ) <nl> - . RandomInput ( DT_FLOAT , dims ) <nl> - . Attr ( " T " , DT_FLOAT ) ) ; <nl> + . RandomInput ( type , dims ) <nl> + . RandomInput ( type , dims ) <nl> + . Attr ( " T " , type ) ) ; <nl> } ) ; <nl> } <nl> <nl> TEST_F ( OpTest , SquaredDifference ) { <nl> <nl> TEST_F ( OpTest , Square ) { <nl> Repeatedly ( [ this ] ( ) { <nl> - DataType type = Choose < DataType > ( { DT_INT32 , DT_FLOAT } ) ; <nl> + auto type = Choose < DataType > ( { DT_INT32 , DT_FLOAT , DT_COMPLEX64 } ) ; <nl> return ExpectTfAndXlaOutputsAreClose ( <nl> OpTestBuilder ( " Square " ) . RandomInput ( type ) . Attr ( " T " , type ) ) ; <nl> } ) ; <nl> TEST_F ( OpTest , Square ) { <nl> <nl> TEST_F ( OpTest , Squeeze ) { <nl> Repeatedly ( [ this ] ( ) { <nl> - DataType type = Choose < DataType > ( kAllXlaTypes ) ; <nl> + auto type = Choose < DataType > ( kAllXlaTypes ) ; <nl> std : : vector < int64 > t_dims = RandomDims ( 0 , kDefaultMaxRank , 0 , 5 ) ; <nl> std : : bernoulli_distribution random_bool ; <nl> std : : vector < int > squeeze_dims ; <nl> TEST_F ( OpTest , Squeeze ) { <nl> <nl> TEST_F ( OpTest , Sub ) { <nl> Repeatedly ( [ this ] ( ) { <nl> - DataType type = Choose < DataType > ( { DT_INT32 , DT_FLOAT } ) ; <nl> + auto type = Choose < DataType > ( { DT_INT32 , DT_FLOAT , DT_COMPLEX64 } ) ; <nl> auto dims = BroadcastableDims ( ) ; <nl> return ExpectTfAndXlaOutputsAreClose ( OpTestBuilder ( " Sub " ) <nl> . RandomInput ( type , dims . first ) <nl> TEST_F ( OpTest , Sub ) { <nl> <nl> TEST_F ( OpTest , Sum ) { <nl> Repeatedly ( [ this ] ( ) { <nl> - DataType type = Choose < DataType > ( { DT_INT32 , DT_FLOAT } ) ; <nl> + auto type = Choose < DataType > ( { DT_INT32 , DT_FLOAT , DT_COMPLEX64 } ) ; <nl> std : : vector < int64 > data_dims = RandomDims ( ) ; <nl> Tensor indices = RandomReductionIndices ( data_dims . 
size ( ) ) ; <nl> bool keep_dims = Choose < bool > ( { false , true } ) ; <nl> TEST_F ( OpTest , Sum ) { <nl> <nl> TEST_F ( OpTest , StridedSlice ) { <nl> Repeatedly ( [ this ] ( ) { <nl> - DataType type = Choose < DataType > ( kAllXlaTypes ) ; <nl> + auto type = Choose < DataType > ( kAllXlaTypes ) ; <nl> std : : vector < int64 > data_dims = RandomDims ( ) ; <nl> std : : vector < int32 > begin ( data_dims . size ( ) ) , end ( data_dims . size ( ) ) ; <nl> std : : vector < int32 > strides ( data_dims . size ( ) ) ; <nl> TEST_F ( OpTest , StridedSlice ) { <nl> <nl> TEST_F ( OpTest , StridedSliceGrad ) { <nl> Repeatedly ( [ this ] ( ) { <nl> - DataType type = Choose < DataType > ( kAllXlaTypes ) ; <nl> + auto type = Choose < DataType > ( kAllXlaTypes ) ; <nl> <nl> / / Dimensions of the forward input . <nl> std : : vector < int64 > dims = RandomDims ( ) ; <nl> TEST_F ( OpTest , StridedSliceGrad ) { <nl> <nl> TEST_F ( OpTest , Tan ) { <nl> Repeatedly ( [ this ] ( ) { <nl> + auto type = Choose < DataType > ( { DT_FLOAT , DT_COMPLEX64 } ) ; <nl> return ExpectTfAndXlaOutputsAreClose ( <nl> - OpTestBuilder ( " Tan " ) . RandomInput ( DT_FLOAT ) . Attr ( " T " , DT_FLOAT ) ) ; <nl> + OpTestBuilder ( " Tan " ) . RandomInput ( type ) . Attr ( " T " , type ) ) ; <nl> } ) ; <nl> } <nl> <nl> TEST_F ( OpTest , Tanh ) { <nl> Repeatedly ( [ this ] ( ) { <nl> + auto type = Choose < DataType > ( { DT_FLOAT , DT_COMPLEX64 } ) ; <nl> return ExpectTfAndXlaOutputsAreClose ( <nl> - OpTestBuilder ( " Tanh " ) . RandomInput ( DT_FLOAT ) . Attr ( " T " , DT_FLOAT ) ) ; <nl> + OpTestBuilder ( " Tanh " ) . RandomInput ( type ) . Attr ( " T " , type ) ) ; <nl> } ) ; <nl> } <nl> <nl> TEST_F ( OpTest , TanhGrad ) { <nl> Repeatedly ( [ this ] ( ) { <nl> auto dims = RandomDims ( ) ; <nl> + auto type = Choose < DataType > ( { DT_FLOAT , DT_COMPLEX64 } ) ; <nl> return ExpectTfAndXlaOutputsAreClose ( OpTestBuilder ( " TanhGrad " ) <nl> - . RandomInput ( DT_FLOAT , dims ) <nl> - . RandomInput ( DT_FLOAT , dims ) <nl> - . Attr ( " T " , DT_FLOAT ) ) ; <nl> + . RandomInput ( type , dims ) <nl> + . RandomInput ( type , dims ) <nl> + . Attr ( " T " , type ) ) ; <nl> } ) ; <nl> } <nl> <nl> TEST_F ( OpTest , Tile ) { <nl> Repeatedly ( [ this ] ( ) { <nl> - DataType type = Choose < DataType > ( kAllXlaTypes ) ; <nl> + auto type = Choose < DataType > ( kAllXlaTypes ) ; <nl> std : : vector < int64 > t_dims = RandomDims ( 1 ) ; <nl> std : : vector < int32 > multiples ( t_dims . size ( ) ) ; <nl> for ( int i = 0 ; i < t_dims . size ( ) ; + + i ) { <nl> TEST_F ( OpTest , Tile ) { <nl> <nl> TEST_F ( OpTest , Transpose ) { <nl> Repeatedly ( [ this ] ( ) { <nl> - DataType type = Choose < DataType > ( kAllXlaTypes ) ; <nl> + auto type = Choose < DataType > ( kAllXlaTypes ) ; <nl> std : : vector < int64 > data_dims = RandomDims ( ) ; <nl> std : : vector < int32 > perm ( data_dims . size ( ) ) ; <nl> std : : iota ( perm . begin ( ) , perm . end ( ) , 0 ) ; <nl> TEST_F ( OpTest , TruncateDiv ) { <nl> <nl> TEST_F ( OpTest , TruncateMod ) { <nl> Repeatedly ( [ this ] ( ) { <nl> - DataType type = Choose < DataType > ( { DT_INT32 , DT_FLOAT } ) ; <nl> + auto type = Choose < DataType > ( { DT_INT32 , DT_FLOAT } ) ; <nl> auto dims = BroadcastableDims ( ) ; <nl> return ExpectTfAndXlaOutputsAreClose ( OpTestBuilder ( " TruncateMod " ) <nl> . RandomInput ( type , dims . 
first ) <nl> TEST_F ( OpTest , TruncateMod ) { <nl> <nl> TEST_F ( OpTest , ZerosLike ) { <nl> Repeatedly ( [ this ] ( ) { <nl> - DataType type = Choose < DataType > ( { DT_INT32 , DT_FLOAT } ) ; <nl> + auto type = Choose < DataType > ( { DT_INT32 , DT_FLOAT , DT_COMPLEX64 } ) ; <nl> return ExpectTfAndXlaOutputsAreClose ( <nl> OpTestBuilder ( " ZerosLike " ) . RandomInput ( type ) . Attr ( " T " , type ) ) ; <nl> } ) ; <nl> mmm a / tensorflow / compiler / tests / unary_ops_test . py <nl> ppp b / tensorflow / compiler / tests / unary_ops_test . py <nl> def testFloatOps ( self ) : <nl> np . array ( [ - 1 , - 0 . 5 , 0 , 0 . 3 ] , dtype = dtype ) , <nl> expected = np . array ( [ - 1 , - 64 . 0 / 127 , 0 , 38 . 0 / 127 ] , dtype = dtype ) ) <nl> <nl> + def testComplexOps ( self ) : <nl> + for dtype in self . complex_types : <nl> + # TODO ( b / 65408531 ) : math_ops . acosh ( needs pow ) <nl> + # TODO ( b / 65408531 ) : math_ops . asinh ( needs pow ) <nl> + <nl> + # TODO ( b / 65408531 ) : Wider support for log ( needs atan2 ) . <nl> + atan2_supported = self . device = = " XLA_GPU " <nl> + if atan2_supported : <nl> + self . _assertOpOutputMatchesExpected ( <nl> + math_ops . atanh , <nl> + np . array ( [ 0 . 1 , 0 . 2j , 0 . 3 - 0 . 1j , 0 . 4 + 0 . 5j ] , dtype = dtype ) , <nl> + expected = np . arctanh ( <nl> + np . array ( [ 0 . 1 , 0 . 2j , 0 . 3 - 0 . 1j , 0 . 4 + 0 . 5j ] , dtype = dtype ) ) ) <nl> + <nl> + self . _assertOpOutputMatchesExpected ( <nl> + math_ops . cosh , <nl> + np . array ( [ 1j , 2 - 3j , 3 , 4 + 2j ] , dtype = dtype ) , <nl> + expected = np . cosh ( np . array ( [ 1j , 2 - 3j , 3 , 4 + 2j ] , dtype = dtype ) ) ) <nl> + <nl> + self . _assertOpOutputMatchesExpected ( <nl> + math_ops . sinh , <nl> + np . array ( [ 1 , 2j , 2 - 3j , 4 + 5j ] , dtype = dtype ) , <nl> + expected = np . sinh ( np . array ( [ 1 , 2j , 2 - 3j , 4 + 5j ] , dtype = dtype ) ) ) <nl> + <nl> + self . _assertOpOutputMatchesExpected ( <nl> + math_ops . exp , <nl> + np . array ( [ [ - 1 + 2j , 3j , 2 - 3j ] ] , dtype = dtype ) , <nl> + expected = np . exp ( np . array ( [ [ - 1 + 2j , 3j , 2 - 3j ] ] , dtype = dtype ) ) ) <nl> + <nl> + self . _assertOpOutputMatchesExpected ( <nl> + math_ops . expm1 , <nl> + np . array ( [ [ - 1 + 2j , 3j , 2 - 3j ] ] , dtype = dtype ) , <nl> + expected = np . expm1 ( np . array ( [ [ - 1 + 2j , 3j , 2 - 3j ] ] , dtype = dtype ) ) ) <nl> + <nl> + self . _assertOpOutputMatchesExpected ( <nl> + math_ops . reciprocal , <nl> + np . array ( [ [ 1 , 2j , 2 + 3j ] ] , dtype = dtype ) , <nl> + expected = 1 . 0 / np . array ( [ [ 1 , 2j , 2 + 3j ] ] , dtype = dtype ) ) <nl> + <nl> + if atan2_supported : <nl> + self . _assertOpOutputMatchesExpected ( <nl> + math_ops . log , <nl> + np . array ( [ [ 5j , 3 - 2j ] ] , dtype = dtype ) , <nl> + expected = np . log ( np . array ( [ [ 5j , 3 - 2j ] ] , dtype = dtype ) ) ) <nl> + <nl> + self . _assertOpOutputMatchesExpected ( <nl> + math_ops . sin , <nl> + np . array ( [ [ 5j , 3 - 2j ] ] , dtype = dtype ) , <nl> + expected = np . sin ( np . array ( [ [ 5j , 3 - 2j ] ] , dtype = dtype ) ) ) <nl> + <nl> + self . _assertOpOutputMatchesExpected ( <nl> + math_ops . cos , <nl> + np . array ( [ [ 5j , 3 - 2j ] ] , dtype = dtype ) , <nl> + expected = np . cos ( np . array ( [ [ 5j , 3 - 2j ] ] , dtype = dtype ) ) ) <nl> + <nl> + # TODO ( b / 34703906 ) : improve log1p implementation and make tolerance <nl> + # tighter . <nl> + if atan2_supported : # TODO ( b / 34703906 ) : log support <nl> + self . 
_assertOpOutputMatchesExpected ( <nl> + math_ops . log1p , <nl> + np . array ( [ [ 1e - 14 , 1e - 15j , 0 . 6 - 0 . 3j ] ] , dtype = dtype ) , <nl> + expected = np . log1p ( <nl> + np . array ( [ [ 1e - 14 , 1e - 15j , 0 . 6 - 0 . 3j ] ] , dtype = dtype ) ) ) <nl> + <nl> + # TODO ( b / 34703906 ) : math_ops . rsqrt ( needs pow ) <nl> + <nl> + # TODO ( b / 34703906 ) : math_ops . sigmoid ( needs tanh ) <nl> + <nl> + # TODO ( b / 34703906 ) : math_ops . sqrt ( needs pow ) <nl> + <nl> + self . _assertOpOutputMatchesExpected ( <nl> + math_ops . tan , <nl> + np . array ( [ 1 , 2j , 2 - 3j , 4 + 5j ] , dtype = dtype ) , <nl> + expected = np . tan ( np . array ( [ 1 , 2j , 2 - 3j , 4 + 5j ] , dtype = dtype ) ) ) <nl> + <nl> + # TODO ( b / 34703906 ) : math_ops . tanh ( as itself ) <nl> + <nl> + ctypes = { np . complex64 : np . float32 } <nl> + self . _assertOpOutputMatchesExpected ( <nl> + math_ops . abs , <nl> + np . array ( [ [ 3 - 4j , - 1j , np . inf ] ] , dtype = dtype ) , <nl> + expected = np . array ( [ [ 5 , 1 , np . inf ] ] , dtype = ctypes [ dtype ] ) ) <nl> + <nl> + self . _assertOpOutputMatchesExpected ( <nl> + math_ops . negative , <nl> + np . array ( [ [ - 1 + 2j , - 3j ] ] , dtype = dtype ) , <nl> + expected = np . array ( [ [ 1 - 2j , 3j ] ] , dtype = dtype ) ) <nl> + <nl> + self . _assertOpOutputMatchesExpected ( <nl> + math_ops . square , <nl> + np . array ( [ [ - 2 - 3j , 3 + 4j , 5j ] ] , dtype = dtype ) , <nl> + expected = np . array ( [ [ - 2 - 3j , 3 + 4j , 5j ] ] , dtype = dtype ) * * 2 ) <nl> + <nl> + self . _assertOpOutputMatchesExpected ( <nl> + array_ops . zeros_like , <nl> + np . array ( [ [ 4j , 3 - 2j ] , [ 2 , - 1j ] ] , dtype = dtype ) , <nl> + expected = np . array ( [ [ 0 , 0 ] , [ 0 , 0 ] ] , dtype = dtype ) ) <nl> + <nl> + self . _assertOpOutputMatchesExpected ( <nl> + array_ops . ones_like , <nl> + np . array ( [ [ - 4j , 3 + 2j ] , [ 2 , - 1j ] ] , dtype = dtype ) , <nl> + expected = np . array ( [ [ 1 , 1 ] , [ 1 , 1 ] ] , dtype = dtype ) ) <nl> + <nl> + if atan2_supported : # TODO ( b / 34703906 ) : atan2 support <nl> + self . _assertOpOutputMatchesExpected ( <nl> + math_ops . angle , <nl> + np . array ( [ 1 + 3j , - 4 + 7j , 2 . 7 , - 3j ] , dtype = dtype ) , <nl> + expected = np . angle ( <nl> + np . array ( [ 1 + 3j , - 4 + 7j , 2 . 7 , - 3j ] , dtype = dtype ) ) ) <nl> + <nl> + self . _assertOpOutputMatchesExpected ( <nl> + math_ops . conj , <nl> + np . array ( [ 1 + 3j , - 4 + 7j , 2 . 7 , - 3j ] , dtype = dtype ) , <nl> + expected = np . array ( [ 1 - 3j , - 4 - 7j , 2 . 7 , 3j ] , dtype = dtype ) ) <nl> + <nl> + self . _assertOpOutputMatchesExpected ( <nl> + math_ops . imag , <nl> + np . array ( [ 1 + 3j , - 4 + 7j , 2 . 7 , - 3j ] , dtype = dtype ) , <nl> + expected = np . array ( [ 3 , 7 , 0 , - 3 ] , dtype = ctypes [ dtype ] ) ) <nl> + <nl> + self . _assertOpOutputMatchesExpected ( <nl> + math_ops . real , <nl> + np . array ( [ 1 + 3j , - 4 + 7j , 2 . 7 , - 3j ] , dtype = dtype ) , <nl> + expected = np . array ( [ 1 , - 4 , 2 . 7 , 0 ] , dtype = ctypes [ dtype ] ) ) <nl> + <nl> def testIntOps ( self ) : <nl> for dtype in self . int_types : <nl> self . _assertOpOutputMatchesExpected ( <nl> def testBiasAddGrad ( self ) : <nl> <nl> def testCast ( self ) : <nl> shapes = [ [ ] , [ 4 ] , [ 2 , 3 ] , [ 2 , 0 , 4 ] ] <nl> - types = [ dtypes . bool , dtypes . int32 , dtypes . float32 ] <nl> + types = [ dtypes . bool , dtypes . int32 , dtypes . float32 ] + self . 
complex_tf_types <nl> for shape in shapes : <nl> for src_type in types : <nl> for dst_type in types : <nl> src = np . arange ( np . prod ( shape ) ) . astype ( src_type . as_numpy_dtype ) <nl> + if src_type in self . complex_tf_types : <nl> + src + = ( np . arange ( np . prod ( shape ) ) * 2j ) . astype ( <nl> + src_type . as_numpy_dtype ) <nl> src = src . reshape ( shape ) <nl> <nl> dst = src . astype ( dst_type . as_numpy_dtype ) <nl> mmm a / tensorflow / compiler / tests / variable_ops_test . py <nl> ppp b / tensorflow / compiler / tests / variable_ops_test . py <nl> def testOneWriteOneOutput ( self ) : <nl> # Regression test for a bug where computations with one non - constant <nl> # output and one variable update were mishandled . <nl> for dtype in self . numeric_types : <nl> - init = np . array ( [ [ 1 , 2 ] , [ 3 , 4 ] ] , dtype = dtype ) <nl> + init = np . array ( [ [ 1 , 2j ] , [ 3 , 4 ] ] ) . astype ( dtype ) <nl> with self . test_session ( ) as sess , self . test_scope ( ) : <nl> v = resource_variable_ops . ResourceVariable ( init ) <nl> sess . run ( variables . variables_initializer ( [ v ] ) ) <nl> def testOneWriteOneOutput ( self ) : <nl> x = v . assign_add ( p ) <nl> with ops . control_dependencies ( [ x ] ) : <nl> y = v . read_value ( ) <nl> - self . assertAllClose ( np . array ( [ [ 2 , 3 ] , [ 4 , 5 ] ] , dtype = dtype ) , <nl> - sess . run ( y , { p : 1 } ) ) <nl> + self . assertAllClose ( <nl> + np . array ( [ [ 2 , 1 + 2j ] , [ 4 , 5 ] ] ) . astype ( dtype ) , sess . run ( y , { <nl> + p : 1 <nl> + } ) ) <nl> <nl> def testSparseRead0DIndices ( self ) : <nl> for dtype in self . numeric_types : <nl> - init = np . array ( [ [ 0 , 1 , 2 , 3 ] , [ 4 , 5 , 6 , 7 ] , [ 8 , 9 , 10 , 11 ] ] , dtype = dtype ) <nl> + init = np . array ( [ [ 0 , 1 , 2 , 3 ] , [ 4 , 5 , 6 , 7 ] , [ 8j , 9 , 10 , <nl> + 11 ] ] ) . astype ( dtype ) <nl> with self . test_session ( ) as sess , self . test_scope ( ) : <nl> v = resource_variable_ops . ResourceVariable ( init ) <nl> sess . run ( variables . variables_initializer ( [ v ] ) ) <nl> x = v . sparse_read ( 2 ) <nl> - self . assertAllClose ( np . array ( [ 8 , 9 , 10 , 11 ] , dtype = dtype ) , sess . run ( x ) ) <nl> + self . assertAllClose ( <nl> + np . array ( [ 8j , 9 , 10 , 11 ] ) . astype ( dtype ) , sess . run ( x ) ) <nl> <nl> def testSparseRead1DIndices ( self ) : <nl> for dtype in self . numeric_types : <nl> - init = np . array ( [ [ 0 , 1 , 2 , 3 ] , [ 4 , 5 , 6 , 7 ] , [ 8 , 9 , 10 , 11 ] ] , dtype = dtype ) <nl> + init = np . array ( [ [ 0 , 1 , 2 , 3 ] , [ 4 , 5 , 6j , 7 ] , [ 8 , 9 , 10 , <nl> + 11 ] ] ) . astype ( dtype ) <nl> with self . test_session ( ) as sess , self . test_scope ( ) : <nl> v = resource_variable_ops . ResourceVariable ( init ) <nl> sess . run ( variables . variables_initializer ( [ v ] ) ) <nl> x = v . sparse_read ( [ 2 , 1 ] ) <nl> self . assertAllClose ( <nl> - np . array ( [ [ 8 , 9 , 10 , 11 ] , [ 4 , 5 , 6 , 7 ] ] , dtype = dtype ) , sess . run ( x ) ) <nl> + np . array ( [ [ 8 , 9 , 10 , 11 ] , [ 4 , 5 , 6j , 7 ] ] ) . astype ( dtype ) , <nl> + sess . run ( x ) ) <nl> <nl> def testSparseRead2DIndices ( self ) : <nl> for dtype in self . numeric_types : <nl> - init = np . array ( [ [ 0 , 1 , 2 , 3 ] , [ 4 , 5 , 6 , 7 ] , [ 8 , 9 , 10 , 11 ] ] , dtype = dtype ) <nl> + init = np . array ( [ [ 0 , 1 , 2j , 3 ] , [ 4 , 5 , 6 , 7 ] , [ 8 , 9 , 10 , <nl> + 11 ] ] ) . astype ( dtype ) <nl> with self . test_session ( ) as sess , self . test_scope ( ) : <nl> v = resource_variable_ops . 
ResourceVariable ( init ) <nl> sess . run ( variables . variables_initializer ( [ v ] ) ) <nl> x = v . sparse_read ( [ [ 2 , 1 ] , [ 0 , 2 ] ] ) <nl> self . assertAllClose ( <nl> - np . array ( <nl> - [ [ [ 8 , 9 , 10 , 11 ] , [ 4 , 5 , 6 , 7 ] ] , [ [ 0 , 1 , 2 , 3 ] , [ 8 , 9 , 10 , <nl> - 11 ] ] ] , <nl> - dtype = dtype ) , sess . run ( x ) ) <nl> + np . array ( [ [ [ 8 , 9 , 10 , 11 ] , [ 4 , 5 , 6 , 7 ] ] , <nl> + [ [ 0 , 1 , 2j , 3 ] , [ 8 , 9 , 10 , 11 ] ] ] ) . astype ( dtype ) , <nl> + sess . run ( x ) ) <nl> <nl> def testSparseRead2DIndices3DTensor ( self ) : <nl> for dtype in self . numeric_types : <nl> - init = np . array ( <nl> - [ [ [ 0 , 1 , 2 ] , [ 3 , 4 , 5 ] ] , [ [ 10 , 11 , 12 ] , [ 13 , 14 , 15 ] ] , <nl> - [ [ 20 , 21 , 22 ] , [ 23 , 24 , 25 ] ] , [ [ 30 , 31 , 32 ] , [ 33 , 34 , 35 ] ] ] , <nl> - dtype = dtype ) <nl> + init = np . array ( [ [ [ 0 , 1 , 2 ] , [ 3 , 4 , 5 ] ] , [ [ 10 , 11 , 12 ] , [ 13 , 14 , 15 ] ] , <nl> + [ [ 20 , 21 , 22 ] , [ 23 , 24j , 25 ] ] , <nl> + [ [ 30 , 31 , 32 ] , [ 33 , 34 , 35 ] ] ] ) . astype ( dtype ) <nl> with self . test_session ( ) as sess , self . test_scope ( ) : <nl> v = resource_variable_ops . ResourceVariable ( init ) <nl> sess . run ( variables . variables_initializer ( [ v ] ) ) <nl> x = v . sparse_read ( [ [ 2 , 1 ] , [ 3 , 0 ] ] ) <nl> self . assertAllClose ( <nl> np . array ( <nl> - [ [ [ [ 20 , 21 , 22 ] , [ 23 , 24 , 25 ] ] , [ [ 10 , 11 , 12 ] , [ 13 , 14 , 15 ] ] ] , <nl> + [ [ [ [ 20 , 21 , 22 ] , [ 23 , 24j , 25 ] ] , [ [ 10 , 11 , 12 ] , [ 13 , 14 , 15 ] ] ] , <nl> [ [ [ 30 , 31 , 32 ] , [ 33 , 34 , 35 ] ] , [ [ 0 , 1 , 2 ] , [ 3 , 4 , 5 ] ] ] ] , <nl> - dtype = dtype ) , sess . run ( x ) ) <nl> + ) . astype ( dtype ) , sess . run ( x ) ) <nl> <nl> def testReadWrite ( self ) : <nl> " " " Tests initialization , reading , and writing a resource variable . " " " <nl> - with self . test_session ( ) as session : <nl> - with self . test_scope ( ) : <nl> - with variable_scope . variable_scope ( " ascope " , use_resource = True ) : <nl> - x = variable_scope . get_variable ( <nl> - " x " , <nl> - shape = [ ] , <nl> - dtype = dtypes . float32 , <nl> - initializer = init_ops . constant_initializer ( 2 ) ) <nl> - a = x . read_value ( ) <nl> - with ops . control_dependencies ( [ a ] ) : <nl> - b = state_ops . assign ( x , 47 ) <nl> - with ops . control_dependencies ( [ b ] ) : <nl> - c = x . read_value ( ) <nl> - with ops . control_dependencies ( [ c ] ) : <nl> - d = state_ops . assign_add ( x , 3 ) <nl> - with ops . control_dependencies ( [ d ] ) : <nl> - e = x . read_value ( ) <nl> - <nl> - session . run ( variables . global_variables_initializer ( ) ) <nl> - v1 , v2 , v3 = session . run ( [ a , c , e ] ) <nl> - self . assertAllClose ( 2 . 0 , v1 ) <nl> - self . assertAllClose ( 47 . 0 , v2 ) <nl> - self . assertAllClose ( 50 . 0 , v3 ) <nl> + for dtype in self . numeric_types : <nl> + with self . test_session ( ) as session : <nl> + print ( ops . get_default_graph ( ) ) <nl> + with self . test_scope ( ) : <nl> + with variable_scope . variable_scope ( " ascope " , use_resource = True ) : <nl> + x = variable_scope . get_variable ( <nl> + " x " , <nl> + shape = [ ] , <nl> + dtype = dtype , <nl> + initializer = init_ops . constant_initializer ( 2 ) ) <nl> + a = x . read_value ( ) <nl> + with ops . control_dependencies ( [ a ] ) : <nl> + b = state_ops . assign ( x , dtype ( 47 ) ) <nl> + with ops . control_dependencies ( [ b ] ) : <nl> + c = x . read_value ( ) <nl> + with ops . 
control_dependencies ( [ c ] ) : <nl> + d = state_ops . assign_add ( x , np . array ( 6 + 2j ) . astype ( dtype ) ) <nl> + with ops . control_dependencies ( [ d ] ) : <nl> + e = state_ops . assign_sub ( x , dtype ( 3 ) ) <nl> + with ops . control_dependencies ( [ e ] ) : <nl> + f = x . read_value ( ) <nl> + <nl> + session . run ( variables . global_variables_initializer ( ) ) <nl> + v1 , v2 , v3 = session . run ( [ a , c , f ] ) <nl> + self . assertAllClose ( dtype ( 2 ) , v1 ) <nl> + self . assertAllClose ( dtype ( 47 ) , v2 ) <nl> + self . assertAllClose ( np . array ( 50 + 2j ) . astype ( dtype ) , v3 ) <nl> <nl> def testTraining ( self ) : <nl> " " " Tests a gradient descent step for a simple model . " " " <nl> mmm a / tensorflow / compiler / tests / xla_test . py <nl> ppp b / tensorflow / compiler / tests / xla_test . py <nl> def __init__ ( self , method_name = ' runTest ' ) : <nl> self . float_tf_types = [ <nl> dtype for dtype in self . all_tf_types if dtype . is_floating <nl> ] <nl> - self . numeric_tf_types = self . int_tf_types + self . float_tf_types <nl> + self . complex_tf_types = [ <nl> + dtype for dtype in self . all_tf_types if dtype . is_complex <nl> + ] <nl> + self . numeric_tf_types = ( <nl> + self . int_tf_types + self . float_tf_types + self . complex_tf_types ) <nl> <nl> self . all_types = [ dtype . as_numpy_dtype for dtype in self . all_tf_types ] <nl> self . int_types = [ dtype . as_numpy_dtype for dtype in self . int_tf_types ] <nl> self . float_types = [ dtype . as_numpy_dtype for dtype in self . float_tf_types ] <nl> - self . numeric_types = self . int_types + self . float_types <nl> + self . complex_types = [ <nl> + dtype . as_numpy_dtype for dtype in self . complex_tf_types <nl> + ] <nl> + self . numeric_types = self . int_types + self . float_types + self . complex_types <nl> <nl> # Parse the manifest file , if any , into a regex identifying tests to <nl> # disable <nl> mmm a / tensorflow / compiler / tf2xla / kernels / batch_matmul_op . cc <nl> ppp b / tensorflow / compiler / tf2xla / kernels / batch_matmul_op . cc <nl> class BatchMatMulOp : public XlaOpKernel { <nl> xla : : ComputationBuilder * builder = ctx - > builder ( ) ; <nl> <nl> xla : : ComputationDataHandle x_handle = ctx - > Input ( 0 ) ; <nl> + if ( BaseType ( input_type ( 0 ) ) = = DT_COMPLEX64 & & adj_x_ ) { <nl> + x_handle = builder - > Conj ( x_handle ) ; <nl> + } <nl> xla : : ComputationDataHandle y_handle = ctx - > Input ( 1 ) ; <nl> + if ( BaseType ( input_type ( 1 ) ) = = DT_COMPLEX64 & & adj_y_ ) { <nl> + y_handle = builder - > Conj ( y_handle ) ; <nl> + } <nl> <nl> / / Reshape input tensors into 3D tensors by flattening the batch <nl> / / dimensions . This makes it easier to unroll the batch dimension . <nl> mmm a / tensorflow / compiler / tf2xla / kernels / binary_ops . cc <nl> ppp b / tensorflow / compiler / tf2xla / kernels / binary_ops . cc <nl> See the License for the specific language governing permissions and <nl> limitations under the License . <nl> = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = * / <nl> <nl> - / / Native XLA implementations of simple unary Ops <nl> + / / Native XLA implementations of simple binary Ops <nl> <nl> # include " tensorflow / compiler / tf2xla / kernels / cwise_ops . h " <nl> # include " tensorflow / compiler / tf2xla / xla_helpers . h " <nl> limitations under the License . <nl> # include " tensorflow / compiler / xla / client / client_library . 
h " <nl> # include " tensorflow / compiler / xla / client / computation_builder . h " <nl> # include " tensorflow / core / framework / kernel_def_builder . h " <nl> + # include " tensorflow / core / framework / op_kernel . h " <nl> <nl> namespace tensorflow { <nl> namespace { <nl> XLA_MAKE_BINARY ( Sub , b - > Sub ( lhs , rhs , extend_dimensions ) ) ; <nl> XLA_MAKE_BINARY ( Mul , b - > Mul ( lhs , rhs , extend_dimensions ) ) ; <nl> XLA_MAKE_BINARY ( Div , b - > Div ( lhs , rhs , extend_dimensions ) ) ; <nl> <nl> + XLA_MAKE_BINARY ( Atan2 , b - > Atan2 ( lhs , rhs , extend_dimensions ) ) ; <nl> + XLA_MAKE_BINARY ( Complex , b - > Complex ( lhs , rhs , extend_dimensions ) ) ; <nl> + <nl> / / Implementation of FloorDiv . Pseudo - code : <nl> / / if ( ( x < 0 ) ! = ( y < 0 ) ) { <nl> / / T abs_x = std : : abs ( x ) ; <nl> class ApproximateEqualOp : public XlaOpKernel { <nl> / / Computes the max of the scalar input x and 0 . <nl> void Compile ( XlaOpKernelContext * ctx ) override { <nl> xla : : ComputationBuilder * b = ctx - > builder ( ) ; <nl> - auto result = b - > Lt ( b - > Abs ( b - > Sub ( ctx - > Input ( 0 ) , ctx - > Input ( 1 ) ) ) , <nl> - XlaHelpers : : FloatLiteral ( b , input_type ( 0 ) , tolerance_ ) ) ; <nl> + auto abs = b - > Abs ( b - > Sub ( ctx - > Input ( 0 ) , ctx - > Input ( 1 ) ) ) ; <nl> + auto abs_shape = b - > GetShape ( abs ) ; <nl> + OP_REQUIRES_OK ( ctx , abs_shape . status ( ) ) ; <nl> + auto abs_type = abs_shape . ValueOrDie ( ) - > element_type ( ) ; <nl> + auto result = b - > Lt ( <nl> + abs , b - > ConvertElementType ( b - > ConstantR0 < float > ( tolerance_ ) , abs_type ) ) ; <nl> ctx - > SetOutput ( 0 , result ) ; <nl> } <nl> <nl> mmm a / tensorflow / compiler / tf2xla / kernels / cast_op . cc <nl> ppp b / tensorflow / compiler / tf2xla / kernels / cast_op . cc <nl> limitations under the License . <nl> # include " tensorflow / compiler / tf2xla / xla_helpers . h " <nl> # include " tensorflow / compiler / tf2xla / xla_op_kernel . h " <nl> # include " tensorflow / compiler / tf2xla / xla_op_registry . h " <nl> + # include " tensorflow / compiler / xla / primitive_util . h " <nl> # include " tensorflow / core / framework / kernel_def_builder . h " <nl> <nl> namespace tensorflow { <nl> class CastOp : public XlaOpKernel { <nl> output = input ; <nl> } else if ( dst_dtype_ = = DT_BOOL ) { <nl> output = builder - > Ne ( input , XlaHelpers : : Zero ( builder , src_dtype_ ) ) ; <nl> + } else if ( xla : : primitive_util : : IsComplexType ( src_type_ ) & & <nl> + ! xla : : primitive_util : : IsComplexType ( dst_type_ ) ) { <nl> + / / As in cast_op . h , we replicate the numpy behavior of truncating the <nl> + / / imaginary part . <nl> + output = builder - > ConvertElementType ( builder - > Real ( input ) , dst_type_ ) ; <nl> } else { <nl> output = builder - > ConvertElementType ( input , dst_type_ ) ; <nl> } <nl> mmm a / tensorflow / compiler / tf2xla / kernels / gather_op . cc <nl> ppp b / tensorflow / compiler / tf2xla / kernels / gather_op . 
cc <nl> void GatherOpDynamicSlice : : Compile ( XlaOpKernelContext * context ) { <nl> errors : : InvalidArgument ( " indices must be int32 or int64 " ) ) ; <nl> <nl> xla : : ComputationDataHandle gather = XlaComputeGatherDynamicSlice ( <nl> - context , input , input_shape , indices , indices_shape , axis , DT_FLOAT , <nl> + context , input , input_shape , indices , indices_shape , axis , input_type ( 0 ) , <nl> index_type , builder ) ; <nl> context - > SetOutput ( 0 , gather ) ; <nl> } <nl> mmm a / tensorflow / compiler / tf2xla / kernels / matmul_op . cc <nl> ppp b / tensorflow / compiler / tf2xla / kernels / matmul_op . cc <nl> limitations under the License . <nl> namespace tensorflow { <nl> namespace { <nl> <nl> + constexpr std : : array < DataType , 4 > kMatmulTypes = { <nl> + { DT_HALF , DT_FLOAT , DT_DOUBLE , DT_COMPLEX64 } } ; <nl> + <nl> class MatMulOp : public XlaOpKernel { <nl> public : <nl> explicit MatMulOp ( OpKernelConstruction * ctx , bool is_sparse = false ) <nl> class MatMulOp : public XlaOpKernel { <nl> bool transpose_b_ ; <nl> } ; <nl> <nl> - REGISTER_XLA_OP ( Name ( " MatMul " ) . TypeConstraint ( " T " , kFloatTypes ) , MatMulOp ) ; <nl> + REGISTER_XLA_OP ( Name ( " MatMul " ) . TypeConstraint ( " T " , kMatmulTypes ) , MatMulOp ) ; <nl> <nl> class SparseMatMulOp : public MatMulOp { <nl> public : <nl> mmm a / tensorflow / compiler / tf2xla / kernels / training_ops . cc <nl> ppp b / tensorflow / compiler / tf2xla / kernels / training_ops . cc <nl> class ResourceApplyGradientDescent : public XlaOpKernel { <nl> OP_REQUIRES_OK ( ctx , ctx - > AssignVariable ( 0 , ctx - > input_type ( 1 ) , handle ) ) ; <nl> } <nl> } ; <nl> - REGISTER_XLA_OP ( Name ( " ResourceApplyGradientDescent " ) , <nl> - ResourceApplyGradientDescent ) ; <nl> + REGISTER_XLA_OP ( <nl> + Name ( " ResourceApplyGradientDescent " ) . TypeConstraint ( " T " , kFloatTypes ) , <nl> + ResourceApplyGradientDescent ) ; <nl> <nl> class ResourceApplyMomentum : public XlaOpKernel { <nl> public : <nl> class ResourceApplyMomentum : public XlaOpKernel { <nl> private : <nl> bool use_nesterov_ ; <nl> } ; <nl> - REGISTER_XLA_OP ( Name ( " ResourceApplyMomentum " ) , ResourceApplyMomentum ) ; <nl> + REGISTER_XLA_OP ( Name ( " ResourceApplyMomentum " ) . TypeConstraint ( " T " , kFloatTypes ) , <nl> + ResourceApplyMomentum ) ; <nl> <nl> class ResourceApplyAdagrad : public XlaOpKernel { <nl> public : <nl> class ResourceApplyAdagrad : public XlaOpKernel { <nl> OP_REQUIRES_OK ( ctx , ctx - > AssignVariable ( 1 , type , accum ) ) ; <nl> } <nl> } ; <nl> - REGISTER_XLA_OP ( Name ( " ResourceApplyAdagrad " ) , ResourceApplyAdagrad ) ; <nl> + REGISTER_XLA_OP ( Name ( " ResourceApplyAdagrad " ) . TypeConstraint ( " T " , kFloatTypes ) , <nl> + ResourceApplyAdagrad ) ; <nl> <nl> class ResourceApplyAdam : public XlaOpKernel { <nl> public : <nl> class ResourceApplyAdam : public XlaOpKernel { <nl> private : <nl> DataType dtype_ ; <nl> } ; <nl> - REGISTER_XLA_OP ( Name ( " ResourceApplyAdam " ) , ResourceApplyAdam ) ; <nl> + REGISTER_XLA_OP ( Name ( " ResourceApplyAdam " ) . TypeConstraint ( " T " , kFloatTypes ) , <nl> + ResourceApplyAdam ) ; <nl> <nl> class ResourceApplyRMSProp : public XlaOpKernel { <nl> public : <nl> class ResourceApplyRMSProp : public XlaOpKernel { <nl> OP_REQUIRES_OK ( ctx , ctx - > AssignVariable ( 2 , type , new_mom ) ) ; <nl> } <nl> } ; <nl> - REGISTER_XLA_OP ( Name ( " ResourceApplyRMSProp " ) , ResourceApplyRMSProp ) ; <nl> + REGISTER_XLA_OP ( Name ( " ResourceApplyRMSProp " ) . 
TypeConstraint ( " T " , kFloatTypes ) , <nl> + ResourceApplyRMSProp ) ; <nl> <nl> void CompileFtrl ( XlaOpKernelContext * ctx , DataType dtype , <nl> bool has_l2_shrinkage ) { <nl> class ResourceApplyFtrl : public XlaOpKernel { <nl> private : <nl> DataType dtype_ ; <nl> } ; <nl> - REGISTER_XLA_OP ( Name ( " ResourceApplyFtrl " ) , ResourceApplyFtrl ) ; <nl> + REGISTER_XLA_OP ( Name ( " ResourceApplyFtrl " ) . TypeConstraint ( " T " , kFloatTypes ) , <nl> + ResourceApplyFtrl ) ; <nl> <nl> class ResourceApplyFtrlV2 : public XlaOpKernel { <nl> public : <nl> class ResourceApplyFtrlV2 : public XlaOpKernel { <nl> private : <nl> DataType dtype_ ; <nl> } ; <nl> - REGISTER_XLA_OP ( Name ( " ResourceApplyFtrlV2 " ) , ResourceApplyFtrlV2 ) ; <nl> + REGISTER_XLA_OP ( Name ( " ResourceApplyFtrlV2 " ) . TypeConstraint ( " T " , kFloatTypes ) , <nl> + ResourceApplyFtrlV2 ) ; <nl> <nl> } / / namespace <nl> } / / namespace tensorflow <nl> mmm a / tensorflow / compiler / tf2xla / kernels / unary_ops . cc <nl> ppp b / tensorflow / compiler / tf2xla / kernels / unary_ops . cc <nl> namespace { <nl> } ; \ <nl> REGISTER_XLA_OP ( Name ( # NAME ) , NAME # # Op ) ; <nl> <nl> + XLAJIT_MAKE_UNARY ( ComplexAbs , b - > Abs ( x ) ) ; <nl> + <nl> + XLAJIT_MAKE_UNARY ( Angle , b - > Atan2 ( b - > Imag ( x ) , b - > Real ( x ) ) ) ; <nl> + <nl> + XLAJIT_MAKE_UNARY ( Conj , b - > Conj ( x ) ) ; <nl> + <nl> / / Return x if x > 0 , otherwise - x . <nl> XLAJIT_MAKE_UNARY ( Abs , b - > Abs ( x ) ) ; <nl> <nl> XLAJIT_MAKE_UNARY ( Square , b - > Mul ( x , x ) ) ; <nl> XLAJIT_MAKE_UNARY ( Tan , b - > Div ( b - > Sin ( x ) , b - > Cos ( x ) ) ) ; <nl> XLAJIT_MAKE_UNARY ( Tanh , b - > Tanh ( x ) ) ; <nl> <nl> + XLAJIT_MAKE_UNARY ( Real , b - > Real ( x ) ) ; <nl> + XLAJIT_MAKE_UNARY ( Imag , b - > Imag ( x ) ) ; <nl> + <nl> # undef XLAJIT_MAKE_UNARY <nl> <nl> } / / namespace <nl> mmm a / tensorflow / compiler / tf2xla / xla_helpers . cc <nl> ppp b / tensorflow / compiler / tf2xla / xla_helpers . cc <nl> xla : : ComputationDataHandle XlaHelpers : : IntegerLiteral ( <nl> case xla : : F64 : <nl> literal = * xla : : Literal : : CreateR0 < double > ( value ) ; <nl> break ; <nl> + case xla : : C64 : <nl> + literal = * xla : : Literal : : CreateR0 < complex64 > ( value ) ; <nl> + break ; <nl> case xla : : PRED : <nl> LOG ( FATAL ) < < " pred element type is not integral " ; <nl> case xla : : S16 : <nl> xla : : ComputationDataHandle XlaHelpers : : FloatLiteral ( xla : : ComputationBuilder * b , <nl> case xla : : F64 : <nl> return b - > ConstantR0 < double > ( value ) ; <nl> break ; <nl> + case xla : : C64 : <nl> + return b - > ConstantR0 < complex64 > ( value ) ; <nl> + break ; <nl> default : <nl> LOG ( FATAL ) < < " unhandled element type " < < type ; <nl> } <nl> mmm a / tensorflow / compiler / tf2xla / xla_op_registry . h <nl> ppp b / tensorflow / compiler / tf2xla / xla_op_registry . 
h <nl> extern const char * const DEVICE_XLA_GPU ; <nl> <nl> constexpr std : : array < DataType , 3 > kFloatTypes = { <nl> { DT_HALF , DT_FLOAT , DT_DOUBLE } } ; <nl> - constexpr std : : array < DataType , 7 > kNumericTypes = { <nl> - { DT_UINT32 , DT_UINT64 , DT_INT32 , DT_INT64 , DT_HALF , DT_FLOAT , DT_DOUBLE } } ; <nl> + constexpr std : : array < DataType , 8 > kNumericTypes = { <nl> + { DT_UINT32 , DT_UINT64 , DT_INT32 , DT_INT64 , DT_HALF , DT_FLOAT , DT_DOUBLE , <nl> + DT_COMPLEX64 } } ; <nl> <nl> - constexpr std : : array < DataType , 7 > kCpuAllTypes = { <nl> - { DT_UINT32 , DT_UINT64 , DT_INT32 , DT_INT64 , DT_FLOAT , DT_DOUBLE , DT_BOOL } } ; <nl> + constexpr std : : array < DataType , 8 > kCpuAllTypes = { <nl> + { DT_UINT32 , DT_UINT64 , DT_INT32 , DT_INT64 , DT_FLOAT , DT_DOUBLE , <nl> + DT_COMPLEX64 , DT_BOOL } } ; <nl> <nl> - constexpr std : : array < DataType , 7 > kGpuAllTypes = { <nl> - { DT_UINT32 , DT_UINT64 , DT_INT32 , DT_INT64 , DT_FLOAT , DT_DOUBLE , DT_BOOL } } ; <nl> + constexpr std : : array < DataType , 8 > kGpuAllTypes = { <nl> + { DT_UINT32 , DT_UINT64 , DT_INT32 , DT_INT64 , DT_FLOAT , DT_DOUBLE , <nl> + DT_COMPLEX64 , DT_BOOL } } ; <nl> <nl> / / Class that manages registrations of operators and devices for the XLA JIT . <nl> / / Not thread - safe . <nl> mmm a / tensorflow / compiler / xla / array . h <nl> ppp b / tensorflow / compiler / xla / array . h <nl> class Array { <nl> } <nl> <nl> / / Advances the specified set of indexes and returns true if we haven ' t <nl> - / / wrapped around ( i . e . result isnt { 0 , 0 , . . . } ) . <nl> + / / wrapped around ( i . e . result isn ' t { 0 , 0 , . . . } ) . <nl> bool next_index ( std : : vector < int64 > * index ) const { <nl> CHECK_EQ ( index - > size ( ) , sizes_ . size ( ) ) ; <nl> for ( int64 i = sizes_ . size ( ) - 1 ; i > = 0 ; - - i ) { <nl> mmm a / tensorflow / compiler / xla / client / computation_builder . cc <nl> ppp b / tensorflow / compiler / xla / client / computation_builder . 
cc <nl> ComputationDataHandle ComputationBuilder : : CustomCall ( <nl> return ParseOpResponse ( s , & response ) ; <nl> } <nl> <nl>
+ ComputationDataHandle ComputationBuilder : : Complex ( <nl>
+ const ComputationDataHandle & real , const ComputationDataHandle & imag , <nl>
+ tensorflow : : gtl : : ArraySlice < int64 > broadcast_dimensions ) { <nl>
+ return BinaryOp ( BINOP_COMPLEX , real , imag , broadcast_dimensions ) ; <nl>
+ } <nl>
+ <nl>
+ ComputationDataHandle ComputationBuilder : : Conj ( <nl>
+ const ComputationDataHandle & operand ) { <nl>
+ return Complex ( Real ( operand ) , Neg ( Imag ( operand ) ) ) ; <nl>
+ } <nl>
+ <nl>
ComputationDataHandle ComputationBuilder : : Add ( <nl> const ComputationDataHandle & lhs , const ComputationDataHandle & rhs , <nl> tensorflow : : gtl : : ArraySlice < int64 > broadcast_dimensions ) { <nl> ComputationDataHandle ComputationBuilder : : Abs ( <nl> return UnaryOp ( UNOP_ABS , operand ) ; <nl> } <nl> <nl>
+ ComputationDataHandle ComputationBuilder : : Atan2 ( <nl>
+ const ComputationDataHandle & y , const ComputationDataHandle & x , <nl>
+ tensorflow : : gtl : : ArraySlice < int64 > broadcast_dimensions ) { <nl>
+ return BinaryOp ( BINOP_ATAN2 , y , x , broadcast_dimensions ) ; <nl>
+ } <nl>
+ <nl>
ComputationDataHandle ComputationBuilder : : Exp ( <nl> const ComputationDataHandle & operand ) { <nl> return UnaryOp ( UNOP_EXP , operand ) ; <nl> ComputationDataHandle ComputationBuilder : : Tanh ( <nl> return UnaryOp ( UNOP_TANH , operand ) ; <nl> } <nl> <nl>
+ ComputationDataHandle ComputationBuilder : : Real ( <nl>
+ const ComputationDataHandle & operand ) { <nl>
+ return UnaryOp ( UNOP_REAL , operand ) ; <nl>
+ } <nl>
+ <nl>
+ ComputationDataHandle ComputationBuilder : : Imag ( <nl>
+ const ComputationDataHandle & operand ) { <nl>
+ return UnaryOp ( UNOP_IMAG , operand ) ; <nl>
+ } <nl>
+ <nl>
ComputationDataHandle ComputationBuilder : : IsFinite ( <nl> const ComputationDataHandle & operand ) { <nl> return UnaryOp ( UNOP_IS_FINITE , operand ) ; <nl>
mmm a / tensorflow / compiler / xla / client / computation_builder . h <nl> ppp b / tensorflow / compiler / xla / client / computation_builder . h <nl> class ComputationBuilder { <nl> / / of the operands is a scalar , or an explicit broadcast dimension is given <nl> / / ( see g3doc for more details ) . <nl> <nl>
+ / / Enqueues a complex compose instruction onto the computation . <nl>
+ ComputationDataHandle Complex ( <nl>
+ const ComputationDataHandle & real , const ComputationDataHandle & imag , <nl>
+ tensorflow : : gtl : : ArraySlice < int64 > broadcast_dimensions = { } ) ; <nl>
+ <nl>
+ / / Enqueues a complex conjugate instruction onto the computation . <nl>
+ ComputationDataHandle Conj ( const ComputationDataHandle & operand ) ; <nl>
+ <nl>
/ / Enqueues an add instruction onto the computation . <nl> ComputationDataHandle Add ( <nl> const ComputationDataHandle & lhs , const ComputationDataHandle & rhs , <nl> class ComputationBuilder { <nl> / / Enqueues an abs instruction onto the computation . <nl> ComputationDataHandle Abs ( const ComputationDataHandle & operand ) ; <nl> <nl>
+ / / Enqueues an atan2 instruction onto the computation . <nl>
+ ComputationDataHandle Atan2 ( <nl>
+ const ComputationDataHandle & y , const ComputationDataHandle & x , <nl>
+ tensorflow : : gtl : : ArraySlice < int64 > broadcast_dimensions = { } ) ; <nl>
+ <nl>
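+ / / Illustrative sketch ( not part of this change ) : derived complex helpers <nl>
+ / / can be composed from these primitives . For example , a hypothetical free <nl>
+ / / helper for angle ( z ) = atan2 ( imag ( z ) , real ( z ) ) , mirroring what the <nl>
+ / / tf2xla bridge builds for the Angle op : <nl>
+ / / <nl>
+ / / ComputationDataHandle Angle ( ComputationBuilder * b , <nl>
+ / / const ComputationDataHandle & z ) { <nl>
+ / / return b - > Atan2 ( b - > Imag ( z ) , b - > Real ( z ) ) ; <nl>
+ / / } <nl>
+ <nl>
/ / Enqueues an exp instruction onto the computation . 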
<nl> ComputationDataHandle Exp ( const ComputationDataHandle & operand ) ; <nl> <nl> class ComputationBuilder { <nl> / / Enqueues a tanh instruction onto the computation . <nl> ComputationDataHandle Tanh ( const ComputationDataHandle & operand ) ; <nl> <nl> + / / Enqueues a real - part instruction onto the computation . <nl> + ComputationDataHandle Real ( const ComputationDataHandle & operand ) ; <nl> + <nl> + / / Enqueues an imaginary - part instruction onto the computation . <nl> + ComputationDataHandle Imag ( const ComputationDataHandle & operand ) ; <nl> + <nl> / / Enqueues a float32 sqrt instruction onto the computation . <nl> / / ( float32 is specified as there is an implicit float32 0 . 5f constant <nl> / / exponent ) . <nl> mmm a / tensorflow / compiler / xla / literal_util . cc <nl> ppp b / tensorflow / compiler / xla / literal_util . cc <nl> Status Literal : : Copy ( const Literal & src_literal , <nl> return * Literal : : CreateR0 < float > ( 0 ) ; <nl> case F64 : <nl> return * Literal : : CreateR0 < double > ( 0 ) ; <nl> + case C64 : <nl> + return * Literal : : CreateR0 < complex64 > ( 0 ) ; <nl> case PRED : <nl> return * Literal : : CreateR0 < bool > ( false ) ; <nl> case S16 : <nl> Status Literal : : Copy ( const Literal & src_literal , <nl> return * Literal : : CreateR0 < float > ( 1 ) ; <nl> case F64 : <nl> return * Literal : : CreateR0 < double > ( 1 ) ; <nl> + case C64 : <nl> + return * Literal : : CreateR0 < complex64 > ( 1 ) ; <nl> case PRED : <nl> return * Literal : : CreateR0 < bool > ( true ) ; <nl> case S16 : <nl> Status Literal : : Copy ( const Literal & src_literal , <nl> case F64 : <nl> return * Literal : : CreateR0 < double > ( <nl> - std : : numeric_limits < double > : : infinity ( ) ) ; <nl> + case C64 : <nl> + LOG ( FATAL ) < < " C64 element type has no minimum value " ; <nl> case PRED : <nl> return * Literal : : CreateR0 < bool > ( false ) ; <nl> case S16 : <nl> mmm a / tensorflow / compiler / xla / service / algebraic_simplifier . cc <nl> ppp b / tensorflow / compiler / xla / service / algebraic_simplifier . 
cc <nl> class AlgebraicSimplifierVisitor : public DfsHloVisitorWithDefault { <nl> <nl> Status HandleConvert ( HloInstruction * convert ) override ; <nl> <nl> + Status HandleReal ( HloInstruction * real , HloInstruction * operand ) override ; <nl> + Status HandleImag ( HloInstruction * imag , HloInstruction * operand ) override ; <nl> + <nl> Status HandleConvolution ( HloInstruction * convolution , HloInstruction * lhs , <nl> HloInstruction * rhs , const Window & window ) override ; <nl> <nl> Status AlgebraicSimplifierVisitor : : HandleConvert ( HloInstruction * convert ) { <nl> return Status : : OK ( ) ; <nl> } <nl> <nl> + / / Real ( Complex ( r , i ) ) - > r <nl> + Status AlgebraicSimplifierVisitor : : HandleReal ( HloInstruction * real , <nl> + HloInstruction * operand ) { <nl> + if ( operand - > opcode ( ) = = HloOpcode : : kComplex ) { <nl> + return ReplaceInstruction ( real , operand - > mutable_operand ( 0 ) ) ; <nl> + } <nl> + return Status : : OK ( ) ; <nl> + } <nl> + <nl> + / / Imag ( Complex ( r , i ) ) - > i <nl> + Status AlgebraicSimplifierVisitor : : HandleImag ( HloInstruction * imag , <nl> + HloInstruction * operand ) { <nl> + if ( operand - > opcode ( ) = = HloOpcode : : kComplex ) { <nl> + return ReplaceInstruction ( imag , operand - > mutable_operand ( 1 ) ) ; <nl> + } <nl> + return Status : : OK ( ) ; <nl> + } <nl> + <nl> Status AlgebraicSimplifierVisitor : : HandlePad ( HloInstruction * pad ) { <nl> / / Eliminate nop pads ( padding all zero ) , and replace a pad with negative <nl> / / padding with a pad with non - negative padding followed by a slice . <nl> Status AlgebraicSimplifierVisitor : : HandleWhile ( HloInstruction * while_op ) { <nl> return Status : : OK ( ) ; <nl> } <nl> <nl> - / / Remove while loops with static trip count of 1 . <nl> + / / Remove while loops with static trip count of 0 . <nl> optional < int64 > trip_count = GetLoopTripCount ( while_op ) ; <nl> if ( trip_count & & * trip_count = = 0 ) { <nl> / / The loop never executes , so the value of the loop is the value of its <nl> Status AlgebraicSimplifierVisitor : : HandleWhile ( HloInstruction * while_op ) { <nl> changed_ = true ; <nl> return Status : : OK ( ) ; <nl> } <nl> + <nl> + / / Transform while loops with static trip count of 1 into a call op , then <nl> + / / inline the call . <nl> if ( trip_count & & * trip_count = = 1 ) { <nl> - / / Transform the while loop into a call op , then inline the call . <nl> auto computation = while_op - > parent ( ) ; <nl> auto call_op = computation - > AddInstruction ( HloInstruction : : CreateCall ( <nl> while_op - > shape ( ) , while_op - > operands ( ) , while_op - > while_body ( ) ) ) ; <nl> mmm a / tensorflow / compiler / xla / service / algebraic_simplifier_test . cc <nl> ppp b / tensorflow / compiler / xla / service / algebraic_simplifier_test . cc <nl> TEST_F ( AlgebraicSimplifierTest , DivOneArray ) { <nl> EXPECT_EQ ( root , param0 ) ; <nl> } <nl> <nl> + / / Test that real ( complex ( r , i ) ) is simplified to r . <nl> + TEST_F ( AlgebraicSimplifierTest , RealOfComplex ) { <nl> + Shape r2f32 = ShapeUtil : : MakeShape ( F32 , { 2 , 2 } ) ; <nl> + HloComputation : : Builder builder ( TestName ( ) ) ; <nl> + HloInstruction * param0 = builder . AddInstruction ( <nl> + HloInstruction : : CreateParameter ( 0 , r2f32 , " param0 " ) ) ; <nl> + HloInstruction * param1 = builder . AddInstruction ( <nl> + HloInstruction : : CreateParameter ( 1 , r2f32 , " param1 " ) ) ; <nl> + HloInstruction * cplx = builder . 
AddInstruction ( <nl> + HloInstruction : : CreateBinary ( ShapeUtil : : ChangeElementType ( r2f32 , C64 ) , <nl> + HloOpcode : : kComplex , param0 , param1 ) ) ; <nl> + HloInstruction * real = builder . AddInstruction ( <nl> + HloInstruction : : CreateUnary ( r2f32 , HloOpcode : : kReal , cplx ) ) ; <nl> + <nl> + auto module = CreateNewModule ( ) ; <nl> + auto computation = module - > AddEntryComputation ( builder . Build ( ) ) ; <nl> + HloInstruction * root = computation - > root_instruction ( ) ; <nl> + EXPECT_EQ ( root , real ) ; <nl> + AlgebraicSimplifier simplifier ( / * is_layout_sensitive = * / false , <nl> + non_bitcasting_callback ( ) ) ; <nl> + ASSERT_TRUE ( simplifier . Run ( module . get ( ) ) . ValueOrDie ( ) ) ; <nl> + root = computation - > root_instruction ( ) ; <nl> + EXPECT_EQ ( root , param0 ) ; <nl> + } <nl> + <nl> + / / Test that imag ( complex ( r , i ) ) is simplified to i . <nl> + TEST_F ( AlgebraicSimplifierTest , ImagOfComplex ) { <nl> + Shape r2f32 = ShapeUtil : : MakeShape ( F32 , { 2 , 2 } ) ; <nl> + HloComputation : : Builder builder ( TestName ( ) ) ; <nl> + HloInstruction * param0 = builder . AddInstruction ( <nl> + HloInstruction : : CreateParameter ( 0 , r2f32 , " param0 " ) ) ; <nl> + HloInstruction * param1 = builder . AddInstruction ( <nl> + HloInstruction : : CreateParameter ( 1 , r2f32 , " param1 " ) ) ; <nl> + HloInstruction * cplx = builder . AddInstruction ( <nl> + HloInstruction : : CreateBinary ( ShapeUtil : : ChangeElementType ( r2f32 , C64 ) , <nl> + HloOpcode : : kComplex , param0 , param1 ) ) ; <nl> + HloInstruction * imag = builder . AddInstruction ( <nl> + HloInstruction : : CreateUnary ( r2f32 , HloOpcode : : kImag , cplx ) ) ; <nl> + <nl> + auto module = CreateNewModule ( ) ; <nl> + auto computation = module - > AddEntryComputation ( builder . Build ( ) ) ; <nl> + HloInstruction * root = computation - > root_instruction ( ) ; <nl> + EXPECT_EQ ( root , imag ) ; <nl> + AlgebraicSimplifier simplifier ( / * is_layout_sensitive = * / false , <nl> + non_bitcasting_callback ( ) ) ; <nl> + ASSERT_TRUE ( simplifier . Run ( module . get ( ) ) . ValueOrDie ( ) ) ; <nl> + root = computation - > root_instruction ( ) ; <nl> + EXPECT_EQ ( root , param1 ) ; <nl> + } <nl> + <nl> / / Test that get_element ( make_tuple ( { A , B } ) , 1 ) is simplified to B <nl> TEST_F ( AlgebraicSimplifierTest , SelectMakeTuple ) { <nl> Shape r0f32 = ShapeUtil : : MakeShape ( F32 , { } ) ; <nl> mmm a / tensorflow / compiler / xla / service / buffer_assignment_test . cc <nl> ppp b / tensorflow / compiler / xla / service / buffer_assignment_test . cc <nl> TEST_F ( BufferAssignmentTest , TupleCallAsOutput ) { <nl> auto assignment = RunBufferAssignment ( module . get ( ) ) ; <nl> <nl> EXPECT_EQ ( 3 , assignment - > Allocations ( ) . size ( ) ) ; <nl> - / / Buffers for call are co - located with the sub - computation . <nl> + / / Buffers for call are colocated with the sub - computation . <nl> EXPECT_EQ ( GetAllocation ( * assignment , call , / * index = * / { } ) , <nl> GetAllocation ( * assignment , sub_tuple , / * index = * / { } ) ) ; <nl> EXPECT_EQ ( GetAllocation ( * assignment , call , / * index = * / { 0 } ) , <nl> TEST_F ( BufferAssignmentTest , TupleChainedCallAsOutput ) { <nl> <nl> auto assignment = RunBufferAssignment ( module . get ( ) ) ; <nl> <nl> - / / Buffers for call are co - located with the sub - computations . <nl> + / / Buffers for call are colocated with the sub - computations . 
<nl> EXPECT_EQ ( GetAllocation ( * assignment , a_call , / * index = * / { } ) , <nl> GetAllocation ( * assignment , b_call , / * index = * / { } ) ) ; <nl> EXPECT_EQ ( GetAllocation ( * assignment , b_call , / * index = * / { } ) , <nl> mmm a / tensorflow / compiler / xla / service / cpu / cpu_compiler . cc <nl> ppp b / tensorflow / compiler / xla / service / cpu / cpu_compiler . cc <nl> Status CpuCompiler : : RunHloPasses ( HloModule * module , bool is_aot_compile ) { <nl> [ ] ( const Shape & , const Shape & ) { return false ; } , <nl> / * enable_dot_simplification = * / false ) ; <nl> pass . AddPass < TupleSimplifier > ( ) ; <nl> + pass . AddPass < HloDCE > ( ) ; <nl> pass . AddPass < ReshapeMover > ( ) ; <nl> pass . AddPass < HloConstantFolding > ( ) ; <nl> } <nl> mmm a / tensorflow / compiler / xla / service / cpu / dot_op_emitter . cc <nl> ppp b / tensorflow / compiler / xla / service / cpu / dot_op_emitter . cc <nl> DotOpEmitter : : DotOpEmitter ( const HloInstruction & dot , bool transpose_lhs , <nl> llvm : : Value * executable_run_options_value , llvm : : IRBuilder < > * ir_builder , <nl> const HloModuleConfig & hlo_module_config ) { <nl> PrimitiveType type = target_array . GetShape ( ) . element_type ( ) ; <nl> - TF_RET_CHECK ( F32 = = type | | F64 = = type ) ; <nl> + TF_RET_CHECK ( F32 = = type | | F64 = = type | | C64 = = type ) ; <nl> DotOpEmitter dot_emitter ( dot , transpose_lhs , transpose_rhs , target_array , <nl> lhs_array , rhs_array , executable_run_options_value , <nl> ir_builder , hlo_module_config ) ; <nl> tensorflow : : Status DotOpEmitter : : Emit ( ) { <nl> llvm : : BasicBlock * preheader_bb = reduction_loop - > GetPreheaderBasicBlock ( ) ; <nl> ir_builder_ - > SetInsertPoint ( preheader_bb - > getTerminator ( ) ) ; <nl> <nl> - ir_builder_ - > CreateStore ( llvm : : ConstantFP : : get ( accum_type , 0 . 0 ) , <nl> + ir_builder_ - > CreateStore ( llvm : : Constant : : getNullValue ( accum_type ) , <nl> accum_address ) ; <nl> <nl> / / Body basic block of reduction loop : <nl> tensorflow : : Status DotOpEmitter : : Emit ( ) { <nl> llvm : : Value * rhs_element = <nl> rhs_array_ . 
EmitReadArrayElement ( rhs_index , ir_builder_ ) ; <nl> <nl>
- llvm : : Value * product = ir_builder_ - > CreateFMul ( lhs_element , rhs_element ) ; <nl>
llvm : : Value * accum = ir_builder_ - > CreateLoad ( accum_address ) ; <nl>
- llvm : : Value * updated_accum = ir_builder_ - > CreateFAdd ( accum , product ) ; <nl>
+ llvm : : Value * updated_accum ; <nl>
+ if ( ShapeUtil : : ElementIsComplex ( lhs_shape ) ) { <nl>
+ auto real = [ & ] ( llvm : : Value * x ) { <nl>
+ return ir_builder_ - > CreateExtractValue ( x , { 0 } ) ; <nl>
+ } ; <nl>
+ auto imag = [ & ] ( llvm : : Value * x ) { <nl>
+ return ir_builder_ - > CreateExtractValue ( x , { 1 } ) ; <nl>
+ } ; <nl>
+ llvm : : Value * product_real = ir_builder_ - > CreateFSub ( <nl>
+ ir_builder_ - > CreateFMul ( real ( lhs_element ) , real ( rhs_element ) ) , <nl>
+ ir_builder_ - > CreateFMul ( imag ( lhs_element ) , imag ( rhs_element ) ) ) ; <nl>
+ llvm : : Value * product_imag = ir_builder_ - > CreateFAdd ( <nl>
+ ir_builder_ - > CreateFMul ( real ( lhs_element ) , imag ( rhs_element ) ) , <nl>
+ ir_builder_ - > CreateFMul ( imag ( lhs_element ) , real ( rhs_element ) ) ) ; <nl>
+ updated_accum = ir_builder_ - > CreateInsertValue ( <nl>
+ accum , ir_builder_ - > CreateFAdd ( real ( accum ) , product_real ) , { 0 } ) ; <nl>
+ updated_accum = ir_builder_ - > CreateInsertValue ( <nl>
+ updated_accum , ir_builder_ - > CreateFAdd ( imag ( accum ) , product_imag ) , { 1 } ) ; <nl>
+ } else { <nl>
+ llvm : : Value * product = ir_builder_ - > CreateFMul ( lhs_element , rhs_element ) ; <nl>
+ updated_accum = ir_builder_ - > CreateFAdd ( accum , product ) ; <nl>
+ } <nl>
ir_builder_ - > CreateStore ( updated_accum , accum_address ) ; <nl> <nl> / / Exit basic block of reduction loop . <nl> tensorflow : : Status DotOpEmitter : : Emit ( ) { <nl> <nl> tensorflow : : Status DotOpEmitter : : EmitScalarDot ( ) { <nl> / / A scalar dot is just a scalar multiply . <nl>
+ llvm : : Value * result ; <nl>
llvm : : Value * lhs_value = <nl> lhs_array_ . EmitReadArrayElement ( / * index = * / { } , ir_builder_ ) ; <nl> llvm : : Value * rhs_value = <nl> rhs_array_ . EmitReadArrayElement ( / * index = * / { } , ir_builder_ ) ; <nl>
- llvm : : Value * result = ir_builder_ - > CreateFMul ( lhs_value , rhs_value ) ; <nl>
+ if ( ShapeUtil : : ElementIsComplex ( lhs_array_ . GetShape ( ) ) ) { <nl>
+ # define REAL ( x ) ir_builder_ - > CreateExtractValue ( x , { 0 } ) <nl>
+ # define IMAG ( x ) ir_builder_ - > CreateExtractValue ( x , { 1 } ) <nl>
+ llvm : : Value * real = ir_builder_ - > CreateFSub ( <nl>
+ ir_builder_ - > CreateFMul ( REAL ( lhs_value ) , REAL ( rhs_value ) ) , <nl>
+ ir_builder_ - > CreateFMul ( IMAG ( lhs_value ) , IMAG ( rhs_value ) ) ) ; <nl>
+ llvm : : Value * imag = ir_builder_ - > CreateFAdd ( <nl>
+ ir_builder_ - > CreateFMul ( REAL ( lhs_value ) , IMAG ( rhs_value ) ) , <nl>
+ ir_builder_ - > CreateFMul ( IMAG ( lhs_value ) , REAL ( rhs_value ) ) ) ; <nl>
+ # undef IMAG <nl>
+ # undef REAL <nl>
+ result = llvm : : ConstantAggregateZero : : get ( lhs_array_ . GetElementLlvmType ( ) ) ; <nl>
+ result = ir_builder_ - > CreateInsertValue ( result , real , { 0 } ) ; <nl>
+ result = ir_builder_ - > CreateInsertValue ( result , imag , { 1 } ) ; <nl>
+ } else { <nl>
+ result = ir_builder_ - > CreateFMul ( lhs_value , rhs_value ) ; <nl>
+ } <nl>
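+ / / For reference : the complex branch above computes the standard product <nl>
+ / / formula ( a + bi ) ( c + di ) = ( ac - bd ) + ( ad + bc ) i . An illustrative <nl>
+ / / C + + equivalent ( not part of this change ) : <nl>
+ / / <nl>
+ / / std : : complex < float > lhs , rhs ; / / assume these hold the operands <nl>
+ / / std : : complex < float > result ( <nl>
+ / / lhs . real ( ) * rhs . real ( ) - lhs . imag ( ) * rhs . imag ( ) , <nl>
+ / / lhs . real ( ) * rhs . imag ( ) + lhs . imag ( ) * rhs . real ( ) ) ; / / = lhs * rhs <nl>
target_array_ . 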
EmitWriteArrayElement ( / * index = * / { } , result , ir_builder_ ) ; <nl> return tensorflow : : Status : : OK ( ) ; <nl> } <nl> mmm a / tensorflow / compiler / xla / service / cpu / elemental_ir_emitter . cc <nl> ppp b / tensorflow / compiler / xla / service / cpu / elemental_ir_emitter . cc <nl> StatusOr < llvm : : Value * > CpuElementalIrEmitter : : EmitFloatUnaryOp ( <nl> } <nl> / / Create function type for the function . <nl> llvm : : FunctionType * function_type = llvm : : FunctionType : : get ( <nl> - llvm_ir : : PrimitiveTypeToIrType ( element_type , ir_builder_ ) , <nl> - llvm_ir : : PrimitiveTypeToIrType ( element_type , ir_builder_ ) , <nl> + llvm_ir : : PrimitiveTypeToIrType ( element_type , module_ ) , <nl> + llvm_ir : : PrimitiveTypeToIrType ( element_type , module_ ) , <nl> / * isVarArg = * / false ) ; <nl> / / Create function declaration for ' tanhf ' . <nl> llvm : : Function * function = <nl> mmm a / tensorflow / compiler / xla / service / cpu / ir_emission_utils . cc <nl> ppp b / tensorflow / compiler / xla / service / cpu / ir_emission_utils . cc <nl> bool PotentiallyImplementedAsEigenConvolution ( <nl> ShapeUtil : : HasZeroElements ( kernel_shape ) ) { <nl> return false ; <nl> } <nl> + / / TODO ( b / 65408531 ) : Explore using Eigen dot for complex64 type . <nl> + if ( ShapeUtil : : ElementIsComplex ( input_shape ) | | <nl> + ShapeUtil : : ElementIsComplex ( kernel_shape ) ) { <nl> + return false ; <nl> + } <nl> + <nl> const ConvolutionDimensionNumbers & dnums = <nl> convolution . convolution_dimension_numbers ( ) ; <nl> / / Only 1D and 2D convolutions are supported at the moment . <nl> mmm a / tensorflow / compiler / xla / service / cpu / ir_emitter . cc <nl> ppp b / tensorflow / compiler / xla / service / cpu / ir_emitter . cc <nl> Status IrEmitter : : HandleConstant ( HloInstruction * constant , <nl> MinimumAlignmentForShape ( literal . shape ( ) ) ) ; <nl> } else { <nl> llvm : : Constant * initializer = <nl> - llvm_ir : : ConvertLiteralToIrConstant ( literal , & ir_builder_ ) ; <nl> + llvm_ir : : ConvertLiteralToIrConstant ( literal , module_ ) ; <nl> global_for_const = new llvm : : GlobalVariable ( <nl> / * Module = * / * module_ , <nl> / * Type = * / initializer - > getType ( ) , <nl> Status IrEmitter : : HandleGetTupleElement ( HloInstruction * get_tuple_element , <nl> const Shape & shape = get_tuple_element - > shape ( ) ; <nl> emitted_value_ [ get_tuple_element ] = llvm_ir : : EmitGetTupleElement ( <nl> shape , get_tuple_element - > tuple_index ( ) , MinimumAlignmentForShape ( shape ) , <nl> - GetEmittedValueFor ( operand ) , & ir_builder_ ) ; <nl> + GetEmittedValueFor ( operand ) , & ir_builder_ , module_ ) ; <nl> return Status : : OK ( ) ; <nl> } <nl> <nl> Status IrEmitter : : HandleSelect ( HloInstruction * select , HloInstruction * pred , <nl> <nl> if ( ShapeUtil : : IsTuple ( select - > shape ( ) ) ) { <nl> TF_RETURN_IF_ERROR ( EmitTargetAddressForOp ( select ) ) ; <nl> - llvm_ir : : EmitTupleSelect ( GetIrArrayFor ( select ) , GetIrArrayFor ( pred ) , <nl> - GetEmittedValueFor ( on_true ) , <nl> - GetEmittedValueFor ( on_false ) , & ir_builder_ ) ; <nl> + llvm_ir : : EmitTupleSelect ( <nl> + GetIrArrayFor ( select ) , GetIrArrayFor ( pred ) , GetEmittedValueFor ( on_true ) , <nl> + GetEmittedValueFor ( on_false ) , & ir_builder_ , module_ ) ; <nl> return Status : : OK ( ) ; <nl> } <nl> <nl> Status IrEmitter : : HandleInfeed ( HloInstruction * infeed ) { <nl> tuple_element_addresses . 
push_back ( tuple_element_address ) ; <nl> } <nl> <nl> - llvm_ir : : EmitTuple ( infeed_array , tuple_element_addresses , & ir_builder_ ) ; <nl> + llvm_ir : : EmitTuple ( infeed_array , tuple_element_addresses , & ir_builder_ , <nl> + module_ ) ; <nl> } else { <nl> TF_RETURN_IF_ERROR ( EmitXfeedTransfer ( XfeedKind : : kInfeed , shape , <nl> GetEmittedValueFor ( infeed ) ) ) ; <nl> Status IrEmitter : : HandleOutfeed ( HloInstruction * outfeed ) { <nl> ShapeUtil : : GetTupleElementShape ( operand_shape , i ) ; <nl> llvm : : Value * tuple_element = llvm_ir : : EmitGetTupleElement ( <nl> tuple_element_shape , i , MinimumAlignmentForShape ( tuple_element_shape ) , <nl> - value , & ir_builder_ ) ; <nl> + value , & ir_builder_ , module_ ) ; <nl> TF_RETURN_IF_ERROR ( EmitXfeedTransfer ( XfeedKind : : kOutfeed , <nl> tuple_element_shape , tuple_element ) ) ; <nl> } <nl> Status IrEmitter : : HandleTuple ( <nl> for ( auto operand : operands ) { <nl> base_ptrs . push_back ( GetEmittedValueFor ( operand ) ) ; <nl> } <nl> - llvm_ir : : EmitTuple ( GetIrArrayFor ( tuple ) , base_ptrs , & ir_builder_ ) ; <nl> + llvm_ir : : EmitTuple ( GetIrArrayFor ( tuple ) , base_ptrs , & ir_builder_ , module_ ) ; <nl> return Status : : OK ( ) ; <nl> } <nl> <nl> Status IrEmitter : : HandleReduceWindow ( HloInstruction * reduce_window , <nl> / / the initial value on the reduce_window . <nl> PrimitiveType operand_element_type = operand - > shape ( ) . element_type ( ) ; <nl> llvm : : Value * accumulator_address = llvm_ir : : EmitAllocaAtFunctionEntry ( <nl> - llvm_ir : : PrimitiveTypeToIrType ( operand_element_type , & ir_builder_ ) , <nl> + llvm_ir : : PrimitiveTypeToIrType ( operand_element_type , module_ ) , <nl> " reduce_window_accumulator_address " , & ir_builder_ , <nl> MinimumAlignmentForPrimitiveType ( operand_element_type ) ) ; <nl> ir_builder_ . CreateStore ( ir_builder_ . CreateLoad ( GetEmittedValueFor ( <nl> Status IrEmitter : : HandleSelectAndScatter ( HloInstruction * select_and_scatter ) { <nl> / / Allocate space to keep the currently selected value , its index , and <nl> / / the boolean initialized_flag , which is initially set to false . <nl> llvm : : Value * selected_value_address = llvm_ir : : EmitAllocaAtFunctionEntry ( <nl> - llvm_ir : : PrimitiveTypeToIrType ( operand_element_type , & ir_builder_ ) , <nl> + llvm_ir : : PrimitiveTypeToIrType ( operand_element_type , module_ ) , <nl> " selected_value_address " , & ir_builder_ , <nl> MinimumAlignmentForPrimitiveType ( operand_element_type ) ) ; <nl> llvm : : Value * selected_index_address = <nl> Status IrEmitter : : HandleSelectAndScatter ( HloInstruction * select_and_scatter ) { <nl> / / If the ' select ' function returns false , update the selected value and the <nl> / / index to the currently visiting operand . <nl> llvm : : Value * cond = ir_builder_ . 
CreateICmpNE ( <nl> - result , llvm : : ConstantInt : : get ( <nl> - llvm_ir : : PrimitiveTypeToIrType ( PRED , & ir_builder_ ) , 0 ) , <nl> + result , <nl> + llvm : : ConstantInt : : get ( llvm_ir : : PrimitiveTypeToIrType ( PRED , module_ ) , 0 ) , <nl> " boolean_predicate " ) ; <nl> llvm_ir : : LlvmIfData if_select_lhs = <nl> llvm_ir : : EmitIfThenElse ( cond , " if - select - lhs " , & ir_builder_ ) ; <nl> Status IrEmitter : : HandleDot ( HloInstruction * dot , HloInstruction * lhs , <nl> HloInstruction * rhs ) { <nl> TF_RETURN_IF_ERROR ( ElementTypesSameAndSupported ( <nl> / * instruction = * / * dot , / * operands = * / { lhs , rhs } , <nl> - / * supported_types = * / { F32 , F64 } ) ) ; <nl> + / * supported_types = * / { F32 , F64 , C64 } ) ) ; <nl> <nl> llvm_ir : : IrArray lhs_array ( GetIrArrayFor ( lhs ) ) ; <nl> llvm_ir : : IrArray rhs_array ( GetIrArrayFor ( rhs ) ) ; <nl> Status IrEmitter : : HandleConvolution ( HloInstruction * convolution , <nl> const Window & window ) { <nl> TF_RETURN_IF_ERROR ( ElementTypesSameAndSupported ( <nl> / * instruction = * / * convolution , / * operands = * / { lhs , rhs } , <nl> - / * supported_types = * / { F32 } ) ) ; <nl> + / * supported_types = * / { F32 , C64 } ) ) ; <nl> <nl> const ConvolutionDimensionNumbers & dnums = <nl> convolution - > convolution_dimension_numbers ( ) ; <nl> Status IrEmitter : : HandleConvolution ( HloInstruction * convolution , <nl> / / the output entry at the given index . <nl> PrimitiveType lhs_element_type = lhs - > shape ( ) . element_type ( ) ; <nl> llvm : : Value * sum_address = llvm_ir : : EmitAllocaAtFunctionEntry ( <nl> - llvm_ir : : PrimitiveTypeToIrType ( lhs_element_type , & ir_builder_ ) , <nl> + llvm_ir : : PrimitiveTypeToIrType ( lhs_element_type , module_ ) , <nl> " convolution_sum_address " , & ir_builder_ , <nl> MinimumAlignmentForPrimitiveType ( lhs_element_type ) ) ; <nl> ir_builder_ . CreateStore ( <nl> Status IrEmitter : : HandleBatchNormTraining ( HloInstruction * batch_norm_training ) { <nl> PrimitiveType element_type = operand - > shape ( ) . element_type ( ) ; <nl> / / Used to calculate E ( X ) . <nl> llvm : : Value * sum_address = llvm_ir : : EmitAllocaAtFunctionEntry ( <nl> - llvm_ir : : PrimitiveTypeToIrType ( element_type , & ir_builder_ ) , <nl> + llvm_ir : : PrimitiveTypeToIrType ( element_type , module_ ) , <nl> " sum_address " , & ir_builder_ , <nl> MinimumAlignmentForPrimitiveType ( element_type ) ) ; <nl> <nl> / / Used to calculate E ( X ^ 2 ) . <nl> llvm : : Value * sum_square_address = <nl> llvm_ir : : EmitAllocaAtFunctionEntry ( <nl> - llvm_ir : : PrimitiveTypeToIrType ( element_type , & ir_builder_ ) , <nl> + llvm_ir : : PrimitiveTypeToIrType ( element_type , module_ ) , <nl> " sum_square_address " , & ir_builder_ , <nl> MinimumAlignmentForPrimitiveType ( element_type ) ) ; <nl> <nl> Status IrEmitter : : HandleBatchNormTraining ( HloInstruction * batch_norm_training ) { <nl> . 
EmitLoop ( IrName ( batch_norm_training , " normalize " ) ) ) ; <nl> <nl> llvm_ir : : EmitTuple ( GetIrArrayFor ( batch_norm_training ) , <nl>
- { normalized , mean , var } , & ir_builder_ ) ; <nl>
+ { normalized , mean , var } , & ir_builder_ , module_ ) ; <nl>
return Status : : OK ( ) ; <nl> } <nl> <nl> IrEmitter : : ReductionGenerator IrEmitter : : MatchReductionGenerator ( <nl> } <nl> <nl> const Shape & root_shape = root_instruction - > shape ( ) ; <nl>
+ if ( ShapeUtil : : ElementIsComplex ( root_shape ) ) { <nl>
+ / / TODO ( b / 65408531 ) : Complex add could be done via bitcast to < float x [ 2N ] > . <nl>
+ / / Complex multiply would be more challenging . We could perhaps use a <nl>
+ / / strided load to get all reals in a vector , all imags in a vector , or use <nl>
+ / / CreateShuffleVector on a bitcast to float x [ 2N ] . <nl>
+ * failure_reason = " complex values not supported " ; <nl>
+ return nullptr ; <nl>
+ } <nl>
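+ / / Illustrative sketch ( not part of this change ) of the bitcast idea in the <nl>
+ / / TODO above : complex add is componentwise , so N complex64 values can be <nl>
+ / / summed as 2N interleaved floats : <nl>
+ / / <nl>
+ / / std : : complex < float > a [ N ] , b [ N ] ; <nl>
+ / / float * af = reinterpret_cast < float * > ( a ) ; <nl>
+ / / float * bf = reinterpret_cast < float * > ( b ) ; <nl>
+ / / for ( int i = 0 ; i < 2 * N ; + + i ) af [ i ] + = bf [ i ] ; / / vectorizable <nl>
bool root_is_floating_point = ShapeUtil : : ElementIsFloating ( root_shape ) ; <nl> bool root_is_integral = ShapeUtil : : ElementIsIntegral ( root_shape ) ; <nl> bool root_is_signed = ShapeUtil : : ElementIsSigned ( root_shape ) ; <nl> IrEmitter : : ReductionGenerator IrEmitter : : MatchReductionGenerator ( <nl> / / This is visually similar to ElementalIrEmitter , though conceptually we ' re <nl> / / doing something different here . ElementalIrEmitter emits scalar operations <nl> / / while these emit scalar or vector operations depending on the type of the <nl>
- / / operands . <nl>
+ / / operands . See CreateShardedVectorType for the actual types in use here . <nl>
switch ( root_instruction - > opcode ( ) ) { <nl> default : <nl> * failure_reason = " did not recognize root instruction opcode " ; <nl> IrEmitter : : ShardedVectorType IrEmitter : : CreateShardedVectorType ( <nl> <nl> ShardedVectorType sharded_vector_type ; <nl> llvm : : Type * element_ir_type = <nl>
- llvm_ir : : PrimitiveTypeToIrType ( element_type , & ir_builder_ ) ; <nl>
+ llvm_ir : : PrimitiveTypeToIrType ( element_type , module_ ) ; <nl>
<nl> for ( int i = 0 , e = 1 + tensorflow : : Log2Ceiling ( element_count ) ; i < e ; i + + ) { <nl> / / For every power of two present in element_count , we generate one or more <nl> Status IrEmitter : : HandleReduce ( HloInstruction * reduce , HloInstruction * arg , <nl> / / Initialize an accumulator with init_value . <nl> PrimitiveType accumulator_type = reduce - > shape ( ) . 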
element_type ( ) ; <nl> llvm : : AllocaInst * accumulator_addr = llvm_ir : : EmitAllocaAtFunctionEntry ( <nl> - llvm_ir : : PrimitiveTypeToIrType ( accumulator_type , & ir_builder_ ) , <nl> + llvm_ir : : PrimitiveTypeToIrType ( accumulator_type , module_ ) , <nl> " accumulator " , & ir_builder_ , <nl> MinimumAlignmentForPrimitiveType ( accumulator_type ) ) ; <nl> llvm : : Value * init_value_addr = GetEmittedValueFor ( init_value ) ; <nl> Status IrEmitter : : HandleFusion ( HloInstruction * fusion ) { <nl> return Status : : OK ( ) ; <nl> } else if ( llvm_ir : : CanEmitFusedDynamicUpdateSliceInPlace ( fusion , <nl> assignment_ ) ) { <nl> + VLOG ( 3 ) < < " HandleFusion FusedDynamicUpdateSliceInPlace " ; <nl> CpuElementalIrEmitter elemental_emitter ( hlo_module_config_ , this , module_ ) ; <nl> TF_RETURN_IF_ERROR ( EmitTargetAddressForOp ( fusion ) ) ; <nl> <nl> Status IrEmitter : : HandleFusion ( HloInstruction * fusion ) { <nl> fusion , operands , GetIrArrayFor ( fusion ) , & elemental_emitter , <nl> & ir_builder_ ) ; <nl> } else if ( fusion - > fusion_kind ( ) = = HloInstruction : : FusionKind : : kLoop ) { <nl> + VLOG ( 3 ) < < " HandleFusion kLoop " ; <nl> CpuElementalIrEmitter elemental_emitter ( hlo_module_config_ , this , module_ ) ; <nl> auto operands = GetIrArraysForOperandsOf ( fusion ) ; <nl> FusedIrEmitter fused_emitter ( operands , & elemental_emitter ) ; <nl> Status IrEmitter : : HandleWhile ( HloInstruction * xla_while ) { <nl> { while_result } , IrName ( xla_while , " cond " ) ) ; <nl> llvm : : Value * while_predicate = ir_builder_ . CreateICmpNE ( <nl> while_condition , <nl> - llvm : : ConstantInt : : get ( llvm_ir : : PrimitiveTypeToIrType ( PRED , & ir_builder_ ) , <nl> - 0 ) ) ; <nl> + llvm : : ConstantInt : : get ( llvm_ir : : PrimitiveTypeToIrType ( PRED , module_ ) , 0 ) ) ; <nl> <nl> / / Branches to the body or to the while exit depending on the condition . <nl> llvm : : BasicBlock * body_bb = llvm : : BasicBlock : : Create ( <nl> void IrEmitter : : EmitTransferElements ( llvm : : Value * target , llvm : : Value * source , <nl> unsigned element_alignment = GCD ( <nl> primitive_type_size , MinimumAlignmentForPrimitiveType ( primitive_type ) ) ; <nl> llvm : : Type * primitive_ptr_type = llvm : : PointerType : : getUnqual ( <nl> - llvm_ir : : PrimitiveTypeToIrType ( primitive_type , & ir_builder_ ) ) ; <nl> + llvm_ir : : PrimitiveTypeToIrType ( primitive_type , module_ ) ) ; <nl> <nl> if ( element_count = = 1 ) { <nl> auto * load_instruction = ir_builder_ . CreateAlignedLoad ( <nl> llvm : : Value * IrEmitter : : GetEmittedValueFor ( const HloInstruction * hlo ) { <nl> } <nl> <nl> llvm : : Type * IrEmitter : : IrShapeType ( const Shape & shape ) { <nl> - return llvm_ir : : ShapeToIrType ( shape , & ir_builder_ ) ; <nl> + return llvm_ir : : ShapeToIrType ( shape , module_ ) ; <nl> } <nl> <nl> std : : vector < llvm : : Type * > IrEmitter : : GetComputeFunctionParams ( ) { <nl> llvm : : Value * IrEmitter : : EmitArrayFunctionCall ( <nl> PrimitiveType return_type = return_shape . 
element_type ( ) ; <nl> llvm : : Value * return_value_buffer = <nl> llvm_ir : : EmitAllocaAtFunctionEntryWithCount ( <nl> - llvm_ir : : PrimitiveTypeToIrType ( return_type , & ir_builder_ ) , elements , <nl> + llvm_ir : : PrimitiveTypeToIrType ( return_type , module_ ) , elements , <nl> tensorflow : : strings : : StrCat ( name , " _return_value_address " ) , <nl> & ir_builder_ , MinimumAlignmentForPrimitiveType ( return_type ) ) ; <nl> EmitArrayFunctionCallInto ( function , parameter_addresses , return_value_buffer , <nl> Status IrEmitter : : EmitTargetElementLoop ( <nl> for ( int64 i = 0 ; i < output_arrays . size ( ) ; + + i ) { <nl> tuple_operand_ptrs . push_back ( output_arrays [ i ] . GetBasePointer ( ) ) ; <nl> } <nl> - llvm_ir : : EmitTuple ( target_array , tuple_operand_ptrs , & ir_builder_ ) ; <nl> + llvm_ir : : EmitTuple ( target_array , tuple_operand_ptrs , & ir_builder_ , module_ ) ; <nl> <nl> } else { <nl> if ( ShouldEmitParallelLoopFor ( * target_op ) ) { <nl> mmm a / tensorflow / compiler / xla / service / dfs_hlo_visitor . h <nl> ppp b / tensorflow / compiler / xla / service / dfs_hlo_visitor . h <nl> class DfsHloVisitor { <nl> virtual Status HandleCopy ( HloInstruction * copy ) { <nl> return HandleElementwiseUnary ( copy ) ; <nl> } <nl> + virtual Status HandleComplex ( HloInstruction * complex , HloInstruction * real , <nl> + HloInstruction * imag ) { <nl> + return HandleElementwiseBinary ( complex ) ; <nl> + } <nl> virtual Status HandleMultiply ( HloInstruction * multiply , HloInstruction * lhs , <nl> HloInstruction * rhs ) { <nl> return HandleElementwiseBinary ( multiply ) ; <nl> class DfsHloVisitor { <nl> virtual Status HandleAbs ( HloInstruction * abs , HloInstruction * operand ) { <nl> return HandleElementwiseUnary ( abs ) ; <nl> } <nl> + virtual Status HandleAtan2 ( HloInstruction * atan2 , HloInstruction * y , <nl> + HloInstruction * x ) { <nl> + return HandleElementwiseBinary ( atan2 ) ; <nl> + } <nl> virtual Status HandleRound ( HloInstruction * round ) { <nl> return HandleElementwiseUnary ( round ) ; <nl> } <nl> class DfsHloVisitor { <nl> virtual Status HandleTanh ( HloInstruction * tanh , HloInstruction * operand ) { <nl> return HandleElementwiseUnary ( tanh ) ; <nl> } <nl> + virtual Status HandleReal ( HloInstruction * real , HloInstruction * operand ) { <nl> + return HandleElementwiseUnary ( real ) ; <nl> + } <nl> + virtual Status HandleImag ( HloInstruction * imag , HloInstruction * operand ) { <nl> + return HandleElementwiseUnary ( imag ) ; <nl> + } <nl> virtual Status HandleIsFinite ( HloInstruction * is_finite , <nl> HloInstruction * operand ) { <nl> return HandleElementwiseUnary ( is_finite ) ; <nl> mmm a / tensorflow / compiler / xla / service / elemental_ir_emitter . cc <nl> ppp b / tensorflow / compiler / xla / service / elemental_ir_emitter . cc <nl> StatusOr < llvm : : Value * > ElementalIrEmitter : : EmitUnaryOp ( <nl> const HloInstruction * op , llvm : : Value * operand_value ) const { <nl> if ( op - > opcode ( ) = = HloOpcode : : kCopy ) { <nl> return operand_value ; <nl> + } else if ( operand_value - > getType ( ) - > isIntegerTy ( ) ) { <nl> + return EmitIntegerUnaryOp ( op , operand_value ) ; <nl> + } else if ( ShapeUtil : : ElementIsComplex ( op - > operand ( 0 ) - > shape ( ) ) ) { <nl> + return EmitComplexUnaryOp ( op , operand_value ) ; <nl> } else { <nl> - return operand_value - > getType ( ) - > isIntegerTy ( ) <nl> - ? 
EmitIntegerUnaryOp ( op , operand_value ) <nl> - : EmitFloatUnaryOp ( op , operand_value ) ; <nl> + return EmitFloatUnaryOp ( op , operand_value ) ; <nl> } <nl> } <nl> <nl> StatusOr < llvm : : Value * > ElementalIrEmitter : : EmitIntegerUnaryOp ( <nl> } <nl> if ( primitive_util : : IsIntegralType ( to_type ) ) { <nl> return ir_builder_ - > CreateIntCast ( <nl> - operand_value , llvm_ir : : PrimitiveTypeToIrType ( to_type , ir_builder_ ) , <nl> + operand_value , llvm_ir : : PrimitiveTypeToIrType ( to_type , module_ ) , <nl> primitive_util : : IsSignedIntegralType ( to_type ) ) ; <nl> } <nl> if ( primitive_util : : IsFloatingPointType ( to_type ) ) { <nl> if ( primitive_util : : IsSignedIntegralType ( from_type ) ) { <nl> return ir_builder_ - > CreateSIToFP ( <nl> - operand_value , <nl> - llvm_ir : : PrimitiveTypeToIrType ( to_type , ir_builder_ ) ) ; <nl> + operand_value , llvm_ir : : PrimitiveTypeToIrType ( to_type , module_ ) ) ; <nl> } <nl> if ( primitive_util : : IsUnsignedIntegralType ( from_type ) | | <nl> from_type = = PRED ) { <nl> return ir_builder_ - > CreateUIToFP ( <nl> - operand_value , <nl> - llvm_ir : : PrimitiveTypeToIrType ( to_type , ir_builder_ ) ) ; <nl> + operand_value , llvm_ir : : PrimitiveTypeToIrType ( to_type , module_ ) ) ; <nl> + } <nl> + } <nl> + if ( primitive_util : : IsComplexType ( to_type ) ) { <nl> + auto to_ir_component_type = llvm_ir : : PrimitiveTypeToIrType ( <nl> + primitive_util : : ComplexComponentType ( to_type ) , module_ ) ; <nl> + if ( primitive_util : : IsSignedIntegralType ( from_type ) ) { <nl> + return ComposeComplex ( <nl> + op , <nl> + ir_builder_ - > CreateSIToFP ( operand_value , to_ir_component_type ) , <nl> + nullptr ) ; <nl> + } <nl> + if ( primitive_util : : IsUnsignedIntegralType ( from_type ) | | <nl> + from_type = = PRED ) { <nl> + return ComposeComplex ( <nl> + op , <nl> + ir_builder_ - > CreateUIToFP ( operand_value , to_ir_component_type ) , <nl> + nullptr ) ; <nl> } <nl> } <nl> return Unimplemented ( " conversion from primitive type % s to % s " , <nl> StatusOr < llvm : : Value * > ElementalIrEmitter : : EmitIntegerUnaryOp ( <nl> bool is_signed = <nl> primitive_util : : IsSignedIntegralType ( op - > shape ( ) . element_type ( ) ) ; <nl> if ( is_signed ) { <nl> - auto type = llvm_ir : : PrimitiveTypeToIrType ( op - > shape ( ) . element_type ( ) , <nl> - ir_builder_ ) ; <nl> + auto type = <nl> + llvm_ir : : PrimitiveTypeToIrType ( op - > shape ( ) . element_type ( ) , module_ ) ; <nl> auto zero = llvm : : ConstantInt : : get ( type , 0 ) ; <nl> auto cmp = ir_builder_ - > CreateICmpSGE ( operand_value , zero ) ; <nl> return ir_builder_ - > CreateSelect ( cmp , operand_value , <nl> StatusOr < llvm : : Value * > ElementalIrEmitter : : EmitIntegerUnaryOp ( <nl> case HloOpcode : : kSign : { <nl> bool is_signed = <nl> primitive_util : : IsSignedIntegralType ( op - > shape ( ) . element_type ( ) ) ; <nl> - auto type = llvm_ir : : PrimitiveTypeToIrType ( op - > shape ( ) . element_type ( ) , <nl> - ir_builder_ ) ; <nl> + auto type = <nl> + llvm_ir : : PrimitiveTypeToIrType ( op - > shape ( ) . 
element_type ( ) , module_ ) ; <nl> auto zero = llvm : : ConstantInt : : get ( type , 0 ) ; <nl> auto cmp = ir_builder_ - > CreateICmpEQ ( operand_value , zero ) ; <nl> if ( is_signed ) { <nl> StatusOr < llvm : : Value * > ElementalIrEmitter : : EmitIntegerUnaryOp ( <nl> return ir_builder_ - > CreateZExt ( <nl> ir_builder_ - > CreateNot ( ir_builder_ - > CreateTrunc ( <nl> operand_value , ir_builder_ - > getInt1Ty ( ) ) ) , <nl> - llvm_ir : : PrimitiveTypeToIrType ( PRED , ir_builder_ ) ) ; <nl> + llvm_ir : : PrimitiveTypeToIrType ( PRED , module_ ) ) ; <nl> } else if ( primitive_util : : IsIntegralType ( type ) ) { <nl> return ir_builder_ - > CreateNot ( operand_value ) ; <nl> } <nl> StatusOr < llvm : : Value * > ElementalIrEmitter : : EmitFloatUnaryOp ( <nl> if ( from_type = = to_type ) { <nl> return operand_value ; <nl> } <nl> + if ( primitive_util : : IsComplexType ( to_type ) ) { <nl> + PrimitiveType to_component_type = <nl> + primitive_util : : ComplexComponentType ( to_type ) ; <nl> + if ( from_type = = to_component_type ) { <nl> + return ComposeComplex ( op , operand_value , nullptr ) ; <nl> + } <nl> + return ComposeComplex ( <nl> + op , <nl> + ir_builder_ - > CreateFPCast ( <nl> + operand_value , <nl> + llvm_ir : : PrimitiveTypeToIrType ( to_component_type , module_ ) ) , <nl> + nullptr ) ; <nl> + } <nl> if ( primitive_util : : IsFloatingPointType ( to_type ) ) { <nl> return ir_builder_ - > CreateFPCast ( <nl> - operand_value , <nl> - llvm_ir : : PrimitiveTypeToIrType ( to_type , ir_builder_ ) ) ; <nl> + operand_value , llvm_ir : : PrimitiveTypeToIrType ( to_type , module_ ) ) ; <nl> } <nl> if ( primitive_util : : IsSignedIntegralType ( to_type ) ) { <nl> return ir_builder_ - > CreateFPToSI ( <nl> - operand_value , <nl> - llvm_ir : : PrimitiveTypeToIrType ( to_type , ir_builder_ ) ) ; <nl> + operand_value , llvm_ir : : PrimitiveTypeToIrType ( to_type , module_ ) ) ; <nl> } <nl> if ( primitive_util : : IsUnsignedIntegralType ( to_type ) ) { <nl> return ir_builder_ - > CreateFPToUI ( <nl> - operand_value , <nl> - llvm_ir : : PrimitiveTypeToIrType ( to_type , ir_builder_ ) ) ; <nl> + operand_value , llvm_ir : : PrimitiveTypeToIrType ( to_type , module_ ) ) ; <nl> } <nl> return Unimplemented ( " unhandled conversion operation : % s = > % s " , <nl> PrimitiveType_Name ( from_type ) . c_str ( ) , <nl> StatusOr < llvm : : Value * > ElementalIrEmitter : : EmitFloatUnaryOp ( <nl> auto not_infinite = ir_builder_ - > CreateFCmpONE ( abs_value , infinity ) ; <nl> auto result_i1 = ir_builder_ - > CreateAnd ( equal_self , not_infinite ) ; <nl> return ir_builder_ - > CreateZExt ( <nl> - result_i1 , llvm_ir : : PrimitiveTypeToIrType ( PRED , ir_builder_ ) ) ; <nl> + result_i1 , llvm_ir : : PrimitiveTypeToIrType ( PRED , module_ ) ) ; <nl> } <nl> case HloOpcode : : kNegate : <nl> return ir_builder_ - > CreateFNeg ( operand_value ) ; <nl> StatusOr < llvm : : Value * > ElementalIrEmitter : : EmitFloatUnaryOp ( <nl> } <nl> } <nl> <nl> + StatusOr < llvm : : Value * > ElementalIrEmitter : : EmitComplexUnaryOp ( <nl> + const HloInstruction * op , llvm : : Value * operand_value ) const { <nl> + auto real = [ & ] ( llvm : : Value * x ) { <nl> + return ir_builder_ - > CreateExtractValue ( x , { 0 } ) ; <nl> + } ; <nl> + auto imag = [ & ] ( llvm : : Value * x ) { <nl> + return ir_builder_ - > CreateExtractValue ( x , { 1 } ) ; <nl> + } ; <nl> + switch ( op - > opcode ( ) ) { <nl> + / / TODO ( b / 65209142 ) : Angle / Log require atan2 . 
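The real / imag lambdas above and the ComposeComplex helper added at the bottom of this file's changes are the two halves of the struct-based complex representation: extractvalue reads a component, insertvalue builds a result. A self-contained sketch of that round trip — the function name and the conjugate operation are illustrative additions, not part of this diff:

#include "llvm/IR/Constants.h"
#include "llvm/IR/IRBuilder.h"

// Given a complex value 'z' of LLVM struct type { float, float }, read both
// components and assemble a new value; here, the complex conjugate a - bi.
llvm::Value* ConjugateSketch(llvm::IRBuilder<>& b, llvm::Value* z) {
  llvm::Value* re = b.CreateExtractValue(z, {0});  // field 0: real part
  llvm::Value* im = b.CreateExtractValue(z, {1});  // field 1: imaginary part
  // Start from an all-zero aggregate of the same struct type and fill in the
  // fields, mirroring the ComposeComplex helper introduced by this diff.
  llvm::Value* out = llvm::ConstantAggregateZero::get(z->getType());
  out = b.CreateInsertValue(out, re, {0});
  return b.CreateInsertValue(out, b.CreateFNeg(im), {1});
}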
<nl> + / / case HloOpcode : : kAngle : <nl> + / / case HloOpcode : : kLog : / / log ( a + bi ) = . 5 * log ( a ^ 2 + b ^ 2 ) + i * atan2 ( b , a ) <nl> + case HloOpcode : : kConvert : { <nl> + PrimitiveType from_type = op - > operand ( 0 ) - > shape ( ) . element_type ( ) ; <nl> + TF_RET_CHECK ( primitive_util : : IsComplexType ( from_type ) ) ; <nl> + PrimitiveType to_type = op - > shape ( ) . element_type ( ) ; <nl> + TF_RET_CHECK ( primitive_util : : IsComplexType ( to_type ) ) ; <nl> + if ( from_type = = to_type ) { <nl> + return operand_value ; <nl> + } <nl> + PrimitiveType to_component_type = <nl> + primitive_util : : ComplexComponentType ( to_type ) ; <nl> + auto to_ir_component_type = <nl> + llvm_ir : : PrimitiveTypeToIrType ( to_component_type , module_ ) ; <nl> + return ComposeComplex ( <nl> + op , <nl> + ir_builder_ - > CreateFPCast ( real ( operand_value ) , to_ir_component_type ) , <nl> + ir_builder_ - > CreateFPCast ( imag ( operand_value ) , to_ir_component_type ) ) ; <nl> + } <nl> + case HloOpcode : : kExp : { <nl> + / / e ^ ( a + bi ) = e ^ a * ( cos ( b ) + sin ( b ) i ) <nl> + auto exp_a = llvm_ir : : EmitCallToIntrinsic ( <nl> + llvm : : Intrinsic : : exp , { real ( operand_value ) } , <nl> + { real ( operand_value ) - > getType ( ) } , ir_builder_ ) ; <nl> + auto cos_b = llvm_ir : : EmitCallToIntrinsic ( <nl> + llvm : : Intrinsic : : cos , { imag ( operand_value ) } , <nl> + { imag ( operand_value ) - > getType ( ) } , ir_builder_ ) ; <nl> + auto sin_b = llvm_ir : : EmitCallToIntrinsic ( <nl> + llvm : : Intrinsic : : sin , { imag ( operand_value ) } , <nl> + { imag ( operand_value ) - > getType ( ) } , ir_builder_ ) ; <nl> + return ComposeComplex ( op , ir_builder_ - > CreateFMul ( exp_a , cos_b ) , <nl> + ir_builder_ - > CreateFMul ( exp_a , sin_b ) ) ; <nl> + } <nl> + case HloOpcode : : kCos : { <nl> + / / cos ( z ) = . 5 ( e ^ ( iz ) + e ^ ( - iz ) ) <nl> + / / cos ( a + bi ) = . 5 ( e ^ ( - b + ai ) + e ^ ( b - ai ) ) <nl> + / / now , e ^ ( x + yi ) = e ^ x * ( cos ( y ) + sin ( y ) i ) , so we have <nl> + / / cos ( a + bi ) = . 5 ( e ^ - b * ( cos ( a ) + sin ( a ) i ) + e ^ b * ( cos ( - a ) + sin ( - a ) i ) ) <nl> + / / cos ( - x ) = cos ( x ) and sin ( - x ) = - sin ( x ) , so <nl> + / / cos ( a + bi ) = . 5 ( e ^ - b * ( cos ( a ) + sin ( a ) i ) + e ^ b * ( cos ( a ) - sin ( a ) i ) ) <nl> + / / = . 5 ( cos ( a ) * ( e ^ - b + e ^ b ) + i * sin ( a ) * ( e ^ - b - e ^ b ) ) <nl> + auto a = real ( operand_value ) ; <nl> + auto b = imag ( operand_value ) ; <nl> + auto type = a - > getType ( ) ; <nl> + auto exp_b = llvm_ir : : EmitCallToIntrinsic ( llvm : : Intrinsic : : exp , { b } , <nl> + { type } , ir_builder_ ) ; <nl> + auto half_exp_b = <nl> + ir_builder_ - > CreateFMul ( llvm : : ConstantFP : : get ( type , 0 . 5 ) , exp_b ) ; <nl> + auto half_exp_neg_b = <nl> + ir_builder_ - > CreateFDiv ( llvm : : ConstantFP : : get ( type , 0 . 5 ) , exp_b ) ; <nl> + auto cos_a = llvm_ir : : EmitCallToIntrinsic ( llvm : : Intrinsic : : cos , { a } , <nl> + { type } , ir_builder_ ) ; <nl> + auto sin_a = llvm_ir : : EmitCallToIntrinsic ( llvm : : Intrinsic : : sin , { a } , <nl> + { type } , ir_builder_ ) ; <nl> + return ComposeComplex ( <nl> + op , <nl> + ir_builder_ - > CreateFMul ( <nl> + cos_a , ir_builder_ - > CreateFAdd ( half_exp_neg_b , half_exp_b ) ) , <nl> + ir_builder_ - > CreateFMul ( <nl> + sin_a , ir_builder_ - > CreateFSub ( half_exp_neg_b , half_exp_b ) ) ) ; <nl> + } <nl> + case HloOpcode : : kSin : { <nl> + / / sin ( z ) = . 
5i ( e ^ ( - iz ) - e ^ ( iz ) ) <nl> + / / sin ( a + bi ) = . 5i ( e ^ ( - i ( a + bi ) ) - e ^ ( i ( a + bi ) ) ) <nl> + / / = . 5i ( e ^ ( b - ai ) - e ^ ( - b + ai ) ) <nl> + / / now , e ^ ( x + yi ) = e ^ x * ( cos ( y ) + sin ( y ) i ) , so we have <nl> + / / sin ( a + bi ) = 0 . 5i ( e ^ b * ( cos ( - a ) + sin ( - a ) i ) - e ^ - b * ( cos ( a ) + sin ( a ) i ) ) <nl> + / / = 0 . 5 ( e ^ b * ( cos ( - a ) i - sin ( - a ) ) - e ^ - b * ( cos ( a ) i - sin ( a ) ) ) <nl> + / / cos ( - x ) = cos ( x ) and sin ( - x ) = - sin ( x ) , so <nl> + / / = 0 . 5 ( e ^ b * ( cos ( a ) i + sin ( a ) ) - e ^ - b * ( cos ( a ) i - sin ( a ) ) ) <nl> + / / = 0 . 5 ( sin ( a ) * ( e ^ b + e ^ - b ) + i * cos ( a ) * ( e ^ b - e ^ - b ) ) <nl> + auto a = real ( operand_value ) ; <nl> + auto b = imag ( operand_value ) ; <nl> + auto type = a - > getType ( ) ; <nl> + auto exp_b = llvm_ir : : EmitCallToIntrinsic ( llvm : : Intrinsic : : exp , { b } , <nl> + { type } , ir_builder_ ) ; <nl> + auto half_exp_b = <nl> + ir_builder_ - > CreateFMul ( llvm : : ConstantFP : : get ( type , 0 . 5 ) , exp_b ) ; <nl> + auto half_exp_neg_b = <nl> + ir_builder_ - > CreateFDiv ( llvm : : ConstantFP : : get ( type , 0 . 5 ) , exp_b ) ; <nl> + auto cos_a = llvm_ir : : EmitCallToIntrinsic ( llvm : : Intrinsic : : cos , { a } , <nl> + { type } , ir_builder_ ) ; <nl> + auto sin_a = llvm_ir : : EmitCallToIntrinsic ( llvm : : Intrinsic : : sin , { a } , <nl> + { type } , ir_builder_ ) ; <nl> + return ComposeComplex ( <nl> + op , <nl> + ir_builder_ - > CreateFMul ( <nl> + sin_a , ir_builder_ - > CreateFAdd ( half_exp_b , half_exp_neg_b ) ) , <nl> + ir_builder_ - > CreateFMul ( <nl> + cos_a , ir_builder_ - > CreateFSub ( half_exp_b , half_exp_neg_b ) ) ) ; <nl> + } <nl> + case HloOpcode : : kAbs : { <nl> + auto sum_sq = ir_builder_ - > CreateFAdd ( <nl> + ir_builder_ - > CreateFMul ( real ( operand_value ) , real ( operand_value ) ) , <nl> + ir_builder_ - > CreateFMul ( imag ( operand_value ) , imag ( operand_value ) ) ) ; <nl> + return llvm_ir : : EmitCallToIntrinsic ( llvm : : Intrinsic : : sqrt , { sum_sq } , <nl> + { sum_sq - > getType ( ) } , ir_builder_ ) ; <nl> + } <nl> + case HloOpcode : : kSign : { / / Sign ( c ) = c / | c | <nl> + auto sum_sq = ir_builder_ - > CreateFAdd ( <nl> + ir_builder_ - > CreateFMul ( real ( operand_value ) , real ( operand_value ) ) , <nl> + ir_builder_ - > CreateFMul ( imag ( operand_value ) , imag ( operand_value ) ) ) ; <nl> + auto cplx_abs = llvm_ir : : EmitCallToIntrinsic ( <nl> + llvm : : Intrinsic : : sqrt , { sum_sq } , { sum_sq - > getType ( ) } , ir_builder_ ) ; <nl> + auto type = cplx_abs - > getType ( ) ; <nl> + auto zero = llvm : : ConstantFP : : get ( type , 0 . 0 ) ; <nl> + auto oeq = ir_builder_ - > CreateFCmpOEQ ( cplx_abs , zero ) ; <nl> + return ir_builder_ - > CreateSelect ( <nl> + oeq , ComposeComplex ( op , zero , zero ) , <nl> + ComposeComplex ( <nl> + op , ir_builder_ - > CreateFDiv ( real ( operand_value ) , cplx_abs ) , <nl> + ir_builder_ - > CreateFDiv ( imag ( operand_value ) , cplx_abs ) ) ) ; <nl> + } <nl> + case HloOpcode : : kNegate : <nl> + return ComposeComplex ( op , ir_builder_ - > CreateFNeg ( real ( operand_value ) ) , <nl> + ir_builder_ - > CreateFNeg ( imag ( operand_value ) ) ) ; <nl> + case HloOpcode : : kReal : <nl> + return real ( operand_value ) ; <nl> + case HloOpcode : : kImag : <nl> + return imag ( operand_value ) ; <nl> + default : <nl> + return Unimplemented ( " unary complex op ' % s ' " , <nl> + HloOpcodeString ( op - > opcode ( ) ) . c_str ( ) ) ; <nl> + } <nl> + } <nl> + <nl>
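The EmitComplexBinaryOp function that follows implements complex arithmetic componentwise. For reference, the standard identities its kMultiply and kDivide cases encode:

\[ (a+bi)(c+di) = (ac - bd) + (ad + bc)i \]
\[ \frac{a+bi}{c+di} = \frac{(ac + bd) + (bc - ad)i}{c^2 + d^2} \]

The divide case additionally guards a zero denominator c^2 + d^2 with a select, producing an infinite real component rather than emitting an IEEE division by zero.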
StatusOr < llvm : : Value * > ElementalIrEmitter : : EmitBinaryOp ( <nl> const HloInstruction * op , llvm : : Value * lhs_value , <nl> llvm : : Value * rhs_value ) const { <nl> - return lhs_value - > getType ( ) - > isIntegerTy ( ) <nl> - ? EmitIntegerBinaryOp ( op , lhs_value , rhs_value , <nl> - primitive_util : : IsSignedIntegralType ( <nl> - op - > operand ( 0 ) - > shape ( ) . element_type ( ) ) ) <nl> - : EmitFloatBinaryOp ( op , lhs_value , rhs_value ) ; <nl> + PrimitiveType operand_type = op - > operand ( 0 ) - > shape ( ) . element_type ( ) ; <nl> + if ( lhs_value - > getType ( ) - > isIntegerTy ( ) ) { <nl> + return EmitIntegerBinaryOp ( <nl> + op , lhs_value , rhs_value , <nl> + primitive_util : : IsSignedIntegralType ( operand_type ) ) ; <nl> + } else if ( primitive_util : : IsComplexType ( operand_type ) ) { <nl> + return EmitComplexBinaryOp ( op , lhs_value , rhs_value ) ; <nl> + } else { <nl> + return EmitFloatBinaryOp ( op , lhs_value , rhs_value ) ; <nl> + } <nl> } <nl> <nl> StatusOr < llvm : : Value * > ElementalIrEmitter : : EmitFloatBinaryOp ( <nl> const HloInstruction * op , llvm : : Value * lhs_value , <nl> llvm : : Value * rhs_value ) const { <nl> switch ( op - > opcode ( ) ) { <nl> + / / case HloOpcode : : kAtan2 : / / TODO ( b / 65209142 ) : CPU atan2 support <nl> + case HloOpcode : : kComplex : <nl> + return ComposeComplex ( op , lhs_value , rhs_value ) ; <nl> case HloOpcode : : kAdd : <nl> return ir_builder_ - > CreateFAdd ( lhs_value , rhs_value ) ; <nl> case HloOpcode : : kSubtract : <nl> StatusOr < llvm : : Value * > ElementalIrEmitter : : EmitFloatBinaryOp ( <nl> } <nl> } <nl> <nl> + StatusOr < llvm : : Value * > ElementalIrEmitter : : EmitComplexBinaryOp ( <nl> + const HloInstruction * op , llvm : : Value * lhs_value , <nl> + llvm : : Value * rhs_value ) const { <nl> + auto real = [ & ] ( llvm : : Value * x ) { <nl> + return ir_builder_ - > CreateExtractValue ( x , { 0 } ) ; <nl> + } ; <nl> + auto imag = [ & ] ( llvm : : Value * x ) { <nl> + return ir_builder_ - > CreateExtractValue ( x , { 1 } ) ; <nl> + } ; <nl> + switch ( op - > opcode ( ) ) { <nl> + case HloOpcode : : kAdd : <nl> + return ComposeComplex ( <nl> + op , ir_builder_ - > CreateFAdd ( real ( lhs_value ) , real ( rhs_value ) ) , <nl> + ir_builder_ - > CreateFAdd ( imag ( lhs_value ) , imag ( rhs_value ) ) ) ; <nl> + case HloOpcode : : kSubtract : <nl> + return ComposeComplex ( <nl> + op , ir_builder_ - > CreateFSub ( real ( lhs_value ) , real ( rhs_value ) ) , <nl> + ir_builder_ - > CreateFSub ( imag ( lhs_value ) , imag ( rhs_value ) ) ) ; <nl> + case HloOpcode : : kMultiply : <nl> + return ComposeComplex ( <nl> + op , <nl> + ir_builder_ - > CreateFSub ( <nl> + ir_builder_ - > CreateFMul ( real ( lhs_value ) , real ( rhs_value ) ) , <nl> + ir_builder_ - > CreateFMul ( imag ( lhs_value ) , imag ( rhs_value ) ) ) , <nl> + ir_builder_ - > CreateFAdd ( <nl> + ir_builder_ - > CreateFMul ( real ( lhs_value ) , imag ( rhs_value ) ) , <nl> + ir_builder_ - > CreateFMul ( imag ( lhs_value ) , real ( rhs_value ) ) ) ) ; <nl> + case HloOpcode : : kDivide : { <nl> + / / ( a + bi ) / ( c + di ) = ( ( a + bi ) ( c - di ) ) / ( ( c + di ) ( c - di ) ) <nl> + / / = ( ( ac + bd ) + ( bc - ad ) i ) / ( c ^ 2 + d ^ 2 ) <nl> + auto rhs_sum_sq = ir_builder_ - > CreateFAdd ( <nl> + ir_builder_ - > CreateFMul ( real ( rhs_value ) , real ( rhs_value ) ) , <nl> + ir_builder_ - > CreateFMul ( imag ( rhs_value ) , imag ( rhs_value ) ) ) ; <nl> + auto type = rhs_sum_sq - > 
getType ( ) ; <nl> + auto zero = llvm : : ConstantFP : : get ( type , 0 . 0 ) ; <nl> + auto oeq = ir_builder_ - > CreateFCmpOEQ ( rhs_sum_sq , zero ) ; <nl> + return ir_builder_ - > CreateSelect ( <nl> + oeq , ComposeComplex ( op , llvm : : ConstantFP : : getInfinity ( type ) , zero ) , <nl> + ComposeComplex ( <nl> + op , <nl> + ir_builder_ - > CreateFDiv ( <nl> + ir_builder_ - > CreateFAdd ( <nl> + ir_builder_ - > CreateFMul ( real ( lhs_value ) , real ( rhs_value ) ) , <nl> + ir_builder_ - > CreateFMul ( imag ( lhs_value ) , <nl> + imag ( rhs_value ) ) ) , <nl> + rhs_sum_sq ) , <nl> + ir_builder_ - > CreateFDiv ( <nl> + ir_builder_ - > CreateFSub ( <nl> + ir_builder_ - > CreateFMul ( imag ( lhs_value ) , real ( rhs_value ) ) , <nl> + ir_builder_ - > CreateFMul ( real ( lhs_value ) , <nl> + imag ( rhs_value ) ) ) , <nl> + rhs_sum_sq ) ) ) ; <nl> + } <nl> + / / LLVM comparisons can be " unordered " ( U ) or " ordered " ( O ) - - ordered <nl> + / / comparisons always return false when one of the operands is NaN , whereas <nl> + / / unordered comparisons return true . <nl> + / / <nl> + / / We use ordered comparisons for everything except kNe , where we use an <nl> + / / unordered comparison . This makes x ! = y equivalent to ! ( x = = y ) , and <nl> + / / matches C + + ' s semantics . <nl> + case HloOpcode : : kEq : <nl> + return ir_builder_ - > CreateAnd ( <nl> + llvm_ir : : EmitComparison ( llvm : : CmpInst : : FCMP_OEQ , real ( lhs_value ) , <nl> + real ( rhs_value ) , ir_builder_ ) , <nl> + llvm_ir : : EmitComparison ( llvm : : CmpInst : : FCMP_OEQ , imag ( lhs_value ) , <nl> + imag ( rhs_value ) , ir_builder_ ) ) ; <nl> + case HloOpcode : : kNe : <nl> + return ir_builder_ - > CreateOr ( <nl> + llvm_ir : : EmitComparison ( llvm : : CmpInst : : FCMP_UNE , real ( lhs_value ) , <nl> + real ( rhs_value ) , ir_builder_ ) , <nl> + llvm_ir : : EmitComparison ( llvm : : CmpInst : : FCMP_UNE , imag ( lhs_value ) , <nl> + imag ( rhs_value ) , ir_builder_ ) ) ; <nl> + <nl> + / / TODO ( b / 65209142 ) : requires arg ( z ) - > requires atan | atan2 intrinsic <nl> + / / case HloOpcode : : kPower : <nl> + / / / / ( a + bi ) ^ ( c + di ) = exp ( i ( c + di ) * arg ( a + bi ) ) * ( a * a + b * b ) ^ ( c / 2 + di / 2 ) <nl> + default : <nl> + return Unimplemented ( " binary complex op ' % s ' " , <nl> + HloOpcodeString ( op - > opcode ( ) ) . c_str ( ) ) ; <nl> + } <nl> + } <nl> + <nl> llvm : : Value * ElementalIrEmitter : : EmitFloatMax ( llvm : : Value * lhs_value , <nl> llvm : : Value * rhs_value ) const { <nl> return llvm_ir : : EmitFloatMax ( lhs_value , rhs_value , ir_builder_ ) ; <nl> StatusOr < llvm : : Value * > ElementalIrEmitter : : EmitErfInv ( PrimitiveType prim_type , <nl> StatusOr < llvm : : Value * > ElementalIrEmitter : : EmitErfcInv ( <nl> PrimitiveType prim_type , llvm : : Value * value ) const { <nl> / / Compute erfcinv ( value ) by calculating erfinv ( 1 . 0 - value ) . <nl> - auto type = llvm_ir : : PrimitiveTypeToIrType ( prim_type , ir_builder_ ) ; <nl> + auto type = llvm_ir : : PrimitiveTypeToIrType ( prim_type , module_ ) ; <nl> auto one = llvm : : ConstantFP : : get ( type , 1 . 0 ) ; <nl> return EmitErfInv ( prim_type , ir_builder_ - > CreateFSub ( one , value ) ) ; <nl> } <nl> llvm_ir : : ElementGenerator ElementalIrEmitter : : MakeRngElementGenerator ( <nl> const { <nl> PrimitiveType param_prim_type = hlo - > operand ( 0 ) - > shape ( ) . 
element_type ( ) ; <nl> llvm : : Type * param_ir_type = <nl> - llvm_ir : : PrimitiveTypeToIrType ( param_prim_type , ir_builder_ ) ; <nl> + llvm_ir : : PrimitiveTypeToIrType ( param_prim_type , module_ ) ; <nl> <nl> / / Same values as PCG library <nl> / / https : / / github . com / imneme / pcg - c / blob / master / include / pcg_variants . h <nl> llvm_ir : : ElementGenerator ElementalIrEmitter : : MakeRngElementGenerator ( <nl> return ir_builder_ - > CreateZExt ( <nl> ir_builder_ - > CreateFCmpOLT ( get_next_uniform_float ( ) , p ) , <nl> llvm_ir : : PrimitiveTypeToIrType ( hlo - > shape ( ) . element_type ( ) , <nl> - ir_builder_ ) ) ; <nl> + module_ ) ) ; <nl> } <nl> default : <nl> return InvalidArgument ( <nl> llvm_ir : : ElementGenerator ElementalIrEmitter : : MakeElementGenerator ( <nl> case HloOpcode : : kCos : <nl> case HloOpcode : : kExp : <nl> case HloOpcode : : kFloor : <nl> + case HloOpcode : : kImag : <nl> case HloOpcode : : kIsFinite : <nl> case HloOpcode : : kLog : <nl> case HloOpcode : : kNegate : <nl> + case HloOpcode : : kReal : <nl> case HloOpcode : : kSign : <nl> case HloOpcode : : kSin : <nl> case HloOpcode : : kTanh : <nl> llvm_ir : : ElementGenerator ElementalIrEmitter : : MakeElementGenerator ( <nl> return EmitUnaryOp ( hlo , operand_value ) ; <nl> } ; <nl> case HloOpcode : : kAdd : <nl> + case HloOpcode : : kAtan2 : <nl> + case HloOpcode : : kComplex : <nl> case HloOpcode : : kDivide : <nl> case HloOpcode : : kEq : <nl> case HloOpcode : : kGe : <nl> llvm_ir : : ElementGenerator ElementalIrEmitter : : MakeElementGenerator ( <nl> } <nl> <nl> llvm_ir : : SetToFirstInsertPoint ( exit_block , ir_builder_ ) ; <nl> - llvm : : PHINode * output = ir_builder_ - > CreatePHI ( <nl> - llvm_ir : : PrimitiveTypeToIrType ( hlo - > shape ( ) . element_type ( ) , <nl> - ir_builder_ ) , <nl> - hlo - > operands ( ) . size ( ) ) ; <nl> + llvm : : PHINode * output = <nl> + ir_builder_ - > CreatePHI ( llvm_ir : : PrimitiveTypeToIrType ( <nl> + hlo - > shape ( ) . element_type ( ) , module_ ) , <nl> + hlo - > operands ( ) . size ( ) ) ; <nl> auto prior_insert_point = ir_builder_ - > GetInsertPoint ( ) ; <nl> <nl> ir_builder_ - > SetInsertPoint ( init_block ) ; <nl> llvm_ir : : ElementGenerator ElementalIrEmitter : : MakeElementGenerator ( <nl> / / else - > return data from ' index ' . <nl> llvm : : Value * ret_value_addr = llvm_ir : : EmitAllocaAtFunctionEntry ( <nl> llvm_ir : : PrimitiveTypeToIrType ( hlo - > shape ( ) . element_type ( ) , <nl> - ir_builder_ ) , <nl> + module_ ) , <nl> " ret_value_addr " , ir_builder_ ) ; <nl> llvm_ir : : LlvmIfData if_data = llvm_ir : : EmitIfThenElse ( <nl> slice_intersection , " slice_intersection " , ir_builder_ ) ; <nl> llvm_ir : : ElementGenerator ElementalIrEmitter : : MakeElementGenerator ( <nl> / / } <nl> llvm : : Value * ret_value_addr = llvm_ir : : EmitAllocaAtFunctionEntry ( <nl> llvm_ir : : PrimitiveTypeToIrType ( hlo - > shape ( ) . element_type ( ) , <nl> - ir_builder_ ) , <nl> + module_ ) , <nl> " pad_result_addr " , ir_builder_ ) ; <nl> llvm_ir : : LlvmIfData if_data = <nl> llvm_ir : : EmitIfThenElse ( in_bounds , " in_bounds " , ir_builder_ ) ; <nl> llvm_ir : : ElementGenerator ElementalIrEmitter : : MakeElementGenerator ( <nl> ir_builder_ ) ; <nl> PrimitiveType primitive_type = hlo - > shape ( ) . 
element_type ( ) ; <nl> llvm : : Type * primitive_type_llvm = <nl> - llvm_ir : : PrimitiveTypeToIrType ( primitive_type , ir_builder_ ) ; <nl> + llvm_ir : : PrimitiveTypeToIrType ( primitive_type , module_ ) ; <nl> llvm : : Value * accumulator_alloca = llvm_ir : : EmitAllocaAtFunctionEntry ( <nl> primitive_type_llvm , " dot_acc " , ir_builder_ ) ; <nl> ir_builder_ - > CreateStore ( <nl> llvm_ir : : ElementGenerator ElementalIrEmitter : : MakeElementGenerator ( <nl> TF_ASSIGN_OR_RETURN ( llvm : : Value * lhs_value , lhs_generator ( lhs_index ) ) ; <nl> TF_ASSIGN_OR_RETURN ( llvm : : Value * rhs_value , rhs_generator ( rhs_index ) ) ; <nl> llvm : : Value * next_accumulator ; <nl> - if ( primitive_util : : IsFloatingPointType ( primitive_type ) ) { <nl> + if ( primitive_util : : IsComplexType ( primitive_type ) ) { <nl> + auto real = [ & ] ( llvm : : Value * x ) { <nl> + return ir_builder_ - > CreateExtractValue ( x , { 0 } ) ; <nl> + } ; <nl> + auto imag = [ & ] ( llvm : : Value * x ) { <nl> + return ir_builder_ - > CreateExtractValue ( x , { 1 } ) ; <nl> + } ; <nl> + llvm : : Value * product_real = ir_builder_ - > CreateFSub ( <nl> + ir_builder_ - > CreateFMul ( real ( lhs_value ) , real ( rhs_value ) ) , <nl> + ir_builder_ - > CreateFMul ( imag ( lhs_value ) , imag ( rhs_value ) ) ) ; <nl> + llvm : : Value * product_imag = ir_builder_ - > CreateFAdd ( <nl> + ir_builder_ - > CreateFMul ( real ( lhs_value ) , imag ( rhs_value ) ) , <nl> + ir_builder_ - > CreateFMul ( imag ( lhs_value ) , real ( rhs_value ) ) ) ; <nl> + next_accumulator = ir_builder_ - > CreateInsertValue ( <nl> + current_accumulator , <nl> + ir_builder_ - > CreateFAdd ( real ( current_accumulator ) , product_real ) , <nl> + { 0 } ) ; <nl> + next_accumulator = ir_builder_ - > CreateInsertValue ( <nl> + next_accumulator , <nl> + ir_builder_ - > CreateFAdd ( imag ( current_accumulator ) , product_imag ) , <nl> + { 1 } ) ; <nl> + } else if ( primitive_util : : IsFloatingPointType ( primitive_type ) ) { <nl> next_accumulator = ir_builder_ - > CreateFAdd ( <nl> current_accumulator , <nl> ir_builder_ - > CreateFMul ( lhs_value , rhs_value ) ) ; <nl> llvm_ir : : ElementGenerator ElementalIrEmitter : : MakeElementGenerator ( <nl> } <nl> } <nl> <nl> + llvm : : Value * ElementalIrEmitter : : ComposeComplex ( const HloInstruction * op , <nl> + llvm : : Value * real , <nl> + llvm : : Value * imag ) const { <nl> + auto cplx_type = <nl> + llvm_ir : : PrimitiveTypeToIrType ( op - > shape ( ) . element_type ( ) , module_ ) ; <nl> + auto complex = ir_builder_ - > CreateInsertValue ( <nl> + llvm : : ConstantAggregateZero : : get ( cplx_type ) , real , { 0 } ) ; <nl> + if ( imag ! = nullptr ) { <nl> + complex = ir_builder_ - > CreateInsertValue ( complex , imag , { 1 } ) ; <nl> + } <nl> + return complex ; <nl> + } <nl> + <nl> } / / namespace xla <nl> mmm a / tensorflow / compiler / xla / service / elemental_ir_emitter . h <nl> ppp b / tensorflow / compiler / xla / service / elemental_ir_emitter . 
h <nl> class ElementalIrEmitter { <nl> const HloToElementGeneratorMap & operand_to_generator ) const ; <nl> <nl> llvm : : IRBuilder < > * ir_builder ( ) const { return ir_builder_ ; } <nl> + llvm : : Module * module ( ) const { return module_ ; } <nl> <nl> protected : <nl> virtual StatusOr < llvm : : Value * > EmitIntegerUnaryOp ( <nl> class ElementalIrEmitter { <nl> virtual StatusOr < llvm : : Value * > EmitFloatUnaryOp ( <nl> const HloInstruction * op , llvm : : Value * operand_value ) const ; <nl> <nl> + virtual StatusOr < llvm : : Value * > EmitComplexUnaryOp ( <nl> + const HloInstruction * op , llvm : : Value * operand_value ) const ; <nl> + <nl> virtual StatusOr < llvm : : Value * > EmitIntegerBinaryOp ( const HloInstruction * op , <nl> llvm : : Value * lhs_value , <nl> llvm : : Value * rhs_value , <nl> class ElementalIrEmitter { <nl> const HloInstruction * op , llvm : : Value * lhs_value , <nl> llvm : : Value * rhs_value ) const ; <nl> <nl> + virtual StatusOr < llvm : : Value * > EmitComplexBinaryOp ( <nl> + const HloInstruction * op , llvm : : Value * lhs_value , <nl> + llvm : : Value * rhs_value ) const ; <nl> + <nl> virtual llvm : : Value * EmitFloatMax ( llvm : : Value * lhs_value , <nl> llvm : : Value * rhs_value ) const ; <nl> <nl> class ElementalIrEmitter { <nl> / / compiled executable outside of the HLO code itself . <nl> const HloModuleConfig & hlo_module_config_ ; <nl> <nl> + protected : <nl> + / / Composes a complex struct . imag may be nullptr for simple cast operations . <nl> + llvm : : Value * ComposeComplex ( const HloInstruction * op , llvm : : Value * real , <nl> + llvm : : Value * imag ) const ; <nl> + <nl> private : <nl> / / Returns a ElementGenerator for a RNG HloInstruction . <nl> llvm_ir : : ElementGenerator MakeRngElementGenerator ( <nl> mmm a / tensorflow / compiler / xla / service / gpu / elemental_ir_emitter . cc <nl> ppp b / tensorflow / compiler / xla / service / gpu / elemental_ir_emitter . cc <nl> StatusOr < llvm : : Value * > GpuElementalIrEmitter : : EmitFloatBinaryOp ( <nl> PrimitiveType rhs_input_type = op - > operand ( 1 ) - > shape ( ) . element_type ( ) ; <nl> PrimitiveType output_type = op - > shape ( ) . element_type ( ) ; <nl> switch ( op - > opcode ( ) ) { <nl> + case HloOpcode : : kAtan2 : <nl> + return EmitLibdeviceMathCall ( " __nv_atan2 " , { lhs_value , rhs_value } , <nl> + { lhs_input_type , rhs_input_type } , <nl> + output_type ) ; <nl> case HloOpcode : : kRemainder : { <nl> return EmitLibdeviceMathCall ( " __nv_fmod " , { lhs_value , rhs_value } , <nl> { lhs_input_type , rhs_input_type } , <nl> StatusOr < llvm : : Value * > GpuElementalIrEmitter : : EmitFloatUnaryOp ( <nl> } <nl> } <nl> <nl> + StatusOr < llvm : : Value * > GpuElementalIrEmitter : : EmitComplexUnaryOp ( <nl> + const HloInstruction * op , llvm : : Value * operand_value ) const { <nl> + PrimitiveType input_type = op - > operand ( 0 ) - > shape ( ) . element_type ( ) ; <nl> + PrimitiveType component_type = <nl> + primitive_util : : IsComplexType ( input_type ) <nl> + ? primitive_util : : ComplexComponentType ( input_type ) <nl> + : input_type ; <nl> + auto real = [ & ] ( llvm : : Value * x ) { <nl> + return ir_builder_ - > CreateExtractValue ( x , { 0 } ) ; <nl> + } ; <nl> + auto imag = [ & ] ( llvm : : Value * x ) { <nl> + return ir_builder_ - > CreateExtractValue ( x , { 1 } ) ; <nl> + } ; <nl> + <nl> + switch ( op - > opcode ( ) ) { <nl> + case HloOpcode : : kLog : { <nl> + / / log ( a + bi ) = . 
5 * log ( a ^ 2 + b ^ 2 ) + i * atan2 ( b , a ) <nl> + auto a = real ( operand_value ) ; <nl> + auto b = imag ( operand_value ) ; <nl> + llvm : : Type * llvm_ty = a - > getType ( ) ; <nl> + auto sum_sq = ir_builder_ - > CreateFAdd ( ir_builder_ - > CreateFMul ( a , a ) , <nl> + ir_builder_ - > CreateFMul ( b , b ) ) ; <nl> + TF_ASSIGN_OR_RETURN ( <nl> + auto log_sum_sq , <nl> + EmitLibdeviceMathCall ( " __nv_log " , { sum_sq } , { component_type } , <nl> + component_type ) ) ; <nl> + TF_ASSIGN_OR_RETURN ( <nl> + auto angle , EmitLibdeviceMathCall ( " __nv_atan2 " , { b , a } , <nl> + { component_type , component_type } , <nl> + component_type ) ) ; <nl> + auto one_half = llvm : : ConstantFP : : get ( llvm_ty , 0 . 5 ) ; <nl> + return ComposeComplex ( op , ir_builder_ - > CreateFMul ( one_half , log_sum_sq ) , <nl> + angle ) ; <nl> + } <nl> + / / TODO ( b / 65408531 ) : Implement kPower on GPU , where atan2 is available . <nl> + / / case HloOpcode : : kPower : <nl> + / / / / ( a + bi ) ^ ( c + di ) = exp ( i ( c + di ) * arg ( a + bi ) ) * ( a * a + b * b ) ^ ( 0 . 5 ( c + di ) ) <nl> + case HloOpcode : : kExp : { <nl> + / / e ^ ( a + bi ) = e ^ a * ( cos ( b ) + sin ( b ) i ) <nl> + auto b = imag ( operand_value ) ; <nl> + TF_ASSIGN_OR_RETURN ( <nl> + auto exp_a , EmitLibdeviceMathCall ( " __nv_exp " , { real ( operand_value ) } , <nl> + { component_type } , component_type ) ) ; <nl> + TF_ASSIGN_OR_RETURN ( <nl> + auto cos_b , EmitLibdeviceMathCall ( " __nv_cos " , { b } , { component_type } , <nl> + component_type ) ) ; <nl> + TF_ASSIGN_OR_RETURN ( <nl> + auto sin_b , EmitLibdeviceMathCall ( " __nv_sin " , { b } , { component_type } , <nl> + component_type ) ) ; <nl> + return ComposeComplex ( op , ir_builder_ - > CreateFMul ( exp_a , cos_b ) , <nl> + ir_builder_ - > CreateFMul ( exp_a , sin_b ) ) ; <nl> + } <nl> + case HloOpcode : : kCos : { <nl> + / / cos ( a + bi ) = . 5 ( cos ( a ) * ( e ^ - b + e ^ b ) + i * sin ( a ) * ( e ^ - b - e ^ b ) ) <nl> + auto a = real ( operand_value ) ; <nl> + auto llvm_ty = a - > getType ( ) ; <nl> + TF_ASSIGN_OR_RETURN ( <nl> + auto exp_b , EmitLibdeviceMathCall ( " __nv_exp " , { imag ( operand_value ) } , <nl> + { component_type } , component_type ) ) ; <nl> + TF_ASSIGN_OR_RETURN ( <nl> + auto cos_a , EmitLibdeviceMathCall ( " __nv_cos " , { a } , { component_type } , <nl> + component_type ) ) ; <nl> + TF_ASSIGN_OR_RETURN ( <nl> + auto sin_a , EmitLibdeviceMathCall ( " __nv_sin " , { a } , { component_type } , <nl> + component_type ) ) ; <nl> + auto half_exp_b = <nl> + ir_builder_ - > CreateFMul ( llvm : : ConstantFP : : get ( llvm_ty , 0 . 5 ) , exp_b ) ; <nl> + auto half_exp_neg_b = <nl> + ir_builder_ - > CreateFDiv ( llvm : : ConstantFP : : get ( llvm_ty , 0 . 5 ) , exp_b ) ; <nl> + return ComposeComplex ( <nl> + op , <nl> + ir_builder_ - > CreateFMul ( <nl> + cos_a , ir_builder_ - > CreateFAdd ( half_exp_neg_b , half_exp_b ) ) , <nl> + ir_builder_ - > CreateFMul ( <nl> + sin_a , ir_builder_ - > CreateFSub ( half_exp_neg_b , half_exp_b ) ) ) ; <nl> + } <nl> + <nl> + case HloOpcode : : kSin : { <nl> + / / sin ( a + bi ) = 0 . 
5 ( sin ( a ) * ( e ^ b + e ^ - b ) + i * cos ( a ) * ( e ^ b - e ^ - b ) <nl> + auto a = real ( operand_value ) ; <nl> + auto llvm_ty = a - > getType ( ) ; <nl> + TF_ASSIGN_OR_RETURN ( <nl> + auto exp_b , EmitLibdeviceMathCall ( " __nv_exp " , { imag ( operand_value ) } , <nl> + { component_type } , component_type ) ) ; <nl> + TF_ASSIGN_OR_RETURN ( <nl> + auto cos_a , EmitLibdeviceMathCall ( " __nv_cos " , { a } , { component_type } , <nl> + component_type ) ) ; <nl> + TF_ASSIGN_OR_RETURN ( <nl> + auto sin_a , EmitLibdeviceMathCall ( " __nv_sin " , { a } , { component_type } , <nl> + component_type ) ) ; <nl> + auto half_exp_b = <nl> + ir_builder_ - > CreateFMul ( llvm : : ConstantFP : : get ( llvm_ty , 0 . 5 ) , exp_b ) ; <nl> + auto half_exp_neg_b = <nl> + ir_builder_ - > CreateFDiv ( llvm : : ConstantFP : : get ( llvm_ty , 0 . 5 ) , exp_b ) ; <nl> + return ComposeComplex ( <nl> + op , <nl> + ir_builder_ - > CreateFMul ( <nl> + sin_a , ir_builder_ - > CreateFAdd ( half_exp_b , half_exp_neg_b ) ) , <nl> + ir_builder_ - > CreateFMul ( <nl> + cos_a , ir_builder_ - > CreateFSub ( half_exp_b , half_exp_neg_b ) ) ) ; <nl> + } <nl> + default : <nl> + return ElementalIrEmitter : : EmitComplexUnaryOp ( op , operand_value ) ; <nl> + } <nl> + } <nl> + <nl> llvm : : Value * GpuElementalIrEmitter : : EmitDeviceFunctionCall ( <nl> const string & callee_name , <nl> tensorflow : : gtl : : ArraySlice < llvm : : Value * > operands , <nl> llvm : : Value * GpuElementalIrEmitter : : EmitDeviceFunctionCall ( <nl> std : : vector < llvm : : Type * > ir_input_types ; <nl> for ( PrimitiveType input_type : input_types ) { <nl> ir_input_types . push_back ( <nl> - llvm_ir : : PrimitiveTypeToIrType ( input_type , ir_builder_ ) ) ; <nl> + llvm_ir : : PrimitiveTypeToIrType ( input_type , module_ ) ) ; <nl> } <nl> llvm : : FunctionType * callee_type = llvm : : FunctionType : : get ( <nl> - llvm_ir : : PrimitiveTypeToIrType ( output_type , <nl> - ir_builder_ ) , / / The return type . <nl> - ir_input_types , / / The parameter types . <nl> - false ) ; / / No variadic arguments . <nl> + llvm_ir : : PrimitiveTypeToIrType ( output_type , module_ ) , / / Return type . <nl> + ir_input_types , / / Parameter types . <nl> + false ) ; / / No variadic arguments . <nl> <nl> / / Declares the callee if it is not declared already . <nl> llvm : : Function * callee = llvm : : cast < llvm : : Function > ( <nl> llvm_ir : : ElementGenerator GpuElementalIrEmitter : : MakeElementGenerator ( <nl> <nl> PrimitiveType operand_element_type = operand - > shape ( ) . element_type ( ) ; <nl> llvm : : Value * accum_ptr = llvm_ir : : EmitAllocaAtFunctionEntry ( <nl> - llvm_ir : : PrimitiveTypeToIrType ( operand_element_type , ir_builder_ ) , <nl> + llvm_ir : : PrimitiveTypeToIrType ( operand_element_type , module_ ) , <nl> " reduce_window_accum_ptr " , ir_builder_ ) ; <nl> { <nl> TF_ASSIGN_OR_RETURN ( llvm : : Value * init_value , <nl> llvm_ir : : ElementGenerator GpuElementalIrEmitter : : MakeElementGenerator ( <nl> const HloInstruction * operand = hlo - > operand ( 0 ) ; <nl> llvm : : Value * accum_ptr = <nl> ir_builder ( ) - > CreateAlloca ( llvm_ir : : PrimitiveTypeToIrType ( <nl> - hlo - > shape ( ) . element_type ( ) , ir_builder ( ) ) ) ; <nl> + hlo - > shape ( ) . element_type ( ) , module_ ) ) ; <nl> TF_ASSIGN_OR_RETURN ( llvm : : Value * init_value , <nl> operand_to_generator . 
at ( hlo - > operand ( 1 ) ) ( { } ) ) ; <nl> ir_builder ( ) - > CreateStore ( init_value , accum_ptr ) ; <nl> mmm a / tensorflow / compiler / xla / service / gpu / elemental_ir_emitter . h <nl> ppp b / tensorflow / compiler / xla / service / gpu / elemental_ir_emitter . h <nl> class GpuElementalIrEmitter : public ElementalIrEmitter { <nl> StatusOr < llvm : : Value * > EmitFloatUnaryOp ( <nl> const HloInstruction * op , llvm : : Value * operand_value ) const override ; <nl> <nl> + StatusOr < llvm : : Value * > EmitComplexUnaryOp ( <nl> + const HloInstruction * op , llvm : : Value * operand_value ) const override ; <nl> + <nl> StatusOr < llvm : : Value * > EmitFloatBinaryOp ( <nl> const HloInstruction * op , llvm : : Value * lhs_value , <nl> llvm : : Value * rhs_value ) const override ; <nl> mmm a / tensorflow / compiler / xla / service / gpu / gpu_compiler . cc <nl> ppp b / tensorflow / compiler / xla / service / gpu / gpu_compiler . cc <nl> tensorflow : : Status OptimizeHloModule ( <nl> / * is_layout_sensitive = * / false , <nl> [ ] ( const Shape & , const Shape & ) { return false ; } ) ; <nl> pass . AddPass < TupleSimplifier > ( ) ; <nl> + pass . AddPass < HloDCE > ( ) ; <nl> pass . AddPass < ReshapeMover > ( ) ; <nl> pass . AddPass < HloConstantFolding > ( ) ; <nl> } <nl> mmm a / tensorflow / compiler / xla / service / gpu / hlo_to_ir_bindings . cc <nl> ppp b / tensorflow / compiler / xla / service / gpu / hlo_to_ir_bindings . cc <nl> void HloToIrBindings : : EmitBasePointersForHlos ( <nl> slice_result . ConsumeValueOrDie ( ) ; <nl> if ( slice . allocation ( ) - > is_thread_local ( ) ) { <nl> llvm : : Type * pointee_type = <nl> - llvm_ir : : ShapeToIrType ( non_io_hlo - > shape ( ) , ir_builder_ ) ; <nl> + llvm_ir : : ShapeToIrType ( non_io_hlo - > shape ( ) , module_ ) ; <nl> BindHloToIrValue ( * non_io_hlo , <nl> ir_builder_ - > CreateAlloca ( pointee_type ) , index ) ; <nl> } else { <nl> llvm : : Value * HloToIrBindings : : EmitGetTupleElement ( const HloInstruction * gte , <nl> if ( gte - > operand ( 0 ) - > opcode ( ) ! = HloOpcode : : kGetTupleElement ) { <nl> return llvm_ir : : EmitGetTupleElement ( <nl> gte - > shape ( ) , gte - > tuple_index ( ) , / * alignment = * / 1 , <nl> - GetTypedIrValue ( * gte - > operand ( 0 ) , { } , base_ptr ) , ir_builder_ ) ; <nl> + GetTypedIrValue ( * gte - > operand ( 0 ) , { } , base_ptr ) , ir_builder_ , module_ ) ; <nl> } <nl> return llvm_ir : : EmitGetTupleElement ( <nl> gte - > shape ( ) , gte - > tuple_index ( ) , / * alignment = * / 1 , <nl> - EmitGetTupleElement ( gte - > operand ( 0 ) , base_ptr ) , ir_builder_ ) ; <nl> + EmitGetTupleElement ( gte - > operand ( 0 ) , base_ptr ) , ir_builder_ , module_ ) ; <nl> } <nl> <nl> llvm : : Value * HloToIrBindings : : GetTypedIrValue ( const HloInstruction & hlo , <nl> const ShapeIndex & shape_index , <nl> llvm : : Value * ir_value ) { <nl> llvm : : Type * pointee_type = llvm_ir : : ShapeToIrType ( <nl> - ShapeUtil : : GetSubshape ( hlo . shape ( ) , shape_index ) , ir_builder_ ) ; <nl> + ShapeUtil : : GetSubshape ( hlo . shape ( ) , shape_index ) , module_ ) ; <nl> llvm : : Type * dest_type = pointee_type - > getPointerTo ( ) ; <nl> <nl> llvm : : Value * typed_ir_value ; <nl> mmm a / tensorflow / compiler / xla / service / gpu / hlo_to_ir_bindings . h <nl> ppp b / tensorflow / compiler / xla / service / gpu / hlo_to_ir_bindings . 
h <nl> class HloToIrBindings { <nl> public : <nl> HloToIrBindings ( const HloModule & module , <nl> const BufferAssignment * buffer_assignment , <nl> - llvm : : IRBuilder < > * ir_builder , bool is_nested ) <nl> + llvm : : IRBuilder < > * ir_builder , llvm : : Module * llvm_module , <nl> + bool is_nested ) <nl> : buffer_assignment_ ( buffer_assignment ) , <nl> is_nested_ ( is_nested ) , <nl> ir_builder_ ( ir_builder ) , <nl> + module_ ( llvm_module ) , <nl> alias_analysis_ ( module , * buffer_assignment_ , <nl> & ir_builder_ - > getContext ( ) ) { } <nl> <nl> class HloToIrBindings { <nl> const bool is_nested_ ; <nl> <nl> llvm : : IRBuilder < > * ir_builder_ ; <nl> + llvm : : Module * module_ ; <nl> <nl> / / Stores the underlying llvm : : IrArray for each HloInstruction . <nl> / / For an instruction that generates multiple outputs , the root will be a <nl> mmm a / tensorflow / compiler / xla / service / gpu / ir_emitter . cc <nl> ppp b / tensorflow / compiler / xla / service / gpu / ir_emitter . cc <nl> namespace gpu { <nl> IrEmitter : : IrEmitter ( const HloModuleConfig & hlo_module_config , <nl> IrEmitterContext * ir_emitter_context , bool is_nested ) <nl> : ir_emitter_context_ ( ir_emitter_context ) , <nl> - ir_builder_ ( ir_emitter_context - > llvm_module ( ) - > getContext ( ) ) , <nl> + module_ ( ir_emitter_context - > llvm_module ( ) ) , <nl> + ir_builder_ ( module_ - > getContext ( ) ) , <nl> bindings_ ( ir_emitter_context - > hlo_module ( ) , <nl> - & ir_emitter_context - > buffer_assignment ( ) , & ir_builder_ , <nl> + & ir_emitter_context - > buffer_assignment ( ) , & ir_builder_ , module_ , <nl> is_nested ) , <nl> hlo_module_config_ ( hlo_module_config ) { <nl> ir_builder_ . setFastMathFlags ( llvm_ir : : GetFastMathFlags ( <nl> Status IrEmitter : : DefaultAction ( HloInstruction * hlo ) { <nl> } ; <nl> } <nl> return EmitTargetElementLoop ( <nl> - * hlo , GpuElementalIrEmitter ( hlo_module_config_ , <nl> - ir_emitter_context_ - > llvm_module ( ) , <nl> - & ir_builder_ , GetNestedComputer ( ) ) <nl> + * hlo , GpuElementalIrEmitter ( hlo_module_config_ , module_ , & ir_builder_ , <nl> + GetNestedComputer ( ) ) <nl> . MakeElementGenerator ( hlo , operand_to_generator ) ) ; <nl> } <nl> <nl> Status IrEmitter : : HandleConstant ( HloInstruction * constant , <nl> const Literal & literal ) { <nl> llvm : : Constant * initializer = <nl> - llvm_ir : : ConvertLiteralToIrConstant ( literal , & ir_builder_ ) ; <nl> + llvm_ir : : ConvertLiteralToIrConstant ( literal , module_ ) ; <nl> llvm : : GlobalVariable * global_for_const = new llvm : : GlobalVariable ( <nl> - * ir_emitter_context_ - > llvm_module ( ) , initializer - > getType ( ) , <nl> + * module_ , initializer - > getType ( ) , <nl> / * isConstant = * / true , llvm : : GlobalValue : : PrivateLinkage , initializer , <nl> / * Name = * / " " ) ; <nl> VLOG ( 2 ) < < " HandleConstant : " < < constant - > ToString ( ) < < std : : endl <nl> Status IrEmitter : : HandleGetTupleElement ( HloInstruction * get_tuple_element , <nl> get_tuple_element - > shape ( ) , get_tuple_element - > tuple_index ( ) , <nl> / / TODO ( b / 26344050 ) : tighten the alignment here <nl> / / based on the real element type . <nl> - / * alignment = * / 1 , GetBasePointer ( * operand ) , & ir_builder_ ) ) ; <nl> + / * alignment = * / 1 , GetBasePointer ( * operand ) , & ir_builder_ , module_ ) ) ; <nl> return Status : : OK ( ) ; <nl> } <nl> <nl> Status IrEmitter : : HandleTuple ( <nl> for ( const HloInstruction * operand : operands ) { <nl> base_ptrs . 
push_back ( GetBasePointer ( * operand ) ) ; <nl> } <nl> - llvm_ir : : EmitTuple ( GetIrArray ( * tuple ) , base_ptrs , & ir_builder_ ) ; <nl> + llvm_ir : : EmitTuple ( GetIrArray ( * tuple ) , base_ptrs , & ir_builder_ , module_ ) ; <nl> return Status : : OK ( ) ; <nl> } <nl> <nl> Status IrEmitter : : HandleSelect ( HloInstruction * select , HloInstruction * pred , <nl> if ( ShapeUtil : : IsTuple ( select - > shape ( ) ) ) { <nl> llvm_ir : : EmitTupleSelect ( GetIrArray ( * select ) , GetIrArray ( * pred ) , <nl> GetBasePointer ( * on_true ) , <nl> - GetBasePointer ( * on_false ) , & ir_builder_ ) ; <nl> + GetBasePointer ( * on_false ) , & ir_builder_ , module_ ) ; <nl> return Status : : OK ( ) ; <nl> } <nl> <nl> Status IrEmitter : : HandleDot ( HloInstruction * dot , <nl> lhs_array . EmitReadArrayElement ( / * index = * / { } , & ir_builder_ ) ; <nl> llvm : : Value * rhs_value = <nl> rhs_array . EmitReadArrayElement ( / * index = * / { } , & ir_builder_ ) ; <nl> - llvm : : Value * result = ir_builder_ . CreateFMul ( lhs_value , rhs_value ) ; <nl> + llvm : : Value * result ; <nl> + if ( ShapeUtil : : ElementIsComplex ( lhs_shape ) ) { <nl> + auto real = [ & ] ( llvm : : Value * x ) { <nl> + return ir_builder_ . CreateExtractValue ( x , { 0 } ) ; <nl> + } ; <nl> + auto imag = [ & ] ( llvm : : Value * x ) { <nl> + return ir_builder_ . CreateExtractValue ( x , { 1 } ) ; <nl> + } ; <nl> + llvm : : Value * real_result = ir_builder_ . CreateFSub ( <nl> + ir_builder_ . CreateFMul ( real ( lhs_value ) , real ( rhs_value ) ) , <nl> + ir_builder_ . CreateFMul ( imag ( lhs_value ) , imag ( rhs_value ) ) ) ; <nl> + llvm : : Value * imag_result = ir_builder_ . CreateFAdd ( <nl> + ir_builder_ . CreateFMul ( real ( lhs_value ) , imag ( rhs_value ) ) , <nl> + ir_builder_ . CreateFMul ( imag ( lhs_value ) , real ( rhs_value ) ) ) ; <nl> + result = llvm : : ConstantAggregateZero : : get ( lhs_array . GetElementLlvmType ( ) ) ; <nl> + result = ir_builder_ . CreateInsertValue ( result , real_result , { 0 } ) ; <nl> + result = ir_builder_ . CreateInsertValue ( result , imag_result , { 1 } ) ; <nl> + } else { <nl> + result = ir_builder_ . CreateFMul ( lhs_value , rhs_value ) ; <nl> + } <nl> target_array . EmitWriteArrayElement ( / * index = * / { } , result , & ir_builder_ ) ; <nl> return Status : : OK ( ) ; <nl> } <nl> Status IrEmitter : : HandleDot ( HloInstruction * dot , <nl> <nl> / / Initialize the accumulator in the preheader to zero . <nl> new llvm : : StoreInst ( <nl> - llvm : : ConstantFP : : get ( accum_type , 0 . 0 ) , / / The value stored . <nl> - accum_address , / / The address . <nl> + llvm : : Constant : : getNullValue ( lhs_array . GetElementLlvmType ( ) ) , / / init 0 <nl> + accum_address , / / The address . <nl> reduction_loop - > GetPreheaderBasicBlock ( ) <nl> - > getTerminator ( ) ) ; / / The instruction this store is inserted before . <nl> <nl> Status IrEmitter : : HandleDot ( HloInstruction * dot , <nl> lhs_array . EmitReadArrayElement ( lhs_index , & ir_builder_ ) ; <nl> llvm : : Value * rhs_element = <nl> rhs_array . EmitReadArrayElement ( rhs_index , & ir_builder_ ) ; <nl> - llvm : : Value * product = ir_builder_ . CreateFMul ( lhs_element , rhs_element ) ; <nl> llvm : : Value * accum = ir_builder_ . CreateLoad ( accum_address ) ; <nl> - llvm : : Value * updated_accum = ir_builder_ . CreateFAdd ( accum , product ) ; <nl> + llvm : : Value * updated_accum ; <nl> + if ( ShapeUtil : : ElementIsComplex ( lhs_shape ) ) { <nl> + # define REAL ( x ) ir_builder_ . 
CreateExtractValue ( x , { 0 } ) <nl> + # define IMAG ( x ) ir_builder_ . CreateExtractValue ( x , { 1 } ) <nl> + llvm : : Value * product_real = ir_builder_ . CreateFSub ( <nl> + ir_builder_ . CreateFMul ( REAL ( lhs_element ) , REAL ( rhs_element ) ) , <nl> + ir_builder_ . CreateFMul ( IMAG ( lhs_element ) , IMAG ( rhs_element ) ) ) ; <nl> + llvm : : Value * product_imag = ir_builder_ . CreateFAdd ( <nl> + ir_builder_ . CreateFMul ( REAL ( lhs_element ) , IMAG ( rhs_element ) ) , <nl> + ir_builder_ . CreateFMul ( IMAG ( lhs_element ) , REAL ( rhs_element ) ) ) ; <nl> + updated_accum = ir_builder_ . CreateInsertValue ( <nl> + accum , ir_builder_ . CreateFAdd ( REAL ( accum ) , product_real ) , { 0 } ) ; <nl> + updated_accum = ir_builder_ . CreateInsertValue ( <nl> + updated_accum , ir_builder_ . CreateFAdd ( IMAG ( accum ) , product_imag ) , { 1 } ) ; <nl> + # undef IMAG <nl> + # undef REAL <nl> + } else { <nl> + llvm : : Value * product = ir_builder_ . CreateFMul ( lhs_element , rhs_element ) ; <nl> + updated_accum = ir_builder_ . CreateFAdd ( accum , product ) ; <nl> + } <nl> ir_builder_ . CreateStore ( updated_accum , accum_address ) ; <nl> <nl> / / After the reduction loop exits , store the accumulator into the target <nl> Status IrEmitter : : HandleReduce ( HloInstruction * reduce , HloInstruction * arg , <nl> / / Initialize an accumulator with init_value . <nl> llvm : : AllocaInst * accumulator_addr = <nl> ir_builder_ . CreateAlloca ( llvm_ir : : PrimitiveTypeToIrType ( <nl> - reduce - > shape ( ) . element_type ( ) , & ir_builder_ ) ) ; <nl> + reduce - > shape ( ) . element_type ( ) , module_ ) ) ; <nl> ir_builder_ . CreateStore ( <nl> ir_builder_ . CreateLoad ( GetBasePointer ( * init_value ) ) , <nl> accumulator_addr ) ; <nl> Status IrEmitter : : HandleFusion ( HloInstruction * fusion ) { <nl> for ( HloInstruction * operand : fusion - > operands ( ) ) { <nl> parameter_arrays . push_back ( GetIrArray ( * operand ) ) ; <nl> } <nl> - GpuElementalIrEmitter elemental_emitter ( hlo_module_config_ , <nl> - ir_emitter_context_ - > llvm_module ( ) , <nl> + GpuElementalIrEmitter elemental_emitter ( hlo_module_config_ , module_ , <nl> & ir_builder_ , GetNestedComputer ( ) ) ; <nl> FusedIrEmitter fused_emitter ( parameter_arrays , & elemental_emitter ) ; <nl> TF_RETURN_IF_ERROR ( fusion - > fused_expression_root ( ) - > Accept ( & fused_emitter ) ) ; <nl> Status IrEmitter : : HandleRng ( HloInstruction * random , <nl> / / Emits a single - threaded loop because the loop body generated by the element <nl> / / generator for Rng can ' t be parallelized ( b / 32333178 ) . <nl> return llvm_ir : : LoopEmitter ( <nl> - GpuElementalIrEmitter ( hlo_module_config_ , <nl> - ir_emitter_context_ - > llvm_module ( ) , <nl> - & ir_builder_ , GetNestedComputer ( ) ) <nl> + GpuElementalIrEmitter ( hlo_module_config_ , module_ , & ir_builder_ , <nl> + GetNestedComputer ( ) ) <nl> . MakeElementGenerator ( random , operand_to_generator ) , <nl> GetIrArray ( * random ) , & ir_builder_ ) <nl> . EmitLoop ( IrName ( random ) ) ; <nl> StatusOr < llvm : : Value * > IrEmitter : : ComputeNestedElement ( <nl> tensorflow : : gtl : : ArraySlice < llvm : : Value * > parameter_elements ) { <nl> llvm : : Value * return_buffer = llvm_ir : : EmitAllocaAtFunctionEntry ( <nl> llvm_ir : : PrimitiveTypeToIrType ( <nl> - computation . root_instruction ( ) - > shape ( ) . element_type ( ) , & ir_builder_ ) , <nl> + computation . root_instruction ( ) - > shape ( ) . 
element_type ( ) , module_ ) , <nl> " return_buffer " , & ir_builder_ ) ; <nl> std : : vector < llvm : : Value * > parameter_buffers ; <nl> for ( llvm : : Value * parameter_element : parameter_elements ) { <nl> mmm a / tensorflow / compiler / xla / service / gpu / ir_emitter . h <nl> ppp b / tensorflow / compiler / xla / service / gpu / ir_emitter . h <nl> class IrEmitter : public DfsHloVisitorWithDefault { <nl> } <nl> <nl> IrEmitterContext * ir_emitter_context_ ; <nl> + llvm : : Module * module_ ; <nl> <nl> / / The following fields track the IR emission state . According to LLVM memory <nl> / / management rules , their memory is owned by the module . <nl> mmm a / tensorflow / compiler / xla / service / gpu / ir_emitter_nested . cc <nl> ppp b / tensorflow / compiler / xla / service / gpu / ir_emitter_nested . cc <nl> llvm : : Function * IrEmitterNested : : EmitBasePointersForNestedComputation ( <nl> io_hlos - > push_back ( param ) ; <nl> const Shape & param_shape = param - > shape ( ) ; <nl> argument_types . push_back ( <nl> - llvm_ir : : ShapeToIrType ( param_shape , & ir_builder_ ) - > getPointerTo ( ) ) ; <nl> - int64 param_size = llvm_ir : : ByteSizeOf ( <nl> - param_shape , ir_emitter_context_ - > llvm_module ( ) - > getDataLayout ( ) ) ; <nl> + llvm_ir : : ShapeToIrType ( param_shape , module_ ) - > getPointerTo ( ) ) ; <nl> + int64 param_size = <nl> + llvm_ir : : ByteSizeOf ( param_shape , module_ - > getDataLayout ( ) ) ; <nl> argument_dereferenceable_bytes . push_back ( param_size ) ; <nl> } <nl> { <nl> llvm : : Function * IrEmitterNested : : EmitBasePointersForNestedComputation ( <nl> io_hlos - > push_back ( root ) ; <nl> const Shape & root_shape = root - > shape ( ) ; <nl> argument_types . push_back ( <nl> - llvm_ir : : ShapeToIrType ( root_shape , & ir_builder_ ) - > getPointerTo ( ) ) ; <nl> + llvm_ir : : ShapeToIrType ( root_shape , module_ ) - > getPointerTo ( ) ) ; <nl> int64 root_size = llvm_ir : : ByteSizeOf ( <nl> root_shape , ir_emitter_context_ - > llvm_module ( ) - > getDataLayout ( ) ) ; <nl> argument_dereferenceable_bytes . push_back ( root_size ) ; <nl> mmm a / tensorflow / compiler / xla / service / gpu / ir_emitter_unnested . cc <nl> ppp b / tensorflow / compiler / xla / service / gpu / ir_emitter_unnested . cc <nl> Status IrEmitterUnnested : : EmitColumnReduction ( <nl> auto loop_body_emitter = <nl> [ = ] ( const llvm_ir : : IrArray : : Index & tile_index ) - > Status { <nl> / / Emit the loop body that reduces one tile . <nl> - llvm : : Type * element_ir_type = llvm_ir : : PrimitiveTypeToIrType ( <nl> - input_shape . element_type ( ) , & ir_builder_ ) ; <nl> + llvm : : Type * element_ir_type = <nl> + llvm_ir : : PrimitiveTypeToIrType ( input_shape . element_type ( ) , module_ ) ; <nl> llvm : : Value * partial_reduction_result_address = ir_builder_ . CreateAlloca ( <nl> element_ir_type , / * ArraySize = * / nullptr , " partial_reduction_result " ) ; <nl> { <nl> Status IrEmitterUnnested : : EmitRowReduction ( <nl> [ = ] ( const llvm_ir : : IrArray : : Index & tile_index ) - > Status { <nl> / / Emit the loop body that reduces one tile . <nl> llvm : : Type * element_ir_type = llvm_ir : : PrimitiveTypeToIrType ( <nl> - input_shape . element_type ( ) , & ir_builder_ ) ; <nl> + input_shape . element_type ( ) , ir_emitter_context_ - > llvm_module ( ) ) ; <nl> llvm : : Value * partial_reduction_result_address = ir_builder_ . 
CreateAlloca ( <nl> element_ir_type , / * ArraySize = * / nullptr , " partial_reduction_result " ) ; <nl> { <nl> Status IrEmitterUnnested : : HandleSelectAndScatter ( <nl> / / boolean flag if the value is initialized . The initialized_flag is set <nl> / / false . <nl> llvm : : Value * selected_value_address = llvm_ir : : EmitAllocaAtFunctionEntry ( <nl> - llvm_ir : : PrimitiveTypeToIrType ( operand_element_type , & ir_builder_ ) , <nl> + llvm_ir : : PrimitiveTypeToIrType ( operand_element_type , <nl> + ir_emitter_context_ - > llvm_module ( ) ) , <nl> " selected_value_address " , & ir_builder_ ) ; <nl> llvm : : Value * selected_index_address = <nl> llvm_ir : : EmitAllocaAtFunctionEntryWithCount ( <nl> Status IrEmitterUnnested : : HandleSelectAndScatter ( <nl> llvm : : Value * operand_address = <nl> operand_array . EmitArrayElementAddress ( operand_index , & ir_builder_ ) ; <nl> llvm : : Value * select_return_buffer = llvm_ir : : EmitAllocaAtFunctionEntry ( <nl> - llvm_ir : : PrimitiveTypeToIrType ( PRED , & ir_builder_ ) , <nl> + llvm_ir : : PrimitiveTypeToIrType ( PRED , <nl> + ir_emitter_context_ - > llvm_module ( ) ) , <nl> " select_return_buffer " , & ir_builder_ ) ; <nl> TF_RETURN_IF_ERROR ( EmitCallToNestedComputation ( <nl> * select_and_scatter - > select ( ) , <nl> Status IrEmitterUnnested : : HandleSelectAndScatter ( <nl> / / If the ' select ' function returns false , update the selected value and the <nl> / / index to the currently visiting operand . <nl> llvm : : Value * cond = ir_builder_ . CreateICmpNE ( <nl> - result , llvm : : ConstantInt : : get ( <nl> - llvm_ir : : PrimitiveTypeToIrType ( PRED , & ir_builder_ ) , 0 ) , <nl> + result , <nl> + llvm : : ConstantInt : : get ( llvm_ir : : PrimitiveTypeToIrType ( <nl> + PRED , ir_emitter_context_ - > llvm_module ( ) ) , <nl> + 0 ) , <nl> " boolean_predicate " ) ; <nl> llvm_ir : : LlvmIfData if_select_lhs = <nl> llvm_ir : : EmitIfThenElse ( cond , " if - select - lhs " , & ir_builder_ ) ; <nl> Status IrEmitterUnnested : : EmitTargetElementLoopInThunk ( <nl> tuple_operand_ptrs . push_back ( output_arrays [ i ] . GetBasePointer ( ) ) ; <nl> } <nl> ir_builder_ . SetInsertPoint ( ir_builder_ . GetInsertBlock ( ) - > getTerminator ( ) ) ; <nl> - llvm_ir : : EmitTuple ( GetIrArray ( hlo ) , tuple_operand_ptrs , & ir_builder_ ) ; <nl> + llvm_ir : : EmitTuple ( GetIrArray ( hlo ) , tuple_operand_ptrs , & ir_builder_ , <nl> + module_ ) ; <nl> return Status : : OK ( ) ; <nl> } <nl> <nl> mmm a / tensorflow / compiler / xla / service / hlo_constant_folding . cc <nl> ppp b / tensorflow / compiler / xla / service / hlo_constant_folding . cc <nl> StatusOr < bool > HloConstantFolding : : Run ( HloModule * module ) { <nl> continue ; <nl> } <nl> / / Skip Constant , Parameter , Reduce operation . <nl> - / / TODO ( b / 35975797 ) : Enable Reduce operation once arbitary computation are <nl> - / / supported by the evaluator . <nl> + / / TODO ( b / 35975797 ) : Enable Reduce operation once arbitrary computations <nl> + / / are supported by the evaluator . <nl> / / TODO ( b / 64407269 ) : Enable Tuple once the timeout issue is resolved . <nl> if ( instruction - > opcode ( ) = = HloOpcode : : kParameter | | <nl> instruction - > opcode ( ) = = HloOpcode : : kConstant | | <nl> mmm a / tensorflow / compiler / xla / service / hlo_evaluator . cc <nl> ppp b / tensorflow / compiler / xla / service / hlo_evaluator . 
cc <nl> namespace xla { <nl> <nl> namespace { <nl> <nl> + template < typename T > <nl> + struct is_complex_t : public std : : false_type { } ; <nl> + <nl> + template < > <nl> + struct is_complex_t < complex64 > : public std : : true_type { } ; <nl> + <nl> template < typename OperandT > <nl> StatusOr < std : : unique_ptr < Literal > > Compare ( const Shape & shape , HloOpcode opcode , <nl> const Literal & lhs_literal , <nl> StatusOr < std : : unique_ptr < Literal > > Compare ( const Shape & shape , HloOpcode opcode , <nl> return std : : move ( result ) ; <nl> } <nl> <nl> + template < > <nl> + StatusOr < std : : unique_ptr < Literal > > Compare < complex64 > ( <nl> + const Shape & shape , HloOpcode opcode , const Literal & lhs_literal , <nl> + const Literal & rhs_literal ) { <nl> + std : : function < bool ( complex64 , complex64 ) > compare_op ; <nl> + switch ( opcode ) { <nl> + case HloOpcode : : kEq : <nl> + compare_op = [ ] ( complex64 lhs_el , complex64 rhs_el ) { <nl> + return lhs_el = = rhs_el ; <nl> + } ; <nl> + break ; <nl> + case HloOpcode : : kNe : <nl> + compare_op = [ ] ( complex64 lhs_el , complex64 rhs_el ) { <nl> + return lhs_el ! = rhs_el ; <nl> + } ; <nl> + break ; <nl> + default : <nl> + LOG ( FATAL ) < < " unhandled HLO opcode for conversion to Comparison : " <nl> + < < HloOpcodeString ( opcode ) ; <nl> + } <nl> + <nl> + auto result = Literal : : CreateFromShape ( shape ) ; <nl> + TF_RETURN_IF_ERROR ( result - > Populate < bool > ( <nl> + [ & ] ( tensorflow : : gtl : : ArraySlice < int64 > multi_index ) { <nl> + return compare_op ( lhs_literal . Get < complex64 > ( multi_index ) , <nl> + rhs_literal . Get < complex64 > ( multi_index ) ) ; <nl> + } ) ) ; <nl> + <nl> + return std : : move ( result ) ; <nl> + } <nl> + <nl> template < typename ReturnT , typename NativeT > <nl> StatusOr < std : : unique_ptr < Literal > > ElementWiseUnaryOpImpl ( <nl> HloInstruction * instruction , <nl> class HloEvaluator : : TypedVisitor : public DfsHloVisitorWithDefault { <nl> Status DefaultAction ( HloInstruction * hlo_instruction ) override { <nl> return Unimplemented ( " unhandled HLO ops for HloEvaluator : % s . " , <nl> HloOpcodeString ( hlo_instruction - > opcode ( ) ) . c_str ( ) ) ; <nl> - } ; <nl> + } <nl> <nl> / / TODO ( b / 35950897 ) : many of the stl functions used in the handlers are not <nl> / / overloaded for every XLA primitive types . <nl> class HloEvaluator : : TypedVisitor : public DfsHloVisitorWithDefault { <nl> <nl> template < <nl> typename NativeT , <nl> - typename std : : enable_if < std : : is_signed < NativeT > : : value > : : type * = nullptr > <nl> + typename std : : enable_if < std : : is_signed < NativeT > : : value | | <nl> + is_complex_t < NativeT > : : value > : : type * = nullptr > <nl> Status HandleAbs ( HloInstruction * abs , HloInstruction * operand ) { <nl> TF_ASSIGN_OR_RETURN ( parent_ - > evaluated_ [ abs ] , <nl> ElementWiseUnaryOp ( abs , [ ] ( NativeT elem_operand ) { <nl> class HloEvaluator : : TypedVisitor : public DfsHloVisitorWithDefault { <nl> return HandleAbs < ReturnT > ( abs , operand ) ; <nl> } <nl> <nl> - Status HandleRound ( HloInstruction * round ) override { <nl> + template < <nl> + typename NativeT , <nl> + typename std : : enable_if < ! 
is_complex_t < NativeT > : : value > : : type * = nullptr > <nl> + Status HandleRound ( HloInstruction * round ) { <nl> TF_ASSIGN_OR_RETURN ( parent_ - > evaluated_ [ round ] , <nl> ElementWiseUnaryOp ( round , [ ] ( ReturnT elem_operand ) { <nl> return std : : round ( elem_operand ) ; <nl> class HloEvaluator : : TypedVisitor : public DfsHloVisitorWithDefault { <nl> return Status : : OK ( ) ; <nl> } <nl> <nl> + template < <nl> + typename NativeT , <nl> + typename std : : enable_if < is_complex_t < NativeT > : : value > : : type * = nullptr > <nl> + Status HandleRound ( HloInstruction * round ) { <nl> + return InvalidArgument ( " Unsupported type for Round " ) ; <nl> + } <nl> + <nl> + Status HandleRound ( HloInstruction * round ) override { <nl> + return HandleRound < ReturnT > ( round ) ; <nl> + } <nl> + <nl> Status HandleBroadcast ( HloInstruction * broadcast ) override { <nl> parent_ - > evaluated_ [ broadcast ] = <nl> Literal : : CreateFromShape ( broadcast - > shape ( ) ) ; <nl> class HloEvaluator : : TypedVisitor : public DfsHloVisitorWithDefault { <nl> } <nl> return operand_to_broadcast . Get < ReturnT > ( broadcast_indices ) ; <nl> } ) ; <nl> - } ; <nl> + } <nl> <nl> - Status HandleCeil ( HloInstruction * ceil , HloInstruction * operand ) override { <nl> + template < <nl> + typename NativeT , <nl> + typename std : : enable_if < ! is_complex_t < NativeT > : : value > : : type * = nullptr > <nl> + Status HandleCeil ( HloInstruction * ceil ) { <nl> TF_ASSIGN_OR_RETURN ( parent_ - > evaluated_ [ ceil ] , <nl> ElementWiseUnaryOp ( ceil , [ ] ( ReturnT elem_operand ) { <nl> return std : : ceil ( elem_operand ) ; <nl> } ) ) ; <nl> return Status : : OK ( ) ; <nl> - } ; <nl> + } <nl> + <nl> + template < <nl> + typename NativeT , <nl> + typename std : : enable_if < is_complex_t < NativeT > : : value > : : type * = nullptr > <nl> + Status HandleCeil ( HloInstruction * ceil ) { <nl> + return InvalidArgument ( " Unsupported type for Ceil " ) ; <nl> + } <nl> + <nl> + Status HandleCeil ( HloInstruction * ceil , HloInstruction * operand ) override { <nl> + return HandleCeil < ReturnT > ( ceil ) ; <nl> + } <nl> <nl> Status HandleConvert ( HloInstruction * convert ) override { <nl> const HloInstruction * operand = convert - > operand ( 0 ) ; <nl> class HloEvaluator : : TypedVisitor : public DfsHloVisitorWithDefault { <nl> return std : : exp ( elem_operand ) ; <nl> } ) ) ; <nl> return Status : : OK ( ) ; <nl> - } ; <nl> + } <nl> <nl> - Status HandleFloor ( HloInstruction * floor , HloInstruction * operand ) override { <nl> + template < <nl> + typename NativeT , <nl> + typename std : : enable_if < ! 
is_complex_t < NativeT > : : value > : : type * = nullptr > <nl> + Status HandleFloor ( HloInstruction * floor ) { <nl> TF_ASSIGN_OR_RETURN ( parent_ - > evaluated_ [ floor ] , <nl> ElementWiseUnaryOp ( floor , [ ] ( ReturnT elem_operand ) { <nl> return std : : floor ( elem_operand ) ; <nl> } ) ) ; <nl> return Status : : OK ( ) ; <nl> - } ; <nl> + } <nl> + <nl> + template < <nl> + typename NativeT , <nl> + typename std : : enable_if < is_complex_t < NativeT > : : value > : : type * = nullptr > <nl> + Status HandleFloor ( HloInstruction * floor ) { <nl> + return InvalidArgument ( " Unsupported type for Floor " ) ; <nl> + } <nl> + <nl> + Status HandleFloor ( HloInstruction * floor , HloInstruction * operand ) override { <nl> + return HandleFloor < ReturnT > ( floor ) ; <nl> + } <nl> <nl> Status HandleLog ( HloInstruction * log , HloInstruction * operand ) override { <nl> TF_ASSIGN_OR_RETURN ( parent_ - > evaluated_ [ log ] , <nl> class HloEvaluator : : TypedVisitor : public DfsHloVisitorWithDefault { <nl> return std : : log ( elem_operand ) ; <nl> } ) ) ; <nl> return Status : : OK ( ) ; <nl> - } ; <nl> + } <nl> <nl> - Status HandleNot ( HloInstruction * not_ , HloInstruction * operand ) override { <nl> + template < <nl> + typename NativeT , <nl> + typename std : : enable_if < ! is_complex_t < NativeT > : : value > : : type * = nullptr > <nl> + Status HandleNot ( HloInstruction * not_ ) { <nl> TF_ASSIGN_OR_RETURN ( parent_ - > evaluated_ [ not_ ] , <nl> ElementWiseUnaryOp ( not_ , [ ] ( ReturnT elem_operand ) { <nl> return ! elem_operand ; <nl> } ) ) ; <nl> return Status : : OK ( ) ; <nl> - } ; <nl> + } <nl> + <nl> + template < <nl> + typename NativeT , <nl> + typename std : : enable_if < is_complex_t < NativeT > : : value > : : type * = nullptr > <nl> + Status HandleNot ( HloInstruction * not_ ) { <nl> + return InvalidArgument ( " Unsupported type for Not " ) ; <nl> + } <nl> + <nl> + Status HandleNot ( HloInstruction * not_ , HloInstruction * operand ) override { <nl> + return HandleNot < ReturnT > ( not_ ) ; <nl> + } <nl> <nl> Status HandleNegate ( HloInstruction * negate , <nl> HloInstruction * operand ) override { <nl> class HloEvaluator : : TypedVisitor : public DfsHloVisitorWithDefault { <nl> return - elem_operand ; <nl> } ) ) ; <nl> return Status : : OK ( ) ; <nl> - } ; <nl> + } <nl> <nl> - Status HandleSign ( HloInstruction * sign , HloInstruction * operand ) override { <nl> + template < <nl> + typename NativeT , <nl> + typename std : : enable_if < ! is_complex_t < NativeT > : : value > : : type * = nullptr > <nl> + Status HandleSign ( HloInstruction * sign ) { <nl> TF_ASSIGN_OR_RETURN ( parent_ - > evaluated_ [ sign ] , <nl> ElementWiseUnaryOp ( sign , [ ] ( ReturnT elem_operand ) { <nl> return ( ReturnT ( 0 ) < elem_operand ) - <nl> ( elem_operand < ReturnT ( 0 ) ) ; <nl> } ) ) ; <nl> return Status : : OK ( ) ; <nl> - } ; <nl> + } <nl> + <nl> + template < <nl> + typename NativeT , <nl> + typename std : : enable_if < is_complex_t < NativeT > : : value > : : type * = nullptr > <nl> + Status HandleSign ( HloInstruction * sign ) { <nl> + TF_ASSIGN_OR_RETURN ( parent_ - > evaluated_ [ sign ] , <nl> + ElementWiseUnaryOp ( sign , [ ] ( ReturnT elem_operand ) { <nl> + auto abs_val = std : : abs ( elem_operand ) ; <nl> + return 0 = = abs_val ? 
ReturnT ( 0 ) <nl> + : elem_operand / abs_val ; <nl> + } ) ) ; <nl> + return Status : : OK ( ) ; <nl> + } <nl> + <nl> + Status HandleSign ( HloInstruction * sign , HloInstruction * operand ) override { <nl> + return HandleSign < ReturnT > ( sign ) ; <nl> + } <nl> <nl> Status HandleTanh ( HloInstruction * tanh , HloInstruction * operand ) override { <nl> TF_ASSIGN_OR_RETURN ( parent_ - > evaluated_ [ tanh ] , <nl> class HloEvaluator : : TypedVisitor : public DfsHloVisitorWithDefault { <nl> return std : : tanh ( elem_operand ) ; <nl> } ) ) ; <nl> return Status : : OK ( ) ; <nl> - } ; <nl> + } <nl> <nl> Status HandleMultiply ( HloInstruction * multiply , HloInstruction * lhs , <nl> HloInstruction * rhs ) override { <nl> class HloEvaluator : : TypedVisitor : public DfsHloVisitorWithDefault { <nl> return lhs_elem * rhs_elem ; <nl> } ) ) ; <nl> return Status : : OK ( ) ; <nl> - } ; <nl> + } <nl> <nl> Status HandleSubtract ( HloInstruction * subtract , HloInstruction * lhs , <nl> HloInstruction * rhs ) override { <nl> class HloEvaluator : : TypedVisitor : public DfsHloVisitorWithDefault { <nl> return lhs_elem - rhs_elem ; <nl> } ) ) ; <nl> return Status : : OK ( ) ; <nl> - } ; <nl> + } <nl> <nl> Status HandleAdd ( HloInstruction * add , HloInstruction * lhs , <nl> HloInstruction * rhs ) override { <nl> class HloEvaluator : : TypedVisitor : public DfsHloVisitorWithDefault { <nl> return lhs_elem + rhs_elem ; <nl> } ) ) ; <nl> return Status : : OK ( ) ; <nl> - } ; <nl> + } <nl> <nl> Status HandleDivide ( HloInstruction * divide , HloInstruction * lhs , <nl> HloInstruction * rhs ) override { <nl> class HloEvaluator : : TypedVisitor : public DfsHloVisitorWithDefault { <nl> return lhs_elem / rhs_elem ; <nl> } ) ) ; <nl> return Status : : OK ( ) ; <nl> - } ; <nl> + } <nl> <nl> - Status HandleMaximum ( HloInstruction * maximum ) override { <nl> + template < <nl> + typename NativeT , <nl> + typename std : : enable_if < ! is_complex_t < NativeT > : : value > : : type * = nullptr > <nl> + Status HandleMaximum ( HloInstruction * maximum ) { <nl> TF_ASSIGN_OR_RETURN ( <nl> parent_ - > evaluated_ [ maximum ] , <nl> ElementWiseBinaryOp ( maximum , [ ] ( ReturnT lhs , ReturnT rhs ) { <nl> return std : : fmax ( lhs , rhs ) ; <nl> } ) ) ; <nl> return Status : : OK ( ) ; <nl> - } ; <nl> + } <nl> <nl> - Status HandleMinimum ( HloInstruction * minimum ) override { <nl> + template < <nl> + typename NativeT , <nl> + typename std : : enable_if < is_complex_t < NativeT > : : value > : : type * = nullptr > <nl> + Status HandleMaximum ( HloInstruction * maximum ) { <nl> + return InvalidArgument ( " Unsupported type for Maximum " ) ; <nl> + } <nl> + <nl> + Status HandleMaximum ( HloInstruction * maximum ) override { <nl> + return HandleMaximum < ReturnT > ( maximum ) ; <nl> + } <nl> + <nl> + template < <nl> + typename NativeT , <nl> + typename std : : enable_if < ! 
is_complex_t < NativeT > : : value > : : type * = nullptr > <nl> + Status HandleMinimum ( HloInstruction * minimum ) { <nl> TF_ASSIGN_OR_RETURN ( <nl> parent_ - > evaluated_ [ minimum ] , <nl> ElementWiseBinaryOp ( minimum , [ ] ( ReturnT lhs_el , ReturnT rhs_el ) { <nl> return std : : fmin ( lhs_el , rhs_el ) ; <nl> } ) ) ; <nl> return Status : : OK ( ) ; <nl> - } ; <nl> + } <nl> + <nl> + template < <nl> + typename NativeT , <nl> + typename std : : enable_if < is_complex_t < NativeT > : : value > : : type * = nullptr > <nl> + Status HandleMinimum ( HloInstruction * minimum ) { <nl> + return InvalidArgument ( " Unsupported type for Minimum " ) ; <nl> + } <nl> + <nl> + Status HandleMinimum ( HloInstruction * minimum ) override { <nl> + return HandleMinimum < ReturnT > ( minimum ) ; <nl> + } <nl> <nl> Status HandlePower ( HloInstruction * power , HloInstruction * lhs , <nl> HloInstruction * rhs ) override { <nl> class HloEvaluator : : TypedVisitor : public DfsHloVisitorWithDefault { <nl> return std : : pow ( lhs_el , rhs_el ) ; <nl> } ) ) ; <nl> return Status : : OK ( ) ; <nl> - } ; <nl> + } <nl> <nl> - Status HandleRemainder ( HloInstruction * remainder , HloInstruction * lhs , <nl> - HloInstruction * rhs ) override { <nl> + template < <nl> + typename NativeT , <nl> + typename std : : enable_if < ! is_complex_t < NativeT > : : value > : : type * = nullptr > <nl> + Status HandleRemainder ( HloInstruction * remainder ) { <nl> TF_ASSIGN_OR_RETURN ( <nl> parent_ - > evaluated_ [ remainder ] , <nl> ElementWiseBinaryOp ( remainder , [ ] ( ReturnT lhs_el , ReturnT rhs_el ) { <nl> return std : : fmod ( lhs_el , rhs_el ) ; <nl> } ) ) ; <nl> return Status : : OK ( ) ; <nl> - } ; <nl> + } <nl> <nl> - Status HandleAnd ( HloInstruction * and_ , HloInstruction * lhs , <nl> - HloInstruction * rhs ) override { <nl> + template < <nl> + typename NativeT , <nl> + typename std : : enable_if < is_complex_t < NativeT > : : value > : : type * = nullptr > <nl> + Status HandleRemainder ( HloInstruction * remainder ) { <nl> + return InvalidArgument ( " Unsupported type for Remainder " ) ; <nl> + } <nl> + <nl> + Status HandleRemainder ( HloInstruction * remainder , HloInstruction * lhs , <nl> + HloInstruction * rhs ) override { <nl> + return HandleRemainder < ReturnT > ( remainder ) ; <nl> + } <nl> + <nl> + template < <nl> + typename NativeT , <nl> + typename std : : enable_if < ! is_complex_t < NativeT > : : value > : : type * = nullptr > <nl> + Status HandleAnd ( HloInstruction * and_ ) { <nl> TF_ASSIGN_OR_RETURN ( <nl> parent_ - > evaluated_ [ and_ ] , <nl> ElementWiseBinaryOp ( and_ , [ ] ( ReturnT lhs_el , ReturnT rhs_el ) { <nl> return lhs_el & & rhs_el ; <nl> } ) ) ; <nl> return Status : : OK ( ) ; <nl> - } ; <nl> + } <nl> <nl> - Status HandleOr ( HloInstruction * or_ , HloInstruction * lhs , <nl> - HloInstruction * rhs ) override { <nl> + template < <nl> + typename NativeT , <nl> + typename std : : enable_if < is_complex_t < NativeT > : : value > : : type * = nullptr > <nl> + Status HandleAnd ( HloInstruction * and_ ) { <nl> + return InvalidArgument ( " Unsupported type for And " ) ; <nl> + } <nl> + <nl> + Status HandleAnd ( HloInstruction * and_ , HloInstruction * lhs , <nl> + HloInstruction * rhs ) override { <nl> + return HandleAnd < ReturnT > ( and_ ) ; <nl> + } <nl> + <nl> + template < <nl> + typename NativeT , <nl> + typename std : : enable_if < ! 
is_complex_t < NativeT > : : value > : : type * = nullptr > <nl> + Status HandleOr ( HloInstruction * or_ ) { <nl> TF_ASSIGN_OR_RETURN ( <nl> parent_ - > evaluated_ [ or_ ] , <nl> ElementWiseBinaryOp ( or_ , [ ] ( ReturnT lhs_el , ReturnT rhs_el ) { <nl> return lhs_el | | rhs_el ; <nl> } ) ) ; <nl> return Status : : OK ( ) ; <nl> - } ; <nl> + } <nl> + <nl> + template < <nl> + typename NativeT , <nl> + typename std : : enable_if < is_complex_t < NativeT > : : value > : : type * = nullptr > <nl> + Status HandleOr ( HloInstruction * or_ ) { <nl> + return InvalidArgument ( " Unsupported type for Or " ) ; <nl> + } <nl> + <nl> + Status HandleOr ( HloInstruction * or_ , HloInstruction * lhs , <nl> + HloInstruction * rhs ) override { <nl> + return HandleOr < ReturnT > ( or_ ) ; <nl> + } <nl> <nl> template < typename NativeT , <nl> typename std : : enable_if < <nl> class HloEvaluator : : TypedVisitor : public DfsHloVisitorWithDefault { <nl> return HandleShiftRightLogical < ReturnT > ( shrl , lhs , rhs ) ; <nl> } <nl> <nl> + template < <nl> + typename NativeT , <nl> + typename std : : enable_if < ! is_complex_t < NativeT > : : value > : : type * = nullptr > <nl> Status HandleClamp ( HloInstruction * clamp , HloInstruction * min , <nl> - HloInstruction * arg , HloInstruction * max ) override { <nl> + HloInstruction * arg , HloInstruction * max ) { <nl> std : : function < ReturnT ( ReturnT , ReturnT , ReturnT ) > clamp_op = <nl> [ ] ( ReturnT low , ReturnT high , ReturnT value ) { <nl> return std : : fmax ( low , std : : fmin ( value , high ) ) ; <nl> class HloEvaluator : : TypedVisitor : public DfsHloVisitorWithDefault { <nl> TF_ASSIGN_OR_RETURN ( parent_ - > evaluated_ [ clamp ] , <nl> ElementWiseTernaryOp ( clamp , std : : move ( clamp_op ) ) ) ; <nl> return Status : : OK ( ) ; <nl> - } ; <nl> + } <nl> + <nl> + template < <nl> + typename NativeT , <nl> + typename std : : enable_if < is_complex_t < NativeT > : : value > : : type * = nullptr > <nl> + Status HandleClamp ( HloInstruction * clamp , HloInstruction * min , <nl> + HloInstruction * arg , HloInstruction * max ) { <nl> + return InvalidArgument ( " Unsupported type for Clamp " ) ; <nl> + } <nl> + <nl> + Status HandleClamp ( HloInstruction * clamp , HloInstruction * min , <nl> + HloInstruction * arg , HloInstruction * max ) override { <nl> + return HandleClamp < ReturnT > ( clamp , min , arg , max ) ; <nl> + } <nl> <nl> Status HandleSelect ( HloInstruction * select , HloInstruction * pred , <nl> HloInstruction * on_true , <nl> class HloEvaluator : : TypedVisitor : public DfsHloVisitorWithDefault { <nl> TF_ASSIGN_OR_RETURN ( parent_ - > evaluated_ [ select ] , <nl> ElementWiseTernaryOp ( select , std : : move ( select_op ) ) ) ; <nl> return Status : : OK ( ) ; <nl> - } ; <nl> + } <nl> <nl> Status HandleReverse ( HloInstruction * reverse , <nl> HloInstruction * operand ) override { <nl> class HloEvaluator : : TypedVisitor : public DfsHloVisitorWithDefault { <nl> <nl> parent_ - > evaluated_ [ reverse ] = std : : move ( result ) ; <nl> return Status : : OK ( ) ; <nl> - } ; <nl> + } <nl> <nl> Status HandleConvolution ( HloInstruction * conv , HloInstruction * lhs , <nl> HloInstruction * rhs , const Window & window ) override { <nl> class HloEvaluator : : TypedVisitor : public DfsHloVisitorWithDefault { <nl> <nl> parent_ - > evaluated_ [ conv ] = std : : move ( result ) ; <nl> return Status : : OK ( ) ; <nl> - } ; <nl> + } <nl> <nl> Status HandleDot ( HloInstruction * dot , HloInstruction * lhs , <nl> HloInstruction * rhs ) override { <nl> class 
HloEvaluator : : TypedVisitor : public DfsHloVisitorWithDefault { <nl> <nl> parent_ - > evaluated_ [ dot ] = std : : move ( result ) ; <nl> return Status : : OK ( ) ; <nl> - } ; <nl> + } <nl> <nl> Status HandlePad ( HloInstruction * pad ) override { <nl> CHECK ( ! ShapeUtil : : IsTuple ( pad - > operand ( 0 ) - > shape ( ) ) ) ; <nl> class HloEvaluator : : TypedVisitor : public DfsHloVisitorWithDefault { <nl> <nl> parent_ - > evaluated_ [ pad ] = std : : move ( result ) ; <nl> return Status : : OK ( ) ; <nl> - } ; <nl> + } <nl> <nl> Status HandleDynamicSlice ( HloInstruction * dynamic_slice , <nl> HloInstruction * operand , <nl> class HloEvaluator : : TypedVisitor : public DfsHloVisitorWithDefault { <nl> } <nl> <nl> return Status : : OK ( ) ; <nl> - } ; <nl> + } <nl> <nl> Status HandleDynamicUpdateSlice ( HloInstruction * dynamic_update_slice , <nl> HloInstruction * operand , <nl> class HloEvaluator : : TypedVisitor : public DfsHloVisitorWithDefault { <nl> } <nl> <nl> return Status : : OK ( ) ; <nl> - } ; <nl> + } <nl> <nl> Status HandleReduce ( HloInstruction * reduce , HloInstruction * arg , <nl> HloInstruction * init_value , <nl> class HloEvaluator : : TypedVisitor : public DfsHloVisitorWithDefault { <nl> <nl> parent_ - > evaluated_ [ reduce ] = std : : move ( result ) ; <nl> return Status : : OK ( ) ; <nl> - } ; <nl> + } <nl> <nl> Status HandleReduceWindow ( HloInstruction * reduce_window , <nl> HloInstruction * operand , const Window & window , <nl> class HloEvaluator : : TypedVisitor : public DfsHloVisitorWithDefault { <nl> <nl> parent_ - > evaluated_ [ reduce_window ] = std : : move ( result ) ; <nl> return Status : : OK ( ) ; <nl> - } ; <nl> + } <nl> <nl> Status HandleSlice ( HloInstruction * slice , HloInstruction * operand ) override { <nl> const Shape & shape = slice - > shape ( ) ; <nl> class HloEvaluator : : TypedVisitor : public DfsHloVisitorWithDefault { <nl> TF_RETURN_IF_ERROR ( result - > Populate < ReturnT > ( func ) ) ; <nl> parent_ - > evaluated_ [ slice ] = std : : move ( result ) ; <nl> return Status : : OK ( ) ; <nl> - } ; <nl> + } <nl> <nl> private : <nl> template < typename IndexT > <nl> class HloEvaluator : : TypedVisitor : public DfsHloVisitorWithDefault { <nl> } <nl> <nl> HloEvaluator * parent_ ; <nl> - } ; / / namespace xla <nl> + } ; / / class HloEvaluator : : TypedVisitor <nl> <nl> HloEvaluator : : HloEvaluator ( ) { <nl> typed_visitors_ [ PRED ] = MakeUnique < TypedVisitor < bool > > ( this ) ; <nl> typed_visitors_ [ U8 ] = MakeUnique < TypedVisitor < uint8 > > ( this ) ; <nl> typed_visitors_ [ U16 ] = MakeUnique < FunctionVisitor > ( [ ] ( HloInstruction * ) { <nl> - return Unimplemented ( " unhandled primitive type : U16 . " ) ; <nl> + return Unimplemented ( " HloEvaluator : unhandled primitive type : U16 . " ) ; <nl> } ) ; <nl> typed_visitors_ [ U32 ] = MakeUnique < TypedVisitor < uint32 > > ( this ) ; <nl> typed_visitors_ [ U64 ] = MakeUnique < TypedVisitor < uint64 > > ( this ) ; <nl> typed_visitors_ [ S8 ] = MakeUnique < TypedVisitor < int8 > > ( this ) ; <nl> typed_visitors_ [ S16 ] = MakeUnique < FunctionVisitor > ( [ ] ( HloInstruction * ) { <nl> - return Unimplemented ( " unhandled primitive type : S16 . " ) ; <nl> + return Unimplemented ( " HloEvaluator : unhandled primitive type : S16 . 
" ) ; <nl> } ) ; <nl> typed_visitors_ [ S32 ] = MakeUnique < TypedVisitor < int32 > > ( this ) ; <nl> typed_visitors_ [ S64 ] = MakeUnique < TypedVisitor < int64 > > ( this ) ; <nl> typed_visitors_ [ F16 ] = MakeUnique < FunctionVisitor > ( [ ] ( HloInstruction * ) { <nl> - return Unimplemented ( " unhandled primitive type : F16 . " ) ; <nl> + return Unimplemented ( " HloEvaluator : unhandled primitive type : F16 . " ) ; <nl> } ) ; <nl> typed_visitors_ [ F32 ] = MakeUnique < TypedVisitor < float > > ( this ) ; <nl> typed_visitors_ [ F64 ] = MakeUnique < TypedVisitor < double > > ( this ) ; <nl> - typed_visitors_ [ C64 ] = MakeUnique < FunctionVisitor > ( [ ] ( HloInstruction * ) { <nl> - return Unimplemented ( " unhandled primitive type : C64 . " ) ; <nl> - } ) ; <nl> + typed_visitors_ [ C64 ] = MakeUnique < TypedVisitor < complex64 > > ( this ) ; <nl> typed_visitors_ [ TUPLE ] = MakeUnique < FunctionVisitor > ( [ ] ( HloInstruction * ) { <nl> - return Unimplemented ( " unhandled primitive type : TUPLE . " ) ; <nl> + return Unimplemented ( " HloEvaluator : unhandled primitive type : TUPLE . " ) ; <nl> } ) ; <nl> typed_visitors_ [ OPAQUE ] = MakeUnique < FunctionVisitor > ( [ ] ( HloInstruction * ) { <nl> - return Unimplemented ( " unhandled primitive type : OPAQUE . " ) ; <nl> + return Unimplemented ( " HloEvaluator : unhandled primitive type : OPAQUE . " ) ; <nl> } ) ; <nl> } <nl> <nl> Status HloEvaluator : : HandleCompare ( HloInstruction * compare , HloOpcode opcode , <nl> evaluated_ [ compare ] , <nl> Compare < double > ( compare - > shape ( ) , opcode , lhs_literal , rhs_literal ) ) ; <nl> } break ; <nl> + case C64 : { <nl> + TF_ASSIGN_OR_RETURN ( evaluated_ [ compare ] , <nl> + Compare < complex64 > ( compare - > shape ( ) , opcode , <nl> + lhs_literal , rhs_literal ) ) ; <nl> + } break ; <nl> default : <nl> LOG ( FATAL ) < < " HandleCompare : unknown primitive type : " <nl> < < PrimitiveType_Name ( lhs - > shape ( ) . element_type ( ) ) ; <nl> mmm a / tensorflow / compiler / xla / service / hlo_graph_dumper . cc <nl> ppp b / tensorflow / compiler / xla / service / hlo_graph_dumper . cc <nl> class HloDotDumper { <nl> <nl> bool ShouldShowSubcomputation ( const HloComputation * subcomp ) ; <nl> bool ShouldShowFusionSubcomputation ( const HloInstruction * instr ) ; <nl> + <nl> + / / We omit some nodes from the graph , instead drawing them inlined into the <nl> + / / nodes that use them . <nl> + bool ShouldMergeIntoUsers ( const HloInstruction * instr ) const ; <nl> + <nl> string DumpSubcomputation ( const HloComputation * subcomp , <nl> const HloInstruction * parent_instr ) ; <nl> string DumpComputation ( const HloComputation * comp ) ; <nl> class HloDotDumper { <nl> string GetInstructionNodeLabel ( const HloInstruction * instr ) ; <nl> string GetInstructionNodeMetadata ( const HloInstruction * instr ) ; <nl> string GetInstructionNodeExtraInfo ( const HloInstruction * instr ) ; <nl> - string GetInstructionNodeInlinedConstants ( const HloInstruction * instr ) ; <nl> + string GetInstructionNodeInlinedOperands ( const HloInstruction * instr ) ; <nl> void AddInstructionIncomingEdges ( const HloInstruction * instr ) ; <nl> <nl> / / If instr has just one computation and it ' s trivial ( e . g . 
" return param0 + <nl> string HloDotDumper : : DumpRootTag ( ) { <nl> to_id , node_body , node_shape , NodeColorAttributes ( color ) ) ; <nl> } <nl> <nl> + bool HloDotDumper : : ShouldMergeIntoUsers ( const HloInstruction * instr ) const { <nl> + / / If a node : <nl> + / / <nl> + / / - is a tuple - shaped parameter , <nl> + / / - is not a parameter to a fusion node , <nl> + / / - has at least kMinUsersToOmit users shown , and <nl> + / / - all of the shown users are get - tuple - elements , <nl> + / / <nl> + / / then we omit it from the graph , merging it with its users . <nl> + / / <nl> + / / This helps us handle the common case where a while loop body has one big <nl> + / / tuple - shaped parameter . <nl> + const int kMinUsersToOmit = 3 ; <nl> + return instr - > opcode ( ) = = HloOpcode : : kParameter & & <nl> + ShapeUtil : : IsTuple ( instr - > shape ( ) ) & & ! instr - > IsFused ( ) & & <nl> + std : : count_if ( instr - > users ( ) . begin ( ) , instr - > users ( ) . end ( ) , <nl> + [ & ] ( const HloInstruction * user ) { <nl> + return filter_ . Show ( user ) ; <nl> + } ) > kMinUsersToOmit & & <nl> + std : : all_of ( instr - > users ( ) . begin ( ) , instr - > users ( ) . end ( ) , <nl> + [ & ] ( const HloInstruction * user ) { <nl> + return ! filter_ . Show ( user ) | | <nl> + user - > opcode ( ) = = HloOpcode : : kGetTupleElement ; <nl> + } ) ; <nl> + } <nl> + <nl> string HloDotDumper : : DumpInstruction ( const HloInstruction * instr ) { <nl> / / We don ' t display constants as separate nodes ; they ' re merged into their <nl> / / users . <nl> if ( instr - > opcode ( ) = = HloOpcode : : kConstant ) { <nl> return " " ; <nl> } <nl> + / / Skip this node if it ' s merged into its users . <nl> + if ( ShouldMergeIntoUsers ( instr ) ) { <nl> + return " " ; <nl> + } <nl> / / Omit the fusion node if its subcomputation is drawn , since the <nl> / / subcomputation will be drawn inline . <nl> if ( instr - > opcode ( ) = = HloOpcode : : kFusion & & <nl> string HloDotDumper : : DumpInstruction ( const HloInstruction * instr ) { <nl> string node_label = GetInstructionNodeLabel ( instr ) ; <nl> string node_metadata = GetInstructionNodeMetadata ( instr ) ; <nl> string extra_info = GetInstructionNodeExtraInfo ( instr ) ; <nl> - string inlined_constants = GetInstructionNodeInlinedConstants ( instr ) ; <nl> + string inlined_constants = GetInstructionNodeInlinedOperands ( instr ) ; <nl> string trivial_subcomputation = GetInstructionTrivialComputationStr ( instr ) ; <nl> AddInstructionIncomingEdges ( instr ) ; <nl> <nl> string HloDotDumper : : DumpInstruction ( const HloInstruction * instr ) { <nl> NodeColorAttributes ( color ) ) ; <nl> } <nl> <nl> - string HloDotDumper : : GetInstructionNodeInlinedConstants ( <nl> + string HloDotDumper : : GetInstructionNodeInlinedOperands ( <nl> const HloInstruction * instr ) { <nl> auto stringify_constant = [ ] ( const HloInstruction * constant ) { <nl> if ( ShapeUtil : : IsEffectiveScalar ( constant - > shape ( ) ) ) { <nl> string HloDotDumper : : GetInstructionNodeInlinedConstants ( <nl> std : : vector < string > lines ; <nl> for ( int64 i = 0 ; i < instr - > operand_count ( ) ; + + i ) { <nl> const HloInstruction * operand = instr - > operand ( i ) ; <nl> - if ( operand - > opcode ( ) ! 
= HloOpcode : : kConstant ) { <nl> - continue ; <nl> + optional < string > operand_str ; <nl> + if ( operand - > opcode ( ) = = HloOpcode : : kConstant ) { <nl> + operand_str = stringify_constant ( operand ) ; <nl> + } else if ( ShouldMergeIntoUsers ( operand ) ) { <nl> + / / Special case : If the operand is a parameter , use its parameter number <nl> + / / rather than its name , because that ' s generally how people think of the <nl> + / / node . <nl> + if ( operand - > opcode ( ) = = HloOpcode : : kParameter ) { <nl> + operand_str = Printf ( " Parameter % lld " , operand - > parameter_number ( ) ) ; <nl> + } else { <nl> + operand_str = operand - > name ( ) ; <nl> + } <nl> + } <nl> + <nl> + if ( operand_str ) { <nl> + if ( instr - > operand_count ( ) > 1 ) { <nl> + lines . push_back ( Printf ( " < b > operand % lld < / b > = % s " , i , * operand_str ) ) ; <nl> + } else { <nl> + lines . push_back ( Printf ( " < b > operand < / b > = % s " , * operand_str ) ) ; <nl> + } <nl> } <nl> - lines . push_back ( <nl> - Printf ( " < b > operand % lld < / b > = % s " , i , stringify_constant ( operand ) ) ) ; <nl> } <nl> return Join ( lines , " < br / > " ) ; <nl> } <nl> <nl> ColorScheme HloDotDumper : : GetInstructionColor ( const HloInstruction * instr ) { <nl> + const auto kParameterColor = kOrange ; <nl> + <nl> + / / Special case : If this instruction has a parameter merged into it , paint it <nl> + / / the same color as a parameter . <nl> + if ( std : : any_of ( instr - > operands ( ) . begin ( ) , instr - > operands ( ) . end ( ) , <nl> + [ & ] ( const HloInstruction * operand ) { <nl> + return operand - > opcode ( ) = = HloOpcode : : kParameter & & <nl> + ShouldMergeIntoUsers ( operand ) ; <nl> + } ) ) { <nl> + return kParameterColor ; <nl> + } <nl> + <nl> / / Pick different colors or shapes for instructions which are particularly <nl> / / expensive ( eg , dot ) and those which are unusual in some way or unique <nl> / / ( eg , parameter ) . 
<nl> ColorScheme HloDotDumper : : GetInstructionColor ( const HloInstruction * instr ) { <nl> case HloOpcode : : kAbs : <nl> case HloOpcode : : kRoundNearestAfz : <nl> case HloOpcode : : kAdd : <nl> + case HloOpcode : : kAtan2 : <nl> case HloOpcode : : kCeil : <nl> case HloOpcode : : kClamp : <nl> + case HloOpcode : : kComplex : <nl> case HloOpcode : : kConvert : <nl> case HloOpcode : : kCos : <nl> case HloOpcode : : kDivide : <nl> ColorScheme HloDotDumper : : GetInstructionColor ( const HloInstruction * instr ) { <nl> case HloOpcode : : kFloor : <nl> case HloOpcode : : kGe : <nl> case HloOpcode : : kGt : <nl> + case HloOpcode : : kImag : <nl> case HloOpcode : : kIndex : <nl> case HloOpcode : : kIsFinite : <nl> case HloOpcode : : kLe : <nl> ColorScheme HloDotDumper : : GetInstructionColor ( const HloInstruction * instr ) { <nl> case HloOpcode : : kNe : <nl> case HloOpcode : : kNegate : <nl> case HloOpcode : : kPower : <nl> + case HloOpcode : : kReal : <nl> case HloOpcode : : kRemainder : <nl> case HloOpcode : : kShiftLeft : <nl> case HloOpcode : : kShiftRightArithmetic : <nl> ColorScheme HloDotDumper : : GetInstructionColor ( const HloInstruction * instr ) { <nl> case HloOpcode : : kReducePrecision : <nl> return kRed ; <nl> case HloOpcode : : kParameter : <nl> - return kOrange ; <nl> + return kParameterColor ; <nl> case HloOpcode : : kBatchNormTraining : <nl> case HloOpcode : : kBatchNormInference : <nl> case HloOpcode : : kBatchNormGrad : <nl> void HloDotDumper : : AddInstructionIncomingEdges ( const HloInstruction * instr ) { <nl> ShouldShowFusionSubcomputation ( from ) ) { <nl> from = from - > fused_expression_root ( ) ; <nl> } <nl> - if ( ! filter_ . Show ( from ) | | from - > opcode ( ) = = HloOpcode : : kConstant ) { <nl> + if ( ! filter_ . Show ( from ) | | from - > opcode ( ) = = HloOpcode : : kConstant | | <nl> + ShouldMergeIntoUsers ( from ) ) { <nl> return ; <nl> } <nl> VLOG ( 2 ) < < " Adding edge from " < < from - > name ( ) < < " to " < < to - > name ( ) <nl> mmm a / tensorflow / compiler / xla / service / hlo_instruction . cc <nl> ppp b / tensorflow / compiler / xla / service / hlo_instruction . cc <nl> HloInstruction : : CreateGetTupleElement ( const Shape & shape , <nl> case HloOpcode : : kCos : <nl> case HloOpcode : : kExp : <nl> case HloOpcode : : kFloor : <nl> + case HloOpcode : : kImag : <nl> case HloOpcode : : kIsFinite : <nl> case HloOpcode : : kLog : <nl> case HloOpcode : : kNot : <nl> case HloOpcode : : kNegate : <nl> + case HloOpcode : : kReal : <nl> case HloOpcode : : kSign : <nl> case HloOpcode : : kSin : <nl> case HloOpcode : : kSort : <nl> HloInstruction : : CreateGetTupleElement ( const Shape & shape , <nl> / / Only certain opcodes are supported with CreateBinary : opcodes of binary <nl> / / instructions with no auxiliary fields . 
<nl> switch ( opcode ) { <nl> - case ( HloOpcode : : kAdd ) : <nl> - case ( HloOpcode : : kDivide ) : <nl> - case ( HloOpcode : : kDot ) : <nl> - case ( HloOpcode : : kEq ) : <nl> - case ( HloOpcode : : kGe ) : <nl> - case ( HloOpcode : : kGt ) : <nl> - case ( HloOpcode : : kLe ) : <nl> - case ( HloOpcode : : kLt ) : <nl> - case ( HloOpcode : : kMaximum ) : <nl> - case ( HloOpcode : : kMinimum ) : <nl> - case ( HloOpcode : : kMultiply ) : <nl> - case ( HloOpcode : : kNe ) : <nl> - case ( HloOpcode : : kPower ) : <nl> - case ( HloOpcode : : kRemainder ) : <nl> - case ( HloOpcode : : kSubtract ) : <nl> - case ( HloOpcode : : kAnd ) : <nl> - case ( HloOpcode : : kOr ) : <nl> - case ( HloOpcode : : kShiftLeft ) : <nl> - case ( HloOpcode : : kShiftRightArithmetic ) : <nl> - case ( HloOpcode : : kShiftRightLogical ) : <nl> + case HloOpcode : : kAdd : <nl> + case HloOpcode : : kAtan2 : <nl> + case HloOpcode : : kDivide : <nl> + case HloOpcode : : kComplex : <nl> + case HloOpcode : : kDot : <nl> + case HloOpcode : : kEq : <nl> + case HloOpcode : : kGe : <nl> + case HloOpcode : : kGt : <nl> + case HloOpcode : : kLe : <nl> + case HloOpcode : : kLt : <nl> + case HloOpcode : : kMaximum : <nl> + case HloOpcode : : kMinimum : <nl> + case HloOpcode : : kMultiply : <nl> + case HloOpcode : : kNe : <nl> + case HloOpcode : : kPower : <nl> + case HloOpcode : : kRemainder : <nl> + case HloOpcode : : kSubtract : <nl> + case HloOpcode : : kAnd : <nl> + case HloOpcode : : kOr : <nl> + case HloOpcode : : kShiftLeft : <nl> + case HloOpcode : : kShiftRightArithmetic : <nl> + case HloOpcode : : kShiftRightLogical : <nl> break ; <nl> default : <nl> LOG ( FATAL ) < < " Invalid binary instruction opcode " <nl> std : : unique_ptr < HloInstruction > HloInstruction : : CloneWithNewOperands ( <nl> case HloOpcode : : kCopy : <nl> case HloOpcode : : kCos : <nl> case HloOpcode : : kExp : <nl> + case HloOpcode : : kImag : <nl> case HloOpcode : : kIsFinite : <nl> case HloOpcode : : kFloor : <nl> case HloOpcode : : kLog : <nl> case HloOpcode : : kNot : <nl> case HloOpcode : : kNegate : <nl> + case HloOpcode : : kReal : <nl> case HloOpcode : : kSign : <nl> case HloOpcode : : kSin : <nl> case HloOpcode : : kSort : <nl> std : : unique_ptr < HloInstruction > HloInstruction : : CloneWithNewOperands ( <nl> break ; <nl> / / Binary ops . <nl> case HloOpcode : : kAdd : <nl> + case HloOpcode : : kAtan2 : <nl> + case HloOpcode : : kComplex : <nl> case HloOpcode : : kDivide : <nl> case HloOpcode : : kMultiply : <nl> case HloOpcode : : kSubtract : <nl> bool HloInstruction : : IdenticalSlowPath ( <nl> / / The result of these instructions only depend upon their opcode and <nl> / / operands . 
<nl> case HloOpcode : : kAbs : <nl> + case HloOpcode : : kAtan2 : <nl> case HloOpcode : : kRoundNearestAfz : <nl> case HloOpcode : : kAdd : <nl> case HloOpcode : : kCeil : <nl> case HloOpcode : : kClamp : <nl> + case HloOpcode : : kComplex : <nl> case HloOpcode : : kCopy : <nl> case HloOpcode : : kCos : <nl> case HloOpcode : : kCrossReplicaSum : <nl> bool HloInstruction : : IdenticalSlowPath ( <nl> case HloOpcode : : kFloor : <nl> case HloOpcode : : kGe : <nl> case HloOpcode : : kGt : <nl> + case HloOpcode : : kImag : <nl> case HloOpcode : : kIsFinite : <nl> case HloOpcode : : kLe : <nl> case HloOpcode : : kLog : <nl> bool HloInstruction : : IdenticalSlowPath ( <nl> case HloOpcode : : kNe : <nl> case HloOpcode : : kNegate : <nl> case HloOpcode : : kPower : <nl> + case HloOpcode : : kReal : <nl> case HloOpcode : : kRemainder : <nl> case HloOpcode : : kSelect : <nl> case HloOpcode : : kShiftLeft : <nl> Status HloInstruction : : Visit ( DfsHloVisitor * visitor ) { <nl> switch ( opcode_ ) { <nl> case HloOpcode : : kAbs : <nl> return visitor - > HandleAbs ( this , operands_ [ 0 ] ) ; <nl> + case HloOpcode : : kAtan2 : <nl> + return visitor - > HandleAtan2 ( this , operands_ [ 0 ] , operands_ [ 1 ] ) ; <nl> case HloOpcode : : kRoundNearestAfz : <nl> return visitor - > HandleRound ( this ) ; <nl> case HloOpcode : : kBatchNormTraining : <nl> Status HloInstruction : : Visit ( DfsHloVisitor * visitor ) { <nl> case HloOpcode : : kLt : <nl> case HloOpcode : : kNe : <nl> return visitor - > HandleCompare ( this , opcode_ , operands_ [ 0 ] , operands_ [ 1 ] ) ; <nl> + case HloOpcode : : kComplex : <nl> + return visitor - > HandleComplex ( this , operands_ [ 0 ] , operands_ [ 1 ] ) ; <nl> case HloOpcode : : kAdd : <nl> return visitor - > HandleAdd ( this , operands_ [ 0 ] , operands_ [ 1 ] ) ; <nl> case HloOpcode : : kDivide : <nl> Status HloInstruction : : Visit ( DfsHloVisitor * visitor ) { <nl> return visitor - > HandleCos ( this , operands_ [ 0 ] ) ; <nl> case HloOpcode : : kSin : <nl> return visitor - > HandleSin ( this , operands_ [ 0 ] ) ; <nl> + case HloOpcode : : kReal : <nl> + return visitor - > HandleReal ( this , operands_ [ 0 ] ) ; <nl> + case HloOpcode : : kImag : <nl> + return visitor - > HandleImag ( this , operands_ [ 0 ] ) ; <nl> case HloOpcode : : kIsFinite : <nl> return visitor - > HandleIsFinite ( this , operands_ [ 0 ] ) ; <nl> case HloOpcode : : kNot : <nl> static Status PostOrderDFS ( HloInstruction * root , DfsHloVisitor * visitor , <nl> / / <nl> / / We need to keep track of both the id and the instruction because <nl> / / instructions can get deleted while they are on the stack , so we <nl> - / / can ' t always use the ( potentiall dead ) instruction object to grab <nl> + / / can ' t always use the ( potentially dead ) instruction object to grab <nl> / / its id . <nl> DFSStack dfs_stack ; <nl> dfs_stack . emplace_back ( root - > unique_id ( ) , root ) ; <nl> bool HloInstruction : : IsElementwiseBinary ( ) const { <nl> / / Binary elementwise operations . If you update this , please update <nl> / / IsElementwise ( ) accordingly . <nl> case HloOpcode : : kAdd : <nl> + case HloOpcode : : kComplex : <nl> case HloOpcode : : kDivide : <nl> case HloOpcode : : kEq : <nl> case HloOpcode : : kGe : <nl> bool HloInstruction : : IsElementwise ( ) const { <nl> <nl> / / Unary elementwise operations . 
<nl> case HloOpcode : : kAbs : <nl> + case HloOpcode : : kAtan2 : <nl> case HloOpcode : : kRoundNearestAfz : <nl> case HloOpcode : : kCeil : <nl> case HloOpcode : : kConvert : <nl> bool HloInstruction : : IsElementwise ( ) const { <nl> case HloOpcode : : kCos : <nl> case HloOpcode : : kExp : <nl> case HloOpcode : : kFloor : <nl> + case HloOpcode : : kImag : <nl> case HloOpcode : : kIsFinite : <nl> case HloOpcode : : kLog : <nl> case HloOpcode : : kNot : <nl> case HloOpcode : : kNegate : <nl> + case HloOpcode : : kReal : <nl> case HloOpcode : : kReducePrecision : <nl> case HloOpcode : : kSign : <nl> case HloOpcode : : kSin : <nl> bool HloInstruction : : IsElementwise ( ) const { <nl> / / Binary elementwise operations , the same as in IsElementwiseBinary ( ) . <nl> / / If you update this , please update IsElementwiseBinary ( ) accordingly . <nl> case HloOpcode : : kAdd : <nl> + case HloOpcode : : kComplex : <nl> case HloOpcode : : kDivide : <nl> case HloOpcode : : kEq : <nl> case HloOpcode : : kGe : <nl> class HloInstruction : : FusionReusesParamElements { <nl> public : <nl> using UseKind = HloInstruction : : UseKind ; <nl> <nl> - / / We could rather iterate backwards thru fused_instructions_ here , as it is <nl> - / / in reverse postorder , and compute whether each fused instruction reuses <nl> - / / the value of this parameter , which would save stack space but not allow <nl> - / / us to finish early if we find a reuse . <nl> + / / We could rather iterate backwards through fused_instructions_ here , as it <nl> + / / is in reverse postorder , and compute whether each fused instruction reuses <nl> + / / the value of this parameter , which would save stack space but not allow us <nl> + / / to finish early if we find a reuse . <nl> static UseKind Compute ( int64 i , const HloInstruction & hlo ) { <nl> tensorflow : : gtl : : FlatMap < const HloInstruction * , UseKind > memoization_cache ; <nl> return ComputeInternal ( i , hlo , & memoization_cache ) ; <nl> mmm a / tensorflow / compiler / xla / service / hlo_opcode . cc <nl> ppp b / tensorflow / compiler / xla / service / hlo_opcode . 
cc <nl> string HloOpcodeString ( HloOpcode opcode ) { <nl> return " abs " ; <nl> case HloOpcode : : kAdd : <nl> return " add " ; <nl> + case HloOpcode : : kAnd : <nl> + return " and " ; <nl> + case HloOpcode : : kAtan2 : <nl> + return " atan2 " ; <nl> case HloOpcode : : kBatchNormTraining : <nl> return " batch - norm - training " ; <nl> case HloOpcode : : kBatchNormInference : <nl> string HloOpcodeString ( HloOpcode opcode ) { <nl> return " call " ; <nl> case HloOpcode : : kClamp : <nl> return " clamp " ; <nl> + case HloOpcode : : kComplex : <nl> + return " complex " ; <nl> case HloOpcode : : kConcatenate : <nl> return " concatenate " ; <nl> case HloOpcode : : kConstant : <nl> string HloOpcodeString ( HloOpcode opcode ) { <nl> return " get - tuple - element " ; <nl> case HloOpcode : : kGt : <nl> return " greater - than " ; <nl> + case HloOpcode : : kImag : <nl> + return " imag " ; <nl> case HloOpcode : : kIndex : <nl> return " index " ; <nl> case HloOpcode : : kInfeed : <nl> string HloOpcodeString ( HloOpcode opcode ) { <nl> return " less - than - or - equal - to " ; <nl> case HloOpcode : : kLog : <nl> return " log " ; <nl> - case HloOpcode : : kAnd : <nl> - return " and " ; <nl> - case HloOpcode : : kOr : <nl> - return " or " ; <nl> - case HloOpcode : : kNot : <nl> - return " not " ; <nl> case HloOpcode : : kLt : <nl> return " less - than " ; <nl> case HloOpcode : : kMap : <nl> string HloOpcodeString ( HloOpcode opcode ) { <nl> return " not - equal - to " ; <nl> case HloOpcode : : kNegate : <nl> return " negate " ; <nl> + case HloOpcode : : kNot : <nl> + return " not " ; <nl> + case HloOpcode : : kOr : <nl> + return " or " ; <nl> case HloOpcode : : kOutfeed : <nl> return " outfeed " ; <nl> case HloOpcode : : kPad : <nl> string HloOpcodeString ( HloOpcode opcode ) { <nl> return " parameter " ; <nl> case HloOpcode : : kPower : <nl> return " power " ; <nl> + case HloOpcode : : kReal : <nl> + return " real " ; <nl> case HloOpcode : : kRecv : <nl> return " recv " ; <nl> case HloOpcode : : kReduce : <nl> StatusOr < HloOpcode > StringToHloOpcode ( const string & opcode_name ) { <nl> static auto * opcode_map = new tensorflow : : gtl : : FlatMap < string , HloOpcode > ( <nl> { { " abs " , HloOpcode : : kAbs } , <nl> { " add " , HloOpcode : : kAdd } , <nl> + { " and " , HloOpcode : : kAnd } , <nl> { " batch - norm - training " , HloOpcode : : kBatchNormTraining } , <nl> { " batch - norm - inference " , HloOpcode : : kBatchNormInference } , <nl> { " batch - norm - grad " , HloOpcode : : kBatchNormGrad } , <nl> StatusOr < HloOpcode > StringToHloOpcode ( const string & opcode_name ) { <nl> { " is - finite " , HloOpcode : : kIsFinite } , <nl> { " less - than - or - equal - to " , HloOpcode : : kLe } , <nl> { " log " , HloOpcode : : kLog } , <nl> - { " and " , HloOpcode : : kAnd } , <nl> - { " or " , HloOpcode : : kOr } , <nl> - { " not " , HloOpcode : : kNot } , <nl> { " less - than " , HloOpcode : : kLt } , <nl> { " map " , HloOpcode : : kMap } , <nl> { " maximum " , HloOpcode : : kMaximum } , <nl> { " minimum " , HloOpcode : : kMinimum } , <nl> { " multiply " , HloOpcode : : kMultiply } , <nl> + { " not " , HloOpcode : : kNot } , <nl> { " not - equal - to " , HloOpcode : : kNe } , <nl> { " negate " , HloOpcode : : kNegate } , <nl> + { " or " , HloOpcode : : kOr } , <nl> { " outfeed " , HloOpcode : : kOutfeed } , <nl> { " pad " , HloOpcode : : kPad } , <nl> { " parameter " , HloOpcode : : kParameter } , <nl> mmm a / tensorflow / compiler / xla / service / hlo_opcode . 
h <nl> ppp b / tensorflow / compiler / xla / service / hlo_opcode . h <nl> namespace xla { <nl> enum class HloOpcode { <nl> kAbs , <nl> kAdd , <nl> + kAtan2 , <nl> kBatchNormGrad , <nl> kBatchNormInference , <nl> kBatchNormTraining , <nl> enum class HloOpcode { <nl> kCall , <nl> kCeil , <nl> kClamp , <nl> + kComplex , <nl> kConcatenate , <nl> kConstant , <nl> kConvert , <nl> enum class HloOpcode { <nl> kGe , <nl> kGetTupleElement , <nl> kGt , <nl> + kImag , <nl> kIndex , <nl> kInfeed , <nl> kIsFinite , <nl> enum class HloOpcode { <nl> kPad , <nl> kParameter , <nl> kPower , <nl> + kReal , <nl> kRecv , <nl> kReduce , <nl> kReducePrecision , <nl> mmm a / tensorflow / compiler / xla / service / hlo_pass_pipeline . cc <nl> ppp b / tensorflow / compiler / xla / service / hlo_pass_pipeline . cc <nl> StatusOr < bool > HloPassPipeline : : Run ( HloModule * module ) { <nl> for ( auto & invariant_checker : invariant_checkers_ ) { <nl> VLOG ( 1 ) < < " Invariant checker " < < invariant_checker - > name ( ) ; <nl> StatusOr < bool > changed_status = invariant_checker - > Run ( module ) ; <nl> + VLOG ( 1 ) < < " Invariant checker done " < < invariant_checker - > name ( ) ; <nl> if ( ! changed_status . ok ( ) ) { <nl> VLOG ( 2 ) < < " Module failed invariant check : " ; <nl> XLA_VLOG_LINES ( 2 , module - > ToString ( ) ) ; <nl> mmm a / tensorflow / compiler / xla / service / hlo_verifier . cc <nl> ppp b / tensorflow / compiler / xla / service / hlo_verifier . cc <nl> class ShapeVerifier : public DfsHloVisitor { <nl> } <nl> <nl> Status HandleConvert ( HloInstruction * convert ) override { <nl> + if ( ShapeUtil : : ElementIsComplex ( convert - > operand ( 0 ) - > shape ( ) ) ) { <nl> + TF_RET_CHECK ( ShapeUtil : : ElementIsComplex ( convert - > shape ( ) ) ) <nl> + < < " Unsupported complex - > real kConvert " ; <nl> + } <nl> return CheckShape ( convert , ShapeInference : : InferConvertShape ( <nl> convert - > operand ( 0 ) - > shape ( ) , <nl> convert - > shape ( ) . element_type ( ) ) ) ; <nl> mmm a / tensorflow / compiler / xla / service / instruction_fusion . cc <nl> ppp b / tensorflow / compiler / xla / service / instruction_fusion . cc <nl> namespace xla { <nl> const HloInstruction & instruction ) { <nl> switch ( instruction . opcode ( ) ) { <nl> / / Cheap instructions . 
<nl> - case HloOpcode : : kAbs : <nl> case HloOpcode : : kAdd : <nl> case HloOpcode : : kBitcast : <nl> case HloOpcode : : kBroadcast : <nl> case HloOpcode : : kCeil : <nl> case HloOpcode : : kClamp : <nl> + case HloOpcode : : kComplex : <nl> case HloOpcode : : kConcatenate : <nl> case HloOpcode : : kConstant : <nl> case HloOpcode : : kConvert : <nl> case HloOpcode : : kCopy : <nl> - case HloOpcode : : kCos : <nl> case HloOpcode : : kDynamicSlice : <nl> case HloOpcode : : kDynamicUpdateSlice : <nl> case HloOpcode : : kEq : <nl> namespace xla { <nl> case HloOpcode : : kGe : <nl> case HloOpcode : : kGetTupleElement : <nl> case HloOpcode : : kGt : <nl> + case HloOpcode : : kImag : <nl> case HloOpcode : : kInfeed : <nl> case HloOpcode : : kIsFinite : <nl> case HloOpcode : : kLe : <nl> namespace xla { <nl> case HloOpcode : : kNegate : <nl> case HloOpcode : : kOutfeed : <nl> case HloOpcode : : kPad : <nl> + case HloOpcode : : kReal : <nl> case HloOpcode : : kReducePrecision : <nl> case HloOpcode : : kReshape : <nl> case HloOpcode : : kReverse : <nl> namespace xla { <nl> case HloOpcode : : kShiftLeft : <nl> case HloOpcode : : kShiftRightArithmetic : <nl> case HloOpcode : : kShiftRightLogical : <nl> - case HloOpcode : : kSign : <nl> - case HloOpcode : : kSin : <nl> case HloOpcode : : kSlice : <nl> case HloOpcode : : kSubtract : <nl> case HloOpcode : : kTranspose : <nl> case HloOpcode : : kTuple : <nl> return false ; <nl> <nl> + / / Cheap instructions for reals , but expensive for complex . <nl> + case HloOpcode : : kAbs : <nl> + case HloOpcode : : kCos : <nl> + case HloOpcode : : kSign : <nl> + case HloOpcode : : kSin : <nl> + return ShapeUtil : : ElementIsComplex ( instruction . shape ( ) ) ; <nl> + <nl> / / Expensive instructions . <nl> + case HloOpcode : : kAtan2 : <nl> case HloOpcode : : kBatchNormTraining : <nl> case HloOpcode : : kBatchNormInference : <nl> case HloOpcode : : kBatchNormGrad : <nl> mmm a / tensorflow / compiler / xla / service / llvm_ir / fused_ir_emitter . cc <nl> ppp b / tensorflow / compiler / xla / service / llvm_ir / fused_ir_emitter . cc <nl> Status FusedIrEmitter : : DefaultAction ( HloInstruction * hlo ) { <nl> Status FusedIrEmitter : : HandleConstant ( HloInstruction * constant , <nl> const Literal & literal ) { <nl> llvm : : Constant * initializer = <nl> - llvm_ir : : ConvertLiteralToIrConstant ( literal , ir_builder_ ) ; <nl> + llvm_ir : : ConvertLiteralToIrConstant ( literal , module_ ) ; <nl> llvm : : GlobalVariable * global = new llvm : : GlobalVariable ( <nl> * ir_builder_ - > GetInsertBlock ( ) - > getModule ( ) , initializer - > getType ( ) , <nl> / * isConstant = * / true , llvm : : GlobalValue : : ExternalLinkage , initializer , <nl> Status FusedIrEmitter : : HandleGetTupleElement ( HloInstruction * get_tuple_element , <nl> / / Emit code to lookup tuple element pointer , and store it in ' gte_values_ ' . <nl> llvm : : Value * tuple_element_ptr = llvm_ir : : EmitGetTupleElement ( <nl> get_tuple_element - > shape ( ) , get_tuple_element - > tuple_index ( ) , <nl> - / * alignment = * / 1 , it - > second , ir_builder_ ) ; <nl> + / * alignment = * / 1 , it - > second , ir_builder_ , module_ ) ; <nl> gte_values_ . insert ( std : : make_pair ( get_tuple_element , tuple_element_ptr ) ) ; <nl> / / Emit code to read base tuple element array ( if non - tuple shaped ) . <nl> if ( ! 
ShapeUtil : : IsTuple ( get_tuple_element - > shape ( ) ) ) { <nl> Status FusedIrEmitter : : HandleTuple ( <nl> std : : vector < llvm : : Type * > operand_elemental_ir_types ; <nl> for ( HloInstruction * operand : operands ) { <nl> operand_elemental_ir_types . push_back ( llvm_ir : : PrimitiveTypeToIrType ( <nl> - operand - > shape ( ) . element_type ( ) , ir_builder_ ) ) ; <nl> + operand - > shape ( ) . element_type ( ) , module_ ) ) ; <nl> } <nl> generators_ [ tuple ] = <nl> [ = ] ( const IrArray : : Index & index ) - > StatusOr < llvm : : Value * > { <nl> mmm a / tensorflow / compiler / xla / service / llvm_ir / fused_ir_emitter . h <nl> ppp b / tensorflow / compiler / xla / service / llvm_ir / fused_ir_emitter . h <nl> class FusedIrEmitter : public DfsHloVisitorWithDefault { <nl> ElementalIrEmitter * elemental_emitter ) <nl> : parameter_arrays_ ( parameter_arrays ) , <nl> elemental_emitter_ ( elemental_emitter ) , <nl> - ir_builder_ ( elemental_emitter - > ir_builder ( ) ) { } <nl> + ir_builder_ ( elemental_emitter - > ir_builder ( ) ) , <nl> + module_ ( elemental_emitter - > module ( ) ) { } <nl> <nl> Status DefaultAction ( HloInstruction * hlo ) override ; <nl> <nl> class FusedIrEmitter : public DfsHloVisitorWithDefault { <nl> <nl> / / Borrowed <nl> llvm : : IRBuilder < > * ir_builder_ ; <nl> + llvm : : Module * module_ ; <nl> <nl> / / Map from instruction pointers to functions to generate elements of their <nl> / / outputs <nl> mmm a / tensorflow / compiler / xla / service / llvm_ir / ir_array . cc <nl> ppp b / tensorflow / compiler / xla / service / llvm_ir / ir_array . cc <nl> llvm : : Value * IrArray : : EmitArrayElementAddress ( <nl> } <nl> <nl> if ( ! is_implicit_broadcast & & index . LinearValidOnShape ( * shape_ ) ) { <nl> + llvm : : Module * module = <nl> + ir_builder - > GetInsertBlock ( ) - > getParent ( ) - > getParent ( ) ; <nl> return ir_builder - > CreateInBoundsGEP ( <nl> ir_builder - > CreateBitCast ( <nl> - base_ptr_ , PrimitiveTypeToIrType ( shape_ - > element_type ( ) , ir_builder ) <nl> + base_ptr_ , PrimitiveTypeToIrType ( shape_ - > element_type ( ) , module ) <nl> - > getPointerTo ( ) ) , <nl> { index . linear ( ) } , llvm_ir : : AsStringRef ( name ) ) ; <nl> } <nl> void IrArray : : EmitWriteArrayElement ( const Index & index , llvm : : Value * value , <nl> <nl> IrArray IrArray : : CastToShape ( const Shape & new_shape , <nl> llvm : : IRBuilder < > * ir_builder ) const { <nl> - llvm : : Type * new_ir_type = llvm_ir : : ShapeToIrType ( new_shape , ir_builder ) ; <nl> + llvm : : Module * module = ir_builder - > GetInsertBlock ( ) - > getParent ( ) - > getParent ( ) ; <nl> + llvm : : Type * new_ir_type = llvm_ir : : ShapeToIrType ( new_shape , module ) ; <nl> return IrArray ( <nl> ir_builder - > CreatePointerCast ( base_ptr_ , new_ir_type - > getPointerTo ( ) ) , <nl> new_shape ) ; <nl> mmm a / tensorflow / compiler / xla / service / llvm_ir / llvm_util . cc <nl> ppp b / tensorflow / compiler / xla / service / llvm_ir / llvm_util . cc <nl> limitations under the License . <nl> # include < memory > <nl> # include < vector > <nl> <nl> + # include " llvm / IR / DerivedTypes . h " <nl> # include " llvm / IR / MDBuilder . h " <nl> # include " llvm / IR / Operator . h " <nl> # include " llvm / Target / TargetOptions . h " <nl> limitations under the License . <nl> namespace xla { <nl> namespace llvm_ir { <nl> <nl> + namespace { <nl> + <nl> + / / Note , this function is only useful in an insertion context ; in a global <nl> + / / ( e . g . 
constants ) context it will CHECK fail . <nl> + llvm : : Module * ModuleFromIRBuilder ( llvm : : IRBuilder < > * ir_builder ) { <nl> + auto block = CHECK_NOTNULL ( ir_builder - > GetInsertBlock ( ) ) ; <nl> + auto fn = CHECK_NOTNULL ( block - > getParent ( ) ) ; <nl> + auto module = CHECK_NOTNULL ( fn - > getParent ( ) ) ; <nl> + return module ; <nl> + } <nl> + <nl> + } / / namespace <nl> + <nl> string AsString ( const std : : string & str ) { <nl> return string ( str . data ( ) , str . length ( ) ) ; <nl> } <nl> llvm : : Value * EmitCallToIntrinsic ( <nl> for ( auto type : overloaded_types ) { <nl> types . push_back ( type ) ; <nl> } <nl> - llvm : : Module * module = ir_builder - > GetInsertBlock ( ) - > getParent ( ) - > getParent ( ) ; <nl> + llvm : : Module * module = ModuleFromIRBuilder ( ir_builder ) ; <nl> llvm : : Function * intrinsic = <nl> llvm : : Intrinsic : : getDeclaration ( module , intrinsic_id , types ) ; <nl> std : : vector < llvm : : Value * > operands_vec ; <nl> llvm : : Value * EmitBufferIndexingGEP ( llvm : : Value * array , int64 index , <nl> } <nl> <nl> llvm : : Type * PrimitiveTypeToIrType ( PrimitiveType element_type , <nl> - llvm : : IRBuilder < > * ir_builder ) { <nl> + llvm : : Module * module ) { <nl> switch ( element_type ) { <nl> case PRED : <nl> case S8 : <nl> case U8 : <nl> - return ir_builder - > getInt8Ty ( ) ; <nl> + return llvm : : Type : : getInt8Ty ( module - > getContext ( ) ) ; <nl> case S16 : <nl> case U16 : <nl> - return ir_builder - > getInt16Ty ( ) ; <nl> + return llvm : : Type : : getInt16Ty ( module - > getContext ( ) ) ; <nl> case S32 : <nl> case U32 : <nl> - return ir_builder - > getInt32Ty ( ) ; <nl> + return llvm : : Type : : getInt32Ty ( module - > getContext ( ) ) ; <nl> case S64 : <nl> case U64 : <nl> - return ir_builder - > getInt64Ty ( ) ; <nl> + return llvm : : Type : : getInt64Ty ( module - > getContext ( ) ) ; <nl> case F32 : <nl> - return ir_builder - > getFloatTy ( ) ; <nl> + return llvm : : Type : : getFloatTy ( module - > getContext ( ) ) ; <nl> case F64 : <nl> - return ir_builder - > getDoubleTy ( ) ; <nl> + return llvm : : Type : : getDoubleTy ( module - > getContext ( ) ) ; <nl> + case C64 : { <nl> + auto cplx_t = module - > getTypeByName ( " complex64 " ) ; <nl> + if ( cplx_t = = nullptr ) { <nl> + / / C + + standard dictates the memory layout of std : : complex is contiguous <nl> + / / real followed by imaginary . C + + 11 section 26 . 4 [ complex . numbers ] : <nl> + / / If z is an lvalue expression of type cv std : : complex < T > then the <nl> + / / expression reinterpret_cast < cv T ( & ) [ 2 ] > ( z ) shall be well - formed , <nl> + / / reinterpret_cast < cv T ( & ) [ 2 ] > ( z ) [ 0 ] shall designate the real part of <nl> + / / z , and reinterpret_cast < cv T ( & ) [ 2 ] > ( z ) [ 1 ] shall designate the <nl> + / / imaginary part of z . <nl> + return llvm : : StructType : : create ( <nl> + " complex64 " , llvm : : Type : : getFloatTy ( module - > getContext ( ) ) , <nl> + llvm : : Type : : getFloatTy ( module - > getContext ( ) ) ) ; <nl> + } <nl> + return cplx_t ; <nl> + } <nl> / / A Tuple contains an array of pointers . Use i8 * . <nl> case TUPLE : <nl> / / An Opaque is like a void * , use i8 * . 
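// Editorial sketch (not part of the commit): the "complex64" struct above
// relies on the [complex.numbers] layout guarantee quoted in its comment.
// A host-side spot check of that assumption could read:
//
//   #include <complex>
//   static_assert(sizeof(std::complex<float>) == 2 * sizeof(float),
//                 "complex64 must be two packed floats");
//   // Component access through the sanctioned array view:
//   float re = reinterpret_cast<const float(&)[2]>(z)[0];  // real part
//   float im = reinterpret_cast<const float(&)[2]>(z)[1];  // imaginary part
//
// so a {float, float} LLVM struct and std::complex<float> agree byte for
// byte, letting generated code and host code share the same buffers.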
<nl> case OPAQUE : <nl> - return ir_builder - > getInt8PtrTy ( ) ; <nl> + return llvm : : Type : : getInt8PtrTy ( module - > getContext ( ) ) ; <nl> default : <nl> LOG ( FATAL ) < < " unsupported type " < < element_type ; <nl> } <nl> } <nl> <nl> - llvm : : Type * ShapeToIrType ( const Shape & shape , llvm : : IRBuilder < > * ir_builder ) { <nl> - llvm : : Type * result_type = <nl> - PrimitiveTypeToIrType ( shape . element_type ( ) , ir_builder ) ; <nl> + llvm : : Type * ShapeToIrType ( const Shape & shape , llvm : : Module * module ) { <nl> + llvm : : Type * result_type = PrimitiveTypeToIrType ( shape . element_type ( ) , module ) ; <nl> if ( ShapeUtil : : IsTuple ( shape ) ) { <nl> / / A tuple buffer is an array of pointers . <nl> result_type = llvm : : ArrayType : : get ( result_type , shape . tuple_shapes_size ( ) ) ; <nl> namespace { <nl> / / value down to zero ) . <nl> llvm : : Constant * LiteralToConstant ( const Literal & literal , int64 dimension_index , <nl> std : : vector < int64 > * multi_index , <nl> - llvm : : IRBuilder < > * ir_builder ) { <nl> + llvm : : Module * module ) { <nl> const Shape & shape = literal . shape ( ) ; <nl> llvm : : Type * ir_element_type = <nl> - llvm_ir : : PrimitiveTypeToIrType ( shape . element_type ( ) , ir_builder ) ; <nl> + llvm_ir : : PrimitiveTypeToIrType ( shape . element_type ( ) , module ) ; <nl> if ( dimension_index = = - 1 ) { <nl> / / Base case of the recursion . Index into the data field of the protobuf <nl> / / with the multi index . <nl> llvm : : Constant * LiteralToConstant ( const Literal & literal , int64 dimension_index , <nl> value = llvm : : ConstantFP : : get ( ir_element_type , <nl> literal . Get < double > ( * multi_index ) ) ; <nl> break ; <nl> + case C64 : { <nl> + complex64 x = literal . Get < complex64 > ( * multi_index ) ; <nl> + value = llvm : : ConstantStruct : : get ( <nl> + static_cast < llvm : : StructType * > ( ir_element_type ) , <nl> + llvm : : ConstantFP : : get ( llvm_ir : : PrimitiveTypeToIrType ( F32 , module ) , <nl> + x . real ( ) ) , <nl> + llvm : : ConstantFP : : get ( llvm_ir : : PrimitiveTypeToIrType ( F32 , module ) , <nl> + x . imag ( ) ) ) ; <nl> + break ; <nl> + } <nl> default : <nl> LOG ( FATAL ) < < " unsupported type " < < shape . element_type ( ) ; <nl> } <nl> llvm : : Constant * LiteralToConstant ( const Literal & literal , int64 dimension_index , <nl> std : : vector < llvm : : Constant * > elements ; <nl> for ( int64 i = 0 ; i < shape . dimensions ( dimension ) ; + + i ) { <nl> ( * multi_index ) [ dimension ] = i ; <nl> - elements . push_back ( LiteralToConstant ( literal , dimension_index - 1 , <nl> - multi_index , ir_builder ) ) ; <nl> + elements . push_back ( <nl> + LiteralToConstant ( literal , dimension_index - 1 , multi_index , module ) ) ; <nl> } <nl> <nl> llvm : : Type * element_type ; <nl> llvm : : Constant * LiteralToConstant ( const Literal & literal , int64 dimension_index , <nl> } / / namespace <nl> <nl> llvm : : Constant * ConvertLiteralToIrConstant ( const Literal & literal , <nl> - llvm : : IRBuilder < > * ir_builder ) { <nl> + llvm : : Module * module ) { <nl> std : : vector < int64 > multi_index ( ShapeUtil : : Rank ( literal . shape ( ) ) , 0 ) ; <nl> llvm : : Constant * value = LiteralToConstant ( <nl> literal , / * dimension_index = * / ShapeUtil : : Rank ( literal . 
shape ( ) ) - 1 , <nl> - & multi_index , ir_builder ) ; <nl> + & multi_index , module ) ; <nl> return value ; <nl> } <nl> <nl> llvm : : Value * EmitComparison ( llvm : : CmpInst : : Predicate predicate , <nl> / / comparison_result is i1 , but the NVPTX codegen incorrectly lowers i1 <nl> / / arrays . So we extend it to i8 so that it ' s addressable . <nl> return ir_builder - > CreateZExt ( <nl> - comparison_result , llvm_ir : : PrimitiveTypeToIrType ( PRED , ir_builder ) ) ; <nl> + comparison_result , <nl> + llvm_ir : : PrimitiveTypeToIrType ( PRED , ModuleFromIRBuilder ( ir_builder ) ) ) ; <nl> } <nl> <nl> / / Internal helper that is called from emitted code to log an int64 value with a <nl> mmm a / tensorflow / compiler / xla / service / llvm_ir / llvm_util . h <nl> ppp b / tensorflow / compiler / xla / service / llvm_ir / llvm_util . h <nl> llvm : : Value * EmitBufferIndexingGEP ( llvm : : Value * array , int64 index , <nl> <nl> / / Returns the LLVM type which represents the given XLA primitive type . <nl> llvm : : Type * PrimitiveTypeToIrType ( PrimitiveType element_type , <nl> - llvm : : IRBuilder < > * ir_builder ) ; <nl> + llvm : : Module * module ) ; <nl> <nl> / / Returns the LLVM type which represents the given XLA shape . For example , <nl> / / if " shape " is [ 5 x [ 10 x f32 ] ] , the function returns [ 5 x [ 10 x float ] ] . <nl> - llvm : : Type * ShapeToIrType ( const Shape & shape , llvm : : IRBuilder < > * ir_builder ) ; <nl> + llvm : : Type * ShapeToIrType ( const Shape & shape , llvm : : Module * module ) ; <nl> <nl> / / Returns a value that represents a pointer to a global string constant that <nl> / / encodes the shape as a serialized protobuf . <nl> StatusOr < Shape > DecodeSelfDescribingShapeConstant ( const void * shape_ptr , <nl> / / Converts a given literal to an IR Constant . Literals have known constant <nl> / / values at IR emission time . <nl> llvm : : Constant * ConvertLiteralToIrConstant ( const Literal & literal , <nl> - llvm : : IRBuilder < > * ir_builder ) ; <nl> + llvm : : Module * module ) ; <nl> <nl> / / Inserts an allocate of the requested type at the entry point of the <nl> / / function that the builder is currently building . The insert point <nl> mmm a / tensorflow / compiler / xla / service / llvm_ir / tuple_ops . cc <nl> ppp b / tensorflow / compiler / xla / service / llvm_ir / tuple_ops . cc <nl> namespace xla { <nl> namespace llvm_ir { <nl> <nl> void EmitTupleSelect ( IrArray select , IrArray pred , llvm : : Value * on_true , <nl> - llvm : : Value * on_false , llvm : : IRBuilder < > * ir_builder ) { <nl> + llvm : : Value * on_false , llvm : : IRBuilder < > * ir_builder , <nl> + llvm : : Module * module ) { <nl> CHECK ( ShapeUtil : : IsScalar ( pred . GetShape ( ) ) ) ; <nl> <nl> llvm : : LoadInst * pred_value = <nl> ir_builder - > CreateLoad ( pred . 
GetBasePointer ( ) , " load_predicate_value " ) ; <nl> llvm : : Value * pred_cond = ir_builder - > CreateICmpNE ( <nl> pred_value , <nl> - llvm : : ConstantInt : : get ( PrimitiveTypeToIrType ( PRED , ir_builder ) , 0 ) , <nl> + llvm : : ConstantInt : : get ( PrimitiveTypeToIrType ( PRED , module ) , 0 ) , <nl> " boolean_predicate " ) ; <nl> <nl> VLOG ( 2 ) < < " HandleSelect for tuple : " ; <nl> void EmitTupleSelect ( IrArray select , IrArray pred , llvm : : Value * on_true , <nl> <nl> void EmitTuple ( IrArray tuple , <nl> tensorflow : : gtl : : ArraySlice < llvm : : Value * > operands , <nl> - llvm : : IRBuilder < > * ir_builder ) { <nl> + llvm : : IRBuilder < > * ir_builder , llvm : : Module * module ) { <nl> for ( size_t i = 0 ; i < operands . size ( ) ; + + i ) { <nl> auto * store = ir_builder - > CreateStore ( <nl> ir_builder - > CreatePointerCast ( operands [ i ] , <nl> - PrimitiveTypeToIrType ( TUPLE , ir_builder ) ) , <nl> + PrimitiveTypeToIrType ( TUPLE , module ) ) , <nl> ir_builder - > CreateInBoundsGEP ( <nl> tuple . GetBasePointer ( ) , <nl> { ir_builder - > getInt64 ( 0 ) , ir_builder - > getInt64 ( i ) } ) ) ; <nl> void EmitTuple ( IrArray tuple , <nl> <nl> llvm : : Value * EmitGetTupleElement ( const Shape & target_shape , int64 index , <nl> int alignment , llvm : : Value * operand , <nl> - llvm : : IRBuilder < > * ir_builder ) { <nl> + llvm : : IRBuilder < > * ir_builder , <nl> + llvm : : Module * module ) { <nl> llvm : : Value * element_ptr = ir_builder - > CreateInBoundsGEP ( <nl> operand , { ir_builder - > getInt64 ( 0 ) , ir_builder - > getInt64 ( index ) } ) ; <nl> llvm : : LoadInst * src_buffer = ir_builder - > CreateLoad ( element_ptr ) ; <nl> llvm : : Value * EmitGetTupleElement ( const Shape & target_shape , int64 index , <nl> } <nl> SetAlignmentMetadataForLoad ( src_buffer , alignment ) ; <nl> <nl> - llvm : : Type * element_type = ShapeToIrType ( target_shape , ir_builder ) ; <nl> + llvm : : Type * element_type = ShapeToIrType ( target_shape , module ) ; <nl> llvm : : Value * ret_val = <nl> ir_builder - > CreateBitCast ( src_buffer , element_type - > getPointerTo ( ) ) ; <nl> return ret_val ; <nl> mmm a / tensorflow / compiler / xla / service / llvm_ir / tuple_ops . h <nl> ppp b / tensorflow / compiler / xla / service / llvm_ir / tuple_ops . h <nl> namespace llvm_ir { <nl> / / tuple_on_true or tuple_on_false : <nl> / / output [ i ] = pred ? tuple_on_true [ i ] : tuple_on_false [ i ] <nl> void EmitTupleSelect ( IrArray select , IrArray pred , llvm : : Value * on_true , <nl> - llvm : : Value * on_false , llvm : : IRBuilder < > * ir_builder ) ; <nl> + llvm : : Value * on_false , llvm : : IRBuilder < > * ir_builder , <nl> + llvm : : Module * module ) ; <nl> <nl> / / A tuple is an array of pointers , one for each operand . Each pointer points to <nl> / / the output buffer of its corresponding operand . <nl> void EmitTuple ( IrArray tuple , <nl> tensorflow : : gtl : : ArraySlice < llvm : : Value * > operands , <nl> - llvm : : IRBuilder < > * ir_builder ) ; <nl> + llvm : : IRBuilder < > * ir_builder , llvm : : Module * module ) ; <nl> <nl> / / A tuple is an array of pointers , one for each operand . Each pointer points to <nl> / / the output buffer of its corresponding operand . A GetTupleElement instruction <nl> void EmitTuple ( IrArray tuple , <nl> / / Returns an llvm value representing a pointer to the tuple element buffer . 
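// Editorial sketch (not part of the commit) of the new calling pattern,
// with variable names assumed rather than taken from any real call site:
// callers now pass the llvm::Module* explicitly instead of the helpers
// recovering it from the builder's insertion point, which does not exist
// when emitting into a global (e.g. constant) context:
//
//   llvm::Module* module =
//       ir_builder->GetInsertBlock()->getParent()->getParent();
//   llvm_ir::EmitTuple(tuple_array, {lhs_buf, rhs_buf}, ir_builder, module);
//   llvm::Value* elem = llvm_ir::EmitGetTupleElement(
//       element_shape, /*index=*/0, /*alignment=*/16,
//       tuple_array.GetBasePointer(), ir_builder, module);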
<nl> llvm : : Value * EmitGetTupleElement ( const Shape & target_shape , int64 index , <nl> int alignment , llvm : : Value * operand , <nl> - llvm : : IRBuilder < > * ir_builder ) ; <nl> + llvm : : IRBuilder < > * ir_builder , <nl> + llvm : : Module * module ) ; <nl> } / / namespace llvm_ir <nl> } / / namespace xla <nl> <nl> mmm a / tensorflow / compiler / xla / service / shape_inference . cc <nl> ppp b / tensorflow / compiler / xla / service / shape_inference . cc <nl> UnaryOperation OpcodeToUnaryOperation ( HloOpcode opcode ) { <nl> return UNOP_EXP ; <nl> case HloOpcode : : kFloor : <nl> return UNOP_FLOOR ; <nl> + case HloOpcode : : kImag : <nl> + return UNOP_IMAG ; <nl> case HloOpcode : : kIsFinite : <nl> return UNOP_IS_FINITE ; <nl> case HloOpcode : : kLog : <nl> UnaryOperation OpcodeToUnaryOperation ( HloOpcode opcode ) { <nl> return UNOP_NOT ; <nl> case HloOpcode : : kNegate : <nl> return UNOP_NEGATE ; <nl> + case HloOpcode : : kReal : <nl> + return UNOP_REAL ; <nl> case HloOpcode : : kRoundNearestAfz : <nl> return UNOP_ROUND_NEAREST_AFZ ; <nl> case HloOpcode : : kSign : <nl> UnaryOperation OpcodeToUnaryOperation ( HloOpcode opcode ) { <nl> / / opcode . <nl> BinaryOperation OpcodeToBinaryOperation ( HloOpcode opcode ) { <nl> switch ( opcode ) { <nl> + case HloOpcode : : kAtan2 : <nl> + return BINOP_ATAN2 ; <nl> + case HloOpcode : : kComplex : <nl> + return BINOP_COMPLEX ; <nl> case HloOpcode : : kDot : <nl> return BINOP_DOT ; <nl> case HloOpcode : : kMultiply : <nl> StatusOr < Shape > InferWindowOutputShape ( const Shape & base_shape , <nl> switch ( operation ) { <nl> case UNOP_FLOOR : <nl> case UNOP_CEIL : <nl> + if ( ! ShapeUtil : : ElementIsFloating ( arg ) ) { <nl> + return InvalidArgument ( <nl> + " expected element type in shape to be floating for floor / ceil " <nl> + " operation ; got % s " , <nl> + PrimitiveType_Name ( arg . element_type ( ) ) . c_str ( ) ) ; <nl> + } <nl> + return arg ; <nl> case UNOP_COS : <nl> case UNOP_SIN : <nl> case UNOP_EXP : <nl> case UNOP_LOG : <nl> case UNOP_TANH : <nl> - if ( ! ShapeUtil : : ElementIsFloating ( arg ) ) { <nl> + if ( ! ShapeUtil : : ElementIsFloating ( arg ) & & <nl> + ! ShapeUtil : : ElementIsComplex ( arg ) ) { <nl> return InvalidArgument ( <nl> - " expected element type in shape to be floating for exp / log / tanh " <nl> - " operation ; got % s " , <nl> + " expected element type in shape to be floating or complex for " <nl> + " sin / cos / exp / log / tanh operation ; got % s " , <nl> PrimitiveType_Name ( arg . element_type ( ) ) . c_str ( ) ) ; <nl> } <nl> return arg ; <nl> + case UNOP_REAL : <nl> + case UNOP_IMAG : <nl> + if ( ! ShapeUtil : : ElementIsComplex ( arg ) ) { <nl> + return InvalidArgument ( <nl> + " expected element type in shape to be complex for real / imag " <nl> + " operation ; got % s " , <nl> + PrimitiveType_Name ( arg . element_type ( ) ) . c_str ( ) ) ; <nl> + } <nl> + return ShapeUtil : : ChangeElementType ( arg , F32 ) ; <nl> case UNOP_ABS : <nl> + if ( ShapeUtil : : ElementIsComplex ( arg ) ) { <nl> + return ShapeUtil : : ChangeElementType ( <nl> + arg , primitive_util : : ComplexComponentType ( arg . 
element_type ( ) ) ) ; <nl> + } <nl> + return arg ; <nl> case UNOP_NEGATE : <nl> case UNOP_ROUND_NEAREST_AFZ : <nl> case UNOP_SIGN : <nl> ShapeInference : : InferDegenerateDimensionBroadcastShape ( <nl> case BINOP_MIN : <nl> case BINOP_SUB : <nl> case BINOP_ADD : <nl> + case BINOP_ATAN2 : <nl> case BINOP_POW : <nl> case BINOP_DIV : <nl> case BINOP_REM : <nl> ShapeInference : : InferDegenerateDimensionBroadcastShape ( <nl> return InferElementwiseBinaryOpShape ( operation , lhs , rhs , <nl> broadcast_dimensions ) ; <nl> <nl> + case BINOP_COMPLEX : { <nl> + if ( ! ShapeUtil : : ElementIsFloating ( lhs ) ) { <nl> + return InvalidArgument ( <nl> + " expected element type in shape to be floating for complex compose " <nl> + " operation ; got % s " , <nl> + PrimitiveType_Name ( lhs . element_type ( ) ) . c_str ( ) ) ; <nl> + } <nl> + TF_ASSIGN_OR_RETURN ( const Shape & shape , <nl> + InferElementwiseBinaryOpShape ( operation , lhs , rhs , <nl> + broadcast_dimensions ) ) ; <nl> + if ( lhs . element_type ( ) = = F32 ) { <nl> + return ShapeUtil : : ChangeElementType ( shape , C64 ) ; <nl> + } else { <nl> + return Unimplemented ( " complex component type not supported " ) ; <nl> + } <nl> + } <nl> case BINOP_AND : <nl> case BINOP_OR : <nl> if ( lhs . element_type ( ) ! = PRED & & <nl> mmm a / tensorflow / compiler / xla / service / shape_inference_test . cc <nl> ppp b / tensorflow / compiler / xla / service / shape_inference_test . cc <nl> class ShapeInferenceTest : public : : testing : : Test { <nl> / / Some handy scalar shapes . <nl> const Shape s32_ = ShapeUtil : : MakeShape ( S32 , { } ) ; <nl> const Shape f32_ = ShapeUtil : : MakeShape ( F32 , { } ) ; <nl> + const Shape f64_ = ShapeUtil : : MakeShape ( F64 , { } ) ; <nl> const Shape pred_ = ShapeUtil : : MakeShape ( PRED , { } ) ; <nl> <nl> / / Some handy vector and matrix shapes of F32 type . <nl> TEST_F ( ShapeInferenceTest , ClampBadShapes ) { <nl> . ok ( ) ) ; <nl> } <nl> <nl> + TEST_F ( ShapeInferenceTest , Complex ) { <nl> + auto complex_shape = [ & ] ( const Shape & lhs , const Shape & rhs , <nl> + const tensorflow : : gtl : : ArraySlice < int64 > & bcast ) { <nl> + return ShapeInference : : InferBinaryOpShape ( BinaryOperation : : BINOP_COMPLEX , <nl> + lhs , rhs , bcast ) ; <nl> + } ; <nl> + / / Inputs must be FP . <nl> + ASSERT_FALSE ( complex_shape ( s32_ , s32_ , { } ) . ok ( ) ) ; <nl> + ASSERT_FALSE ( complex_shape ( pred_ , pred_ , { } ) . ok ( ) ) ; <nl> + / / Component types must match . <nl> + ASSERT_FALSE ( complex_shape ( f32_ , f64_ , { } ) . ok ( ) ) ; <nl> + / / Only F32 - > C64 supported . <nl> + ASSERT_FALSE ( complex_shape ( f64_ , f64_ , { } ) . ok ( ) ) ; <nl> + / / Validate correct uses . 
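// Editorial shorthand (not part of the commit) for the rule the assertions
// below exercise:
//   Complex(f32[],   f32[]     ) -> c64[]        scalar x scalar
//   Complex(f32[32], f32[]     ) -> c64[32]      scalar broadcast
//   Complex(f32[64], f32[32,64]) -> c64[32,64]   broadcast_dimensions={1}
//   Complex(f64[],   f64[]     ) -> error        only F32 -> C64 supported
//   Complex(s32[],   s32[]     ) -> error        operands must be floating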
<nl> + Shape c64_32 = ShapeUtil : : MakeShape ( C64 , { 32 } ) ; <nl> + TF_ASSERT_OK_AND_ASSIGN ( Shape result , complex_shape ( f32_ , f32_ , { } ) ) ; <nl> + ASSERT_TRUE ( ShapeUtil : : Equal ( result , ShapeUtil : : MakeShape ( C64 , { } ) ) ) ; <nl> + TF_ASSERT_OK_AND_ASSIGN ( result , complex_shape ( vector_32_ , f32_ , { } ) ) ; <nl> + ASSERT_TRUE ( ShapeUtil : : Equal ( result , c64_32 ) ) ; <nl> + TF_ASSERT_OK_AND_ASSIGN ( result , complex_shape ( f32_ , vector_32_ , { } ) ) ; <nl> + ASSERT_TRUE ( ShapeUtil : : Equal ( result , c64_32 ) ) ; <nl> + TF_ASSERT_OK_AND_ASSIGN ( result , complex_shape ( vector_32_ , f32_ , { } ) ) ; <nl> + ASSERT_TRUE ( ShapeUtil : : Equal ( result , c64_32 ) ) ; <nl> + <nl> + Shape c64_32_64 = ShapeUtil : : MakeShape ( C64 , { 32 , 64 } ) ; <nl> + TF_ASSERT_OK_AND_ASSIGN ( result , <nl> + complex_shape ( vector_64_ , matrix_32_64_ , { 1 } ) ) ; <nl> + ASSERT_TRUE ( ShapeUtil : : Equal ( result , c64_32_64 ) ) ; <nl> + TF_ASSERT_OK_AND_ASSIGN ( result , <nl> + complex_shape ( matrix_32_64_ , vector_64_ , { 1 } ) ) ; <nl> + ASSERT_TRUE ( ShapeUtil : : Equal ( result , c64_32_64 ) ) ; <nl> + TF_ASSERT_OK_AND_ASSIGN ( result , <nl> + complex_shape ( matrix_32_64_ , matrix_32_64_ , { } ) ) ; <nl> + ASSERT_TRUE ( ShapeUtil : : Equal ( result , c64_32_64 ) ) ; <nl> + TF_ASSERT_OK_AND_ASSIGN ( result , complex_shape ( matrix_32_64_ , f32_ , { } ) ) ; <nl> + ASSERT_TRUE ( ShapeUtil : : Equal ( result , c64_32_64 ) ) ; <nl> + } <nl> + <nl> TEST_F ( ShapeInferenceTest , VariadicOpTuplify ) { <nl> StatusOr < Shape > result = ShapeInference : : InferVariadicOpShape ( <nl> VariadicOperation : : VAROP_TUPLE , { & s32_ , & f32_ } ) ; <nl> mmm a / tensorflow / compiler / xla / service / tuple_points_to_analysis . h <nl> ppp b / tensorflow / compiler / xla / service / tuple_points_to_analysis . h <nl> namespace xla { <nl> <nl> / / A class describing the source ( s ) of the Buffer ( s ) contained in the output of <nl> / / a particular HLO instruction . The structure of PointsToSet mirrors the <nl> - / / structure of the instruction ' s shape which may be an arbitrary tree ( eg , a <nl> + / / structure of the instruction ' s shape , which may be an arbitrary tree ( eg , a <nl> / / nested tuple ) . Each node in this tree corresponds to a single buffer in the <nl> / / instruction ' s output and contains the set of Buffers which might define <nl> / / the corresponding buffer . <nl> class PointsToSet { <nl> ShapeTree < Elem > tree_ ; <nl> <nl> / / PointsToSet contains references ( const LogicalBuffer * ) to elements within <nl> - / / TuplePointsToAnalysis so disable copying . <nl> + / / TuplePointsToAnalysis , so disable copying . <nl> TF_DISALLOW_COPY_AND_ASSIGN ( PointsToSet ) ; <nl> } ; <nl> <nl> mmm a / tensorflow / compiler / xla / service / user_computation . cc <nl> ppp b / tensorflow / compiler / xla / service / user_computation . 
cc <nl> HloOpcode UnaryOperationToHloOpcode ( UnaryOperation unop ) { <nl> return HloOpcode : : kExp ; <nl> case UNOP_FLOOR : <nl> return HloOpcode : : kFloor ; <nl> + case UNOP_IMAG : <nl> + return HloOpcode : : kImag ; <nl> case UNOP_IS_FINITE : <nl> return HloOpcode : : kIsFinite ; <nl> case UNOP_LOG : <nl> HloOpcode UnaryOperationToHloOpcode ( UnaryOperation unop ) { <nl> return HloOpcode : : kNot ; <nl> case UNOP_NEGATE : <nl> return HloOpcode : : kNegate ; <nl> + case UNOP_REAL : <nl> + return HloOpcode : : kReal ; <nl> case UNOP_ROUND_NEAREST_AFZ : <nl> return HloOpcode : : kRoundNearestAfz ; <nl> case UNOP_SIGN : <nl> HloOpcode UnaryOperationToHloOpcode ( UnaryOperation unop ) { <nl> <nl> HloOpcode BinaryOperationToHloOpcode ( BinaryOperation binop ) { <nl> switch ( binop ) { <nl> + case BINOP_ATAN2 : <nl> + return HloOpcode : : kAtan2 ; <nl> + case BINOP_COMPLEX : <nl> + return HloOpcode : : kComplex ; <nl> case BINOP_DOT : <nl> return HloOpcode : : kDot ; <nl> case BINOP_MUL : <nl> mmm a / tensorflow / compiler / xla / shape_util . cc <nl> ppp b / tensorflow / compiler / xla / shape_util . cc <nl> StatusOr < Shape > MakeShapeWithLayoutInternal ( <nl> case U16 : <nl> case U32 : <nl> case U64 : <nl> + case C64 : <nl> case TUPLE : <nl> case OPAQUE : <nl> return false ; <nl> mmm a / tensorflow / compiler / xla / tests / client_library_test_base . h <nl> ppp b / tensorflow / compiler / xla / tests / client_library_test_base . h <nl> void ClientLibraryTestBase : : ComputeAndCompareR2 ( <nl> ComputationBuilder * builder , const Array2D < NativeT > & expected , <nl> tensorflow : : gtl : : ArraySlice < GlobalData * > arguments , ErrorSpec error ) { <nl> static_assert ( std : : is_same < NativeT , float > : : value | | <nl> - std : : is_same < NativeT , double > : : value , <nl> - " Floating point type required when specifying an ErrorSpec " ) ; <nl> + std : : is_same < NativeT , double > : : value | | <nl> + std : : is_same < NativeT , complex64 > : : value , <nl> + " Float or complex type required when specifying an ErrorSpec " ) ; <nl> std : : unique_ptr < Literal > expected_literal = <nl> Literal : : CreateR2FromArray2D < NativeT > ( expected ) ; <nl> ClientLibraryTestBase : : ComputeAndCompareLiteral ( builder , * expected_literal , <nl> void ClientLibraryTestBase : : ComputeAndCompareR3 ( <nl> ComputationBuilder * builder , const Array3D < NativeT > & expected , <nl> tensorflow : : gtl : : ArraySlice < GlobalData * > arguments , ErrorSpec error ) { <nl> static_assert ( std : : is_same < NativeT , float > : : value | | <nl> - std : : is_same < NativeT , double > : : value , <nl> - " Floating point type required when specifying an ErrorSpec " ) ; <nl> + std : : is_same < NativeT , double > : : value | | <nl> + std : : is_same < NativeT , complex64 > : : value , <nl> + " Float or complex type required when specifying an ErrorSpec " ) ; <nl> std : : unique_ptr < Literal > expected_literal = <nl> Literal : : CreateR3FromArray3D < NativeT > ( expected ) ; <nl> ClientLibraryTestBase : : ComputeAndCompareLiteral ( builder , * expected_literal , <nl> void ClientLibraryTestBase : : ComputeAndCompareR4 ( <nl> ComputationBuilder * builder , const Array4D < NativeT > & expected , <nl> tensorflow : : gtl : : ArraySlice < GlobalData * > arguments , ErrorSpec error ) { <nl> static_assert ( std : : is_same < NativeT , float > : : value | | <nl> - std : : is_same < NativeT , double > : : value , <nl> - " Floating point type required when specifying an ErrorSpec " ) ; <nl> + std : : is_same < NativeT , 
double > : : value | | <nl> + std : : is_same < NativeT , complex64 > : : value , <nl> + " Float or complex type required when specifying an ErrorSpec " ) ; <nl> std : : unique_ptr < Literal > expected_literal = <nl> Literal : : CreateR4FromArray4D < NativeT > ( expected ) ; <nl> ClientLibraryTestBase : : ComputeAndCompareLiteral ( builder , * expected_literal , <nl> mmm a / tensorflow / compiler / xla / tests / dot_operation_test . cc <nl> ppp b / tensorflow / compiler / xla / tests / dot_operation_test . cc <nl> XLA_TEST_F ( DotOperationTest , NonsquareMatrixDotF32MajorToMinorTF ) { <nl> TestNonsquareMatrixDot < float > ( kLhsRowMajor , kRhsRowMajor ) ; <nl> } <nl> <nl> - TEST_F ( DotOperationTest , NonsquareMatrixDotF32MajorToMinorTT ) { <nl> + XLA_TEST_F ( DotOperationTest , NonsquareMatrixDotF32MajorToMinorTT ) { <nl> constexpr bool kLhsRowMajor = true ; <nl> constexpr bool kRhsRowMajor = true ; <nl> TestNonsquareMatrixDot < float > ( kLhsRowMajor , kRhsRowMajor ) ; <nl> XLA_TEST_F ( DotOperationTest , NonsquareMatrixDotF64 ) { <nl> TestNonsquareMatrixDot < double > ( ) ; <nl> } <nl> <nl> - TEST_F ( DotOperationTest , ConcurrentMatMul ) { <nl> + XLA_TEST_F ( DotOperationTest , NonsquareMatrixDotC64 ) { <nl> + TestNonsquareMatrixDot < complex64 > ( ) ; <nl> + } <nl> + <nl> + XLA_TEST_F ( DotOperationTest , ConcurrentMatMul ) { <nl> ComputationBuilder builder ( client_ , TestName ( ) ) ; <nl> auto matrix1 = builder . ConstantR2 < float > ( { { 1 . 0 , 2 . 0 } , { 3 . 0 , 4 . 0 } } ) ; <nl> auto matrix2 = builder . ConstantR2 < float > ( { { 5 . 0 , 6 . 0 } , { 7 . 0 , 8 . 0 } } ) ; <nl> mmm a / tensorflow / compiler / xla / tests / local_client_test_base . cc <nl> ppp b / tensorflow / compiler / xla / tests / local_client_test_base . cc <nl> int64 TestAllocator : : deallocation_count ( int device_ordinal ) const { <nl> <nl> / * static * / TestAllocator * LocalClientTestBase : : GetOrCreateAllocator ( <nl> perftools : : gputools : : Platform * platform ) { <nl> + static tensorflow : : mutex mu ( tensorflow : : LINKER_INITIALIZED ) ; <nl> + tensorflow : : mutex_lock lock ( mu ) ; <nl> + <nl> if ( allocator_ = = nullptr ) { <nl> allocator_ = new TestAllocator ( <nl> platform = = nullptr ? PlatformUtil : : GetDefaultPlatform ( ) . ValueOrDie ( ) <nl> mmm a / tensorflow / compiler / xla / tests / local_client_test_base . h <nl> ppp b / tensorflow / compiler / xla / tests / local_client_test_base . h <nl> class LocalClientTestBase : public : : testing : : Test { <nl> return : : testing : : UnitTest : : GetInstance ( ) - > current_test_info ( ) - > name ( ) ; <nl> } <nl> <nl> - / / The allocator must live as long as the service which lives until the end of <nl> - / / the process , so make the allocator static . <nl> + / / The allocator must live as long as the service , which lives until the end <nl> + / / of the process . So make the allocator static . <nl> static TestAllocator * allocator_ ; <nl> <nl> perftools : : gputools : : StreamExecutor * stream_executor_ ; <nl> mmm a / tensorflow / compiler / xla / tests / reduce_test . cc <nl> ppp b / tensorflow / compiler / xla / tests / reduce_test . cc <nl> XLA_TEST_F ( ReduceTest , AddReduce2DScalarToR0 ) { <nl> ComputationBuilder builder ( client_ , TestName ( ) ) ; <nl> auto add = CreateScalarAddComputation ( F32 , & builder ) ; <nl> auto scalar = builder . ConstantR0 < float > ( 42 . 0 ) ; <nl> - auto broacasted = builder . Broadcast ( scalar , { 500 , 500 } ) ; <nl> - builder . Reduce ( broacasted , builder . ConstantR0 < float > ( 0 . 
0f ) , add , { 0 , 1 } ) ; <nl> + auto broadcasted = builder . Broadcast ( scalar , { 500 , 500 } ) ; <nl> + builder . Reduce ( broadcasted , builder . ConstantR0 < float > ( 0 . 0f ) , add , { 0 , 1 } ) ; <nl> <nl> float expected = 42 . 0f * static_cast < float > ( 500 * 500 ) ; <nl> ComputeAndCompareR0 < float > ( & builder , expected , { } , ErrorSpec ( 0 . 0001 ) ) ; <nl> XLA_TEST_F ( ReduceTest , MaxReduce2DScalarToR0 ) { <nl> ComputationBuilder builder ( client_ , TestName ( ) ) ; <nl> auto max = CreateScalarMaxComputation ( F32 , & builder ) ; <nl> auto scalar = builder . ConstantR0 < float > ( 42 . 0 ) ; <nl> - auto broacasted = builder . Broadcast ( scalar , { 500 , 500 } ) ; <nl> - builder . Reduce ( broacasted , builder . ConstantR0 < float > ( 0 . 0f ) , max , { 0 , 1 } ) ; <nl> + auto broadcasted = builder . Broadcast ( scalar , { 500 , 500 } ) ; <nl> + builder . Reduce ( broadcasted , builder . ConstantR0 < float > ( 0 . 0f ) , max , { 0 , 1 } ) ; <nl> <nl> float expected = 42 . 0f ; <nl> ComputeAndCompareR0 < float > ( & builder , expected , { } , ErrorSpec ( 0 . 0001 ) ) ; <nl> mmm a / tensorflow / compiler / xla / tests / unary_op_test . cc <nl> ppp b / tensorflow / compiler / xla / tests / unary_op_test . cc <nl> class UnaryOpTest : public ClientLibraryTestBase { <nl> auto arg = builder . ConstantR1 < T > ( { } ) ; <nl> auto abs = builder . Abs ( arg ) ; <nl> <nl> - ComputeAndCompareR1 < T > ( & builder , { } , { } ) ; <nl> + if ( primitive_util : : NativeToPrimitiveType < T > ( ) = = C64 ) { <nl> + ComputeAndCompareR1 < float > ( & builder , { } , { } ) ; <nl> + } else { <nl> + ComputeAndCompareR1 < T > ( & builder , { } , { } ) ; <nl> + } <nl> } <nl> <nl> template < typename T > <nl> int UnaryOpTest : : inf < int > ( ) { <nl> return 2147483647 ; <nl> } <nl> <nl> + template < > <nl> + void UnaryOpTest : : AbsTestHelper < complex64 > ( ) { <nl> + ComputationBuilder builder ( client_ , TestName ( ) ) ; <nl> + auto arg = builder . ConstantR1 < complex64 > ( { { - 2 , 0 } , <nl> + { 0 , 25 } , <nl> + { 0 , 0 } , <nl> + { - 0 . 3f , 0 . 4f } , <nl> + { 0 , inf < float > ( ) } , <nl> + { - inf < float > ( ) , 0 } } ) ; <nl> + auto abs = builder . Abs ( arg ) ; <nl> + <nl> + std : : unique_ptr < Literal > expected = <nl> + Literal : : CreateR1 < float > ( { 2 , 25 , 0 , 0 . 5 , inf < float > ( ) , inf < float > ( ) } ) ; <nl> + ComputeAndCompareLiteral ( & builder , * expected , { } , ErrorSpec ( 1e - 6f ) ) ; <nl> + } <nl> + <nl> + template < > <nl> + void UnaryOpTest : : SignTestHelper < complex64 > ( ) { <nl> + ComputationBuilder builder ( client_ , TestName ( ) ) ; <nl> + auto arg = builder . ConstantR1 < complex64 > ( <nl> + { { - 2 , 0 } , { 0 , 25 } , { 0 , 0 } , { static_cast < float > ( - 0 . 0 ) , 0 } , { - 1 , 1 } } ) ; <nl> + auto sign = builder . Sign ( arg ) ; <nl> + <nl> + std : : unique_ptr < Literal > expected = Literal : : CreateR1 < complex64 > ( <nl> + { { - 1 , 0 } , { 0 , 1 } , { 0 , 0 } , { 0 , 0 } , { - std : : sqrt ( 0 . 5f ) , std : : sqrt ( 0 . 5f ) } } ) ; <nl> + ComputeAndCompareLiteral ( & builder , * expected , { } , ErrorSpec ( 1e - 6f ) ) ; <nl> + } <nl> + <nl> + template < > <nl> + void UnaryOpTest : : SignAbsTestHelper < complex64 > ( ) { <nl> + ComputationBuilder builder ( client_ , TestName ( ) ) ; <nl> + auto arg = <nl> + builder . ConstantR1 < complex64 > ( { { - 2 , 0 } , { 0 , 25 } , { 0 , 0 } , { - 0 . 4 , 0 . 3 } } ) ; <nl> + auto sign = builder . Sign ( arg ) ; <nl> + auto abs = builder . Abs ( arg ) ; <nl> + builder . Sub ( builder . 
Mul ( sign , builder . ConvertElementType ( abs , C64 ) ) , arg ) ; <nl> + <nl> + std : : unique_ptr < Literal > expected = <nl> + Literal : : CreateR1 < complex64 > ( { 0 , 0 , 0 , 0 } ) ; <nl> + ComputeAndCompareLiteral ( & builder , * expected , { } , ErrorSpec ( 1e - 6f ) ) ; <nl> + } <nl> + <nl> XLA_TEST_F ( UnaryOpTest , AbsTestR1Size0 ) { <nl> AbsSize0TestHelper < int > ( ) ; <nl> AbsSize0TestHelper < float > ( ) ; <nl> + AbsSize0TestHelper < complex64 > ( ) ; <nl> } <nl> <nl> XLA_TEST_F ( UnaryOpTest , AbsTestR1 ) { <nl> AbsTestHelper < int > ( ) ; <nl> AbsTestHelper < float > ( ) ; <nl> + AbsTestHelper < complex64 > ( ) ; <nl> } <nl> <nl> XLA_TEST_F ( UnaryOpTest , AbsTestR0 ) { <nl> XLA_TEST_F ( UnaryOpTest , AbsTestR0 ) { <nl> auto absf = builder . Abs ( argf ) ; <nl> auto argf0 = builder . ConstantR0 < float > ( - 0 . 0f ) ; <nl> auto absf0 = builder . Abs ( argf0 ) ; <nl> - builder . Add ( absf0 , builder . Add ( absf , builder . ConvertElementType ( <nl> - absi , PrimitiveType : : F32 ) ) ) ; <nl> + auto argc = builder . ConstantR0 < complex64 > ( { - 0 . 3f , 0 . 4f } ) ; <nl> + auto absc = builder . Abs ( argc ) ; <nl> + builder . Add ( builder . Add ( absc , absf0 ) , <nl> + builder . Add ( absf , builder . ConvertElementType ( absi , F32 ) ) ) ; <nl> <nl> - ComputeAndCompareR0 < float > ( & builder , 8 . 0f , { } ) ; <nl> + ComputeAndCompareR0 < float > ( & builder , 8 . 5f , { } ) ; <nl> } <nl> <nl> XLA_TEST_F ( UnaryOpTest , SignTestR0 ) { <nl> ComputationBuilder builder ( client_ , TestName ( ) ) ; <nl> auto argi = builder . ConstantR0 < int > ( - 5 ) ; <nl> - auto absi = builder . Sign ( argi ) ; <nl> + auto sgni = builder . Sign ( argi ) ; / / - 1 <nl> auto argf = builder . ConstantR0 < float > ( - 4 . 0f ) ; <nl> - auto absf = builder . Sign ( argf ) ; <nl> + auto sgnf = builder . Sign ( argf ) ; / / - 1 <nl> auto argf0 = builder . ConstantR0 < float > ( - 0 . 0f ) ; <nl> - auto absf0 = builder . Sign ( argf0 ) ; <nl> - builder . Add ( absf0 , builder . Add ( absf , builder . ConvertElementType ( <nl> - absi , PrimitiveType : : F32 ) ) ) ; <nl> - <nl> - ComputeAndCompareR0 < float > ( & builder , - 2 . 0f , { } ) ; <nl> + auto sgnf0 = builder . Sign ( argf0 ) ; / / 0 <nl> + auto argc = builder . ConstantR0 < complex64 > ( { - . 3 , . 4 } ) ; <nl> + auto sgnc = builder . Sign ( argc ) ; / / ( - . 6 , . 8 ) <nl> + builder . Add ( sgnc , builder . ConvertElementType ( <nl> + builder . Add ( builder . Add ( sgnf0 , sgnf ) , <nl> + builder . ConvertElementType ( sgni , F32 ) ) , <nl> + C64 ) ) ; <nl> + <nl> + std : : unique_ptr < Literal > expected = <nl> + Literal : : CreateR0 < complex64 > ( { - 2 . 6f , 0 . 8f } ) ; <nl> + ComputeAndCompareLiteral ( & builder , * expected , { } , ErrorSpec ( 1e - 6f ) ) ; <nl> } <nl> <nl> XLA_TEST_F ( UnaryOpTest , SignTestR1 ) { <nl> SignTestHelper < int > ( ) ; <nl> SignTestHelper < float > ( ) ; <nl> + SignTestHelper < complex64 > ( ) ; <nl> } <nl> <nl> XLA_TEST_F ( UnaryOpTest , SignAbsTestR1 ) { <nl> SignAbsTestHelper < int > ( ) ; <nl> SignAbsTestHelper < float > ( ) ; <nl> + SignAbsTestHelper < complex64 > ( ) ; <nl> } <nl> <nl> XLA_TEST_F ( UnaryOpTest , UnsignedAbsTestR1 ) { <nl> mmm a / tensorflow / compiler / xla / tools / parser / hlo_parser . cc <nl> ppp b / tensorflow / compiler / xla / tools / parser / hlo_parser . 
cc <nl> bool HloParser : : ParseInstruction ( HloComputation : : Builder * builder , <nl> case HloOpcode : : kCopy : <nl> case HloOpcode : : kCos : <nl> case HloOpcode : : kExp : <nl> + case HloOpcode : : kImag : <nl> case HloOpcode : : kIsFinite : <nl> case HloOpcode : : kFloor : <nl> case HloOpcode : : kLog : <nl> case HloOpcode : : kNot : <nl> case HloOpcode : : kNegate : <nl> + case HloOpcode : : kReal : <nl> case HloOpcode : : kSign : <nl> case HloOpcode : : kSin : <nl> case HloOpcode : : kSort : <nl> bool HloParser : : ParseInstruction ( HloComputation : : Builder * builder , <nl> case HloOpcode : : kDivide : <nl> case HloOpcode : : kMultiply : <nl> case HloOpcode : : kSubtract : <nl> + case HloOpcode : : kAtan2 : <nl> + case HloOpcode : : kComplex : <nl> case HloOpcode : : kEq : <nl> case HloOpcode : : kGe : <nl> case HloOpcode : : kGt : <nl> mmm a / tensorflow / compiler / xla / types . h <nl> ppp b / tensorflow / compiler / xla / types . h <nl> limitations under the License . <nl> # ifndef TENSORFLOW_COMPILER_XLA_TYPES_H_ <nl> # define TENSORFLOW_COMPILER_XLA_TYPES_H_ <nl> <nl> + # include < complex > <nl> + <nl> # include " third_party / eigen3 / Eigen / Core " <nl> # include " tensorflow / core / platform / types . h " <nl> <nl> using : : tensorflow : : uint16 ; <nl> using : : tensorflow : : uint32 ; <nl> using : : tensorflow : : uint64 ; <nl> <nl> - typedef std : : complex < float > complex64 ; <nl> + using complex64 = std : : complex < float > ; <nl> <nl> using : : Eigen : : half ; <nl> <nl> mmm a / tensorflow / compiler / xla / xla_data . proto <nl> ppp b / tensorflow / compiler / xla / xla_data . proto <nl> enum PrimitiveType { <nl> F64 = 12 ; <nl> <nl> / / Complex values of fixed width . <nl> - C64 = 15 ; <nl> + C64 = 15 ; / / Paired F32 ( real , imag ) , as in std : : complex < float > . <nl> <nl> / / A tuple is a polymorphic sequence ; e . g . a shape that holds different <nl> / / sub - shapes . They are used for things like returning multiple values from a <nl> enum UnaryOperation { <nl> / / Elementwise , rounds x to nearest integral value , rounding half - way cases <nl> / / away from zero . <nl> UNOP_ROUND_NEAREST_AFZ = 14 ; <nl> + <nl> + / / Elementwise , extract real component of complex x . <nl> + UNOP_REAL = 15 ; <nl> + <nl> + / / Elementwise , extract real component of complex x . <nl> + UNOP_IMAG = 16 ; <nl> } <nl> <nl> message UnaryOpRequest { <nl> enum BinaryOperation { <nl> BINOP_SHIFT_LEFT = 20 ; <nl> BINOP_SHIFT_RIGHT_ARITHMETIC = 21 ; <nl> BINOP_SHIFT_RIGHT_LOGICAL = 22 ; <nl> + <nl> + / / Complex from real , imag . <nl> + BINOP_COMPLEX = 23 ; <nl> + <nl> + / / Computes the 4 - quadrant arctangent of the y , x input arguments . <nl> + BINOP_ATAN2 = 24 ; <nl> } <nl> <nl> message BinaryOpRequest { <nl> mmm a / tensorflow / contrib / BUILD <nl> ppp b / tensorflow / contrib / BUILD <nl> py_library ( <nl> " / / tensorflow / contrib / tfprof " , <nl> " / / tensorflow / contrib / timeseries " , <nl> " / / tensorflow / contrib / tpu " , <nl> + " / / tensorflow / contrib / tpu : tpu_py " , <nl> " / / tensorflow / contrib / training : training_py " , <nl> " / / tensorflow / contrib / util : util_py " , <nl> + " / / tensorflow / python : util " , <nl> ] + if_mpi ( [ " / / tensorflow / contrib / mpi_collectives : mpi_ops_py " ] ) , <nl> ) <nl> <nl> mmm a / tensorflow / contrib / __init__ . py <nl> ppp b / tensorflow / contrib / __init__ . py <nl> <nl> from tensorflow . contrib import tpu <nl> from tensorflow . contrib import training <nl> from tensorflow . 
contrib import util <nl> + from tensorflow . contrib . eager . python import tfe as eager <nl> from tensorflow . contrib . ndlstm import python as ndlstm <nl> from tensorflow . contrib . remote_fused_graph import pylib as remote_fused_graph <nl> from tensorflow . contrib . specs import python as specs <nl> mmm a / tensorflow / contrib / all_reduce / BUILD <nl> ppp b / tensorflow / contrib / all_reduce / BUILD <nl> py_library ( <nl> srcs_version = " PY2AND3 " , <nl> visibility = [ " / / visibility : public " ] , <nl> deps = [ <nl> - " / / tensorflow / contrib / nccl : nccl_ops " , <nl> + " / / tensorflow / contrib / nccl : nccl_py " , <nl> " / / tensorflow / python : array_ops " , <nl> " / / tensorflow / python : framework_ops " , <nl> + " / / tensorflow / python : math_ops " , <nl> ] , <nl> ) <nl> <nl> mmm a / tensorflow / contrib / bayesflow / BUILD <nl> ppp b / tensorflow / contrib / bayesflow / BUILD <nl> py_library ( <nl> " / / tensorflow / python : array_ops " , <nl> " / / tensorflow / python : check_ops " , <nl> " / / tensorflow / python : control_flow_ops " , <nl> - " / / tensorflow / python : framework " , <nl> " / / tensorflow / python : framework_for_generated_wrappers " , <nl> + " / / tensorflow / python : functional_ops " , <nl> + " / / tensorflow / python : gradients " , <nl> " / / tensorflow / python : math_ops " , <nl> " / / tensorflow / python : nn " , <nl> " / / tensorflow / python : nn_ops " , <nl> py_library ( <nl> " / / tensorflow / python : training " , <nl> " / / tensorflow / python : util " , <nl> " / / tensorflow / python : variable_scope " , <nl> - " / / tensorflow / python : variables " , <nl> " / / tensorflow / python / ops / distributions " , <nl> " / / third_party / py / numpy " , <nl> " @ six_archive / / : six " , <nl> mmm a / tensorflow / contrib / boosted_trees / estimator_batch / BUILD <nl> ppp b / tensorflow / contrib / boosted_trees / estimator_batch / BUILD <nl> py_library ( <nl> srcs_version = " PY2AND3 " , <nl> deps = [ <nl> " : model " , <nl> + " / / tensorflow / contrib / boosted_trees : losses " , <nl> " / / tensorflow / contrib / learn " , <nl> + " / / tensorflow / python : math_ops " , <nl> ] , <nl> ) <nl> mmm a / tensorflow / contrib / cmake / tf_python . cmake <nl> ppp b / tensorflow / contrib / cmake / tf_python . cmake <nl> add_python_module ( " tensorflow / contrib / distributions / python " ) <nl> add_python_module ( " tensorflow / contrib / distributions / python / kernel_tests " ) <nl> add_python_module ( " tensorflow / contrib / distributions / python / ops " ) <nl> add_python_module ( " tensorflow / contrib / distributions / python / ops / bijectors " ) <nl> + add_python_module ( " tensorflow / contrib / eager " ) <nl> + add_python_module ( " tensorflow / contrib / eager / python " ) <nl> add_python_module ( " tensorflow / contrib / estimator " ) <nl> add_python_module ( " tensorflow / contrib / estimator / python " ) <nl> add_python_module ( " tensorflow / contrib / estimator / python / estimator " ) <nl> mmm a / tensorflow / contrib / data / __init__ . py <nl> ppp b / tensorflow / contrib / data / __init__ . py <nl> <nl> from tensorflow . contrib . data . python . ops . enumerate_ops import enumerate_dataset <nl> from tensorflow . contrib . data . python . ops . error_ops import ignore_errors <nl> from tensorflow . contrib . data . python . ops . grouping import group_by_window <nl> + from tensorflow . contrib . data . python . ops . interleave_ops import sloppy_interleave <nl> from tensorflow . contrib . data . python . ops . 
iterator_ops import make_saveable_from_iterator <nl> from tensorflow . contrib . data . python . ops . readers import FixedLengthRecordDataset <nl> from tensorflow . contrib . data . python . ops . readers import read_batch_features <nl> <nl> from tensorflow . contrib . data . python . ops . readers import TextLineDataset <nl> from tensorflow . contrib . data . python . ops . readers import TFRecordDataset <nl> from tensorflow . contrib . data . python . ops . resampling import rejection_resample <nl> - from tensorflow . contrib . data . python . ops . sloppy_ops import sloppy_interleave <nl> from tensorflow . python . data . ops . iterator_ops import Iterator <nl> # pylint : enable = unused - import <nl> <nl> mmm a / tensorflow / contrib / data / python / kernel_tests / BUILD <nl> ppp b / tensorflow / contrib / data / python / kernel_tests / BUILD <nl> py_test ( <nl> ] , <nl> ) <nl> <nl> + py_test ( <nl> + name = " interleave_dataset_op_test " , <nl> + size = " small " , <nl> + srcs = [ " interleave_dataset_op_test . py " ] , <nl> + srcs_version = " PY2AND3 " , <nl> + tags = [ <nl> + " manual " , # b / 67958761 <nl> + ] , <nl> + deps = [ <nl> + " / / tensorflow / contrib / data / python / ops : dataset_ops " , <nl> + " / / tensorflow / contrib / data / python / ops : transformation_ops " , <nl> + " / / tensorflow / python : array_ops " , <nl> + " / / tensorflow / python : client " , <nl> + " / / tensorflow / python : client_testlib " , <nl> + " / / tensorflow / python : dtypes " , <nl> + " / / tensorflow / python : errors " , <nl> + " / / tensorflow / python : math_ops " , <nl> + " / / tensorflow / python : script_ops " , <nl> + " / / tensorflow / python : training " , <nl> + " / / third_party / py / numpy " , <nl> + ] , <nl> + ) <nl> + <nl> py_test ( <nl> name = " iterator_ops_cluster_test " , <nl> size = " small " , <nl> py_test ( <nl> srcs = [ " reader_dataset_ops_test . py " ] , <nl> srcs_version = " PY2AND3 " , <nl> deps = [ <nl> + " / / tensorflow / contrib / data / python / ops : iterator_ops " , <nl> " / / tensorflow / contrib / data / python / ops : readers " , <nl> " / / tensorflow / core : protos_all_py " , <nl> " / / tensorflow / python : array_ops " , <nl> py_test ( <nl> " / / tensorflow / python : lib " , <nl> " / / tensorflow / python : parsing_ops " , <nl> " / / tensorflow / python : tensor_shape " , <nl> + " / / tensorflow / python : training " , <nl> " / / tensorflow / python : util " , <nl> " / / tensorflow / python / data / ops : iterator_ops " , <nl> ] , <nl> py_test ( <nl> " / / tensorflow / contrib / data / python / ops : transformation_ops " , <nl> " / / tensorflow / python : client_testlib " , <nl> " / / tensorflow / python : errors " , <nl> - " / / tensorflow / python : framework_ops " , <nl> " / / tensorflow / python : string_ops " , <nl> - " / / tensorflow / python : training " , <nl> " / / tensorflow / python : util " , <nl> - " / / tensorflow / python : variables " , <nl> " / / third_party / py / numpy " , <nl> ] , <nl> ) <nl> py_test ( <nl> ] , <nl> ) <nl> <nl> - py_test ( <nl> - name = " sloppy_transformation_dataset_op_test " , <nl> - size = " small " , <nl> - srcs = [ " sloppy_transformation_dataset_op_test . 
py " ] , <nl> - srcs_version = " PY2AND3 " , <nl> - tags = [ <nl> - " manual " , # b / 67958761 <nl> - ] , <nl> - deps = [ <nl> - " / / tensorflow / contrib / data / python / ops : dataset_ops " , <nl> - " / / tensorflow / contrib / data / python / ops : transformation_ops " , <nl> - " / / tensorflow / python : array_ops " , <nl> - " / / tensorflow / python : client " , <nl> - " / / tensorflow / python : client_testlib " , <nl> - " / / tensorflow / python : dtypes " , <nl> - " / / tensorflow / python : errors " , <nl> - " / / tensorflow / python : math_ops " , <nl> - " / / tensorflow / python : script_ops " , <nl> - " / / tensorflow / python : training " , <nl> - " / / third_party / py / numpy " , <nl> - ] , <nl> - ) <nl> - <nl> py_test ( <nl> name = " sql_dataset_op_test " , <nl> size = " small " , <nl> similarity index 84 % <nl> rename from tensorflow / contrib / data / python / kernel_tests / sloppy_transformation_dataset_op_test . py <nl> rename to tensorflow / contrib / data / python / kernel_tests / interleave_dataset_op_test . py <nl> mmm a / tensorflow / contrib / data / python / kernel_tests / sloppy_transformation_dataset_op_test . py <nl> ppp b / tensorflow / contrib / data / python / kernel_tests / interleave_dataset_op_test . py <nl> <nl> from six . moves import zip_longest <nl> <nl> from tensorflow . contrib . data . python . ops import dataset_ops <nl> - from tensorflow . contrib . data . python . ops import sloppy_ops <nl> + from tensorflow . contrib . data . python . ops import interleave_ops <nl> from tensorflow . python . framework import dtypes <nl> from tensorflow . python . framework import errors <nl> from tensorflow . python . ops import array_ops <nl> <nl> from tensorflow . python . platform import test <nl> <nl> <nl> - class SloppyInterleaveDatasetTest ( test . TestCase ) : <nl> + class ParallelInterleaveDatasetTest ( test . TestCase ) : <nl> <nl> def setUp ( self ) : <nl> self . input_values = array_ops . placeholder ( dtypes . int64 , shape = [ None ] ) <nl> self . cycle_length = array_ops . placeholder ( dtypes . int64 , shape = [ ] ) <nl> self . block_length = array_ops . placeholder ( dtypes . int64 , shape = [ ] ) <nl> + self . sloppy = array_ops . placeholder ( dtypes . bool , shape = [ ] ) <nl> <nl> self . repeat_count = 2 <nl> <nl> def interleave_fn ( x ) : <nl> <nl> self . dataset = ( dataset_ops . Dataset . from_tensor_slices ( self . input_values ) <nl> . repeat ( self . repeat_count ) . apply ( <nl> - sloppy_ops . sloppy_interleave ( <nl> + interleave_ops . parallel_interleave ( <nl> interleave_fn , self . cycle_length , <nl> - self . block_length ) ) ) <nl> + self . block_length , self . sloppy ) ) ) <nl> self . iterator = self . dataset . make_initializable_iterator ( ) <nl> self . init_op = self . iterator . initializer <nl> self . next_element = self . iterator . get_next ( ) <nl> def _allow_all_map_threads ( self ) : <nl> for i in range ( 4 , 7 ) : <nl> self . write_coordination_events [ i ] . set ( ) <nl> <nl> - def testSingleThreaded ( self ) : <nl> + def _testSingleThreaded ( self , sloppy = False ) : <nl> # cycle_length = 1 , block_length = 1 acts like ` Dataset . interleave ( ) ` and <nl> # ` Dataset . flat_map ( ) ` and is single - threaded . No synchronization required . <nl> with self . test_session ( ) as sess : <nl> def testSingleThreaded ( self ) : <nl> feed_dict = { <nl> self . input_values : [ 4 , 5 , 6 ] , <nl> self . cycle_length : 1 , <nl> - self . block_length : 1 <nl> + self . block_length : 1 , <nl> + self . 
sloppy : sloppy <nl> } ) <nl> <nl> for expected_element in self . _interleave ( <nl> def testSingleThreaded ( self ) : <nl> with self . assertRaises ( errors . OutOfRangeError ) : <nl> sess . run ( self . next_element ) <nl> <nl> - def testTwoThreadsNoContention ( self ) : <nl> + def testSingleThreaded ( self ) : <nl> + self . _testSingleThreaded ( ) <nl> + <nl> + def testSingleThreadedSloppy ( self ) : <nl> + self . _testSingleThreaded ( sloppy = True ) <nl> + <nl> + def _testTwoThreadsNoContention ( self , sloppy = False ) : <nl> # num_threads > 1 . <nl> # Explicit coordination should result in ` Dataset . interleave ( ) ` behavior <nl> with self . test_session ( ) as sess : <nl> def testTwoThreadsNoContention ( self ) : <nl> feed_dict = { <nl> self . input_values : [ 4 , 5 , 6 ] , <nl> self . cycle_length : 2 , <nl> - self . block_length : 1 <nl> + self . block_length : 1 , <nl> + self . sloppy : sloppy <nl> } ) <nl> for i , expected_element in enumerate ( <nl> self . _interleave ( [ [ 4 ] * 4 , [ 5 ] * 5 , [ 6 ] * 6 ] * self . repeat_count , 2 , <nl> def testTwoThreadsNoContention ( self ) : <nl> with self . assertRaises ( errors . OutOfRangeError ) : <nl> sess . run ( self . next_element ) <nl> <nl> - def testTwoThreadsNoContentionWithRaces ( self ) : <nl> + def testTwoThreadsNoContention ( self ) : <nl> + self . _testTwoThreadsNoContention ( ) <nl> + <nl> + def testTwoThreadsNoContentionSloppy ( self ) : <nl> + self . _testTwoThreadsNoContention ( sloppy = True ) <nl> + <nl> + def _testTwoThreadsNoContentionWithRaces ( self , sloppy = False ) : <nl> " " " Tests where all the workers race in producing elements . <nl> <nl> Note : this is in contrast with the prevous test which carefully sequences <nl> the execution of the map functions . <nl> + <nl> + Args : <nl> + sloppy : Whether to be sloppy or not . <nl> " " " <nl> with self . test_session ( ) as sess : <nl> self . _clear_coordination_events ( ) <nl> def testTwoThreadsNoContentionWithRaces ( self ) : <nl> feed_dict = { <nl> self . input_values : [ 4 , 5 , 6 ] , <nl> self . cycle_length : 2 , <nl> - self . block_length : 1 <nl> + self . block_length : 1 , <nl> + self . sloppy : sloppy , <nl> } ) <nl> for i , expected_element in enumerate ( <nl> self . _interleave ( [ [ 4 ] * 4 , [ 5 ] * 5 , [ 6 ] * 6 ] * self . repeat_count , 2 , <nl> def testTwoThreadsNoContentionWithRaces ( self ) : <nl> with self . assertRaises ( errors . OutOfRangeError ) : <nl> sess . run ( self . next_element ) <nl> <nl> - def testTwoThreadsNoContentionBlockLength ( self ) : <nl> + def testTwoThreadsNoContentionWithRaces ( self ) : <nl> + self . _testTwoThreadsNoContentionWithRaces ( ) <nl> + <nl> + def testTwoThreadsNoContentionWithRacesSloppy ( self ) : <nl> + self . _testTwoThreadsNoContentionWithRaces ( sloppy = True ) <nl> + <nl> + def _testTwoThreadsNoContentionBlockLength ( self , sloppy = False ) : <nl> # num_threads > 1 . <nl> # Explicit coordination should result in ` Dataset . interleave ( ) ` behavior <nl> with self . test_session ( ) as sess : <nl> def testTwoThreadsNoContentionBlockLength ( self ) : <nl> feed_dict = { <nl> self . input_values : [ 4 , 5 , 6 ] , <nl> self . cycle_length : 2 , <nl> - self . block_length : 2 <nl> + self . block_length : 2 , <nl> + self . sloppy : sloppy <nl> } ) <nl> for i , expected_element in enumerate ( <nl> self . _interleave ( [ [ 4 ] * 4 , [ 5 ] * 5 , [ 6 ] * 6 ] * self . repeat_count , 2 , <nl> def testTwoThreadsNoContentionBlockLength ( self ) : <nl> with self . assertRaises ( errors . 
OutOfRangeError ) : <nl> sess . run ( self . next_element ) <nl> <nl> - def testTwoThreadsNoContentionWithRacesAndBlocking ( self ) : <nl> + def testTwoThreadsNoContentionBlockLength ( self ) : <nl> + self . _testTwoThreadsNoContentionBlockLength ( ) <nl> + <nl> + def testTwoThreadsNoContentionBlockLengthSloppy ( self ) : <nl> + self . _testTwoThreadsNoContentionBlockLength ( sloppy = True ) <nl> + <nl> + def _testTwoThreadsNoContentionWithRacesAndBlocking ( self , sloppy = False ) : <nl> " " " Tests where all the workers race in producing elements . <nl> <nl> Note : this is in contrast with the prevous test which carefully sequences <nl> the execution of the map functions . <nl> + <nl> + <nl> + Args : <nl> + sloppy : Whether to be sloppy or not . <nl> " " " <nl> with self . test_session ( ) as sess : <nl> self . _clear_coordination_events ( ) <nl> def testTwoThreadsNoContentionWithRacesAndBlocking ( self ) : <nl> feed_dict = { <nl> self . input_values : [ 4 , 5 , 6 ] , <nl> self . cycle_length : 2 , <nl> - self . block_length : 2 <nl> + self . block_length : 2 , <nl> + self . sloppy : sloppy <nl> } ) <nl> for i , expected_element in enumerate ( <nl> self . _interleave ( [ [ 4 ] * 4 , [ 5 ] * 5 , [ 6 ] * 6 ] * self . repeat_count , 2 , <nl> def testTwoThreadsNoContentionWithRacesAndBlocking ( self ) : <nl> with self . assertRaises ( errors . OutOfRangeError ) : <nl> sess . run ( self . next_element ) <nl> <nl> - def testEmptyInput ( self ) : <nl> + def testTwoThreadsNoContentionWithRacesAndBlocking ( self ) : <nl> + self . _testTwoThreadsNoContentionWithRacesAndBlocking ( ) <nl> + <nl> + def testTwoThreadsNoContentionWithRacesAndBlockingSloppy ( self ) : <nl> + self . _testTwoThreadsNoContentionWithRacesAndBlocking ( sloppy = True ) <nl> + <nl> + def _testEmptyInput ( self , sloppy = False ) : <nl> with self . test_session ( ) as sess : <nl> # Empty input . <nl> self . _clear_coordination_events ( ) <nl> def testEmptyInput ( self ) : <nl> feed_dict = { <nl> self . input_values : [ ] , <nl> self . cycle_length : 2 , <nl> - self . block_length : 3 <nl> + self . block_length : 3 , <nl> + self . sloppy : sloppy <nl> } ) <nl> with self . assertRaises ( errors . OutOfRangeError ) : <nl> sess . run ( self . next_element ) <nl> <nl> - def testNonEmptyInputIntoEmptyOutputs ( self ) : <nl> + def testEmptyInput ( self ) : <nl> + self . _testEmptyInput ( ) <nl> + <nl> + def testEmptyInputSloppy ( self ) : <nl> + self . _testEmptyInput ( sloppy = True ) <nl> + <nl> + def _testNonEmptyInputIntoEmptyOutputs ( self , sloppy = False ) : <nl> # Non - empty input leading to empty output . <nl> with self . test_session ( ) as sess : <nl> self . _clear_coordination_events ( ) <nl> def testNonEmptyInputIntoEmptyOutputs ( self ) : <nl> feed_dict = { <nl> self . input_values : [ 0 , 0 , 0 ] , <nl> self . cycle_length : 2 , <nl> - self . block_length : 3 <nl> + self . block_length : 3 , <nl> + self . sloppy : sloppy <nl> } ) <nl> with self . assertRaises ( errors . OutOfRangeError ) : <nl> sess . run ( self . next_element ) <nl> <nl> - def testPartiallyEmptyOutputs ( self ) : <nl> + def testNonEmptyInputIntoEmptyOutputs ( self ) : <nl> + self . _testNonEmptyInputIntoEmptyOutputs ( ) <nl> + <nl> + def testNonEmptyInputIntoEmptyOutputsSloppy ( self ) : <nl> + self . _testNonEmptyInputIntoEmptyOutputs ( sloppy = True ) <nl> + <nl> + def _testPartiallyEmptyOutputs ( self , sloppy = False ) : <nl> # Mixture of non - empty and empty interleaved datasets . <nl> with self . test_session ( ) as sess : <nl> self . 
_clear_coordination_events ( ) <nl> def testPartiallyEmptyOutputs ( self ) : <nl> feed_dict = { <nl> self . input_values : [ 4 , 0 , 6 ] , <nl> self . cycle_length : 2 , <nl> - self . block_length : 1 <nl> + self . block_length : 1 , <nl> + self . sloppy : sloppy , <nl> } ) <nl> for i , expected_element in enumerate ( <nl> self . _interleave ( [ [ 4 ] * 4 , [ ] , [ 6 ] * 6 ] * self . repeat_count , 2 , 1 ) ) : <nl> def testPartiallyEmptyOutputs ( self ) : <nl> with self . assertRaises ( errors . OutOfRangeError ) : <nl> sess . run ( self . next_element ) <nl> <nl> - def testDelayedOutput ( self ) : <nl> + def testPartiallyEmptyOutputs ( self ) : <nl> + self . _testPartiallyEmptyOutputs ( ) <nl> + <nl> + def testPartiallyEmptyOutputsSloppy ( self ) : <nl> + self . _testPartiallyEmptyOutputs ( sloppy = True ) <nl> + <nl> + def testDelayedOutputSloppy ( self ) : <nl> # Explicitly control the sequence of events to ensure we correctly avoid <nl> # head - of - line blocking . <nl> with self . test_session ( ) as sess : <nl> def testDelayedOutput ( self ) : <nl> feed_dict = { <nl> self . input_values : [ 4 , 5 , 6 ] , <nl> self . cycle_length : 2 , <nl> - self . block_length : 1 <nl> + self . block_length : 1 , <nl> + self . sloppy : True , <nl> } ) <nl> <nl> mis_ordering = [ <nl> def testDelayedOutput ( self ) : <nl> with self . assertRaises ( errors . OutOfRangeError ) : <nl> sess . run ( self . next_element ) <nl> <nl> - def testBlockLengthWithContention ( self ) : <nl> + def testBlockLengthWithContentionSloppy ( self ) : <nl> with self . test_session ( ) as sess : <nl> self . _clear_coordination_events ( ) <nl> done_first_event = False <nl> def testBlockLengthWithContention ( self ) : <nl> feed_dict = { <nl> self . input_values : [ 4 , 5 , 6 ] , <nl> self . cycle_length : 2 , <nl> - self . block_length : 3 <nl> + self . block_length : 3 , <nl> + self . sloppy : True <nl> } ) <nl> # Test against a generating sequence that differs from the uncontended <nl> # case , in order to prove sloppy correctness . <nl> def testBlockLengthWithContention ( self ) : <nl> with self . assertRaises ( errors . OutOfRangeError ) : <nl> sess . run ( self . next_element ) <nl> <nl> - def testEarlyExit ( self ) : <nl> + def _testEarlyExit ( self , sloppy = False ) : <nl> # Exiting without consuming all input should not block <nl> with self . test_session ( ) as sess : <nl> self . _clear_coordination_events ( ) <nl> def testEarlyExit ( self ) : <nl> feed_dict = { <nl> self . input_values : [ 4 , 5 , 6 ] , <nl> self . cycle_length : 3 , <nl> - self . block_length : 2 <nl> + self . block_length : 2 , <nl> + self . sloppy : sloppy <nl> } ) <nl> for i in range ( 4 , 7 ) : <nl> self . write_coordination_events [ i ] . set ( ) <nl> def testEarlyExit ( self ) : <nl> self . read_coordination_events [ i ] . acquire ( ) <nl> self . write_coordination_events [ i ] . set ( ) <nl> <nl> - def testTooManyReaders ( self ) : <nl> + def testEarlyExit ( self ) : <nl> + self . _testEarlyExit ( ) <nl> + <nl> + def testEarlyExitSloppy ( self ) : <nl> + self . _testEarlyExit ( sloppy = True ) <nl> + <nl> + def _testTooManyReaders ( self , sloppy = False ) : <nl> <nl> def interleave_fn ( x ) : <nl> dataset = dataset_ops . Dataset . from_tensors ( x ) <nl> def interleave_fn ( x ) : <nl> dataset = dataset_ops . Dataset . from_tensor_slices ( [ 4 , 5 , 6 ] ) <nl> dataset = dataset . repeat ( self . repeat_count ) <nl> dataset = dataset . apply ( <nl> - sloppy_ops . 
sloppy_interleave ( interleave_fn , cycle_length = 16 , <nl> - block_length = 2 ) ) <nl> + interleave_ops . parallel_interleave ( <nl> + interleave_fn , cycle_length = 16 , block_length = 2 , sloppy = sloppy ) ) <nl> iterator = dataset . make_one_shot_iterator ( ) <nl> <nl> with self . test_session ( ) as sess : <nl> def interleave_fn ( x ) : <nl> [ [ 4 ] * 4 , [ 5 ] * 5 , [ 6 ] * 6 ] * self . repeat_count , 1 , 2 ) <nl> self . assertItemsEqual ( output_values , expected_values ) <nl> <nl> + def testTooManyReaders ( self ) : <nl> + self . _testTooManyReaders ( ) <nl> + <nl> + def testTooManyReadersSloppy ( self ) : <nl> + self . _testTooManyReaders ( sloppy = True ) <nl> <nl> if __name__ = = " __main__ " : <nl> test . main ( ) <nl> mmm a / tensorflow / contrib / data / python / kernel_tests / reader_dataset_ops_test . py <nl> ppp b / tensorflow / contrib / data / python / kernel_tests / reader_dataset_ops_test . py <nl> <nl> import os <nl> import zlib <nl> <nl> + from tensorflow . contrib . data . python . ops import iterator_ops as contrib_iterator_ops <nl> from tensorflow . contrib . data . python . ops import readers <nl> from tensorflow . core . example import example_pb2 <nl> from tensorflow . core . example import feature_pb2 <nl> <nl> from tensorflow . python . ops import io_ops <nl> from tensorflow . python . ops import parsing_ops <nl> from tensorflow . python . platform import test <nl> + from tensorflow . python . training import saver as saver_lib <nl> from tensorflow . python . util import compat <nl> <nl> <nl> def testTextLineDatasetBuffering ( self ) : <nl> with self . assertRaises ( errors . OutOfRangeError ) : <nl> sess . run ( iterator . get_next ( ) ) <nl> <nl> + def _ckpt_path ( self ) : <nl> + return os . path . join ( self . get_temp_dir ( ) , " iterator " ) <nl> + <nl> + def _latest_ckpt ( self ) : <nl> + return saver_lib . latest_checkpoint ( self . get_temp_dir ( ) ) <nl> + <nl> + def _save ( self , saver , sess ) : <nl> + saver . save ( sess , self . _ckpt_path ( ) ) <nl> + <nl> + def _restore ( self , saver , sess ) : <nl> + saver . restore ( sess , self . _latest_ckpt ( ) ) <nl> + <nl> + def _import_meta_graph ( self ) : <nl> + meta_file_path = self . _ckpt_path ( ) + " . meta " <nl> + return saver_lib . import_meta_graph ( meta_file_path ) <nl> + <nl> + def _build_graph ( self , <nl> + test_filenames , <nl> + compression_type = None , <nl> + build_saveable = True ) : <nl> + ds = readers . TextLineDataset ( <nl> + test_filenames , compression_type = compression_type , buffer_size = 10 ) <nl> + iterator = ds . make_initializable_iterator ( ) <nl> + if build_saveable : <nl> + saveable = contrib_iterator_ops . make_saveable_from_iterator ( iterator ) <nl> + ops . add_to_collection ( ops . GraphKeys . SAVEABLE_OBJECTS , saveable ) <nl> + init_op = iterator . initializer <nl> + get_next = iterator . get_next ( ) <nl> + ops . add_to_collection ( " iterator_ops " , init_op ) <nl> + ops . add_to_collection ( " iterator_ops " , get_next ) <nl> + saver = saver_lib . Saver ( allow_empty = True ) <nl> + return init_op , get_next , saver <nl> + <nl> + def _testReadWithBreaks ( self , breaks , num_files = 5 , lines_per_file = 5 ) : <nl> + " " " Tests reading from input pipeline with regular breaks . <nl> + <nl> + At each break point the iterator state gets saved using Saver and reloaded <nl> + in a new Graph and session . <nl> + <nl> + Args : <nl> + breaks : List of counts of records after reading which iterator state is <nl> + checkpointed . 
Must be in non - decreasing order . <nl> + num_files : Total number of files . <nl> + lines_per_file : Total number of lines per file . <nl> + " " " <nl> + compression_types = [ None , " GZIP " , " ZLIB " ] <nl> + for compression_type in compression_types : <nl> + test_filenames = self . _createFiles ( <nl> + num_files , <nl> + lines_per_file , <nl> + crlf = True , <nl> + compression_type = compression_type ) <nl> + <nl> + # Collect ground truth . <nl> + total_records = num_files * lines_per_file <nl> + expected_records = [ ] <nl> + with ops . Graph ( ) . as_default ( ) as g : <nl> + init_op , get_next , saver = self . _build_graph ( <nl> + test_filenames , compression_type = compression_type ) <nl> + with self . test_session ( graph = g ) as sess : <nl> + sess . run ( init_op ) <nl> + for _ in range ( total_records ) : <nl> + expected_records . append ( sess . run ( get_next ) ) <nl> + with self . assertRaises ( errors . OutOfRangeError ) : <nl> + sess . run ( get_next ) <nl> + <nl> + # Simulate run with breaks . <nl> + actual_records = [ ] <nl> + next_record_index = 0 <nl> + load_from_ckpt = False <nl> + breaks . append ( total_records ) <nl> + for break_index in breaks : <nl> + with ops . Graph ( ) . as_default ( ) as g : <nl> + if not load_from_ckpt : <nl> + init_op , get_next , saver = self . _build_graph ( <nl> + test_filenames , compression_type = compression_type ) <nl> + else : <nl> + saver = self . _import_meta_graph ( ) <nl> + init_op , get_next = ops . get_collection ( " iterator_ops " ) <nl> + <nl> + with self . test_session ( graph = g ) as sess : <nl> + if not load_from_ckpt : <nl> + sess . run ( init_op ) <nl> + else : <nl> + self . _restore ( saver , sess ) <nl> + while next_record_index ! = break_index : <nl> + actual_records . append ( sess . run ( get_next ) ) <nl> + next_record_index + = 1 <nl> + if break_index = = total_records : <nl> + with self . assertRaises ( errors . OutOfRangeError ) : <nl> + sess . run ( get_next ) <nl> + self . _save ( saver , sess ) <nl> + load_from_ckpt = True <nl> + self . assertEqual ( actual_records , expected_records ) <nl> + <nl> + def testSaveAtFileBoundary ( self ) : <nl> + self . _testReadWithBreaks ( [ 10 ] ) <nl> + <nl> + def testSaveWithinFile ( self ) : <nl> + self . _testReadWithBreaks ( [ 12 ] ) <nl> + <nl> + def testSaveUnusedIterator ( self ) : <nl> + self . _testReadWithBreaks ( [ 0 ] ) <nl> + <nl> + def testSaveRestoreIdempotence ( self ) : <nl> + # Attempt to save an iterator immediately after it has been <nl> + # restored . <nl> + self . _testReadWithBreaks ( [ 0 , 0 ] ) <nl> + self . _testReadWithBreaks ( [ 10 , 10 ] ) <nl> + self . _testReadWithBreaks ( [ 12 , 12 ] ) <nl> + <nl> + def testMultipleBreaks ( self ) : <nl> + self . _testReadWithBreaks ( [ 0 , 4 , 20 ] ) <nl> + <nl> + def testRestoreExhaustedIterator ( self ) : <nl> + num_files = 2 <nl> + lines_per_file = 5 <nl> + test_filenames = self . _createFiles ( num_files , lines_per_file , crlf = True ) <nl> + <nl> + with ops . Graph ( ) . as_default ( ) as g : <nl> + init_op , get_next , saver = self . _build_graph ( test_filenames ) <nl> + with self . test_session ( graph = g ) as sess : <nl> + sess . run ( init_op ) <nl> + for _ in range ( num_files * lines_per_file ) : <nl> + sess . run ( get_next ) <nl> + with self . assertRaises ( errors . OutOfRangeError ) : <nl> + sess . run ( get_next ) <nl> + self . _save ( saver , sess ) <nl> + <nl> + with ops . Graph ( ) . as_default ( ) as g : <nl> + with self . test_session ( graph = g ) as sess : <nl> + saver = self . 
_import_meta_graph ( ) <nl> + self . _restore ( saver , sess ) <nl> + _ , get_next = ops . get_collection ( " iterator_ops " ) <nl> + with self . assertRaises ( errors . OutOfRangeError ) : <nl> + sess . run ( get_next ) <nl> + <nl> + def testInitThenRestore ( self ) : <nl> + num_files = 5 <nl> + lines_per_file = 5 <nl> + total_records = num_files * lines_per_file <nl> + break_record = 8 <nl> + test_filenames = self . _createFiles ( num_files , lines_per_file , crlf = True ) <nl> + <nl> + expected_records = [ ] <nl> + with ops . Graph ( ) . as_default ( ) as g : <nl> + init_op , get_next , saver = self . _build_graph ( test_filenames ) <nl> + with self . test_session ( graph = g ) as sess : <nl> + sess . run ( init_op ) <nl> + for _ in range ( break_record ) : <nl> + sess . run ( get_next ) <nl> + self . _save ( saver , sess ) <nl> + for _ in range ( total_records - break_record ) : <nl> + expected_records . append ( sess . run ( get_next ) ) <nl> + <nl> + actual_records = [ ] <nl> + with ops . Graph ( ) . as_default ( ) as g : <nl> + with self . test_session ( graph = g ) as sess : <nl> + saver = self . _import_meta_graph ( ) <nl> + init_op , get_next = ops . get_collection ( " iterator_ops " ) <nl> + sess . run ( init_op ) <nl> + self . _restore ( saver , sess ) <nl> + for _ in range ( total_records - break_record ) : <nl> + actual_records . append ( sess . run ( get_next ) ) <nl> + with self . assertRaises ( errors . OutOfRangeError ) : <nl> + sess . run ( get_next ) <nl> + self . assertEqual ( actual_records , expected_records ) <nl> + <nl> + def testRestoreInModifiedGraph ( self ) : <nl> + num_files = 5 <nl> + lines_per_file = 5 <nl> + total_records = num_files * lines_per_file <nl> + break_record = 8 <nl> + test_filenames = self . _createFiles ( num_files , lines_per_file , crlf = True ) <nl> + <nl> + expected_records = [ ] <nl> + with ops . Graph ( ) . as_default ( ) as g : <nl> + init_op , get_next , saver = self . _build_graph ( test_filenames ) <nl> + with self . test_session ( graph = g ) as sess : <nl> + sess . run ( init_op ) <nl> + for _ in range ( break_record ) : <nl> + sess . run ( get_next ) <nl> + self . _save ( saver , sess ) <nl> + for _ in range ( total_records - break_record ) : <nl> + expected_records . append ( sess . run ( get_next ) ) <nl> + <nl> + actual_records = [ ] <nl> + with ops . Graph ( ) . as_default ( ) as g : <nl> + with self . test_session ( graph = g ) as sess : <nl> + init_op , get_next , saver = self . _build_graph ( <nl> + test_filenames , compression_type = " GZIP " ) <nl> + self . _restore ( saver , sess ) <nl> + for _ in range ( total_records - break_record ) : <nl> + actual_records . append ( sess . run ( get_next ) ) <nl> + with self . assertRaises ( errors . OutOfRangeError ) : <nl> + sess . run ( get_next ) <nl> + self . assertEqual ( actual_records , expected_records ) <nl> + <nl> + def testRestoreInModifiedGraphThenInit ( self ) : <nl> + num_files = 5 <nl> + lines_per_file = 5 <nl> + total_records = num_files * lines_per_file <nl> + break_record = 8 <nl> + test_filenames = self . _createFiles ( num_files , lines_per_file , crlf = True ) <nl> + <nl> + expected_records = [ ] <nl> + with ops . Graph ( ) . as_default ( ) as g : <nl> + init_op , get_next , saver = self . _build_graph ( test_filenames ) <nl> + with self . test_session ( graph = g ) as sess : <nl> + sess . run ( init_op ) <nl> + for _ in range ( break_record ) : <nl> + expected_records . append ( sess . run ( get_next ) ) <nl> + self . 
_save ( saver , sess ) <nl> + for _ in range ( total_records - break_record ) : <nl> + expected_records . append ( sess . run ( get_next ) ) <nl> + with self . assertRaises ( errors . OutOfRangeError ) : <nl> + sess . run ( get_next ) <nl> + <nl> + # Test that calling the init_op overrides the restored iterator . The <nl> + # iterator for the old graph was built to read uncompressed files and <nl> + # would fail when trying to read the new files . <nl> + actual_records = [ ] <nl> + with ops . Graph ( ) . as_default ( ) as g : <nl> + with self . test_session ( graph = g ) as sess : <nl> + test_filenames = self . _createFiles ( <nl> + num_files , lines_per_file , crlf = True , compression_type = " GZIP " ) <nl> + init_op , get_next , saver = self . _build_graph ( <nl> + test_filenames , compression_type = " GZIP " ) <nl> + self . _restore ( saver , sess ) <nl> + sess . run ( init_op ) <nl> + for _ in range ( total_records ) : <nl> + actual_records . append ( sess . run ( get_next ) ) <nl> + with self . assertRaises ( errors . OutOfRangeError ) : <nl> + sess . run ( get_next ) <nl> + self . assertEqual ( actual_records , expected_records ) <nl> + <nl> + def testDoNotRestoreIterator ( self ) : <nl> + num_files = 5 <nl> + lines_per_file = 5 <nl> + total_records = num_files * lines_per_file <nl> + break_record = 8 <nl> + test_filenames = self . _createFiles ( num_files , lines_per_file , crlf = True ) <nl> + <nl> + expected_records = [ ] <nl> + with ops . Graph ( ) . as_default ( ) as g : <nl> + init_op , get_next , saver = self . _build_graph ( test_filenames ) <nl> + with self . test_session ( graph = g ) as sess : <nl> + sess . run ( init_op ) <nl> + for _ in range ( break_record ) : <nl> + expected_records . append ( sess . run ( get_next ) ) <nl> + self . _save ( saver , sess ) <nl> + for _ in range ( total_records - break_record ) : <nl> + expected_records . append ( sess . run ( get_next ) ) <nl> + with self . assertRaises ( errors . OutOfRangeError ) : <nl> + sess . run ( get_next ) <nl> + <nl> + actual_records = [ ] <nl> + with ops . Graph ( ) . as_default ( ) as g : <nl> + with self . test_session ( graph = g ) as sess : <nl> + init_op , get_next , saver = self . _build_graph ( <nl> + test_filenames , build_saveable = False ) <nl> + self . _restore ( saver , sess ) <nl> + with self . assertRaises ( errors . FailedPreconditionError ) : <nl> + sess . run ( get_next ) <nl> + sess . run ( init_op ) <nl> + for _ in range ( total_records ) : <nl> + actual_records . append ( sess . run ( get_next ) ) <nl> + with self . assertRaises ( errors . OutOfRangeError ) : <nl> + sess . run ( get_next ) <nl> + self . assertEqual ( actual_records , expected_records ) <nl> + <nl> <nl> class FixedLengthRecordReaderTest ( test . 
TestCase ) : <nl> <nl> mmm a / tensorflow / contrib / data / python / ops / BUILD <nl> ppp b / tensorflow / contrib / data / python / ops / BUILD <nl> py_library ( <nl> srcs_version = " PY2AND3 " , <nl> deps = [ <nl> " : transformation_ops " , <nl> - " / / tensorflow / python : dtypes " , <nl> - " / / tensorflow / python : script_ops " , <nl> - " / / tensorflow / python : tensor_shape " , <nl> + " / / tensorflow / python : util " , <nl> " / / tensorflow / python / data / ops : dataset_ops " , <nl> " / / tensorflow / python / data / util : nest " , <nl> ] , <nl> py_library ( <nl> " / / tensorflow / python : platform " , <nl> " / / tensorflow / python : sparse_tensor " , <nl> " / / tensorflow / python : tensor_shape " , <nl> + " / / tensorflow / python : util " , <nl> " / / tensorflow / python / data / ops : dataset_ops " , <nl> " / / tensorflow / python / data / ops : readers " , <nl> " / / tensorflow / python / data / util : nest " , <nl> py_library ( <nl> " enumerate_ops . py " , <nl> " error_ops . py " , <nl> " grouping . py " , <nl> + " interleave_ops . py " , <nl> " resampling . py " , <nl> " scan_ops . py " , <nl> - " sloppy_ops . py " , <nl> ] , <nl> srcs_version = " PY2AND3 " , <nl> deps = [ <nl> py_library ( <nl> " / / tensorflow / python : logging_ops " , <nl> " / / tensorflow / python : math_ops " , <nl> " / / tensorflow / python : random_ops " , <nl> - " / / tensorflow / python : resource_variable_ops " , <nl> " / / tensorflow / python : tensor_shape " , <nl> " / / tensorflow / python : tensor_util " , <nl> " / / tensorflow / python / data / ops : dataset_ops " , <nl> similarity index 67 % <nl> rename from tensorflow / contrib / data / python / ops / sloppy_ops . py <nl> rename to tensorflow / contrib / data / python / ops / interleave_ops . py <nl> mmm a / tensorflow / contrib / data / python / ops / sloppy_ops . py <nl> ppp b / tensorflow / contrib / data / python / ops / interleave_ops . py <nl> <nl> from tensorflow . python . framework import function <nl> from tensorflow . python . framework import ops <nl> from tensorflow . python . ops import gen_dataset_ops <nl> + from tensorflow . python . util import deprecation <nl> <nl> <nl> - class SloppyInterleaveDataset ( dataset_ops . Dataset ) : <nl> + class ParallelInterleaveDataset ( dataset_ops . Dataset ) : <nl> " " " A ` Dataset ` that maps a function over its input and flattens the result . " " " <nl> <nl> - def __init__ ( self , input_dataset , map_func , cycle_length , block_length ) : <nl> - " " " See ` tf . contrib . data . sloppy_interleave ( ) ` for details . " " " <nl> - super ( SloppyInterleaveDataset , self ) . __init__ ( ) <nl> + def __init__ ( self , input_dataset , map_func , cycle_length , block_length , <nl> + sloppy ) : <nl> + " " " See ` tf . contrib . data . parallel_interleave ( ) ` for details . " " " <nl> + super ( ParallelInterleaveDataset , self ) . __init__ ( ) <nl> self . _input_dataset = input_dataset <nl> <nl> @ function . Defun ( * nest . flatten ( input_dataset . output_types ) ) <nl> def tf_map_func ( * args ) : <nl> cycle_length , dtype = dtypes . int64 , name = " cycle_length " ) <nl> self . _block_length = ops . convert_to_tensor ( <nl> block_length , dtype = dtypes . int64 , name = " block_length " ) <nl> + self . _sloppy = ops . convert_to_tensor ( <nl> + sloppy , dtype = dtypes . bool , name = " sloppy " ) <nl> <nl> def _as_variant_tensor ( self ) : <nl> - return gen_dataset_ops . sloppy_interleave_dataset ( <nl> + return gen_dataset_ops . parallel_interleave_dataset ( <nl> self . 
_input_dataset . _as_variant_tensor ( ) , # pylint : disable = protected - access <nl> self . _map_func . captured_inputs , <nl> self . _cycle_length , <nl> self . _block_length , <nl> + self . _sloppy , <nl> f = self . _map_func , <nl> output_types = nest . flatten ( self . output_types ) , <nl> output_shapes = nest . flatten ( self . output_shapes ) ) <nl> def output_types ( self ) : <nl> return self . _output_types <nl> <nl> <nl> + def parallel_interleave ( map_func , cycle_length , block_length = 1 , sloppy = False ) : <nl> + " " " A parallel version of the ` Dataset . interleave ( ) ` transformation . <nl> + <nl> + ` parallel_interleave ( ) ` maps ` map_func ` across its input to produce nested <nl> + datasets , and outputs their elements interleaved . Unlike <nl> + @ { tf . data . Dataset . interleave } , it gets elements from ` cycle_length ` nested <nl> + datasets in parallel , which increases the throughput , especially in the <nl> + presence of stragglers . Furthermore , the ` sloppy ` argument can be used to <nl> + improve performance , by relaxing the requirement that the outputs are produced <nl> + in a deterministic order , and allowing the implementation to skip over nested <nl> + datasets whose elements are not readily available when requested . <nl> + <nl> + Example usage : <nl> + <nl> + ` ` ` python <nl> + # Preprocess 4 files concurrently . <nl> + filenames = tf . data . Dataset . list_files ( " / path / to / data / train * . tfrecords " ) <nl> + dataset = filenames . apply ( <nl> + tf . contrib . data . parallel_interleave ( <nl> + lambda filename : tf . data . TFRecordDataset ( filename ) , <nl> + cycle_length = 4 ) ) <nl> + ` ` ` <nl> + <nl> + WARNING : If ` sloppy ` is ` True ` , the order of produced elements is not <nl> + deterministic . <nl> + <nl> + Args : <nl> + map_func : A function mapping a nested structure of tensors to a ` Dataset ` . <nl> + cycle_length : The number of threads to interleave from in parallel . <nl> + block_length : The number of consecutive elements to pull from a thread <nl> + before advancing to the next thread . <nl> + sloppy : If false , elements are produced in deterministic order . Otherwise , <nl> + the implementation is allowed , for the sake of expediency , to produce <nl> + elements in a non - deterministic order . <nl> + <nl> + Returns : <nl> + A ` Dataset ` transformation function , which can be passed to <nl> + @ { tf . data . Dataset . apply } . <nl> + " " " <nl> + def _apply_fn ( dataset ) : <nl> + return ParallelInterleaveDataset ( <nl> + dataset , map_func , cycle_length , block_length , sloppy ) <nl> + return _apply_fn <nl> + <nl> + <nl> + @ deprecation . deprecated ( <nl> + None , " Use ` tf . contrib . data . parallel_interleave ( . . . , sloppy = True ) ` . " ) <nl> def sloppy_interleave ( map_func , cycle_length , block_length = 1 ) : <nl> " " " A non - deterministic version of the ` Dataset . interleave ( ) ` transformation . <nl> <nl> def sloppy_interleave ( map_func , cycle_length , block_length = 1 ) : <nl> @ { tf . data . Dataset . apply } . 
<nl> " " " <nl> def _apply_fn ( dataset ) : <nl> - return SloppyInterleaveDataset ( <nl> - dataset , map_func , cycle_length , block_length ) <nl> + return ParallelInterleaveDataset ( <nl> + dataset , map_func , cycle_length , block_length , sloppy = True ) <nl> return _apply_fn <nl> mmm a / tensorflow / contrib / distributions / BUILD <nl> ppp b / tensorflow / contrib / distributions / BUILD <nl> py_library ( <nl> " / / tensorflow / contrib / linalg : linalg_py " , <nl> " / / tensorflow / python : array_ops " , <nl> " / / tensorflow / python : check_ops " , <nl> + " / / tensorflow / python : clip_ops " , <nl> " / / tensorflow / python : control_flow_ops " , <nl> " / / tensorflow / python : framework_for_generated_wrappers " , <nl> + " / / tensorflow / python : init_ops " , <nl> + " / / tensorflow / python : layers " , <nl> " / / tensorflow / python : linalg_ops " , <nl> " / / tensorflow / python : math_ops " , <nl> " / / tensorflow / python : nn_ops " , <nl> + " / / tensorflow / python : template " , <nl> " / / tensorflow / python : tensor_util " , <nl> " / / tensorflow / python : util " , <nl> + " / / tensorflow / python : variable_scope " , <nl> " / / tensorflow / python / ops / distributions " , <nl> + " / / tensorflow / python / ops / linalg " , <nl> " / / third_party / py / numpy " , <nl> ] , <nl> ) <nl> py_library ( <nl> " / / tensorflow / python : tensor_util " , <nl> " / / tensorflow / python : util " , <nl> " / / tensorflow / python : variable_scope " , <nl> + " / / tensorflow / python : variables " , <nl> " / / tensorflow / python / ops / distributions " , <nl> + " / / tensorflow / python / ops / linalg " , <nl> " / / third_party / py / numpy " , <nl> " @ six_archive / / : six " , <nl> ] , <nl> mmm a / tensorflow / contrib / eager / python / BUILD <nl> ppp b / tensorflow / contrib / eager / python / BUILD <nl> py_library ( <nl> " : saver " , <nl> " : summary_writer " , <nl> " / / tensorflow / python : framework_ops " , <nl> + " / / tensorflow / python : framework_test_lib " , <nl> " / / tensorflow / python : numerics " , <nl> " / / tensorflow / python : resource_variable_ops " , <nl> " / / tensorflow / python : util " , <nl> py_library ( <nl> srcs_version = " PY2AND3 " , <nl> visibility = [ " / / tensorflow : internal " ] , <nl> deps = [ <nl> + " / / tensorflow / python : array_ops " , <nl> " / / tensorflow / python : dataset_ops_gen " , <nl> " / / tensorflow / python : errors " , <nl> + " / / tensorflow / python : framework_ops " , <nl> " / / tensorflow / python : resource_variable_ops " , <nl> " / / tensorflow / python / data / util : nest " , <nl> " / / tensorflow / python / eager : context " , <nl> py_test ( <nl> srcs_version = " PY2AND3 " , <nl> deps = [ <nl> " : datasets " , <nl> - " / / tensorflow / contrib / data " , <nl> + " / / tensorflow / python : dtypes " , <nl> " / / tensorflow / python : math_ops " , <nl> + " / / tensorflow / python : script_ops " , <nl> + " / / tensorflow / python / data " , <nl> " / / tensorflow / python / eager : test " , <nl> - " / / third_party / py / numpy " , <nl> ] , <nl> ) <nl> <nl> py_library ( <nl> srcs = [ " saver . 
py " ] , <nl> srcs_version = " PY2AND3 " , <nl> deps = [ <nl> + " / / tensorflow / python : errors " , <nl> + " / / tensorflow / python : framework_ops " , <nl> + " / / tensorflow / python : resource_variable_ops " , <nl> " / / tensorflow / python : training " , <nl> + " / / tensorflow / python / eager : context " , <nl> ] , <nl> ) <nl> <nl> py_library ( <nl> srcs_version = " PY2AND3 " , <nl> deps = [ <nl> " / / tensorflow / contrib / summary : gen_summary_ops " , <nl> - " / / tensorflow / contrib / summary : summary_ops " , <nl> " / / tensorflow / python : constant_op " , <nl> + " / / tensorflow / python : dtypes " , <nl> + " / / tensorflow / python : framework_ops " , <nl> + " / / tensorflow / python : init_ops " , <nl> " / / tensorflow / python : resource_variable_ops " , <nl> " / / tensorflow / python : state_ops " , <nl> " / / tensorflow / python : summary_op_util " , <nl> - " / / tensorflow / python : training " , <nl> + " / / tensorflow / python : variable_scope " , <nl> " / / tensorflow / python / eager : context " , <nl> ] , <nl> ) <nl> py_library ( <nl> " / / tensorflow / python : constant_op " , <nl> " / / tensorflow / python : control_flow_ops " , <nl> " / / tensorflow / python : dtypes " , <nl> - " / / tensorflow / python : framework_ops " , <nl> - " / / tensorflow / python : layers_base " , <nl> + " / / tensorflow / python : init_ops " , <nl> " / / tensorflow / python : math_ops " , <nl> " / / tensorflow / python : resource_variable_ops " , <nl> " / / tensorflow / python : util " , <nl> py_test ( <nl> srcs_version = " PY2AND3 " , <nl> deps = [ <nl> " : metrics " , <nl> + " / / tensorflow / contrib / summary : summary_ops " , <nl> + " / / tensorflow / core : protos_all_py " , <nl> " / / tensorflow / python : array_ops " , <nl> " / / tensorflow / python : dtypes " , <nl> + " / / tensorflow / python : lib " , <nl> + " / / tensorflow / python : platform " , <nl> + " / / tensorflow / python : training " , <nl> " / / tensorflow / python : variables " , <nl> " / / tensorflow / python / eager : context " , <nl> " / / tensorflow / python / eager : test " , <nl> py_library ( <nl> " / / tensorflow / python : layers_base " , <nl> " / / tensorflow / python : variable_scope " , <nl> " / / tensorflow / python / estimator : util " , <nl> + " @ six_archive / / : six " , <nl> ] , <nl> ) <nl> <nl> mmm a / tensorflow / contrib / eager / python / datasets . py <nl> ppp b / tensorflow / contrib / eager / python / datasets . py <nl> <nl> # See the License for the specific language governing permissions and <nl> # limitations under the License . <nl> # = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = <nl> - " " " Support for tf . contrib . data when eager execution is enabled . " " " <nl> + " " " Iteration over tf . data . Datasets when eager execution is enabled . " " " <nl> <nl> from __future__ import absolute_import <nl> from __future__ import division <nl> <nl> from tensorflow . python . eager import context <nl> from tensorflow . python . framework import errors <nl> from tensorflow . python . framework import ops <nl> + from tensorflow . python . ops import array_ops <nl> from tensorflow . python . ops import gen_dataset_ops <nl> from tensorflow . python . ops import resource_variable_ops <nl> <nl> def _iterator_shared_name ( ) : <nl> <nl> <nl> class Iterator ( object ) : <nl> - " " " An iterator producing tf . Tensor objects from a tf . contrib . data . Dataset . 
" " " <nl> + " " " An iterator producing tf . Tensor objects from a tf . data . Dataset . " " " <nl> <nl> def __init__ ( self , dataset ) : <nl> " " " Creates a new iterator over the given dataset . <nl> <nl> For example : <nl> ` ` ` python <nl> - dataset = tf . contrib . data . Dataset . range ( 4 ) <nl> + dataset = tf . data . Dataset . range ( 4 ) <nl> for x in Iterator ( dataset ) : <nl> print ( x ) <nl> ` ` ` <nl> <nl> + Tensors produced will be placed on the device on which this iterator object <nl> + was created . <nl> + <nl> Args : <nl> - dataset : A ` tf . contrib . data . Dataset ` object . <nl> + dataset : A ` tf . data . Dataset ` object . <nl> <nl> Raises : <nl> RuntimeError : When invoked without eager execution enabled . <nl> def __init__ ( self , dataset ) : <nl> <nl> if not context . in_eager_mode ( ) : <nl> raise RuntimeError ( <nl> - " { } objects only make sense when eager execution is enabled " . format ( <nl> - type ( self ) ) ) <nl> + " { } objects can only be used when eager execution is enabled , use " <nl> + " tf . data . Dataset . make_iterator or " <nl> + " tf . data . Dataset . make_one_shot_iterator for graph construction " . <nl> + format ( type ( self ) ) ) <nl> with ops . device ( " / device : CPU : 0 " ) : <nl> ds_variant = dataset . _as_variant_tensor ( ) # pylint : disable = protected - access <nl> self . _output_types = dataset . output_types <nl> def __init__ ( self , dataset ) : <nl> output_types = self . _flat_output_types , <nl> output_shapes = self . _flat_output_shapes ) <nl> gen_dataset_ops . make_iterator ( ds_variant , self . _resource ) <nl> + self . _device = context . context ( ) . device_name <nl> <nl> def __del__ ( self ) : <nl> if self . _resource is not None : <nl> def next ( self ) : <nl> self . _resource , <nl> output_types = self . _flat_output_types , <nl> output_shapes = self . _flat_output_shapes ) <nl> - return nest . pack_sequence_as ( self . _output_types , ret ) <nl> except errors . OutOfRangeError : <nl> raise StopIteration <nl> + # Copies tensors from CPU to the current device if necessary . <nl> + # TODO ( rohanj ) : This should be replaced by the mechanism to have the <nl> + # runtime ' s threads copy tensors to the destination device . <nl> + with ops . device ( self . _device ) : <nl> + ret = [ array_ops . identity ( x ) for x in ret ] <nl> + return nest . pack_sequence_as ( self . _output_types , ret ) <nl> mmm a / tensorflow / contrib / eager / python / datasets_test . py <nl> ppp b / tensorflow / contrib / eager / python / datasets_test . py <nl> <nl> from __future__ import division <nl> from __future__ import print_function <nl> <nl> - from tensorflow . contrib . data import Dataset <nl> from tensorflow . contrib . eager . python import datasets <nl> + from tensorflow . python . data import Dataset <nl> from tensorflow . python . eager import test <nl> from tensorflow . python . framework import dtypes <nl> + from tensorflow . python . framework import ops <nl> from tensorflow . python . ops import math_ops <nl> from tensorflow . python . ops import script_ops <nl> <nl> def my_map ( inp ) : <nl> got = [ x . numpy ( ) for x in datasets . Iterator ( ds ) ] <nl> self . assertAllEqual ( [ [ 1 ] , [ 2 ] , [ 3 ] , [ 4 ] ] , got ) <nl> <nl> + def testTensorsPlacedOnDevice ( self ) : <nl> + ds = Dataset . from_tensors ( [ 0 . , 1 . ] ) <nl> + with ops . device ( test . gpu_device_name ( ) ) : <nl> + x = datasets . Iterator ( ds ) . next ( ) <nl> + x = math_ops . add ( x , x ) <nl> + self . assertAllEqual ( [ 0 . , 2 . ] , x . 
numpy ( ) <nl> + <nl> <nl> if __name__ = = ' __main__ ' : <nl> test . main ( ) <nl> mmm a / tensorflow / contrib / eager / python / metrics_impl . py <nl> ppp b / tensorflow / contrib / eager / python / metrics_impl . py <nl> <nl> class Metric ( object ) : <nl> " " " A metric holds state for aggregating statistics over an evaluation run . <nl> <nl> - Users will use Evaluator . add_metric ( ) to add Metric objects to their <nl> - evaluation , call them in each step ( treating the object as a callable ) , <nl> - and then use Evaluator . all_metric_results ( ) at the end . <nl> + Example use with eager execution : <nl> + <nl> + ` ` ` python <nl> + m = SomeMetric ( . . . ) <nl> + for input in . . . : <nl> + m ( input ) <nl> + print ( m . result ( ) ) <nl> + ` ` ` <nl> + <nl> + Example use with graph execution : <nl> + <nl> + ` ` ` python <nl> + m = SomeMetric ( . . . ) <nl> + m_placeholder = tf . placeholder ( . . . ) <nl> + m_update = m ( m_placeholder ) <nl> + # Variables defined in first call , so get the initialization op afterwards . <nl> + m_init = m . init_variables ( ) # or tf . global_variables_initializer ( ) <nl> + m_result = m . result ( ) <nl> + with tf . Session ( ) as sess : <nl> + sess . run ( m_init ) <nl> + for input in . . . : <nl> + sess . run ( m_update , feed_dict = { m_placeholder : input } ) <nl> + print ( sess . run ( m_result ) ) <nl> + ` ` ` <nl> <nl> Descendants will implement : <nl> * ` build ( ) ` : All variables should be created in this method , by calling <nl> class Metric ( object ) : <nl> * ` result ( ) ` : Computes and returns a final value for the metric <nl> from the variables in ` self ` . <nl> <nl> - Descendants may override , but usually won ' t need to : <nl> - * ` aggregate ( ) ` : Adds in the state from a list of metrics of the same type <nl> - as ` self ` . ( Default is to sum all the variables . ) <nl> - * ` reset ( ) ` : Reset all variables to their initial state . ( Default is to <nl> - zero all the variables . ) <nl> - Note that users should not call ` aggregate ( ) ` or ` reset ( ) ` , they are for <nl> - use by TensorFlow infrastructure . <nl> + Descendants may override ` aggregate ( ) ` , but usually won ' t need to . It <nl> + adds in the state from a list of metrics of the same type as ` self ` . <nl> + ( Default is to sum all the variables . ) Note that users should not call <nl> + ` aggregate ( ) ` , it is for use by TensorFlow infrastructure . <nl> " " " <nl> <nl> def __init__ ( self , name = None ) : <nl> self . _built = False <nl> self . _vars = [ ] <nl> + self . _initial_values = { } <nl> self . _updates = [ ] <nl> name = name or self . __class__ . __name__ <nl> # Replace things like spaces in name to create a valid scope name . <nl> def __init__ ( self , name = None ) : <nl> # We create the variable scope now to get the unique name that will <nl> # be used as a variable prefix when build ( ) calls add_variable ( ) . <nl> with variable_scope . variable_scope ( <nl> - None , default_name = scope_name , use_resource = True , reuse = False ) as scope : <nl> + scope_name , use_resource = True , reuse = False ) as scope : <nl> pos = scope . name . rfind ( scope_name ) <nl> self . _name = name + scope . name [ pos + len ( scope_name ) : ] <nl> self . _scope = scope <nl> def variables ( self ) : <nl> return self . _vars <nl> <nl> def init_variables ( self ) : <nl> - " " " Return an op for initializing this Metric ' s variables . <nl> + " " " Initializes this Metric ' s variables . <nl> <nl> - Only for graph execution . 
Should be called after variables are created <nl> - in the first execution of __call__ ( ) . <nl> + Should be called after variables are created in the first execution <nl> + of ` __call__ ( ) ` . If using graph execution , the return value should be <nl> + ` run ( ) ` in a session before running the op returned by ` __call__ ( ) ` . <nl> + ( See example above . ) <nl> <nl> Returns : <nl> - An op to run . <nl> + If using graph execution , this returns an op to perform the <nl> + initialization . Under eager execution , the variables are reset to their <nl> + initial values as a side effect and this function returns None . <nl> " " " <nl> - assert context . in_graph_mode ( ) <nl> - return control_flow_ops . group ( [ v . initializer for v in self . _vars ] ) <nl> + if context . in_graph_mode ( ) : <nl> + return control_flow_ops . group ( [ v . initializer for v in self . _vars ] ) <nl> + for v in self . _vars : <nl> + v . assign ( self . _initial_values [ v ] ) <nl> <nl> # mmm - To be implemented by descendants mmm <nl> def build ( self , * args , * * kwargs ) : <nl> def aggregate ( self , metrics ) : <nl> self . _vars [ i ] . assign_add ( math_ops . add_n ( [ m . _vars [ i ] for m in metrics ] ) ) <nl> # pylint : enable = protected - access <nl> <nl> - def reset ( self ) : <nl> - " " " Reset this metric to a freshly initialized state . <nl> - <nl> - Default implementation zeros all the metric variables . <nl> - " " " <nl> - for v in self . _vars : <nl> - v . assign ( math_ops . zeros_like ( v ) ) <nl> - <nl> # mmm - For use by descendants mmm <nl> def add_variable ( self , name , shape = None , dtype = None , initializer = None ) : <nl> " " " * * * Only for use by descendants of Metric * * * . " " " <nl> def add_variable ( self , name , shape = None , dtype = None , initializer = None ) : <nl> v = variable_scope . get_variable ( name , shape , dtype , initializer , <nl> trainable = False , use_resource = True ) <nl> self . _vars . append ( v ) <nl> + if context . in_eager_mode ( ) : <nl> + self . _initial_values [ v ] = v . value ( ) <nl> return v <nl> <nl> <nl> def call ( self , values , weights = None ) : <nl> " " " <nl> if weights is None : <nl> self . denom . assign_add ( <nl> - math_ops . cast ( array_ops . size ( values ) , self . dtype ) ) <nl> + math_ops . cast ( array_ops . identity ( array_ops . size ( values ) ) , self . dtype ) ) <nl> values = math_ops . reduce_sum ( values ) <nl> self . numer . assign_add ( math_ops . cast ( values , self . dtype ) ) <nl> else : <nl> mmm a / tensorflow / contrib / eager / python / metrics_test . py <nl> ppp b / tensorflow / contrib / eager / python / metrics_test . py <nl> def testMean ( self ) : <nl> self . assertEqual ( dtypes . float64 , m . dtype ) <nl> self . assertEqual ( dtypes . float64 , m . result ( ) . dtype ) <nl> <nl> + def testInitVariables ( self ) : <nl> + m = metrics . Mean ( ) <nl> + m ( [ 1 , 10 , 100 , 1000 ] ) <nl> + m ( [ 10000 . 0 , 100000 . 0 ] ) <nl> + self . assertEqual ( 111111 . 0 / 6 , m . result ( ) . numpy ( ) ) <nl> + m . init_variables ( ) <nl> + m ( 7 ) <nl> + self . assertEqual ( 7 . 0 , m . result ( ) . numpy ( ) ) <nl> + <nl> def testWriteSummaries ( self ) : <nl> m = metrics . Mean ( ) <nl> m ( [ 1 , 10 , 100 ] ) <nl> def testTwoMeans ( self ) : <nl> # Verify two metrics with the same class and name don ' t <nl> # accidentally share state . <nl> m1 = metrics . Mean ( ) <nl> - m2 = metrics . Mean ( ) <nl> m1 ( 0 ) <nl> - m2 ( 2 ) <nl> - self . assertEqual ( 0 , m1 . result ( ) . numpy ( ) ) <nl> - self . 
assertEqual ( 2 , m2 . result ( ) . numpy ( ) ) <nl> - self . assertNotEqual ( m1 . name , m2 . name ) <nl> + with self . assertRaises ( ValueError ) : <nl> + m2 = metrics . Mean ( ) <nl> + m2 ( 2 ) <nl> <nl> def testNamesWithSpaces ( self ) : <nl> # Verify two metrics with the same class and name don ' t <nl> # accidentally share state . <nl> m1 = metrics . Mean ( " has space " ) <nl> - m2 = metrics . Mean ( " has space " ) <nl> - m2 ( 2 ) <nl> m1 ( 0 ) <nl> self . assertEqual ( m1 . name , " has space " ) <nl> self . assertEqual ( m1 . numer . name , " has_space / numer : 0 " ) <nl> - self . assertEqual ( m2 . name , " has space_1 " ) <nl> - self . assertEqual ( m2 . numer . name , " has_space_1 / numer : 0 " ) <nl> <nl> def testGraph ( self ) : <nl> with context . graph_mode ( ) , self . test_session ( ) as sess : <nl> def testGraph ( self ) : <nl> def testTwoMeansGraph ( self ) : <nl> # Verify two metrics with the same class and name don ' t <nl> # accidentally share state . <nl> - with context . graph_mode ( ) , self . test_session ( ) as sess : <nl> + with context . graph_mode ( ) : <nl> m1 = metrics . Mean ( ) <nl> - m2 = metrics . Mean ( ) <nl> - accumulate1 = m1 ( 0 ) <nl> - accumulate2 = m2 ( 2 ) <nl> - m1 . init_variables ( ) . run ( ) <nl> - m2 . init_variables ( ) . run ( ) <nl> - sess . run ( [ accumulate1 , accumulate2 ] ) <nl> - self . assertEqual ( 0 , m1 . result ( ) . eval ( ) ) <nl> - self . assertEqual ( 2 , m2 . result ( ) . eval ( ) ) <nl> + m1 ( 0 ) <nl> + with self . assertRaises ( ValueError ) : <nl> + m2 = metrics . Mean ( ) <nl> + m2 ( 2 ) <nl> <nl> <nl> if __name__ = = " __main__ " : <nl> mmm a / tensorflow / contrib / eager / python / network . py <nl> ppp b / tensorflow / contrib / eager / python / network . py <nl> <nl> from __future__ import print_function <nl> <nl> import collections <nl> - import uuid <nl> - <nl> - import six <nl> + import os <nl> + import weakref <nl> <nl> + from tensorflow . python . eager import context <nl> from tensorflow . python . estimator import util as estimator_util <nl> from tensorflow . python . framework import ops <nl> from tensorflow . python . layers import base <nl> from tensorflow . python . ops import variable_scope <nl> + from tensorflow . python . training import checkpoint_utils <nl> + from tensorflow . python . training import saver as saver_lib <nl> + from tensorflow . python . training import training_util <nl> + <nl> + # pylint : disable = protected - access <nl> + # Explanation for protected - access disable : Network has lots of same - class and <nl> + # parent - class references across different objects , and some to private <nl> + # functions in base . py which should be reused . <nl> + <nl> + <nl> + _DeferredRestoration = collections . namedtuple ( <nl> + <nl> + " _DeferredRestoration " , <nl> + [ <nl> + # The map_func to use ( either user - specified or the default ) . <nl> + " map_func " , <nl> + # Boolean , True if the user specified an explicit map_func , for error <nl> + # messages . <nl> + " map_func_is_user " , <nl> + # A mapping from checkpoint names to initial values of not - yet - created <nl> + # variables which should be restored . These values come from parsing a <nl> + # checkpoint . <nl> + " checkpointed_variables_to_restore " , <nl> + # A mapping from checkpoint name to variable objects of variables which <nl> + # have already been restored , for error checking . <nl> + " restored_variables " , <nl> + # The session to restore with ( if in graph mode ) . 
<nl> + " session " , <nl> + # Names of the Network where the restore was requested , for error <nl> + # messages . <nl> + " network_name " , <nl> + " network_scope_name " <nl> + ] ) <nl> + <nl> + <nl> + def _default_naming_conflict_error_message ( <nl> + mapped_name , first_variable , second_variable , <nl> + network_name , network_scope_name ) : <nl> + return ( <nl> + ( " The default checkpoint variable name mapping strategy for Network " <nl> + " ' % s ' resulted in a naming conflict . We attempted to strip off the " <nl> + " variable prefix for the Network ( ' % s ' ) , but this resulted in two " <nl> + " variables named ' % s ' ( originally ' % s ' and ' % s ' ) . This should only " <nl> + " happen when using variable sharing ( i . e . the Network contains Networks " <nl> + " or Layers which were first added to another Network , and therefore " <nl> + " have that Network ' s variable prefix ) . One solution is to pass " <nl> + " ` map_func = lambda n : n ` to Network . save and Network . restore to use " <nl> + " fully qualified variable names in the checkpoint , although this will " <nl> + " require that the variable prefix of the Network being restored into " <nl> + " is also ' % s ' . You may alternatively write an arbitrary mapping . " ) <nl> + % ( <nl> + network_name , network_scope_name , mapped_name , <nl> + first_variable . _shared_name , <nl> + second_variable . _shared_name , network_scope_name <nl> + ) ) <nl> + <nl> + <nl> + def _restore_custom_map_func_error_message ( <nl> + mapped_name , first_variable , second_variable , <nl> + network_name , network_scope_name ) : <nl> + return ( <nl> + ( " The map_func passed to Network . restore for the Network ' % s ' " <nl> + " resulted in two variables named ' % s ' ( originally ' % s ' and ' % s ' ) . Since " <nl> + " this is also an error on Network . save , this Network was " <nl> + " probably not saved with this map_func . Note that map_func " <nl> + " always maps from full variable names to checkpoint names ; " <nl> + " there is no need to specify an inverse mapping . \ n \ n " <nl> + " Try stripping less from the variable names , or renaming parts " <nl> + " of the Network . For reference , variables created by sub - Layers " <nl> + " of this Network are prefixed with ' % s ' , but if they are " <nl> + " re - used after being added to another Network they will have " <nl> + " that Network ' s full variable prefix instead . " ) % ( <nl> + network_name , mapped_name , <nl> + first_variable . _shared_name , <nl> + second_variable . _shared_name , <nl> + network_scope_name ) ) <nl> + <nl> + <nl> + def _make_custom_getter_for_deferred_restorations ( ) : <nl> + " " " Returns a custom getter which searches ` deferred_restorations ` . <nl> + <nl> + Returns : A tuple of ( _custom_getter , deferred_restorations ) <nl> + _custom_getter : The getter which should be added to variable_scopes where <nl> + variables will be created . <nl> + deferred_restorations : A list for _DeferredRestoration objects . Typically <nl> + empty when the getter is set , and expanded as deferred restorations are <nl> + requested . All new deferred restorations should be appended to the end of <nl> + the list , where they will have priority over older deferred restorations . <nl> + " " " <nl> + deferred_restorations = [ ] <nl> + <nl> + def _custom_getter ( getter , name , shape = None , dtype = None , <nl> + initializer = None , <nl> + * args , * * kwargs ) : <nl> + " " " A custom getter which processes deferred restorations . 
" " " <nl> + # Iterate over restorations , newest first ( newer restorations will take <nl> + # precedence over older restorations , just like with immediate restorations <nl> + # into existing variables ) . <nl> + delayed_restoration = None <nl> + found_value = False <nl> + value_to_restore = None <nl> + for delayed_restoration in reversed ( <nl> + deferred_restorations ) : <nl> + checkpoint_name = delayed_restoration . map_func ( name ) <nl> + if ( checkpoint_name <nl> + in delayed_restoration . checkpointed_variables_to_restore ) : <nl> + found_value = True <nl> + value_to_restore = ( <nl> + delayed_restoration . checkpointed_variables_to_restore [ <nl> + checkpoint_name ] ) <nl> + if found_value : <nl> + break <nl> + # value_to_restore may be False because this variable is not in any <nl> + # checkpoint we are restoring , or None because we have explicitly set it to <nl> + # None when it was previously fetched . In either case , we don ' t need to <nl> + # set an initializer . <nl> + if found_value and value_to_restore is not None : <nl> + initializer = value_to_restore <nl> + shape = None <nl> + variable = getter ( name , shape = shape , dtype = dtype , initializer = initializer , <nl> + * args , * * kwargs ) <nl> + if found_value and value_to_restore is not None : <nl> + # Mark as already restored from this checkpoint . <nl> + delayed_restoration . checkpointed_variables_to_restore [ <nl> + checkpoint_name ] = None <nl> + if context . in_graph_mode ( ) : <nl> + delayed_restoration . session . run ( variable . initializer ) <nl> + if found_value : <nl> + # Error checking should run even if we ' ve already restored a value . <nl> + if delayed_restoration . restored_variables . setdefault ( <nl> + checkpoint_name , variable ) is not variable : <nl> + # Naming conflict . We ' ve tried to initialize two variables with the <nl> + # same value from the checkpoint . <nl> + if delayed_restoration . map_func_is_user : <nl> + raise ValueError ( <nl> + _restore_custom_map_func_error_message ( <nl> + mapped_name = checkpoint_name , <nl> + first_variable = delayed_restoration . restored_variables [ <nl> + checkpoint_name ] , <nl> + second_variable = variable , <nl> + network_name = delayed_restoration . network_name , <nl> + network_scope_name = delayed_restoration . network_scope_name ) ) <nl> + else : <nl> + raise ValueError ( <nl> + _default_naming_conflict_error_message ( <nl> + mapped_name = checkpoint_name , <nl> + first_variable = delayed_restoration . restored_variables [ <nl> + checkpoint_name ] , <nl> + second_variable = variable , <nl> + network_name = delayed_restoration . network_name , <nl> + network_scope_name = delayed_restoration . network_scope_name ) ) <nl> + return variable <nl> + return _custom_getter , deferred_restorations <nl> <nl> <nl> class Network ( base . Layer ) : <nl> class Network ( base . Layer ) : <nl> TODO ( josh11b , ashankar ) : <nl> - Should " trainable " be changeable on the Network object ? <nl> - Do we allow add_variable in Network ? <nl> - - Layer . name and Layer . variables . names are not in sync today <nl> - d = tf . layers . Dense ( 1 ) <nl> - d ( tf . constant ( [ [ 1 . ] ] ) ) <nl> - print ( d . name ) <nl> - print ( d . variables ) <nl> - - Note that name provided to __init__ is only for error messages ? <nl> - Detect layers used in __call__ that weren ' t registered with track_layer . <nl> - Convert inputs to __call__ to tensors . <nl> - Prevent variables from being created after the first __call__ ? 
<nl> ( Think about restoring from a checkpoint ) . <nl> - - Save & restore <nl> " " " <nl> <nl> def __init__ ( self , name = None ) : <nl> + if isinstance ( name , variable_scope . VariableScope ) : <nl> + raise ValueError ( " VariableScopes are not valid Network names . " ) <nl> + if name is not None and " / " in name : <nl> + raise ValueError ( <nl> + " Forward slashes ( ' / ' ) are not allowed in Network names . " ) <nl> super ( Network , self ) . __init__ ( name = name ) <nl> - self . _container = uuid . uuid4 ( ) . hex <nl> - self . _layers = collections . OrderedDict ( ) <nl> + self . _layers = [ ] <nl> + self . _sub_layer_name_uids = collections . defaultdict ( int ) <nl> + # Initially None , but set to False for networks which are first built as <nl> + # top - level . <nl> + self . _first_parent = None # A weak reference to our first parent . <nl> + self . _non_network_sublayers = [ ] <nl> + self . _owned_layers = { } <nl> + # The scope to use if we end up without a parent . <nl> + self . _default_parent_variable_scope = variable_scope . get_variable_scope ( ) <nl> + self . _custom_getter , self . _deferred_restorations = ( <nl> + _make_custom_getter_for_deferred_restorations ( ) ) <nl> + <nl> + def _init_set_name ( self , name ) : <nl> + # Anonymous Networks ( name = None ) defer setting a final name until they are <nl> + # ( 1 ) added to another Network , or ( 2 ) built / called ( where ( 2 ) is only used <nl> + # for a " top level " network ) . <nl> + # <nl> + # However , if we were provided an explicit name ( name is not None ) , that <nl> + # will always be the final name of the Network ; if it turns out not to be <nl> + # unique or if variable names can ' t be prefixed by it we will throw an <nl> + # error . <nl> + self . _name = name <nl> + self . _base_name = None <nl> + <nl> + def _finalize_name ( self , parent_network ) : <nl> + if not self . _name : <nl> + if not parent_network : <nl> + name_uid_map = base . _get_default_graph_uid_map ( ) <nl> + else : <nl> + name_uid_map = parent_network . _sub_layer_name_uids <nl> + # Were were not passed a name explicitly ( or it was blank ) , so this is an <nl> + # anonymous Network . We make up a unique name . <nl> + if parent_network : <nl> + avoid_names = parent_network . _owned_layers <nl> + else : <nl> + avoid_names = None <nl> + self . _name , self . _base_name = self . _make_unique_name ( <nl> + name_uid_map = name_uid_map , avoid_names = avoid_names ) <nl> + if self . _first_parent is None or ( self . _first_parent # False = no parent <nl> + and self . _first_parent ( ) is None ) : <nl> + # Save a pointer to the parent Network so that we can later check that the <nl> + # scope name we get is correct . <nl> + if not parent_network : <nl> + self . _first_parent = parent_network <nl> + else : <nl> + self . _first_parent = weakref . ref ( parent_network ) <nl> + <nl> + def _set_scope ( self , scope = None ) : <nl> + if self . _scope is None : <nl> + if not self . _first_parent : <nl> + first_parent = self . _first_parent <nl> + else : <nl> + first_parent = self . _first_parent ( ) <nl> + if first_parent is None : <nl> + # If we were never added to another Network , or that Network has beed <nl> + # garbage collected before being called , then we ' re a top - level Network . <nl> + self . _finalize_name ( <nl> + # Use False to make sure the value sticks and we don ' t inherit a <nl> + # parent if we ' re added to a network later . 
<nl> + parent_network = False ) <nl> + if scope is not None : <nl> + raise ValueError ( " Networks may not be created with explicit scopes . " ) <nl> + if first_parent : <nl> + first_parent . _set_scope ( ) <nl> + parent_scope = first_parent . _scope <nl> + else : <nl> + parent_scope = self . _default_parent_variable_scope <nl> + with variable_scope . variable_scope ( parent_scope ) : <nl> + # Make sure variables with this prefix will be unique . <nl> + with variable_scope . variable_scope ( <nl> + None , use_resource = True , default_name = self . _name ) as scope : <nl> + self . _scope = scope <nl> + scope_name = scope . name <nl> + suffix_start = scope_name . rfind ( " / " ) + 1 <nl> + # rfind is - 1 if there is no slash in the string , in which case the <nl> + # suffix starts at the beginning of the string ( there is no prefix ) . <nl> + scope_suffix = scope_name [ suffix_start : ] <nl> + scope_prefix = scope_name [ : suffix_start ] <nl> + if scope_suffix ! = self . _name : <nl> + raise ValueError ( <nl> + ( " A Network named ' % s ' already exists ( or a variable_scope was " <nl> + " created with this name ) . Names must be unique . " ) % ( <nl> + self . _name , ) ) <nl> + if ( first_parent <nl> + and scope_prefix [ : - 1 ] ! = first_parent . _scope . name ) : <nl> + raise ValueError ( <nl> + ( " Network variable names must match a nesting of sub - Network " <nl> + " names . Expected prefix ' % s ' from parent network , but got " <nl> + " ' % s ' when attempting to create a variable_scope for Network " <nl> + " ' % s ' . Likely an explicit variable_scope was inserted into " <nl> + " the nesting . " ) % ( <nl> + first_parent . _scope . name , <nl> + scope_prefix [ : - 1 ] , <nl> + self . _name ) ) <nl> + elif not first_parent and scope_prefix : <nl> + # For the case when this Network is not nested inside any other <nl> + # Network , but is in a variable_scope . This is an error for now . <nl> + raise ValueError ( <nl> + " Creating Networks inside named variable_scopes is currently " <nl> + " not supported ( to ensure that variable names match the names " <nl> + " of Networks in which they were first created ) . To set " <nl> + " options , try ` with tf . variable_scope ( ' ' ) : ` . If this " <nl> + " limitation bothers you , please file a feature request . " ) <nl> + for non_network_sublayer in self . _non_network_sublayers : <nl> + self . _set_scope_for_nonnetwork_sublayer ( non_network_sublayer ) <nl> + <nl> + def _set_scope_for_nonnetwork_sublayer ( self , sublayer ) : <nl> + if sublayer . _scope is None : <nl> + if sublayer . _first_parent is None : <nl> + constituent_first_parent = None <nl> + else : <nl> + constituent_first_parent = sublayer . _first_parent ( ) <nl> + if constituent_first_parent : <nl> + constituent_first_parent . _set_scope ( ) <nl> + parent_scope = constituent_first_parent . _scope <nl> + else : <nl> + self . _finalize_name ( False ) <nl> + raise ValueError ( <nl> + ( " The parent of a Layer added to Network % s was garbage collected " <nl> + " before the Layer was built . If this limitation bothers you " <nl> + " please , file a feature request . " ) % ( self . name , ) ) <nl> + with variable_scope . variable_scope ( parent_scope ) : <nl> + # Horrid hack to make Layer variable names which are direct <nl> + # sub - layers of Networks conform to the Network variable naming <nl> + # conventions . <nl> + with variable_scope . variable_scope ( <nl> + None , use_resource = True , <nl> + default_name = sublayer . name ) as sub_scope : <nl> + sublayer . 
_scope = sub_scope <nl> + <nl> + @ base . Layer . name . getter <nl> + def name ( self ) : <nl> + if self . _name is None : <nl> + raise ValueError ( <nl> + " The network does not yet have a final name , but a name was " <nl> + " requested for it . Networks get a name when they are added to " <nl> + " another Network via track_layer , or when they are first " <nl> + " called / built . " ) <nl> + return self . _name <nl> <nl> def track_layer ( self , layer ) : <nl> " " " Track a Layer in this Network . <nl> def track_layer ( self , layer ) : <nl> raise TypeError ( <nl> " Network . track_layer ( ) passed type % s , not a tf . layers . Layer " % <nl> ( type ( layer ) , ) ) <nl> - if layer . name in self . _layers : <nl> - if self . _layers [ layer . name ] is layer : <nl> - return layer <nl> - raise ValueError ( <nl> - " Attempt to add two Layers with the name ' % s ' to the same Network " <nl> - " ' % s ' " % ( layer . name , self . name ) ) <nl> - self . _layers [ layer . name ] = layer <nl> + if isinstance ( layer , Network ) : <nl> + layer . _finalize_name ( parent_network = self ) <nl> + else : <nl> + # ` layer ` is a non - Network , so it hasn ' t been named to follow Network <nl> + # conventions for contained Layers ( i . e . the same conventions as for <nl> + # sub - Networks ) . This renaming is necessary to isolate Network variable <nl> + # naming from Layers constructed outside the Network and never added to it <nl> + # ( because Layers are named globally ) . <nl> + if not layer . built : <nl> + if not hasattr ( layer , " _first_parent " ) : <nl> + dereferenced_layer_first_parent = None <nl> + else : <nl> + dereferenced_layer_first_parent = layer . _first_parent ( ) <nl> + if dereferenced_layer_first_parent is None : <nl> + if layer . _name ! = layer . _base_name : <nl> + # If name and base_name do not match , then this Layer used anonymous <nl> + # naming and we have to rename it . Otherwise there ' s an explicit <nl> + # name , and we should respect it ( subject to error checking ) . <nl> + layer . _name , layer . _base_name = layer . _make_unique_name ( <nl> + name_uid_map = self . _sub_layer_name_uids , <nl> + avoid_names = self . _owned_layers ) <nl> + layer . _first_parent = weakref . ref ( self ) <nl> + self . _non_network_sublayers . append ( layer ) <nl> + if ( not layer . built <nl> + and layer . _first_parent <nl> + and self is layer . _first_parent ( ) ) : <nl> + if layer . name in self . _owned_layers : <nl> + if self . _owned_layers [ layer . name ] is layer : <nl> + return layer <nl> + raise ValueError ( <nl> + " Attempt to add two Layers with the name ' % s ' to the same Network . " <nl> + % ( layer . name ) ) <nl> + self . _owned_layers [ layer . name ] = layer <nl> + self . _layers . append ( layer ) <nl> return layer <nl> <nl> def get_layer ( self , name = None , index = None ) : <nl> " " " Get a contained ` tf . layers . Layer ` either by name or index . <nl> <nl> Args : <nl> - name : String matching one of the names of a contained ` Layer ` . <nl> + name : String matching one of the names of a contained ` Layer ` . Note that <nl> + the names of ` Layer ` s added to ` Network ` s may not be unique when doing <nl> + layer sharing ( i . e . adding a ` Layer ` to this ` Network ` which was already <nl> + added to another ` Network ` ) . The lowest index ` Layer ` with a matching <nl> + name will be returned . <nl> index : Integer in [ 0 , number of layers ) . Layers are assigned an index <nl> by the order they are added . 
<nl> <nl> def get_layer ( self , name = None , index = None ) : <nl> A ` tf . layers . Layer ` object . <nl> <nl> Raises : <nl> - ValueError : If neither or both of ' index ' or ' name ' is specified . <nl> + ValueError : If neither or both of ' index ' or ' name ' is specified , or the <nl> + lookup failed . <nl> " " " <nl> if index is not None : <nl> if name is not None : <nl> raise ValueError ( " Exactly one of ' index ' or ' name ' must be provided " ) <nl> if len ( self . _layers ) < = index : <nl> - raise ValueError ( " Was asked to retrieve layer at index " + <nl> - str ( index ) + " but model only has " + str ( <nl> - len ( self . _layers ) ) + " layers . " ) <nl> - return list ( self . _layers . values ( ) ) [ index ] <nl> - if name is None : <nl> - raise ValueError ( " Exactly one of ' index ' or ' name ' must be provided " ) <nl> - return self . _layers [ index ] <nl> + raise ValueError ( " Was asked to retrieve layer at index " + str ( index ) + <nl> + " but model only has " + str ( len ( self . _layers ) ) + <nl> + " layers . " ) <nl> + else : <nl> + return self . _layers [ index ] <nl> + else : <nl> + if not name : <nl> + raise ValueError ( " Provide either a layer name or layer index . " ) <nl> + for layer in self . _layers : <nl> + if layer . name = = name : <nl> + return layer <nl> + raise ValueError ( " No such layer : " + name ) <nl> <nl> # The following methods are for implementing the Layer interface . <nl> <nl> def weights ( self ) : <nl> # variables in the case of shared layers / variables that appear in <nl> # multiple places in the Network ? <nl> weights = [ ] <nl> - for layer in six . itervalues ( self . _layers ) : <nl> + for layer in self . _layers : <nl> weights + = layer . weights <nl> return weights <nl> <nl> @ property <nl> def trainable_weights ( self ) : <nl> weights = [ ] <nl> - for layer in six . itervalues ( self . _layers ) : <nl> + for layer in self . _layers : <nl> weights + = layer . trainable_weights <nl> return weights <nl> <nl> @ property <nl> def non_trainable_weights ( self ) : <nl> weights = [ ] <nl> - for layer in six . itervalues ( self . _layers ) : <nl> + for layer in self . _layers : <nl> weights + = layer . non_trainable_weights <nl> return weights <nl> <nl> def trainable ( self , value ) : <nl> <nl> @ property <nl> def layers ( self ) : <nl> - return self . _layers . values ( ) <nl> + return self . _layers <nl> <nl> def add_variable ( self , name , shape , dtype = None , initializer = None , <nl> regularizer = None , trainable = True , constraint = None ) : <nl> def add_variable ( self , name , shape , dtype = None , initializer = None , <nl> " at https : / / github . com / tensorflow / tensorflow / issues / new if this is " <nl> " important to you " ) <nl> <nl> - def __call__ ( self , inputs , * args , * * kwargs ) : <nl> - # TODO ( josh11b , ashankar , agarwal ) : Can we reduce the number of context <nl> - # managers here and / or move some of the work into the constructor <nl> - # for performance reasons ? <nl> - with ops . container ( self . _container ) : <nl> - with variable_scope . variable_scope ( variable_scope . get_variable_scope ( ) , <nl> - use_resource = True ) : <nl> - return super ( Network , self ) . __call__ ( inputs , * args , * * kwargs ) <nl> + def _strip_variable_prefix ( self , original_variable_name ) : <nl> + " " " The default map_func for saving or restoring variables . 
<nl> + <nl> + Strips the variable prefix for the Network on which save / restore was called , <nl> + and leaves other variable names fully qualified in the checkpoint . <nl> + <nl> + Args : <nl> + original_variable_name : The _shared_name of the variable ( no : 0 <nl> + suffix ) to map . <nl> + Returns : <nl> + The checkpoint name of the variable . <nl> + " " " <nl> + scope_name_with_slash = self . scope_name + " / " <nl> + if original_variable_name . startswith ( scope_name_with_slash ) : <nl> + return original_variable_name [ len ( scope_name_with_slash ) : ] <nl> + else : <nl> + return original_variable_name <nl> + <nl> + def save ( self , save_path , global_step = None , map_func = None ) : <nl> + " " " Save variables from the Network to a checkpoint . <nl> + <nl> + Args : <nl> + save_path : Either a checkpoint prefix or the name of a directory to save <nl> + the checkpoint in ( in which case the checkpoint will be named based on <nl> + the Network name ) . <nl> + global_step : The global step to use when naming the checkpoint . If None <nl> + ( default ) , we will first try to get the default global step . If that <nl> + fails because no default global step exists , then the checkpoint is <nl> + created without a global step suffix . <nl> + map_func : A function mapping fully qualified variable names <nl> + ( e . g . ' my_network_1 / dense_1 / kernel ' ) to names in the checkpoint . By <nl> + default ( if ` map_func = None ` ) , the variable prefix for the network being <nl> + restored ( ` Network . scope_name + ' / ' ` , e . g . ' my_network_1 / ' ) is stripped <nl> + and all other variable names ( shared with other Networks ) are left <nl> + unchanged . <nl> + Returns : <nl> + The checkpoint prefix for the saved checkpoint , which may be passed to <nl> + ` Network . restore ` . <nl> + Raises : <nl> + ValueError : If the Network has not yet been called , or if map_func results <nl> + in a name collision . <nl> + " " " <nl> + if not self . built : <nl> + raise ValueError ( <nl> + " Attempt to save the Network before it was first called . This means " <nl> + " variables have not yet been created , so there is nothing to save . " ) <nl> + self . _set_scope ( ) # scope_name should be available to map_funcs <nl> + if global_step is None : <nl> + global_step = training_util . get_global_step ( ) <nl> + if os . path . isdir ( save_path ) : <nl> + # If we were passed a directory , default to naming based on the Network <nl> + # name . <nl> + save_path = os . path . join ( save_path , self . name ) <nl> + user_map_func = map_func <nl> + if map_func is None : <nl> + map_func = self . _strip_variable_prefix <nl> + variable_map = { } <nl> + for variable in self . variables : <nl> + mapped_name = map_func ( variable . _shared_name ) <nl> + if variable_map . setdefault ( mapped_name , variable ) is not variable : <nl> + if user_map_func is None : <nl> + # Instead of erroring out , we could just re - try and silently use the <nl> + # full variable names in the checkpoint . This could be odd for deeply <nl> + # nested sub - Networks ( since the full prefix from the nesting would <nl> + # get added ) , so for now we ' ll let the user deal with this case . <nl> + raise ValueError ( _default_naming_conflict_error_message ( <nl> + mapped_name = mapped_name , <nl> + first_variable = variable_map [ mapped_name ] , <nl> + second_variable = variable , <nl> + network_name = self . name , <nl> + network_scope_name = self . scope_name ) ) <nl> + else : <nl> + # The user passed their own problematic map_func . 
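<nl> + # Name both colliding variables in the error message so the mapping <nl> + # can be adjusted .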
<nl> + raise ValueError ( <nl> + ( " The map_func passed to Network . save for the Network ' % s ' " <nl> + " resulted in two variables named ' % s ' ( ' % s ' and ' % s ' ) . Try " <nl> + " stripping less from the variable names , or renaming parts of " <nl> + " the Network . For reference , variables created by sub - Layers of " <nl> + " this Network are prefixed with ' % s ' , but if they are re - used " <nl> + " after being added to another Network , they will have that " <nl> + " Network ' s full variable prefix instead . " ) % ( <nl> + self . name , mapped_name , <nl> + variable_map [ mapped_name ] . _shared_name , <nl> + variable . _shared_name , <nl> + self . scope_name ) ) <nl> + if context . in_eager_mode ( ) : <nl> + sess = None <nl> + else : <nl> + sess = ops . get_default_session ( ) <nl> + return saver_lib . Saver ( variable_map ) . save ( <nl> + sess = sess , save_path = save_path , write_meta_graph = False , <nl> + global_step = global_step ) <nl> + <nl> + def _restore_existing_variables ( self , save_path , map_func , user_map_func ) : <nl> + " " " Use a standard Saver to restore existing variables from a checkpoint . <nl> + <nl> + Args : <nl> + save_path : The checkpoint prefix or directory to read from . <nl> + map_func : The function to use when mapping from variable names to <nl> + checkpoint names . <nl> + user_map_func : The original map_func passed by the user , for error <nl> + checking . <nl> + Returns : <nl> + A dictionary mapping from checkpoint names to variable objects which have <nl> + been restored ( for bookkeeping to avoid deferred restorations on these <nl> + variables ) . <nl> + Raises : <nl> + ValueError : If there is a name collision . <nl> + " " " <nl> + existing_variables_by_checkpoint_name = { } <nl> + for variable in self . variables : <nl> + checkpoint_name = map_func ( variable . _shared_name ) <nl> + if existing_variables_by_checkpoint_name . setdefault ( <nl> + checkpoint_name , variable ) is not variable : <nl> + if user_map_func is None : <nl> + raise ValueError ( _default_naming_conflict_error_message ( <nl> + mapped_name = checkpoint_name , <nl> + first_variable = existing_variables_by_checkpoint_name [ <nl> + checkpoint_name ] , <nl> + second_variable = variable , <nl> + network_name = self . name , <nl> + network_scope_name = self . scope_name ) ) <nl> + else : <nl> + raise ValueError ( _restore_custom_map_func_error_message ( <nl> + mapped_name = checkpoint_name , <nl> + first_variable = existing_variables_by_checkpoint_name [ <nl> + checkpoint_name ] , <nl> + second_variable = variable , <nl> + network_name = self . name , <nl> + network_scope_name = self . scope_name ) ) <nl> + if existing_variables_by_checkpoint_name : <nl> + if context . in_eager_mode ( ) : <nl> + sess = None <nl> + else : <nl> + sess = ops . get_default_session ( ) <nl> + saver_lib . Saver ( var_list = existing_variables_by_checkpoint_name ) . restore ( <nl> + sess = sess , save_path = save_path ) <nl> + return existing_variables_by_checkpoint_name <nl> + <nl> + def _set_restore_on_create ( self , save_path , map_func , user_map_func , <nl> + existing_variables_by_checkpoint_name ) : <nl> + " " " If necessary , request deferred restorations of variables . " " " <nl> + checkpoint_reader = checkpoint_utils . load_checkpoint ( save_path ) <nl> + checkpointed_variables_to_restore = { } <nl> + for checkpoint_name , _ in checkpoint_utils . 
list_variables ( save_path ) : <nl> + if checkpoint_name in existing_variables_by_checkpoint_name : <nl> + # This variable was already created and restored . <nl> + continue <nl> + # Save the variable for later restoration in a custom getter . <nl> + checkpointed_variables_to_restore [ checkpoint_name ] = ( <nl> + checkpoint_reader . get_tensor ( checkpoint_name ) ) <nl> + # Only set a deferred restoration if there are checkpoint variables which <nl> + # have not been assigned to existing variables . Note that this loses out on <nl> + # some opportunity for error checking , but avoids creating <nl> + # _DeferredRestoration objects once a Network has been built ( so that <nl> + # restoring in a loop does not take increasing amounts of memory ) . <nl> + if checkpointed_variables_to_restore : <nl> + if context . in_eager_mode ( ) : <nl> + sess = None <nl> + else : <nl> + sess = ops . get_default_session ( ) <nl> + # We need a name for error messages . If we haven ' t been added to another <nl> + # Network yet , we ' re top - level . <nl> + self . _finalize_name ( False ) <nl> + self . _set_scope ( ) <nl> + # Save a record of this restoration for use in the custom getter . <nl> + deferred_restoration = _DeferredRestoration ( <nl> + map_func = map_func , <nl> + map_func_is_user = ( user_map_func is not None ) , <nl> + checkpointed_variables_to_restore = checkpointed_variables_to_restore , <nl> + restored_variables = { } , <nl> + session = sess , <nl> + network_name = self . name , <nl> + network_scope_name = self . scope_name ) <nl> + self . _deferred_restorations . append ( deferred_restoration ) <nl> + # Add the deferred restoration to non - Network children , and request that <nl> + # Networks propagate the request to their children . <nl> + self . _add_deferred_restoration ( deferred_restoration ) <nl> + <nl> + def _add_deferred_restoration ( self , deferred_restoration ) : <nl> + " " " Add a deferred restoration to this Network and all children . <nl> + <nl> + Restorations which are requested later have higher priority , and the highest <nl> + priority matching restoration is applied to a variable when it is created . <nl> + <nl> + Args : <nl> + deferred_restoration : A _DeferredRestoration object . <nl> + " " " <nl> + # Networks don ' t create variables at the moment , so this append isn ' t <nl> + # strictly necessary . We could get by with only adding deferred restorations <nl> + # to non - Network Layers . <nl> + self . _set_scope ( ) <nl> + # We use set_custom_getter because it avoids recursively calling up the <nl> + # variable_scope tree . We ' ve done the tree traversal ourselves and have <nl> + # added the request to each Layer which needs it . <nl> + self . _scope . set_custom_getter ( self . _custom_getter ) <nl> + self . _deferred_restorations . append ( deferred_restoration ) <nl> + for layer in self . layers : <nl> + if isinstance ( layer , Network ) : <nl> + # For Networks , request that they propagate this deferred restoration <nl> + # to all of their children recursively . <nl> + layer . _add_deferred_restoration ( deferred_restoration ) <nl> + else : <nl> + # For non - Network Layers , make sure they have a deferred restoration <nl> + # queue and a custom getter , then add our request to it . <nl> + if not hasattr ( layer , " _custom_getter " ) : <nl> + assert not hasattr ( layer , " _deferred_restorations " ) <nl> + layer . _custom_getter , layer . _deferred_restorations = ( <nl> + _make_custom_getter_for_deferred_restorations ( ) ) <nl> + self .
_set_scope_for_nonnetwork_sublayer ( layer ) <nl> + layer . _scope . set_custom_getter ( layer . _custom_getter ) <nl> + layer . _deferred_restorations . append ( deferred_restoration ) <nl> + <nl> + def restore ( self , save_path , map_func = None ) : <nl> + " " " Restore the Network from a checkpoint . <nl> + <nl> + If variables have already been created ( typically when some or all of the <nl> + ` Network ` is built ) , they are assigned values from the checkpoint <nl> + immediately , overwriting any existing values ( in graph mode the default <nl> + session is used for the assignments ) . <nl> + <nl> + If there are checkpoint entries which do not correspond to any existing <nl> + variables in the ` Network ` , these values are saved for deferred restoration ; <nl> + their initial values will be the checkpointed values once they are <nl> + created . Requests for multiple deferred restorations behave the same way as <nl> + immediate restorations , in that later requests will take priority over <nl> + earlier requests relevant to the same variable . <nl> + <nl> + If this ` Network ` shares ` Layer ` s with another network , those ` Layer ` s will <nl> + also have their variables restored from the checkpoint . <nl> + <nl> + Args : <nl> + save_path : The return value of ` Network . save ` , or a directory to search <nl> + for a checkpoint . <nl> + map_func : A function mapping fully qualified variable names <nl> + ( e . g . ' my_network_1 / dense_1 / kernel ' ) to names in the checkpoint . By <nl> + default ( if ` map_func = None ` ) , the variable prefix for the network being <nl> + restored ( ` Network . scope_name + ' / ' ` , e . g . ' my_network_1 / ' ) is stripped <nl> + and all other variable names ( shared with other Networks ) are left <nl> + unchanged . Note that this is the _same_ map_func as ` Network . save ` , not <nl> + an inverse mapping . <nl> + " " " <nl> + # If we don ' t have a name yet , we are top - level ; finalize with no parent . <nl> + self . _finalize_name ( parent_network = False ) <nl> + self . _set_scope ( ) # scope_name should be available to map_funcs <nl> + if os . path . isdir ( save_path ) : <nl> + # If we were passed a directory , default to naming based on the Network <nl> + # name , mirroring ` Network . save ` . <nl> + save_path = os . path . join ( save_path , self . name ) <nl> + user_map_func = map_func <nl> + if map_func is None : <nl> + map_func = self . _strip_variable_prefix <nl> + # Step one is to restore any existing variables from the checkpoint . <nl> + existing_variables_by_checkpoint_name = self . _restore_existing_variables ( <nl> + save_path = save_path , <nl> + map_func = map_func , <nl> + user_map_func = user_map_func ) <nl> + # Step two is to set a custom getter which restores variables on creation , <nl> + # for those variables which have not been added to sub - Layers yet . <nl> + self . _set_restore_on_create ( <nl> + save_path = save_path , <nl> + map_func = map_func , <nl> + user_map_func = user_map_func , <nl> + existing_variables_by_checkpoint_name = ( <nl> + existing_variables_by_checkpoint_name ) ) <nl> <nl> # TODO ( josh11b ) : Support other Layer methods needed for graph mode , such as for <nl> losses and updates <nl> mmm a / tensorflow / contrib / eager / python / network_test . py <nl> ppp b / tensorflow / contrib / eager / python / network_test . py <nl> <nl> from __future__ import division <nl> from __future__ import print_function <nl> <nl> + import gc <nl> + <nl> from tensorflow . contrib . eager . python import network <nl> from tensorflow . python . eager import test <nl> from tensorflow . python . framework import constant_op <nl> + from tensorflow . python .
framework import errors_impl <nl> + from tensorflow . python . framework import test_util <nl> from tensorflow . python . layers import core <nl> from tensorflow . python . ops import math_ops <nl> from tensorflow . python . ops import nn_ops <nl> + from tensorflow . python . ops import resource_variable_ops <nl> + from tensorflow . python . ops import variable_scope <nl> + from tensorflow . python . training import training_util <nl> <nl> <nl> # pylint : disable = not - callable <nl> class MyNetwork ( network . Network ) : <nl> <nl> - def __init__ ( self ) : <nl> - super ( MyNetwork , self ) . __init__ ( name = " abcd " ) <nl> + def __init__ ( self , name = None ) : <nl> + super ( MyNetwork , self ) . __init__ ( name = name ) <nl> self . l1 = self . track_layer ( core . Dense ( 1 , use_bias = False ) ) <nl> <nl> def call ( self , x ) : <nl> def call ( self , x ) : <nl> <nl> class NetworkTest ( test . TestCase ) : <nl> <nl> + def _save_modify_load_network_built ( self , net , global_step = None ) : <nl> + checkpoint_directory = self . get_temp_dir ( ) <nl> + checkpoint_path = net . save ( <nl> + save_path = checkpoint_directory , global_step = global_step ) <nl> + input_value = constant_op . constant ( [ [ 42 . 0 ] ] ) <nl> + original_output = self . evaluate ( net ( input_value ) ) <nl> + for var in net . variables : <nl> + self . evaluate ( var . assign ( var + 1 . ) ) <nl> + self . assertGreater ( <nl> + self . evaluate ( net ( input_value ) ) , <nl> + original_output ) <nl> + # Either the returned explicit checkpoint path or the directory should work . <nl> + net . restore ( save_path = checkpoint_directory ) <nl> + self . assertAllEqual ( <nl> + original_output , <nl> + self . evaluate ( net ( input_value ) ) ) <nl> + for var in net . variables : <nl> + self . evaluate ( var . assign ( var + 2 . ) ) <nl> + net . restore ( save_path = checkpoint_path ) <nl> + self . assertAllEqual ( <nl> + original_output , <nl> + self . evaluate ( net ( input_value ) ) ) <nl> + <nl> + @ test_util . run_in_graph_and_eager_modes ( ) <nl> def testTrainableAttribute ( self ) : <nl> net = network . Network ( ) <nl> self . assertTrue ( net . trainable ) <nl> def testTrainableAttribute ( self ) : <nl> net . trainable = False <nl> self . assertTrue ( net . trainable ) <nl> <nl> + @ test_util . run_in_graph_and_eager_modes ( ) <nl> def testNetworkCall ( self ) : <nl> - net = MyNetwork ( ) <nl> + net = MyNetwork ( name = " abcd " ) <nl> net ( constant_op . constant ( [ [ 2 . 0 ] ] ) ) # Force variables to be created . <nl> self . assertEqual ( 1 , len ( net . trainable_variables ) ) <nl> - net . trainable_variables [ 0 ] . assign ( [ [ 17 . 0 ] ] ) <nl> + self . evaluate ( net . trainable_variables [ 0 ] . assign ( [ [ 17 . 0 ] ] ) ) <nl> # TODO ( josh11b ) : Support passing Python values to networks . <nl> result = net ( constant_op . constant ( [ [ 2 . 0 ] ] ) ) <nl> - self . assertEqual ( 34 . 0 , result . numpy ( ) ) <nl> + self . assertEqual ( 34 . 0 , self . evaluate ( result ) ) <nl> + <nl> + @ test_util . run_in_graph_and_eager_modes ( ) <nl> + def testNetworkSaveRestoreAlreadyBuilt ( self ) : <nl> + net = MyNetwork ( name = " abcd " ) <nl> + with self . assertRaisesRegexp ( <nl> + ValueError , " Attempt to save the Network before it was first called " ) : <nl> + net . save ( self . get_temp_dir ( ) ) <nl> + net ( constant_op . constant ( [ [ 2 . 0 ] ] ) ) <nl> + self . evaluate ( net . trainable_variables [ 0 ] . assign ( [ [ 17 . 0 ] ] ) ) <nl> + self . 
_save_modify_load_network_built ( net , global_step = None ) <nl> + self . _save_modify_load_network_built ( net , global_step = 10 ) <nl> + <nl> + @ test_util . run_in_graph_and_eager_modes ( ) <nl> + def testSaveRestoreDefaultGlobalStep ( self ) : <nl> + net = MyNetwork ( name = " abcd " ) <nl> + net ( constant_op . constant ( [ [ 2 . 0 ] ] ) ) <nl> + self . evaluate ( net . variables [ 0 ] . assign ( [ [ 3 . ] ] ) ) <nl> + default_global_step = training_util . get_or_create_global_step ( ) <nl> + self . evaluate ( default_global_step . assign ( 4242 ) ) <nl> + save_path = net . save ( self . get_temp_dir ( ) ) <nl> + self . assertIn ( " abcd - 4242 " , save_path ) <nl> + <nl> + @ test_util . run_in_graph_and_eager_modes ( ) <nl> + def testNetworkSaveAndRestoreIntoUnbuilt ( self ) : <nl> + save_dir = self . get_temp_dir ( ) <nl> + net1 = MyNetwork ( ) <nl> + test_input = constant_op . constant ( [ [ 2 . 0 ] ] ) <nl> + net1 ( test_input ) <nl> + self . evaluate ( net1 . trainable_variables [ 0 ] . assign ( [ [ 17 . 0 ] ] ) ) <nl> + save_path = net1 . save ( save_dir ) <nl> + # With a pre - build restore we should have the same value . <nl> + net2 = MyNetwork ( ) <nl> + net2 . restore ( save_path ) <nl> + self . assertAllEqual ( self . evaluate ( net1 ( test_input ) ) , <nl> + self . evaluate ( net2 ( test_input ) ) ) <nl> + self . assertIsNot ( net1 . variables [ 0 ] , net2 . variables [ 0 ] ) <nl> + self . assertAllEqual ( self . evaluate ( net1 . variables [ 0 ] ) , <nl> + self . evaluate ( net2 . variables [ 0 ] ) ) <nl> + <nl> + @ test_util . run_in_graph_and_eager_modes ( ) <nl> + def testLoadIntoUnbuiltSharedLayer ( self ) : <nl> + <nl> + class Owner ( network . Network ) : <nl> + <nl> + def __init__ ( self , name = None ) : <nl> + super ( Owner , self ) . __init__ ( name = name ) <nl> + self . first = self . track_layer ( core . Dense ( <nl> + 1 , name = " first_layer " , use_bias = False ) ) <nl> + <nl> + def call ( self , x ) : <nl> + return self . first ( x ) <nl> + <nl> + first_owner = Owner ( ) <nl> + <nl> + class User ( network . Network ) : <nl> + <nl> + def __init__ ( self , use_layer , name = None ) : <nl> + super ( User , self ) . __init__ ( name = name ) <nl> + self . first = self . track_layer ( use_layer ) <nl> + self . second = self . track_layer ( core . Dense ( <nl> + 1 , name = " second_layer " , use_bias = False ) ) <nl> + <nl> + def call ( self , x ) : <nl> + return self . second ( self . first ( x ) ) <nl> + <nl> + class LikeUserButNotSharing ( network . Network ) : <nl> + <nl> + def __init__ ( self , name = None ) : <nl> + super ( LikeUserButNotSharing , self ) . __init__ ( name = name ) <nl> + self . first = self . track_layer ( core . Dense ( <nl> + 1 , name = " first_layer " , use_bias = False ) ) <nl> + self . second = self . track_layer ( core . Dense ( <nl> + 1 , name = " second_layer " , use_bias = False ) ) <nl> + <nl> + def call ( self , x ) : <nl> + return self . second ( self . first ( x ) ) <nl> + <nl> + checkpoint_creator = LikeUserButNotSharing ( name = " checkpoint_creator " ) <nl> + one = constant_op . constant ( [ [ 1 . 0 ] ] ) <nl> + checkpoint_creator ( one ) <nl> + self . assertEqual ( 2 , len ( checkpoint_creator . variables ) ) <nl> + self . evaluate ( checkpoint_creator . variables [ 0 ] . assign ( [ [ 5 . ] ] ) ) <nl> + self . evaluate ( checkpoint_creator . variables [ 1 ] . assign ( [ [ 6 . ] ] ) ) <nl> + # Re - map the variable names so that with default restore mapping we ' ll <nl> + # attempt to restore into the unbuilt Layer . 
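<nl> + # ( ' owner_1 / first_layer / kernel ' is the full name the shared layer will <nl> + # have once built under its first parent ; ' second_layer / kernel ' is the <nl> + # prefix - stripped name ` User . restore ` produces for its own layer . )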
<nl> + name_mapping = { <nl> + " checkpoint_creator / first_layer / kernel " : " owner_1 / first_layer / kernel " , <nl> + " checkpoint_creator / second_layer / kernel " : " second_layer / kernel " , <nl> + } <nl> + save_path = checkpoint_creator . save ( <nl> + self . get_temp_dir ( ) , <nl> + map_func = lambda full_name : name_mapping [ full_name ] ) <nl> + load_into = User ( use_layer = first_owner . first ) <nl> + load_into . restore ( save_path ) <nl> + self . assertEqual ( 0 , len ( first_owner . variables ) ) <nl> + self . assertAllEqual ( self . evaluate ( checkpoint_creator ( one ) ) , <nl> + self . evaluate ( load_into ( one ) ) ) <nl> + self . assertEqual ( 1 , len ( first_owner . variables ) ) <nl> + self . assertAllEqual ( [ [ 5 . ] ] , self . evaluate ( load_into . variables [ 0 ] ) ) <nl> + self . assertAllEqual ( [ [ 6 . ] ] , self . evaluate ( load_into . variables [ 1 ] ) ) <nl> + first_owner ( one ) <nl> + self . assertAllEqual ( [ [ 5 . ] ] , self . evaluate ( first_owner . variables [ 0 ] ) ) <nl> + <nl> + # Try again with a garbage collected parent . <nl> + first_owner = Owner ( ) <nl> + load_into = User ( use_layer = first_owner . first ) <nl> + del first_owner <nl> + gc . collect ( ) <nl> + def _restore_map_func ( original_name ) : <nl> + if original_name . startswith ( " owner_1 " ) : <nl> + return original_name . replace ( " owner_1 " , " owner_2 " ) <nl> + else : <nl> + return " user_2 / " + original_name <nl> + with self . assertRaisesRegexp ( ValueError , " garbage collected " ) : <nl> + load_into . restore ( save_path , map_func = _restore_map_func ) <nl> + <nl> + @ test_util . run_in_graph_and_eager_modes ( ) <nl> + def testRestoreIntoSubNetwork ( self ) : <nl> + <nl> + class Parent ( network . Network ) : <nl> + <nl> + def __init__ ( self , name = None ) : <nl> + super ( Parent , self ) . __init__ ( name = name ) <nl> + self . first = self . track_layer ( MyNetwork ( ) ) <nl> + self . second = self . track_layer ( MyNetwork ( ) ) <nl> + <nl> + def call ( self , x ) : <nl> + return self . first ( self . second ( x ) ) <nl> + <nl> + one = constant_op . constant ( [ [ 3 . ] ] ) <nl> + whole_model_saver = Parent ( ) <nl> + whole_model_saver ( one ) <nl> + self . evaluate ( whole_model_saver . variables [ 0 ] . assign ( [ [ 15 . ] ] ) ) <nl> + self . evaluate ( whole_model_saver . variables [ 1 ] . assign ( [ [ 16 . ] ] ) ) <nl> + whole_model_checkpoint = whole_model_saver . save ( self . get_temp_dir ( ) ) <nl> + <nl> + save_from = MyNetwork ( ) <nl> + save_from ( one ) <nl> + self . evaluate ( save_from . variables [ 0 ] . assign ( [ [ 5 . ] ] ) ) <nl> + checkpoint = save_from . save ( self . get_temp_dir ( ) ) <nl> + save_into_parent = Parent ( ) <nl> + save_into_parent . restore ( whole_model_checkpoint ) <nl> + save_into_parent . first . restore ( checkpoint ) <nl> + save_into_parent . first . restore ( checkpoint ) # deferred loading multiple <nl> + # times is fine <nl> + save_into_parent ( one ) # deferred loading <nl> + self . assertAllEqual ( [ [ 5 . ] ] , self . evaluate ( save_into_parent . variables [ 0 ] ) ) <nl> + self . assertAllEqual ( [ [ 16 . ] ] , self . evaluate ( save_into_parent . variables [ 1 ] ) ) <nl> + <nl> + # Try again with the opposite ordering , and we should get different results <nl> + # ( deferred restoration should happen the same way non - deferred happens , <nl> + # with later restorations overwriting older ones ) . <nl> + save_into_parent = Parent ( ) <nl> + save_into_parent . first . 
restore ( checkpoint ) # deferred loading multiple <nl> + # times is fine <nl> + save_into_parent . restore ( whole_model_checkpoint ) <nl> + save_into_parent ( one ) # deferred loading <nl> + # We ' ve overwritten the sub - Network restore . <nl> + self . assertAllEqual ( [ [ 15 . ] ] , self . evaluate ( save_into_parent . variables [ 0 ] ) ) <nl> + self . assertAllEqual ( [ [ 16 . ] ] , self . evaluate ( save_into_parent . variables [ 1 ] ) ) <nl> + <nl> + self . evaluate ( save_into_parent . variables [ 0 ] . assign ( [ [ 3 . ] ] ) ) <nl> + self . evaluate ( save_into_parent . variables [ 1 ] . assign ( [ [ 4 . ] ] ) ) <nl> + save_into_parent . second . restore ( checkpoint ) <nl> + self . assertAllEqual ( [ [ 5 . ] ] , self . evaluate ( save_into_parent . variables [ 1 ] ) ) <nl> + with self . assertRaisesRegexp ( errors_impl . NotFoundError , <nl> + " not found in checkpoint " ) : <nl> + # The checkpoint is incompatible . <nl> + save_into_parent . restore ( checkpoint ) <nl> + <nl> + @ test_util . run_in_graph_and_eager_modes ( ) <nl> + def testCustomMapCollisionErrors ( self ) : <nl> + <nl> + class Parent ( network . Network ) : <nl> + <nl> + def __init__ ( self , name = None ) : <nl> + super ( Parent , self ) . __init__ ( name = name ) <nl> + self . first = self . track_layer ( MyNetwork ( ) ) <nl> + self . second = self . track_layer ( MyNetwork ( ) ) <nl> + <nl> + def call ( self , x ) : <nl> + return self . first ( self . second ( x ) ) <nl> + <nl> + make_checkpoint = Parent ( ) <nl> + one = constant_op . constant ( [ [ 1 . ] ] ) <nl> + make_checkpoint ( one ) <nl> + self . evaluate ( make_checkpoint . variables [ 0 ] . assign ( [ [ 2 . ] ] ) ) <nl> + self . evaluate ( make_checkpoint . variables [ 1 ] . assign ( [ [ 3 . ] ] ) ) <nl> + with self . assertRaisesRegexp ( <nl> + ValueError , <nl> + " The map_func passed to Network . save for the Network ' parent_1 ' " <nl> + " resulted in two variables named ' foo ' " ) : <nl> + make_checkpoint . save ( self . get_temp_dir ( ) , map_func = lambda n : " foo " ) <nl> + checkpoint = make_checkpoint . first . save ( <nl> + self . get_temp_dir ( ) , map_func = lambda n : " foo " ) <nl> + loader = Parent ( ) <nl> + loader . restore ( checkpoint , map_func = lambda n : " foo " ) <nl> + with self . assertRaisesRegexp ( <nl> + ValueError , <nl> + ( " The map_func passed to Network . restore for the Network " <nl> + " ' parent_2 ' resulted in two variables named ' foo ' " ) ) : <nl> + loader ( one ) <nl> + loader = Parent ( ) <nl> + loader ( one ) <nl> + with self . assertRaisesRegexp ( <nl> + ValueError , <nl> + ( " The map_func passed to Network . restore for the Network " <nl> + " ' parent_3 ' resulted in two variables named ' foo ' " ) ) : <nl> + loader . restore ( checkpoint , map_func = lambda n : " foo " ) <nl> + <nl> + @ test_util . run_in_graph_and_eager_modes ( ) <nl> + def testDefaultMapCollisionErrors ( self ) : <nl> + <nl> + one = constant_op . constant ( [ [ 1 . ] ] ) <nl> + first = core . Dense ( 1 , name = " dense_1 " , use_bias = False ) <nl> + first ( one ) <nl> + <nl> + class Parent ( network . Network ) : <nl> + <nl> + def __init__ ( self , name = None ) : <nl> + super ( Parent , self ) . __init__ ( name = name ) <nl> + self . first = self . track_layer ( first ) <nl> + self . second = self . track_layer ( core . Dense ( 1 , use_bias = False ) ) <nl> + <nl> + def call ( self , x ) : <nl> + return self . first ( self . second ( x ) ) <nl> + <nl> + make_checkpoint = Parent ( ) <nl> + one = constant_op . constant ( [ [ 1 . 
] ] ) <nl> + make_checkpoint ( one ) <nl> + self . evaluate ( make_checkpoint . variables [ 0 ] . assign ( [ [ 2 . ] ] ) ) <nl> + self . evaluate ( make_checkpoint . variables [ 1 ] . assign ( [ [ 3 . ] ] ) ) <nl> + with self . assertRaisesRegexp ( <nl> + ValueError , <nl> + ( " The default checkpoint variable name mapping strategy for Network " <nl> + " ' parent_1 ' resulted in a naming conflict . " ) ) : <nl> + make_checkpoint . save ( self . get_temp_dir ( ) ) <nl> <nl> - def testNetworkAsAGraph ( self ) : <nl> - self . skipTest ( " TODO ( ashankar , josh11b ) : FIX THIS " ) <nl> - # Verify that we ' re using ResourceVariables <nl> + class Compatible ( network . Network ) : <nl> <nl> + def __init__ ( self , name = None ) : <nl> + super ( Compatible , self ) . __init__ ( name = name ) <nl> + self . first = self . track_layer ( core . Dense ( 1 , use_bias = False ) ) <nl> + <nl> + def call ( self , x ) : <nl> + return self . first ( x ) <nl> + <nl> + successful_checkpoint = Compatible ( ) <nl> + successful_checkpoint ( one ) <nl> + self . evaluate ( successful_checkpoint . variables [ 0 ] . assign ( [ [ - 1 . ] ] ) ) <nl> + checkpoint_path = successful_checkpoint . save ( self . get_temp_dir ( ) ) <nl> + load_checkpoint = Parent ( ) <nl> + load_checkpoint ( one ) <nl> + with self . assertRaisesRegexp ( <nl> + ValueError , <nl> + ( " The default checkpoint variable name mapping strategy for Network " <nl> + " ' parent_2 ' resulted in a naming conflict . " ) ) : <nl> + load_checkpoint . restore ( checkpoint_path ) <nl> + <nl> + def testNoReferenceCyclesAfterCall ( self ) : <nl> + <nl> + class ChildNetwork ( network . Network ) : <nl> + <nl> + def __init__ ( self , name = None ) : <nl> + super ( ChildNetwork , self ) . __init__ ( name = name ) <nl> + <nl> + def call ( self , x ) : <nl> + return x * 2 . <nl> + <nl> + class ParentNetwork ( network . Network ) : <nl> + <nl> + def __init__ ( self , name = None ) : <nl> + super ( ParentNetwork , self ) . __init__ ( name = name ) <nl> + self . l1 = self . track_layer ( ChildNetwork ( ) ) <nl> + <nl> + def call ( self , x ) : <nl> + return self . l1 ( x ) <nl> + <nl> + one = constant_op . constant ( [ [ 1 . 0 ] ] ) <nl> + gc . disable ( ) <nl> + gc . collect ( ) <nl> + previous_gc_debug_flags = gc . get_debug ( ) <nl> + gc . set_debug ( gc . DEBUG_SAVEALL ) <nl> + preexisting = len ( gc . garbage ) <nl> + net = ParentNetwork ( ) <nl> + net ( one ) <nl> + del net <nl> + gc . collect ( ) <nl> + # There should be no additional garbage requiring collection . <nl> + self . assertEqual ( preexisting , len ( gc . garbage ) ) <nl> + gc . set_debug ( previous_gc_debug_flags ) <nl> + gc . enable ( ) <nl> + <nl> + @ test_util . run_in_graph_and_eager_modes ( ) <nl> + def testAnonymousNoNameInitially ( self ) : <nl> + net = MyNetwork ( ) <nl> + with self . assertRaisesRegexp ( ValueError , " does not yet have a final name " ) : <nl> + net . name # pylint : disable = pointless - statement <nl> + <nl> + @ test_util . run_in_graph_and_eager_modes ( ) <nl> + def testExplicitHasNameInitially ( self ) : <nl> + net = MyNetwork ( name = " abcd " ) <nl> + self . assertEqual ( " abcd " , net . name ) <nl> + <nl> + @ test_util . run_in_graph_and_eager_modes ( ) <nl> + def testUsingResourceVariables ( self ) : <nl> + net = MyNetwork ( ) <nl> + net ( constant_op . constant ( [ [ 0 . ] ] ) ) <nl> + self . assertIsInstance ( net . trainable_weights [ 0 ] , <nl> + resource_variable_ops . ResourceVariable ) <nl> + <nl> + @ test_util . 
run_in_graph_and_eager_modes ( ) <nl> + def testDuplicateNameError ( self ) : <nl> + one = constant_op . constant ( [ [ 1 . ] ] ) <nl> + net = MyNetwork ( name = " foo " ) <nl> + net ( one ) <nl> + with self . assertRaisesRegexp ( <nl> + ValueError , " named ' foo ' already exists " ) : <nl> + net1 = MyNetwork ( name = " foo " ) <nl> + net1 ( one ) <nl> + <nl> + @ test_util . run_in_graph_and_eager_modes ( ) <nl> + def testWrappingInVariableScope ( self ) : <nl> + with variable_scope . variable_scope ( " outside_scope " ) : <nl> + net = MyNetwork ( ) <nl> + one = constant_op . constant ( [ [ 1 . ] ] ) <nl> + with self . assertRaisesRegexp ( <nl> + ValueError , <nl> + ( " Creating Networks inside named variable_scopes is currently not " <nl> + " supported " ) ) : <nl> + net ( one ) <nl> + # Alternatively , we could re - name the Network to match the variable_scope : <nl> + # self . assertEqual ( " outside_scope / my_network_1 " , net . name ) <nl> + # self . assertStartsWith ( <nl> + # expected_start = " outside_scope / my_network_1 / dense / " , <nl> + # actual = net . trainable_weights [ 0 ] . name ) <nl> + <nl> + @ test_util . run_in_graph_and_eager_modes ( ) <nl> + def testLayerNamesRespected ( self ) : <nl> + class ParentNetwork ( network . Network ) : <nl> + <nl> + def __init__ ( self ) : <nl> + super ( ParentNetwork , self ) . __init__ ( ) <nl> + self . first = self . track_layer ( <nl> + core . Dense ( 1 , use_bias = False , name = " explicit_name " ) ) <nl> + <nl> + def call ( self , x ) : <nl> + return self . first ( x ) <nl> + <nl> + one = constant_op . constant ( [ [ 1 . ] ] ) <nl> + net = ParentNetwork ( ) <nl> + net ( one ) <nl> + self . assertStartsWith ( expected_start = " parent_network_1 / explicit_name / " , <nl> + actual = net . trainable_weights [ 0 ] . name ) <nl> + self . assertEqual ( " explicit_name " , net . first . name ) <nl> + <nl> + @ test_util . run_in_graph_and_eager_modes ( ) <nl> + def testWrappingInAnonymousVariableScope ( self ) : <nl> + # Named outside variable_scopes are not supported at the moment . However , <nl> + # blank - named top level variable scopes do not change variable names , and so <nl> + # can be used to set the properties of Network variables . <nl> + was_called = [ False ] <nl> + def _custom_getter ( getter , * args , * * kwargs ) : <nl> + was_called [ 0 ] = True <nl> + return getter ( * args , * * kwargs ) <nl> + with variable_scope . variable_scope ( " " , custom_getter = _custom_getter ) : <nl> + net = MyNetwork ( ) <nl> + one = constant_op . constant ( [ [ 1 . ] ] ) <nl> + net ( one ) <nl> + self . assertTrue ( was_called [ 0 ] ) <nl> + <nl> + @ test_util . run_in_graph_and_eager_modes ( ) <nl> + def testReasonableSlashError ( self ) : <nl> + with self . assertRaisesRegexp ( <nl> + ValueError , " not allowed in Network names " ) : <nl> + MyNetwork ( name = " slash / slash " ) <nl> + <nl> + @ test_util . run_in_graph_and_eager_modes ( ) <nl> + def testNoVariableScopeNames ( self ) : <nl> + with self . assertRaisesRegexp ( <nl> + ValueError , " VariableScopes are not valid Network names " ) : <nl> + with variable_scope . variable_scope ( " some_scope " ) as vs : <nl> + MyNetwork ( name = vs ) <nl> + <nl> + @ test_util . run_in_graph_and_eager_modes ( ) <nl> + def testVariableScopeNameCollision ( self ) : <nl> + with variable_scope . variable_scope ( " abcd " ) : <nl> + pass <nl> + with self . 
assertRaisesRegexp ( <nl> + ValueError , " or a variable_scope was created with this name " ) : <nl> + net = MyNetwork ( name = " abcd " ) <nl> + one = constant_op . constant ( [ [ 1 . ] ] ) <nl> + net ( one ) <nl> + <nl> + @ test_util . run_in_graph_and_eager_modes ( ) <nl> def testNetworkVariablesDoNotInterfere ( self ) : <nl> - self . skipTest ( " TODO : FIX THIS " ) <nl> + core . Dense ( 1 , use_bias = True ) # Should not interfere with naming . <nl> net1 = MyNetwork ( ) <nl> net2 = MyNetwork ( ) <nl> + one = constant_op . constant ( [ [ 1 . ] ] ) <nl> + net1 ( one ) <nl> + net2 ( one ) <nl> + # Layer names typically are globally unique rather than being unique within <nl> + # the scope of their first use . However , within a Network they must be named <nl> + # locally so that previous Layer construction does not interfere with <nl> + # variable naming ( e . g . add a Layer construction before the Network , <nl> + # suddenly your previously saved checkpoint is incompatible ) . <nl> + self . assertEqual ( " dense_1 " , net1 . l1 . name ) <nl> + self . assertEqual ( " dense_1 " , net2 . l1 . name ) <nl> + self . evaluate ( net1 . trainable_weights [ 0 ] . assign ( [ [ 1 . ] ] ) ) <nl> + self . evaluate ( net2 . trainable_weights [ 0 ] . assign ( [ [ 2 . ] ] ) ) <nl> + self . assertEqual ( 2 . , self . evaluate ( net2 . trainable_weights [ 0 ] ) ) <nl> + self . assertEqual ( 1 . , self . evaluate ( net1 . trainable_weights [ 0 ] ) ) <nl> + self . assertStartsWith ( expected_start = " my_network_1 / dense_1 / " , <nl> + actual = net1 . trainable_weights [ 0 ] . name ) <nl> + self . assertStartsWith ( expected_start = " my_network_2 / dense_1 / " , <nl> + actual = net2 . trainable_weights [ 0 ] . name ) <nl> + <nl> + @ test_util . run_in_graph_and_eager_modes ( ) <nl> + def testNestableAnonymous ( self ) : <nl> + <nl> + # The case where no explicit names are specified . We make up unique names , <nl> + # and these should match the variable names . <nl> + class ParentNetwork ( network . Network ) : <nl> + <nl> + def __init__ ( self ) : <nl> + super ( ParentNetwork , self ) . __init__ ( ) <nl> + self . first = self . track_layer ( MyNetwork ( ) ) <nl> + self . second = self . track_layer ( MyNetwork ( ) ) <nl> + <nl> + def call ( self , x ) : <nl> + return self . second ( self . first ( x ) ) <nl> + <nl> + one = constant_op . constant ( [ [ 1 . ] ] ) <nl> + net = ParentNetwork ( ) <nl> + net ( one ) <nl> + self . assertStartsWith ( expected_start = " parent_network_1 / my_network_1 / dense " , <nl> + actual = net . trainable_weights [ 0 ] . name ) <nl> + self . assertStartsWith ( expected_start = " parent_network_1 / my_network_1 / dense " , <nl> + actual = net . first . trainable_weights [ 0 ] . name ) <nl> + self . assertStartsWith ( expected_start = " parent_network_1 / my_network_2 / dense " , <nl> + actual = net . trainable_weights [ 1 ] . name ) <nl> + self . assertStartsWith ( expected_start = " parent_network_1 / my_network_2 / dense " , <nl> + actual = net . second . trainable_weights [ 0 ] . name ) <nl> + self . assertEqual ( " parent_network_1 " , net . name ) <nl> + self . assertEqual ( " my_network_1 " , net . first . name ) <nl> + self . assertEqual ( " my_network_2 " , net . second . name ) <nl> + <nl> + net2 = ParentNetwork ( ) <nl> + net2 ( one ) <nl> + self . assertStartsWith ( expected_start = " parent_network_2 / my_network_1 / dense " , <nl> + actual = net2 . trainable_weights [ 0 ] . name ) <nl> + self .
assertStartsWith ( expected_start = " parent_network_2 / my_network_1 / dense " , <nl> + actual = net2 . first . trainable_weights [ 0 ] . name ) <nl> + self . assertStartsWith ( expected_start = " parent_network_2 / my_network_2 / dense " , <nl> + actual = net2 . trainable_weights [ 1 ] . name ) <nl> + self . assertStartsWith ( expected_start = " parent_network_2 / my_network_2 / dense " , <nl> + actual = net2 . second . trainable_weights [ 0 ] . name ) <nl> + self . assertEqual ( " parent_network_2 " , net2 . name ) <nl> + self . assertEqual ( " my_network_1 " , net2 . first . name ) <nl> + self . assertEqual ( " my_network_2 " , net2 . second . name ) <nl> + <nl> + @ test_util . run_in_graph_and_eager_modes ( ) <nl> + def testNestableExplicit ( self ) : <nl> + <nl> + # We have explicit network names and everything is globally unique . <nl> + class ParentNetwork ( network . Network ) : <nl> + <nl> + def __init__ ( self ) : <nl> + super ( ParentNetwork , self ) . __init__ ( name = " unique_parent_name " ) <nl> + self . first = self . track_layer ( <nl> + MyNetwork ( name = " first_unique_child_name " ) ) <nl> + self . second = self . track_layer ( <nl> + MyNetwork ( name = " second_unique_child_name " ) ) <nl> + <nl> + def call ( self , x ) : <nl> + return self . second ( self . first ( x ) ) <nl> + <nl> + one = constant_op . constant ( [ [ 1 . ] ] ) <nl> + net = ParentNetwork ( ) <nl> + net ( one ) <nl> + self . assertStartsWith ( <nl> + expected_start = " unique_parent_name / first_unique_child_name / dense " , <nl> + actual = net . trainable_weights [ 0 ] . name ) <nl> + self . assertStartsWith ( <nl> + expected_start = " unique_parent_name / second_unique_child_name / dense " , <nl> + actual = net . trainable_weights [ 1 ] . name ) <nl> + self . assertEqual ( " unique_parent_name " , net . name ) <nl> + self . assertEqual ( " first_unique_child_name " , net . first . name ) <nl> + self . assertEqual ( " second_unique_child_name " , net . second . name ) <nl> + <nl> + @ test_util . run_in_graph_and_eager_modes ( ) <nl> + def testLayerNetworkNameInteractions ( self ) : <nl> + <nl> + # Same base name as core . Dense ; Networks and non - Network Layers with the <nl> + # same base name should use the same numbering system . <nl> + class Dense ( network . Network ) : <nl> + <nl> + def __init__ ( self ) : <nl> + super ( Dense , self ) . __init__ ( ) <nl> + self . first = self . track_layer ( core . Dense ( 1 , use_bias = False ) ) <nl> + <nl> + def call ( self , x ) : <nl> + return self . first ( x ) <nl> + <nl> + class MixedLayerNetwork ( network . Network ) : <nl> + <nl> + def __init__ ( self ) : <nl> + super ( MixedLayerNetwork , self ) . __init__ ( ) <nl> + self . first = self . track_layer ( core . Dense ( 1 , use_bias = False ) ) <nl> + self . second = self . track_layer ( core . Dense ( 1 , use_bias = False ) ) <nl> + self . third = self . track_layer ( Dense ( ) ) <nl> + self . fourth = self . track_layer ( core . Dense ( 1 , use_bias = False ) ) <nl> + self . fifth = self . track_layer ( core . Dense ( 1 , use_bias = False ) ) <nl> + <nl> + def call ( self , x ) : <nl> + return self . fifth ( self . fourth ( self . third ( self . second ( self . first ( x ) ) ) ) ) <nl> + <nl> + one = constant_op . constant ( [ [ 1 . ] ] ) <nl> + net = MixedLayerNetwork ( ) <nl> + net ( one ) <nl> + self . assertEqual ( " dense_1 " , net . first . name ) <nl> + self . assertEqual ( " dense_2 " , net . second . name ) <nl> + self . assertEqual ( " dense_3 " , net . third . name ) <nl> + self . 
assertEqual ( " dense_4 " , net . fourth . name ) <nl> + self . assertEqual ( " dense_5 " , net . fifth . name ) <nl> + # Note that this is _not_ the default naming behavior for Layers . Layers <nl> + # which are added to Networks follow Network variable naming conventions <nl> + # ( i . e . variable names = network name unless variable sharing ) . Nested <nl> + # Layers revert to Layer behavior . <nl> + self . assertStartsWith ( expected_start = " mixed_layer_network_1 / dense_1 / " , <nl> + actual = net . trainable_weights [ 0 ] . name ) <nl> + self . assertStartsWith ( expected_start = " mixed_layer_network_1 / dense_2 / " , <nl> + actual = net . trainable_weights [ 1 ] . name ) <nl> + self . assertStartsWith ( expected_start = " mixed_layer_network_1 / dense_3 / " , <nl> + actual = net . trainable_weights [ 2 ] . name ) <nl> + self . assertStartsWith ( expected_start = " mixed_layer_network_1 / dense_4 / " , <nl> + actual = net . trainable_weights [ 3 ] . name ) <nl> + self . assertStartsWith ( expected_start = " mixed_layer_network_1 / dense_5 / " , <nl> + actual = net . trainable_weights [ 4 ] . name ) <nl> + self . assertEqual ( " mixed_layer_network_1 " , net . name ) <nl> + <nl> + @ test_util . run_in_graph_and_eager_modes ( ) <nl> + def testNestableExplicitCollisions ( self ) : <nl> + <nl> + # We have explicit network names and they are unique within the layer <nl> + # they ' re added to . <nl> + class ParentNetwork ( network . Network ) : <nl> + <nl> + def __init__ ( self ) : <nl> + super ( ParentNetwork , self ) . __init__ ( name = " nonunique_name " ) <nl> + self . first = self . track_layer ( <nl> + MyNetwork ( name = " nonunique_name " ) ) <nl> + self . second = self . track_layer ( <nl> + MyNetwork ( name = " second_unique_child_name " ) ) <nl> + <nl> + def call ( self , x ) : <nl> + return self . second ( self . first ( x ) ) <nl> + <nl> + one = constant_op . constant ( [ [ 1 . ] ] ) <nl> + net = ParentNetwork ( ) <nl> + net ( one ) <nl> + self . assertStartsWith ( <nl> + expected_start = " nonunique_name / nonunique_name / dense " , <nl> + actual = net . trainable_weights [ 0 ] . name ) <nl> + self . assertStartsWith ( <nl> + expected_start = " nonunique_name / second_unique_child_name / dense " , <nl> + actual = net . trainable_weights [ 1 ] . name ) <nl> + self . assertEqual ( " nonunique_name " , net . name ) <nl> + self . assertEqual ( " nonunique_name " , net . first . name ) <nl> + self . assertEqual ( " second_unique_child_name " , net . second . name ) <nl> + <nl> + @ test_util . run_in_graph_and_eager_modes ( ) <nl> + def testNestableExplicitWithAnonymousParent ( self ) : <nl> + <nl> + # A parent network is instantiated multiple times with explicitly named <nl> + # children . We shouldn ' t throw any name errors . <nl> + class ParentNetwork ( network . Network ) : <nl> + <nl> + def __init__ ( self ) : <nl> + super ( ParentNetwork , self ) . __init__ ( ) <nl> + self . first = self . track_layer ( <nl> + MyNetwork ( name = " first_unique_child_name " ) ) <nl> + self . second = self . track_layer ( <nl> + MyNetwork ( name = " second_unique_child_name " ) ) <nl> + <nl> + def call ( self , x ) : <nl> + return self . second ( self . first ( x ) ) <nl> + <nl> + one = constant_op . constant ( [ [ 1 . ] ] ) <nl> + net = ParentNetwork ( ) <nl> + net ( one ) <nl> + self . assertStartsWith ( <nl> + expected_start = " parent_network_1 / first_unique_child_name / dense_1 / " , <nl> + actual = net . trainable_weights [ 0 ] . name ) <nl> + self . 
assertStartsWith ( <nl> + expected_start = " parent_network_1 / second_unique_child_name / dense_1 / " , <nl> + actual = net . trainable_weights [ 1 ] . name ) <nl> + self . assertEqual ( " parent_network_1 " , net . name ) <nl> + self . assertEqual ( " first_unique_child_name " , net . first . name ) <nl> + self . assertEqual ( " second_unique_child_name " , net . second . name ) <nl> + <nl> + net2 = ParentNetwork ( ) <nl> + net2 ( one ) <nl> + self . assertStartsWith ( <nl> + expected_start = " parent_network_2 / first_unique_child_name / dense " , <nl> + actual = net2 . trainable_weights [ 0 ] . name ) <nl> + self . assertStartsWith ( <nl> + expected_start = " parent_network_2 / second_unique_child_name / dense " , <nl> + actual = net2 . trainable_weights [ 1 ] . name ) <nl> + self . assertEqual ( " parent_network_2 " , net2 . name ) <nl> + self . assertEqual ( " first_unique_child_name " , net2 . first . name ) <nl> + self . assertEqual ( " second_unique_child_name " , net2 . second . name ) <nl> + <nl> + @ test_util . run_in_graph_and_eager_modes ( ) <nl> + def testNestableExplicitSameLayerCollisions ( self ) : <nl> + <nl> + # We have explicit network names and they are _not_ unique within the layer <nl> + # they ' re added to . Error . <nl> + class ParentNetwork ( network . Network ) : <nl> + <nl> + def __init__ ( self ) : <nl> + super ( ParentNetwork , self ) . __init__ ( name = " unique_parent_name " ) <nl> + self . first = self . track_layer ( MyNetwork ( name = " nonunique_name " ) ) <nl> + self . second = self . track_layer ( MyNetwork ( name = " nonunique_name " ) ) <nl> + <nl> + def call ( self , x ) : <nl> + return self . second ( self . first ( x ) ) <nl> + <nl> + with self . assertRaisesRegexp ( ValueError , " nonunique_name " ) : <nl> + ParentNetwork ( ) <nl> + <nl> + @ test_util . run_in_graph_and_eager_modes ( ) <nl> + def testAnonymousVariableSharing ( self ) : <nl> + <nl> + # Two " owned " Networks <nl> + class FirstParentNetwork ( network . Network ) : <nl> + <nl> + def __init__ ( self ) : <nl> + super ( FirstParentNetwork , self ) . __init__ ( ) <nl> + self . first = self . track_layer ( MyNetwork ( ) ) <nl> + self . second = self . track_layer ( MyNetwork ( ) ) <nl> + <nl> + def call ( self , x ) : <nl> + return self . second ( self . first ( x ) ) <nl> + <nl> + one = constant_op . constant ( [ [ 1 . ] ] ) <nl> + net = FirstParentNetwork ( ) <nl> + net ( one ) <nl> + <nl> + # One Network shared with FirstParentNetwork , one owned Network . Same name , <nl> + # but this is OK because only one is owned . This name collision is <nl> + # avoidable ; we could have looked at the base_name of the non - owned Network <nl> + # and incremented our naming based on that . <nl> + class SecondParentNetwork ( network . Network ) : <nl> + <nl> + def __init__ ( self ) : <nl> + super ( SecondParentNetwork , self ) . __init__ ( ) <nl> + self . first = self . track_layer ( net . first ) <nl> + self . second = self . track_layer ( MyNetwork ( ) ) <nl> + <nl> + def call ( self , x ) : <nl> + return self . second ( self . first ( x ) ) <nl> + <nl> + net2 = SecondParentNetwork ( ) <nl> + net2 ( one ) <nl> + <nl> + self . assertStartsWith ( <nl> + expected_start = " first_parent_network_1 / my_network_1 / dense_1 / " , <nl> + actual = net2 . trainable_weights [ 0 ] . name ) <nl> + self . assertStartsWith ( <nl> + expected_start = " second_parent_network_1 / my_network_1 / dense_1 / " , <nl> + actual = net2 . trainable_weights [ 1 ] . name ) <nl> + self . 
assertEqual ( " second_parent_network_1 " , net2 . name ) <nl> + self . assertTrue ( net2 . first is net . first ) <nl> + self . assertEqual ( " my_network_1 " , net2 . first . name ) <nl> + self . assertEqual ( " my_network_1 " , net2 . second . name ) <nl> + <nl> + # No name collision ; the owned Network is added first and has a different <nl> + # name than the shared Network . <nl> + class ThirdParentNetwork ( network . Network ) : <nl> + <nl> + def __init__ ( self ) : <nl> + super ( ThirdParentNetwork , self ) . __init__ ( ) <nl> + self . first = self . track_layer ( MyNetwork ( ) ) <nl> + self . second = self . track_layer ( net . second ) <nl> <nl> + def call ( self , x ) : <nl> + return self . second ( self . first ( x ) ) <nl> + <nl> + net3 = ThirdParentNetwork ( ) <nl> + net3 ( one ) <nl> + <nl> + self . assertStartsWith ( <nl> + expected_start = " third_parent_network_1 / my_network_1 / dense " , <nl> + actual = net3 . trainable_weights [ 0 ] . name ) <nl> + self . assertStartsWith ( <nl> + expected_start = " first_parent_network_1 / my_network_2 / dense " , <nl> + actual = net3 . trainable_weights [ 1 ] . name ) <nl> + self . assertEqual ( " third_parent_network_1 " , net3 . name ) <nl> + self . assertTrue ( net3 . second is net . second ) <nl> + self . assertEqual ( " my_network_1 " , net3 . first . name ) <nl> + self . assertEqual ( " my_network_2 " , net3 . second . name ) <nl> + <nl> + # " Unavoidable " same - name Layer . The owned name is added first ( fixed ) , then <nl> + # a shared Network is added with the same name . <nl> + class FourthParentNetwork ( network . Network ) : <nl> + <nl> + def __init__ ( self ) : <nl> + super ( FourthParentNetwork , self ) . __init__ ( ) <nl> + self . first = self . track_layer ( MyNetwork ( ) ) <nl> + self . second = self . track_layer ( net . first ) <nl> + <nl> + def call ( self , x ) : <nl> + return self . second ( self . first ( x ) ) <nl> + <nl> + net4 = FourthParentNetwork ( ) <nl> + net4 ( one ) <nl> + <nl> + self . assertStartsWith ( <nl> + expected_start = " fourth_parent_network_1 / my_network_1 / dense_1 / " , <nl> + actual = net4 . trainable_weights [ 0 ] . name ) <nl> + self . assertStartsWith ( <nl> + expected_start = " first_parent_network_1 / my_network_1 / dense_1 / " , <nl> + actual = net4 . trainable_weights [ 1 ] . name ) <nl> + self . assertEqual ( " fourth_parent_network_1 " , net4 . name ) <nl> + self . assertTrue ( net4 . second is net . first ) <nl> + self . assertEqual ( " my_network_1 " , net4 . first . name ) <nl> + self . assertEqual ( " my_network_1 " , net4 . second . name ) <nl> + <nl> + @ test_util . run_in_graph_and_eager_modes ( ) <nl> + def testRecursiveLayerRenaming ( self ) : <nl> + core . Dense ( 1 ) # Under default Layer naming , would change subsequent names . <nl> + <nl> + class NetworkWithLayerChildren ( network . Network ) : <nl> + <nl> + def __init__ ( self ) : <nl> + super ( NetworkWithLayerChildren , self ) . __init__ ( ) <nl> + self . first = self . track_layer ( core . Dense ( 1 , use_bias = False ) ) <nl> + self . second = self . track_layer ( core . Dense ( 1 , use_bias = False ) ) <nl> + <nl> + def call ( self , x ) : <nl> + return self . second ( self . first ( x ) ) <nl> + <nl> + class ParentNetwork ( network . Network ) : <nl> + <nl> + def __init__ ( self ) : <nl> + super ( ParentNetwork , self ) . __init__ ( ) <nl> + self . first = self . track_layer ( NetworkWithLayerChildren ( ) ) <nl> + self . second = self . 
track_layer ( NetworkWithLayerChildren ( ) ) <nl> + <nl> + def call ( self , x ) : <nl> + return self . second ( self . first ( x ) ) <nl> + <nl> + net = ParentNetwork ( ) <nl> one = constant_op . constant ( [ [ 1 . ] ] ) <nl> + net ( one ) <nl> + <nl> + self . assertStartsWith ( <nl> + expected_start = ( " parent_network_1 / network_with_layer_children_1 / " <nl> + " dense_1 / " ) , <nl> + actual = net . trainable_weights [ 0 ] . name ) <nl> + self . assertStartsWith ( <nl> + expected_start = ( " parent_network_1 / network_with_layer_children_1 / " <nl> + " dense_2 / " ) , <nl> + actual = net . trainable_weights [ 1 ] . name ) <nl> + self . assertStartsWith ( <nl> + expected_start = ( " parent_network_1 / network_with_layer_children_2 / " <nl> + " dense_1 / " ) , <nl> + actual = net . trainable_weights [ 2 ] . name ) <nl> + self . assertStartsWith ( <nl> + expected_start = ( " parent_network_1 / network_with_layer_children_2 / " <nl> + " dense_2 / " ) , <nl> + actual = net . trainable_weights [ 3 ] . name ) <nl> + self . assertEqual ( " parent_network_1 " , net . name ) <nl> + self . assertEqual ( " network_with_layer_children_1 " , net . first . name ) <nl> + self . assertEqual ( " network_with_layer_children_2 " , net . second . name ) <nl> + self . assertEqual ( " dense_1 " , net . first . first . name ) <nl> + self . assertEqual ( " dense_2 " , net . first . second . name ) <nl> + self . assertEqual ( " dense_1 " , net . second . first . name ) <nl> + self . assertEqual ( " dense_2 " , net . second . second . name ) <nl> + <nl> + @ test_util . run_in_graph_and_eager_modes ( ) <nl> + def testCallInDifferentOrderThanConstruct ( self ) : <nl> + shared_network = MyNetwork ( ) <nl> + <nl> + class FirstNetwork ( network . Network ) : <nl> + <nl> + def __init__ ( self ) : <nl> + super ( FirstNetwork , self ) . __init__ ( ) <nl> + self . first = self . track_layer ( shared_network ) <nl> + self . second = self . track_layer ( MyNetwork ( ) ) <nl> + <nl> + def call ( self , x ) : <nl> + return self . second ( self . first ( x ) ) <nl> + <nl> + class SecondNetwork ( network . Network ) : <nl> + <nl> + def __init__ ( self ) : <nl> + super ( SecondNetwork , self ) . __init__ ( ) <nl> + self . first = self . track_layer ( shared_network ) <nl> + self . second = self . track_layer ( MyNetwork ( ) ) <nl> <nl> - print ( type ( net1 ( one ) ) ) <nl> + def call ( self , x ) : <nl> + return self . second ( self . first ( x ) ) <nl> + <nl> + net1 = FirstNetwork ( ) <nl> + net2 = SecondNetwork ( ) <nl> + <nl> + one = constant_op . constant ( [ [ 1 . ] ] ) <nl> net2 ( one ) <nl> + net1 ( one ) <nl> + <nl> + self . assertStartsWith ( <nl> + expected_start = " first_network_1 / my_network_1 / dense_1 / " , <nl> + actual = net1 . trainable_weights [ 0 ] . name ) <nl> + self . assertStartsWith ( <nl> + expected_start = " first_network_1 / my_network_2 / dense_1 / " , <nl> + actual = net1 . trainable_weights [ 1 ] . name ) <nl> + self . assertStartsWith ( <nl> + expected_start = " first_network_1 / my_network_1 / dense_1 / " , <nl> + actual = net2 . trainable_weights [ 0 ] . name ) <nl> + self . assertStartsWith ( <nl> + expected_start = " second_network_1 / my_network_1 / dense_1 / " , <nl> + actual = net2 . trainable_weights [ 1 ] . name ) <nl> + self . assertTrue ( net1 . trainable_weights [ 0 ] is net2 . trainable_weights [ 0 ] ) <nl> + self . assertEqual ( " first_network_1 " , net1 . name ) <nl> + self . assertEqual ( " my_network_1 " , net1 . first . name ) <nl> + self . 
assertEqual ( " my_network_2 " , net1 . second . name ) <nl> + self . assertTrue ( net2 . first is net1 . first ) <nl> + self . assertEqual ( " my_network_1 " , net2 . second . name ) <nl> + <nl> + @ test_util . run_in_graph_and_eager_modes ( ) <nl> + def testLayerCallInDifferentOrderThanConstruct ( self ) : <nl> + # Same idea as testCallInDifferentOrderThanConstruct , but this time with a <nl> + # non - Network Layer shared between two Networks rather than a <nl> + # Network . Naming should follow the same rules . <nl> + shared_layer = core . Dense ( 1 , use_bias = False ) <nl> + <nl> + class FirstNetwork ( network . Network ) : <nl> + <nl> + def __init__ ( self ) : <nl> + super ( FirstNetwork , self ) . __init__ ( ) <nl> + self . first = self . track_layer ( shared_layer ) <nl> + self . second = self . track_layer ( core . Dense ( 1 , use_bias = False ) ) <nl> + <nl> + def call ( self , x ) : <nl> + return self . second ( self . first ( x ) ) <nl> + <nl> + class SecondNetwork ( network . Network ) : <nl> + <nl> + def __init__ ( self ) : <nl> + super ( SecondNetwork , self ) . __init__ ( ) <nl> + self . first = self . track_layer ( shared_layer ) <nl> + self . second = self . track_layer ( core . Dense ( 1 , use_bias = False ) ) <nl> + <nl> + def call ( self , x ) : <nl> + return self . second ( self . first ( x ) ) <nl> + <nl> + net1 = FirstNetwork ( ) <nl> + net2 = SecondNetwork ( ) <nl> + <nl> + one = constant_op . constant ( [ [ 1 . ] ] ) <nl> + net2 ( one ) <nl> + net1 ( one ) <nl> + <nl> + self . assertStartsWith ( <nl> + expected_start = " first_network_1 / dense_1 / " , <nl> + actual = net1 . trainable_weights [ 0 ] . name ) <nl> + self . assertStartsWith ( <nl> + expected_start = " first_network_1 / dense_2 / " , <nl> + actual = net1 . trainable_weights [ 1 ] . name ) <nl> + self . assertStartsWith ( <nl> + expected_start = " first_network_1 / dense_1 / " , <nl> + actual = net2 . trainable_weights [ 0 ] . name ) <nl> + self . assertStartsWith ( <nl> + expected_start = " second_network_1 / dense_1 / " , <nl> + actual = net2 . trainable_weights [ 1 ] . name ) <nl> + self . assertTrue ( net1 . trainable_weights [ 0 ] is net2 . trainable_weights [ 0 ] ) <nl> + self . assertEqual ( " first_network_1 " , net1 . name ) <nl> + self . assertEqual ( " dense_1 " , net1 . first . name ) <nl> + self . assertEqual ( " dense_2 " , net1 . second . name ) <nl> + self . assertTrue ( net2 . first is net1 . first ) <nl> + self . assertEqual ( " dense_1 " , net2 . second . name ) <nl> + <nl> + @ test_util . run_in_graph_and_eager_modes ( ) <nl> + def testLayerAlreadyBuilt ( self ) : <nl> + one = constant_op . constant ( [ [ 1 . ] ] ) <nl> + core . Dense ( 1 , use_bias = False ) # pre - built layers use global naming <nl> + one = constant_op . constant ( [ [ 1 . ] ] ) <nl> + core . Dense ( 1 , use_bias = False ) ( one ) <nl> + shared_layer = core . Dense ( 1 , use_bias = False ) <nl> + shared_layer ( one ) <nl> + <nl> + class FirstNetwork ( network . Network ) : <nl> + <nl> + def __init__ ( self ) : <nl> + super ( FirstNetwork , self ) . __init__ ( ) <nl> + self . first = self . track_layer ( shared_layer ) <nl> + self . second = self . track_layer ( core . Dense ( 1 , use_bias = False ) ) <nl> <nl> - net1 . trainable_weights [ 0 ] . assign ( constant_op . constant ( [ [ 1 . ] ] ) ) <nl> - net2 . trainable_weights [ 0 ] . assign ( constant_op . constant ( [ [ 2 . ] ] ) ) <nl> + def call ( self , x ) : <nl> + return self . second ( self . 
first ( x ) ) <nl> <nl> - print ( " NET1 " ) <nl> - print ( net1 . name ) <nl> - print ( net1 . variables ) <nl> - print ( net1 ( one ) ) <nl> + net = FirstNetwork ( ) <nl> + net ( one ) <nl> <nl> - print ( " NET2 " ) <nl> - print ( net2 . name ) <nl> - print ( net2 . variables ) <nl> - print ( net2 ( one ) ) <nl> + self . assertStartsWith ( <nl> + expected_start = " dense_1 / " , # Pre - built layers have variable names which <nl> + # do not match their layer names . <nl> + actual = net . trainable_weights [ 0 ] . name ) <nl> + self . assertStartsWith ( <nl> + expected_start = " first_network_1 / dense_1 / " , <nl> + actual = net . trainable_weights [ 1 ] . name ) <nl> + self . assertTrue ( <nl> + net . trainable_weights [ 0 ] is shared_layer . trainable_weights [ 0 ] ) <nl> + self . assertEqual ( " first_network_1 " , net . name ) <nl> + self . assertEqual ( " dense_3 " , net . first . name ) <nl> + self . assertEqual ( " dense_1 " , net . second . name ) <nl> <nl> <nl> class SequentialTest ( test . TestCase ) : <nl> mmm a / tensorflow / contrib / eager / python / saver . py <nl> ppp b / tensorflow / contrib / eager / python / saver . py <nl> def restore_variables_on_create ( save_path , map_func = None ) : <nl> for k , _ in checkpoint_utils . list_variables ( save_path ) : <nl> ckpt_var_cache [ k ] = reader . get_tensor ( k ) <nl> <nl> - old_init = getattr ( <nl> - resource_variable_ops . ResourceVariable , " _init_from_args " , None ) <nl> + old_init = getattr ( resource_variable_ops . ResourceVariable , <nl> + " _init_from_args " , None ) <nl> assert old_init , " ResourceVariable misses _init_from_args method . " <nl> setattr ( resource_variable_ops . ResourceVariable , " _init_from_args " , <nl> _init_from_checkpoint ) <nl> def restore_variables_on_create ( save_path , map_func = None ) : <nl> <nl> <nl> class Saver ( object ) : <nl> - " " " A simple tf . train . Saver adapter for eager mode . <nl> - <nl> - save and restore API are similar to the tf . train . Saver , except that <nl> - session is not needed . <nl> - <nl> - Args : <nl> - var_list : Same as tf . train . Saver . <nl> + " " " A tf . train . Saver adapter for use when eager execution is enabled . <nl> " " " <nl> <nl> def __init__ ( self , var_list ) : <nl> + " " " A tf . train . Saver adapter for use when eager execution is enabled . <nl> + <nl> + The API , and on - disk format , mimic tf . train . Saver except that no <nl> + Session is needed . <nl> + <nl> + Args : <nl> + var_list : The list of variables that will be saved and restored . Either a <nl> + list of ` tfe . Variable ` objects , or a dictionary mapping names to <nl> + ` tfe . Variable ` objects . <nl> + <nl> + Raises : <nl> + RuntimeError : if invoked when eager execution has not been enabled . <nl> + " " " <nl> if context . in_graph_mode ( ) : <nl> - raise ValueError ( " Currently , tfe . Saver can only be used when eager " <nl> - " execution is enabled . Use tf . train . Saver when " <nl> - " building graphs . " ) <nl> + raise RuntimeError ( " tfe . Saver can only be used when eager " <nl> + " execution is enabled . Use tf . train . Saver when " <nl> + " building graphs . " ) <nl> self . _saver = _saver . Saver ( var_list = var_list ) <nl> <nl> - def save ( self , save_path , global_step = None ) : <nl> + def save ( self , file_prefix , global_step = None ) : <nl> " " " Saves variables . <nl> <nl> Args : <nl> - save_path : See save method in tf . train . Saver . <nl> - global_step : See save method in tf . train . Saver . 
<nl> + file_prefix : Path prefix of files created for the checkpoint . <nl> + global_step : If provided , the global step number is appended to file_prefix <nl> + to create the checkpoint filename . The optional argument can be a <nl> + Tensor , a Variable , or an integer . <nl> <nl> Returns : <nl> - See save method in tf . train . Saver . <nl> + A string : prefix of filenames created for the checkpoint . This may be <nl> + an extension of file_prefix that is suitable to pass as an argument <nl> + to a subsequent call to ` restore ( ) ` . <nl> " " " <nl> with ops . device ( " / device : CPU : 0 " ) : <nl> - return self . _saver . save ( None , save_path , write_meta_graph = False , <nl> - global_step = global_step ) <nl> + return self . _saver . save ( <nl> + None , file_prefix , write_meta_graph = False , global_step = global_step ) <nl> <nl> - def restore ( self , save_path ) : <nl> + def restore ( self , file_prefix ) : <nl> " " " Restores previously saved variables . <nl> <nl> Args : <nl> - save_path : See restore method in tf . train . Saver . <nl> + file_prefix : Path prefix where parameters were previously saved . <nl> + Typically obtained from a previous ` save ( ) ` call , or from <nl> + @ { tf . train . latest_checkpoint } . <nl> " " " <nl> with ops . device ( " / device : CPU : 0 " ) : <nl> - self . _saver . restore ( None , save_path ) <nl> - <nl> + self . _saver . restore ( None , file_prefix ) <nl> mmm a / tensorflow / contrib / eager / python / tfe . py <nl> ppp b / tensorflow / contrib / eager / python / tfe . py <nl> <nl> To use , at program startup , call ` tfe . enable_eager_execution ( ) ` . <nl> <nl> + @ @ metrics <nl> + <nl> @ @ list_devices <nl> @ @ num_gpus <nl> <nl> <nl> <nl> # pylint : disable = g - bad - import - order , g - import - not - at - top , unused - import <nl> # <nl> + from tensorflow . contrib . eager . python import metrics <nl> from tensorflow . contrib . eager . python . datasets import Iterator <nl> from tensorflow . contrib . eager . python . network import Network <nl> from tensorflow . contrib . eager . python . saver import restore_variables_on_create <nl> mmm a / tensorflow / contrib / estimator / BUILD <nl> ppp b / tensorflow / contrib / estimator / BUILD <nl> py_test ( <nl> deps = [ <nl> " : logit_fns " , <nl> " / / tensorflow / python : client_testlib " , <nl> - " / / tensorflow / python : framework_ops " , <nl> + " / / tensorflow / python : constant_op " , <nl> + " / / tensorflow / python : session " , <nl> " / / tensorflow / python / estimator : model_fn " , <nl> ] , <nl> ) <nl> mmm a / tensorflow / contrib / estimator / python / estimator / extenders . py <nl> ppp b / tensorflow / contrib / estimator / python / estimator / extenders . py <nl> <nl> from tensorflow . python . framework import sparse_tensor as sparse_tensor_lib <nl> from tensorflow . python . ops import clip_ops <nl> from tensorflow . python . training import optimizer as optimizer_lib <nl> - from tensorflow . python . util import tf_inspect <nl> + <nl> <nl> _VALID_METRIC_FN_ARGS = set ( [ ' features ' , ' labels ' , ' predictions ' , ' config ' ] ) <nl> <nl> def get_slot_names ( self , * args , * * kwargs ) : <nl> <nl> def _verify_metric_fn_args ( metric_fn ) : <nl> args = set ( estimator_util . fn_args ( metric_fn ) ) <nl> - if tf_inspect . ismethod ( metric_fn ) : <nl> - if ' self ' in args : <nl> - args . 
remove ( ' self ' ) <nl> invalid_args = list ( args - _VALID_METRIC_FN_ARGS ) <nl> if invalid_args : <nl> raise ValueError ( ' metric_fn ( % s ) has following not expected args : % s ' % <nl> mmm a / tensorflow / contrib / framework / BUILD <nl> ppp b / tensorflow / contrib / framework / BUILD <nl> py_test ( <nl> deps = [ <nl> " : framework_py " , <nl> " / / tensorflow / python : array_ops " , <nl> - " / / tensorflow / python : client " , <nl> " / / tensorflow / python : client_testlib " , <nl> " / / tensorflow / python : errors " , <nl> " / / tensorflow / python : framework_for_generated_wrappers " , <nl> py_test ( <nl> " / / tensorflow / python : nn_ops " , <nl> " / / tensorflow / python : partitioned_variables " , <nl> " / / tensorflow / python : platform " , <nl> + " / / tensorflow / python : session " , <nl> " / / tensorflow / python : training " , <nl> " / / tensorflow / python : variable_scope " , <nl> " / / tensorflow / python : variables " , <nl> py_test ( <nl> " / / tensorflow / python : client_testlib " , <nl> " / / tensorflow / python : constant_op " , <nl> " / / tensorflow / python : dtypes " , <nl> - " / / tensorflow / python : errors " , <nl> " / / tensorflow / python : framework_ops " , <nl> " / / tensorflow / python : math_ops " , <nl> " / / tensorflow / python : partitioned_variables " , <nl> mmm a / tensorflow / contrib / fused_conv / python / ops / fused_conv2d_bias_activation_op_test . py <nl> ppp b / tensorflow / contrib / fused_conv / python / ops / fused_conv2d_bias_activation_op_test . py <nl> class FusedConv2DBiasActivationTest ( test . TestCase ) : <nl> def _DtypesToTest ( self , use_gpu ) : <nl> return [ dtypes . float32 ] <nl> <nl> + def _FilterFormatsToTest ( self , use_gpu ) : <nl> + return [ " HWIO " , " OIHW " ] <nl> + <nl> def _SetupValuesForDevice ( self , tensor_in_sizes , filter_in_sizes , bias , <nl> strides , padding , activation_mode , data_format , <nl> - dtype ) : <nl> + filter_format , dtype ) : <nl> " " " Verifies the output values of the convolution function . <nl> <nl> Args : <nl> def _SetupValuesForDevice ( self , tensor_in_sizes , filter_in_sizes , bias , <nl> padding : Padding type . <nl> activation_mode : Activation mode . <nl> data_format : Format of the data tensors . <nl> + filter_format : Filter format to use for the fused convolution . <nl> dtype : Data type for inputs and outputs . <nl> Returns : <nl> Symbolic tensor value and reference value that can be used to <nl> def _SetupValuesForDevice ( self , tensor_in_sizes , filter_in_sizes , bias , <nl> with self . test_session ( use_gpu = True ) : <nl> t1 = constant_op . constant ( x1 , shape = tensor_in_sizes , dtype = dtype ) <nl> t2 = constant_op . constant ( x2 , shape = filter_in_sizes , dtype = dtype ) <nl> + fused_t2 = t2 <nl> + if filter_format = = " OIHW " : <nl> + fused_t2 = HwioToOihw ( t2 ) <nl> t3 = constant_op . constant ( x3 , shape = [ bias_size ] , dtype = dtype ) <nl> strides = [ 1 ] + strides + [ 1 ] <nl> if data_format = = " NCHW " : <nl> def _SetupValuesForDevice ( self , tensor_in_sizes , filter_in_sizes , bias , <nl> strides = test_util . NHWCToNCHW ( strides ) <nl> output = fused_conv2d_bias_activation_op . fused_conv2d_bias_activation ( <nl> t1 , <nl> - t2 , <nl> + fused_t2 , <nl> t3 , <nl> strides = strides , <nl> padding = padding , <nl> data_format = data_format , <nl> + filter_format = filter_format , <nl> activation_mode = activation_mode ) <nl> ref_conv_output = nn_ops . 
conv2d ( <nl> t1 , t2 , strides = strides , padding = padding , data_format = data_format ) <nl> def _VerifyValues ( self , tensor_in_sizes , filter_in_sizes , bias , strides , <nl> ref_tensors = [ ] <nl> for ( data_format , use_gpu ) in GetTestConfigs ( ) : <nl> for dtype in self . _DtypesToTest ( use_gpu ) : <nl> - result , expected = self . _SetupValuesForDevice ( <nl> - tensor_in_sizes , filter_in_sizes , bias , strides , padding , " Relu " , <nl> - data_format , dtype ) <nl> + for filter_format in self . _FilterFormatsToTest ( use_gpu ) : <nl> + result , expected = self . _SetupValuesForDevice ( <nl> + tensor_in_sizes , filter_in_sizes , bias , strides , padding , " Relu " , <nl> + data_format , filter_format , dtype ) <nl> tensors . append ( result ) <nl> ref_tensors . append ( expected ) <nl> with self . test_session ( ) as sess : <nl> def NchwToNchwVectC ( in_tensor ) : <nl> return array_ops . transpose ( t , [ 0 , 1 , 3 , 4 , 2 ] ) <nl> <nl> <nl> + def HwioToOihw ( in_tensor ) : <nl> + return array_ops . transpose ( in_tensor , [ 3 , 2 , 0 , 1 ] ) <nl> + <nl> + <nl> def SimulateFusedConv2dBiasActivationInt8 ( conv_input_scale , conv_input , kernel , <nl> padding , strides , side_input_scale , <nl> side_input , biases ) : <nl> mmm a / tensorflow / contrib / gan / BUILD <nl> ppp b / tensorflow / contrib / gan / BUILD <nl> py_library ( <nl> " / / tensorflow / python : embedding_ops " , <nl> " / / tensorflow / python : math_ops " , <nl> " / / tensorflow / python : tensor_util " , <nl> + " / / tensorflow / python : util " , <nl> " / / tensorflow / python : variable_scope " , <nl> ] , <nl> ) <nl> py_library ( <nl> " / / tensorflow / python : nn " , <nl> " / / tensorflow / python : tensor_shape " , <nl> " / / tensorflow / python : tensor_util " , <nl> + " / / tensorflow / python : util " , <nl> " / / tensorflow / python : variable_scope " , <nl> ] , <nl> ) <nl> py_library ( <nl> " python / features / python / clip_weights_impl . 
py " , <nl> ] , <nl> srcs_version = " PY2AND3 " , <nl> - deps = [ " / / tensorflow / contrib / opt : opt_py " ] , <nl> + deps = [ <nl> + " / / tensorflow / contrib / opt : opt_py " , <nl> + " / / tensorflow / python : util " , <nl> + ] , <nl> ) <nl> <nl> py_test ( <nl> mmm a / tensorflow / contrib / gdr / BUILD <nl> ppp b / tensorflow / contrib / gdr / BUILD <nl> cc_library ( <nl> " : gdr_memory_manager " , <nl> " : gdr_rendezvous_mgr " , <nl> " : gdr_worker " , <nl> - " / / tensorflow / core : lib_internal " , <nl> " / / tensorflow / core / distributed_runtime / rpc : grpc_server_lib " , <nl> ] , <nl> alwayslink = 1 , <nl> mmm a / tensorflow / contrib / graph_editor / BUILD <nl> ppp b / tensorflow / contrib / graph_editor / BUILD <nl> py_test ( <nl> " : graph_editor_py " , <nl> " : match " , <nl> " / / tensorflow / python : array_ops " , <nl> - " / / tensorflow / python : client " , <nl> " / / tensorflow / python : client_testlib " , <nl> " / / tensorflow / python : control_flow_ops " , <nl> " / / tensorflow / python : framework_for_generated_wrappers " , <nl> " / / tensorflow / python : gradients " , <nl> " / / tensorflow / python : math_ops " , <nl> + " / / tensorflow / python : session " , <nl> " / / tensorflow / python : variables " , <nl> " / / third_party / py / numpy " , <nl> ] , <nl> mmm a / tensorflow / contrib / hooks / BUILD <nl> ppp b / tensorflow / contrib / hooks / BUILD <nl> py_library ( <nl> srcs_version = " PY2AND3 " , <nl> deps = [ <nl> " / / tensorflow / python : training " , <nl> + " / / tensorflow / python : util " , <nl> ] , <nl> ) <nl> <nl> mmm a / tensorflow / contrib / image / BUILD <nl> ppp b / tensorflow / contrib / image / BUILD <nl> py_library ( <nl> srcs_version = " PY2AND3 " , <nl> deps = [ <nl> " : distort_image_ops " , <nl> + " : single_image_random_dot_stereograms_py " , <nl> " / / tensorflow / contrib / util : util_py " , <nl> - " / / tensorflow / python : framework " , <nl> " / / tensorflow / python : framework_for_generated_wrappers " , <nl> " / / tensorflow / python : image_ops " , <nl> " / / tensorflow / python : platform " , <nl> " / / tensorflow / python : random_ops " , <nl> + " / / tensorflow / python : util " , <nl> ] , <nl> ) <nl> <nl> mmm a / tensorflow / contrib / kernel_methods / BUILD <nl> ppp b / tensorflow / contrib / kernel_methods / BUILD <nl> py_test ( <nl> name = " kernel_estimators_test " , <nl> srcs = [ " python / kernel_estimators_test . py " ] , <nl> srcs_version = " PY2AND3 " , <nl> + tags = [ " notsan " ] , <nl> deps = [ <nl> " : kernel_methods " , <nl> " / / tensorflow / contrib / layers : layers_py " , <nl> mmm a / tensorflow / contrib / kfac / python / kernel_tests / BUILD <nl> ppp b / tensorflow / contrib / kfac / python / kernel_tests / BUILD <nl> py_test ( <nl> deps = [ <nl> " / / tensorflow / contrib / kfac / python / ops : kfac_optimizer " , <nl> " / / tensorflow / contrib / kfac / python / ops : layer_collection " , <nl> - " / / tensorflow / contrib / kfac / python / ops : loss_functions " , <nl> " / / tensorflow / python : array_ops " , <nl> " / / tensorflow / python : client_testlib " , <nl> " / / tensorflow / python : framework_ops " , <nl> mmm a / tensorflow / contrib / kfac / python / kernel_tests / layer_collection_test . py <nl> ppp b / tensorflow / contrib / kfac / python / kernel_tests / layer_collection_test . py <nl> def testLossFunctionByName ( self ) : <nl> self . assertEqual ( 1 , len ( lc . losses ) ) <nl> <nl> # Add logits to same loss function . <nl> - with self . 
assertRaises ( NotImplementedError ) : <nl> - lc . register_categorical_predictive_distribution ( logits , name = ' loss1 ' ) <nl> + lc . register_categorical_predictive_distribution ( <nl> + logits , name = ' loss1 ' , reuse = True ) <nl> self . assertEqual ( 1 , len ( lc . losses ) ) <nl> <nl> # Add another new loss function . <nl> def testLossFunctionWithoutName ( self ) : <nl> logits = linalg_ops . eye ( 2 ) <nl> lc = layer_collection . LayerCollection ( ) <nl> <nl> - # Create a new loss function by name . <nl> + # Create a new loss function with default names . <nl> lc . register_categorical_predictive_distribution ( logits ) <nl> lc . register_categorical_predictive_distribution ( logits ) <nl> self . assertEqual ( 2 , len ( lc . losses ) ) <nl> <nl> + def testCategoricalPredictiveDistributionMultipleMinibatches ( self ) : <nl> + " " " Ensure multiple minibatches are registered . " " " <nl> + with ops . Graph ( ) . as_default ( ) : <nl> + batch_size = 3 <nl> + output_size = 2 <nl> + logits = array_ops . zeros ( [ batch_size , output_size ] ) <nl> + targets = array_ops . ones ( [ batch_size ] , dtype = dtypes . int32 ) <nl> + lc = layer_collection . LayerCollection ( ) <nl> + <nl> + # Create a new loss function . <nl> + lc . register_categorical_predictive_distribution ( <nl> + logits , targets = targets , name = ' loss1 ' ) <nl> + <nl> + # Can add when reuse = True <nl> + lc . register_categorical_predictive_distribution ( <nl> + logits , targets = targets , name = ' loss1 ' , reuse = True ) <nl> + <nl> + # Can add when reuse = VARIABLE_SCOPE and reuse = True there . <nl> + with variable_scope . variable_scope ( <nl> + variable_scope . get_variable_scope ( ) , reuse = True ) : <nl> + lc . register_categorical_predictive_distribution ( <nl> + logits , <nl> + targets = targets , <nl> + name = ' loss1 ' , <nl> + reuse = layer_collection . VARIABLE_SCOPE ) <nl> + <nl> + # Can ' t add when reuse = False <nl> + with self . assertRaises ( KeyError ) : <nl> + lc . register_categorical_predictive_distribution ( <nl> + logits , targets = targets , name = ' loss1 ' , reuse = False ) <nl> + <nl> + # Can ' t add when reuse = VARIABLE_SCOPE and reuse = False there . <nl> + with self . assertRaises ( KeyError ) : <nl> + lc . register_categorical_predictive_distribution ( <nl> + logits , <nl> + targets = targets , <nl> + name = ' loss1 ' , <nl> + reuse = layer_collection . VARIABLE_SCOPE ) <nl> + <nl> + self . assertEqual ( len ( lc . losses ) , 1 ) <nl> + loss = lc . losses [ 0 ] <nl> + <nl> + # Three successful registrations . <nl> + self . assertEqual ( loss . params . shape . as_list ( ) , <nl> + [ 3 * batch_size , output_size ] ) <nl> + self . assertEqual ( loss . targets . shape . as_list ( ) , [ 3 * batch_size ] ) <nl> + <nl> def testRegisterCategoricalPredictiveDistributionBatchSize1 ( self ) : <nl> with ops . Graph ( ) . as_default ( ) : <nl> random_seed . 
set_random_seed ( 200 ) <nl> mmm a / tensorflow / contrib / kfac / python / ops / BUILD <nl> ppp b / tensorflow / contrib / kfac / python / ops / BUILD <nl> py_library ( <nl> deps = [ <nl> " / / tensorflow / python : array_ops " , <nl> " / / tensorflow / python : math_ops " , <nl> + " / / tensorflow / python : tensor_shape " , <nl> " / / tensorflow / python / ops / distributions " , <nl> " @ six_archive / / : six " , <nl> ] , <nl> py_library ( <nl> " : utils " , <nl> " / / tensorflow / python : gradients " , <nl> " / / tensorflow / python : math_ops " , <nl> + " / / tensorflow / python : util " , <nl> ] , <nl> ) <nl> <nl> mmm a / tensorflow / contrib / kfac / python / ops / layer_collection . py <nl> ppp b / tensorflow / contrib / kfac / python / ops / layer_collection . py <nl> def register_categorical_predictive_distribution ( self , <nl> logits , <nl> seed = None , <nl> targets = None , <nl> - name = None ) : <nl> + name = None , <nl> + reuse = VARIABLE_SCOPE ) : <nl> " " " Registers a categorical predictive distribution . <nl> <nl> Args : <nl> def register_categorical_predictive_distribution ( self , <nl> ( Default : None ) <nl> name : ( OPTIONAL ) str or None . Unique name for this loss function . If None , <nl> a new name is generated . ( Default : None ) <nl> + reuse : ( OPTIONAL ) bool or str . If True , reuse an existing LossFunction . <nl> + If False , create a new LossFunction . If VARIABLE_SCOPE , use <nl> + tf . get_variable_scope ( ) . reuse . <nl> + <nl> + Raises : <nl> + ValueError : If reuse = True and name is None . <nl> + ValueError : If reuse = True and seed ! = None . <nl> + KeyError : If reuse = True and no existing LossFunction with ' name ' found . <nl> + KeyError : If reuse = False and existing LossFunction with ' name ' found . <nl> " " " <nl> name = name or self . _graph . unique_name ( <nl> " register_categorical_predictive_distribution " ) <nl> - if name in self . _loss_dict : <nl> - raise NotImplementedError ( <nl> - " Adding logits to an existing LossFunction not yet supported . " ) <nl> - loss = lf . CategoricalLogitsNegativeLogProbLoss ( <nl> - logits , targets = targets , seed = seed ) <nl> - self . _loss_dict [ name ] = loss <nl> + <nl> + if reuse = = VARIABLE_SCOPE : <nl> + reuse = variable_scope . get_variable_scope ( ) . reuse <nl> + <nl> + if reuse : <nl> + if name is None : <nl> + raise ValueError ( <nl> + " If reuse is enabled , loss function ' s name must be set . " ) <nl> + if seed is not None : <nl> + raise ValueError ( <nl> + " Seed can only be specified at LossFunction instantiation . " ) <nl> + <nl> + loss = self . _loss_dict . get ( name , None ) <nl> + <nl> + if loss is None : <nl> + raise KeyError ( <nl> + " Unable to find loss function named { } . Create a new LossFunction " <nl> + " with reuse = False . " . format ( name ) ) <nl> + <nl> + loss . register_additional_minibatch ( logits , targets = targets ) <nl> + else : <nl> + if name in self . _loss_dict : <nl> + raise KeyError ( <nl> + " Loss function named { } already exists . Set reuse = True to append " <nl> + " another minibatch . " . format ( name ) ) <nl> + loss = lf . CategoricalLogitsNegativeLogProbLoss ( <nl> + logits , targets = targets , seed = seed ) <nl> + self . 
_loss_dict [ name ] = loss <nl> <nl> def register_normal_predictive_distribution ( self , <nl> mean , <nl> mmm a / tensorflow / contrib / labeled_tensor / BUILD <nl> ppp b / tensorflow / contrib / labeled_tensor / BUILD <nl> py_test ( <nl> " : test_util " , <nl> " / / tensorflow / core : protos_all_py " , <nl> " / / tensorflow / python : array_ops " , <nl> - " / / tensorflow / python : client " , <nl> " / / tensorflow / python : client_testlib " , <nl> " / / tensorflow / python : framework_for_generated_wrappers " , <nl> + " / / tensorflow / python : session " , <nl> ] , <nl> ) <nl> <nl> mmm a / tensorflow / contrib / layers / BUILD <nl> ppp b / tensorflow / contrib / layers / BUILD <nl> py_test ( <nl> deps = [ <nl> " : layers_py " , <nl> " / / tensorflow / python : array_ops " , <nl> - " / / tensorflow / python : client " , <nl> " / / tensorflow / python : client_testlib " , <nl> " / / tensorflow / python : framework_for_generated_wrappers " , <nl> " / / tensorflow / python : math_ops " , <nl> + " / / tensorflow / python : session " , <nl> " / / third_party / py / numpy " , <nl> ] , <nl> ) <nl> py_test ( <nl> srcs_version = " PY2AND3 " , <nl> deps = [ <nl> " : layers_py " , <nl> - " / / tensorflow / python : client " , <nl> " / / tensorflow / python : client_testlib " , <nl> " / / tensorflow / python : framework_for_generated_wrappers " , <nl> + " / / tensorflow / python : session " , <nl> " / / tensorflow / python : variable_scope " , <nl> " / / tensorflow / python : variables " , <nl> " / / third_party / py / numpy " , <nl> py_test ( <nl> " : layers_py " , <nl> " / / tensorflow / python : client_testlib " , <nl> " / / tensorflow / python : framework_for_generated_wrappers " , <nl> + " / / tensorflow / python : lookup_ops " , <nl> " / / tensorflow / python : parsing_ops " , <nl> " / / tensorflow / python : sparse_tensor " , <nl> " / / tensorflow / python : state_ops " , <nl> py_test ( <nl> srcs_version = " PY2AND3 " , <nl> deps = [ <nl> " : layers_py " , <nl> - " / / tensorflow / python : client " , <nl> " / / tensorflow / python : client_testlib " , <nl> " / / tensorflow / python : framework_for_generated_wrappers " , <nl> + " / / tensorflow / python : session " , <nl> " / / tensorflow / python : variables " , <nl> ] , <nl> ) <nl> py_test ( <nl> srcs_version = " PY2AND3 " , <nl> deps = [ <nl> " : layers_py " , <nl> - " / / tensorflow / python : client " , <nl> " / / tensorflow / python : client_testlib " , <nl> " / / tensorflow / python : framework_for_generated_wrappers " , <nl> + " / / tensorflow / python : session " , <nl> " / / tensorflow / python : sparse_ops " , <nl> " / / tensorflow / python : sparse_tensor " , <nl> " / / third_party / py / numpy " , <nl> mmm a / tensorflow / contrib / learn / BUILD <nl> ppp b / tensorflow / contrib / learn / BUILD <nl> py_test ( <nl> " : learn " , <nl> " / / tensorflow / contrib / layers : layers_py " , <nl> " / / tensorflow / contrib / session_bundle : exporter " , <nl> - " / / tensorflow / contrib / session_bundle : manifest_proto_py " , <nl> + " / / tensorflow / contrib / session_bundle : manifest_proto_py_pb2 " , <nl> " / / tensorflow / python : array_ops " , <nl> " / / tensorflow / python : client " , <nl> " / / tensorflow / python : client_testlib " , <nl> mmm a / tensorflow / contrib / metrics / python / ops / metric_ops . py <nl> ppp b / tensorflow / contrib / metrics / python / ops / metric_ops . py <nl> <nl> from tensorflow . python . framework import ops <nl> from tensorflow . python . ops import array_ops <nl> from tensorflow . 
python . ops import check_ops <nl> - from tensorflow . python . ops import confusion_matrix <nl> from tensorflow . python . ops import control_flow_ops <nl> from tensorflow . python . ops import math_ops <nl> from tensorflow . python . ops import metrics <nl> def streaming_true_negatives ( predictions , <nl> with variable_scope . variable_scope ( name , ' true_negatives ' , <nl> ( predictions , labels , weights ) ) : <nl> <nl> - predictions , labels , weights = _remove_squeezable_dimensions ( <nl> + predictions , labels , weights = metrics_impl . _remove_squeezable_dimensions ( # pylint : disable = protected - access <nl> predictions = math_ops . cast ( predictions , dtype = dtypes . bool ) , <nl> labels = math_ops . cast ( labels , dtype = dtypes . bool ) , <nl> weights = weights ) <nl> def _true_negatives ( labels , <nl> with variable_scope . variable_scope ( name , ' true_negatives ' , <nl> ( predictions , labels , weights ) ) : <nl> <nl> - predictions , labels , weights = _remove_squeezable_dimensions ( <nl> + predictions , labels , weights = metrics_impl . _remove_squeezable_dimensions ( # pylint : disable = protected - access <nl> predictions = math_ops . cast ( predictions , dtype = dtypes . bool ) , <nl> labels = math_ops . cast ( labels , dtype = dtypes . bool ) , <nl> weights = weights ) <nl> def streaming_false_positive_rate ( predictions , <nl> " " " <nl> with variable_scope . variable_scope ( name , ' false_positive_rate ' , <nl> ( predictions , labels , weights ) ) : <nl> - predictions , labels , weights = _remove_squeezable_dimensions ( <nl> + predictions , labels , weights = metrics_impl . _remove_squeezable_dimensions ( # pylint : disable = protected - access <nl> predictions = math_ops . cast ( predictions , dtype = dtypes . bool ) , <nl> labels = math_ops . cast ( labels , dtype = dtypes . bool ) , <nl> weights = weights ) <nl> def streaming_false_negative_rate ( predictions , <nl> " " " <nl> with variable_scope . variable_scope ( name , ' false_negative_rate ' , <nl> ( predictions , labels , weights ) ) : <nl> - predictions , labels , weights = _remove_squeezable_dimensions ( <nl> + predictions , labels , weights = metrics_impl . _remove_squeezable_dimensions ( # pylint : disable = protected - access <nl> predictions = math_ops . cast ( predictions , dtype = dtypes . bool ) , <nl> labels = math_ops . cast ( labels , dtype = dtypes . bool ) , <nl> weights = weights ) <nl> def _streaming_confusion_matrix_at_thresholds ( predictions , <nl> if include not in all_includes : <nl> raise ValueError ( ' Invaild key : % s . ' % include ) <nl> <nl> - predictions , labels , weights = _remove_squeezable_dimensions ( <nl> + predictions , labels , weights = metrics_impl . _remove_squeezable_dimensions ( # pylint : disable = protected - access <nl> predictions , labels , weights ) <nl> predictions . get_shape ( ) . assert_is_compatible_with ( labels . get_shape ( ) ) <nl> <nl> def streaming_precision_recall_at_equal_thresholds ( predictions , <nl> math_ops . cast ( 1 . 0 , dtype = predictions . dtype ) , <nl> message = ' predictions must be in [ 0 , 1 ] ' ) <nl> ] ) : <nl> - predictions , labels , weights = _remove_squeezable_dimensions ( <nl> - predictions = predictions , labels = labels , weights = weights ) <nl> + predictions , labels , weights = metrics_impl . _remove_squeezable_dimensions ( # pylint : disable = protected - access <nl> + predictions = predictions , <nl> + labels = labels , <nl> + weights = weights ) <nl> <nl> predictions . get_shape ( ) . 
assert_is_compatible_with ( labels . get_shape ( ) ) <nl> <nl> def streaming_covariance ( predictions , <nl> " " " <nl> with variable_scope . variable_scope ( name , ' covariance ' , <nl> ( predictions , labels , weights ) ) : <nl> - predictions , labels , weights = _remove_squeezable_dimensions ( <nl> + predictions , labels , weights = metrics_impl . _remove_squeezable_dimensions ( # pylint : disable = protected - access <nl> predictions , labels , weights ) <nl> predictions . get_shape ( ) . assert_is_compatible_with ( labels . get_shape ( ) ) <nl> count = _create_local ( ' count ' , [ ] ) <nl> def streaming_pearson_correlation ( predictions , <nl> " " " <nl> with variable_scope . variable_scope ( name , ' pearson_r ' , <nl> ( predictions , labels , weights ) ) : <nl> - predictions , labels , weights = _remove_squeezable_dimensions ( <nl> + predictions , labels , weights = metrics_impl . _remove_squeezable_dimensions ( # pylint : disable = protected - access <nl> predictions , labels , weights ) <nl> predictions . get_shape ( ) . assert_is_compatible_with ( labels . get_shape ( ) ) <nl> # Broadcast weights here to avoid duplicate broadcasting in each call to <nl> def streaming_mean_cosine_distance ( predictions , <nl> either ` metrics_collections ` or ` updates_collections ` are not a list or <nl> tuple . <nl> " " " <nl> - predictions , labels , weights = _remove_squeezable_dimensions ( <nl> + predictions , labels , weights = metrics_impl . _remove_squeezable_dimensions ( # pylint : disable = protected - access <nl> predictions , labels , weights ) <nl> predictions . get_shape ( ) . assert_is_compatible_with ( labels . get_shape ( ) ) <nl> radial_diffs = math_ops . multiply ( predictions , labels ) <nl> def aggregate_metric_map ( names_to_tuples ) : <nl> return dict ( zip ( metric_names , value_ops ) ) , dict ( zip ( metric_names , update_ops ) ) <nl> <nl> <nl> - def _remove_squeezable_dimensions ( predictions , labels , weights ) : <nl> - " " " Squeeze last dim if needed . <nl> - <nl> - Squeezes ` predictions ` and ` labels ` if their rank differs by 1 . <nl> - Squeezes ` weights ` if its rank is 1 more than the new rank of ` predictions ` <nl> - <nl> - This will use static shape if available . Otherwise , it will add graph <nl> - operations , which could result in a performance hit . <nl> - <nl> - Args : <nl> - predictions : Predicted values , a ` Tensor ` of arbitrary dimensions . <nl> - labels : Label values , a ` Tensor ` whose dimensions match ` predictions ` . <nl> - weights : Optional weight ` Tensor ` . It will be squeezed if its rank is 1 <nl> - more than the new rank of ` predictions ` <nl> - <nl> - Returns : <nl> - Tuple of ` predictions ` , ` labels ` and ` weights ` , possibly with the last <nl> - dimension squeezed . <nl> - " " " <nl> - labels , predictions = confusion_matrix . remove_squeezable_dimensions ( <nl> - labels , predictions ) <nl> - predictions . get_shape ( ) . assert_is_compatible_with ( labels . get_shape ( ) ) <nl> - <nl> - if weights is not None : <nl> - weights = ops . convert_to_tensor ( weights ) <nl> - predictions_shape = predictions . get_shape ( ) <nl> - predictions_rank = predictions_shape . ndims <nl> - weights_shape = weights . get_shape ( ) <nl> - weights_rank = weights_shape . ndims <nl> - <nl> - if ( predictions_rank is not None ) and ( weights_rank is not None ) : <nl> - # Use static rank . <nl> - if weights_rank - predictions_rank = = 1 : <nl> - weights = array_ops . 
squeeze ( weights , [ - 1 ] ) <nl> - elif ( weights_rank is <nl> - None ) or ( weights_shape . dims [ - 1 ] . is_compatible_with ( 1 ) ) : <nl> - # Use dynamic rank <nl> - weights = control_flow_ops . cond ( <nl> - math_ops . equal ( <nl> - array_ops . rank ( weights ) , <nl> - math_ops . add ( array_ops . rank ( predictions ) , 1 ) ) , <nl> - lambda : array_ops . squeeze ( weights , [ - 1 ] ) , lambda : weights ) <nl> - return predictions , labels , weights <nl> - <nl> - <nl> __all__ = [ <nl> ' aggregate_metric_map ' , <nl> ' aggregate_metrics ' , <nl> mmm a / tensorflow / contrib / metrics / python / ops / metric_ops_test . py <nl> ppp b / tensorflow / contrib / metrics / python / ops / metric_ops_test . py <nl> def testManyValuesWithWeights ( self ) : <nl> ' recall ' : [ 1 . 0 , 1 . 0 , 0 . 0 ] , <nl> ' thresholds ' : [ 0 . 0 , 0 . 5 , 1 . 0 ] , <nl> } , <nl> - weights = [ 0 . 0 , 0 . 5 , 2 . 0 , 0 . 0 , 0 . 5 , 1 . 0 ] ) <nl> + weights = [ [ 0 . 0 , 0 . 5 , 2 . 0 , 0 . 0 , 0 . 5 , 1 . 0 ] ] ) <nl> <nl> <nl> class StreamingSpecificityAtSensitivityTest ( test . TestCase ) : <nl> mmm a / tensorflow / contrib / rnn / python / kernel_tests / core_rnn_cell_test . py <nl> ppp b / tensorflow / contrib / rnn / python / kernel_tests / core_rnn_cell_test . py <nl> <nl> from tensorflow . python . ops import variable_scope <nl> from tensorflow . python . ops import variables as variables_lib <nl> from tensorflow . python . platform import test <nl> - from tensorflow . python . framework import test_util <nl> <nl> <nl> # pylint : enable = protected - access <nl> mmm a / tensorflow / contrib / rnn / python / kernel_tests / core_rnn_test . py <nl> ppp b / tensorflow / contrib / rnn / python / kernel_tests / core_rnn_test . py <nl> def testRNN ( self ) : <nl> self . assertEqual ( out . get_shape ( ) , inp . get_shape ( ) ) <nl> self . assertEqual ( out . dtype , inp . dtype ) <nl> <nl> - with self . test_session ( use_gpu = False ) as sess : <nl> + with self . test_session ( use_gpu = True ) as sess : <nl> input_value = np . random . randn ( batch_size , input_size ) <nl> values = sess . run ( outputs + [ state ] , feed_dict = { inputs [ 0 ] : input_value } ) <nl> <nl> def testDropout ( self ) : <nl> self . assertEqual ( out . get_shape ( ) . as_list ( ) , inp . get_shape ( ) . as_list ( ) ) <nl> self . assertEqual ( out . dtype , inp . dtype ) <nl> <nl> - with self . test_session ( use_gpu = False ) as sess : <nl> + with self . test_session ( use_gpu = True ) as sess : <nl> input_value = np . random . randn ( batch_size , input_size ) <nl> values = sess . run ( outputs + [ state ] , feed_dict = { inputs [ 0 ] : input_value } ) <nl> full_dropout_values = sess . run ( dropped_outputs , <nl> def testDropout ( self ) : <nl> for d_v in full_dropout_values [ : - 1 ] : # Add 1 . 0 to dropped_out ( all zeros ) <nl> self . assertAllClose ( d_v , np . ones_like ( input_value ) ) <nl> <nl> - def _testDynamicCalculation ( self , use_gpu ) : <nl> + def testDynamicCalculation ( self ) : <nl> cell = Plus1RNNCell ( ) <nl> sequence_length = array_ops . placeholder ( dtypes . int64 ) <nl> batch_size = 2 <nl> def _testDynamicCalculation ( self , use_gpu ) : <nl> cell , inputs , sequence_length = sequence_length , dtype = dtypes . float32 ) <nl> self . assertEqual ( len ( dynamic_outputs ) , len ( inputs ) ) <nl> <nl> - with self . test_session ( use_gpu = use_gpu ) as sess : <nl> + with self . test_session ( use_gpu = True ) as sess : <nl> input_value = np . random . 
randn ( batch_size , input_size ) <nl> dynamic_values = sess . run ( <nl> dynamic_outputs , <nl> def _testDynamicCalculation ( self , use_gpu ) : <nl> np . vstack ( ( 1 . 0 * ( 1 + 1 ) * np . ones ( ( input_size ) ) , <nl> 1 . 0 * ( 2 + 1 ) * np . ones ( ( input_size ) ) ) ) ) <nl> <nl> - def testDynamicCalculation ( self ) : <nl> - self . _testDynamicCalculation ( True ) <nl> - self . _testDynamicCalculation ( False ) <nl> - <nl> def _testScope ( self , factory , prefix = " prefix " , use_outer_scope = True ) : <nl> with self . test_session ( use_gpu = True , graph = ops_lib . Graph ( ) ) : <nl> if use_outer_scope : <nl> def setUp ( self ) : <nl> self . _seed = 23489 <nl> np . random . seed ( self . _seed ) <nl> <nl> - def _testNoProjNoSharding ( self , use_gpu ) : <nl> + def testNoProjNoSharding ( self ) : <nl> num_units = 3 <nl> input_size = 5 <nl> batch_size = 2 <nl> max_length = 8 <nl> - with self . test_session ( use_gpu = use_gpu , graph = ops_lib . Graph ( ) ) as sess : <nl> + with self . test_session ( use_gpu = True , graph = ops_lib . Graph ( ) ) as sess : <nl> initializer = init_ops . random_uniform_initializer ( <nl> - 0 . 01 , 0 . 01 , seed = self . _seed ) <nl> cell = rnn_cell . LSTMCell ( <nl> def _testNoProjNoSharding ( self , use_gpu ) : <nl> input_value = np . random . randn ( batch_size , input_size ) <nl> sess . run ( outputs , feed_dict = { inputs [ 0 ] : input_value } ) <nl> <nl> - def _testCellClipping ( self , use_gpu ) : <nl> + def testCellClipping ( self ) : <nl> num_units = 3 <nl> input_size = 5 <nl> batch_size = 2 <nl> max_length = 8 <nl> - with self . test_session ( use_gpu = use_gpu , graph = ops_lib . Graph ( ) ) as sess : <nl> + with self . test_session ( use_gpu = True , graph = ops_lib . Graph ( ) ) as sess : <nl> initializer = init_ops . random_uniform_initializer ( <nl> - 0 . 01 , 0 . 01 , seed = self . _seed ) <nl> cell = rnn_cell . LSTMCell ( <nl> def _testCellClipping ( self , use_gpu ) : <nl> # if cell c is clipped to 0 , tanh ( c ) = 0 = > m = = 0 <nl> self . assertAllEqual ( value , np . zeros ( ( batch_size , num_units ) ) ) <nl> <nl> - def _testNoProjNoShardingSimpleStateSaver ( self , use_gpu ) : <nl> + def testNoProjNoShardingSimpleStateSaver ( self ) : <nl> num_units = 3 <nl> input_size = 5 <nl> batch_size = 2 <nl> max_length = 8 <nl> - with self . test_session ( use_gpu = use_gpu , graph = ops_lib . Graph ( ) ) as sess : <nl> + with self . test_session ( use_gpu = True , graph = ops_lib . Graph ( ) ) as sess : <nl> initializer = init_ops . random_uniform_initializer ( <nl> - 0 . 01 , 0 . 01 , seed = self . _seed ) <nl> state_saver = TestStateSaver ( batch_size , 2 * num_units ) <nl> def _cell ( i ) : <nl> self . assertAllEqual ( last_states [ i ] , <nl> named_saved_states [ flat_state_names [ i ] ] ) <nl> <nl> - def _testProjNoSharding ( self , use_gpu ) : <nl> + def testProjNoSharding ( self ) : <nl> num_units = 3 <nl> input_size = 5 <nl> batch_size = 2 <nl> num_proj = 4 <nl> max_length = 8 <nl> - with self . test_session ( use_gpu = use_gpu , graph = ops_lib . Graph ( ) ) as sess : <nl> + with self . test_session ( use_gpu = True , graph = ops_lib . Graph ( ) ) as sess : <nl> initializer = init_ops . random_uniform_initializer ( <nl> - 0 . 01 , 0 . 01 , seed = self . _seed ) <nl> inputs = max_length * [ <nl> def _testStateTupleWithProjAndSequenceLength ( self ) : <nl> state_tuple_v = sess . run ( state_tuple , feed_dict = { inputs [ 0 ] : input_value } ) <nl> self . assertAllEqual ( state_notuple_v , np . 
hstack ( state_tuple_v ) ) <nl> <nl> - def _testProjSharding ( self , use_gpu ) : <nl> + def testProjSharding ( self ) : <nl> num_units = 3 <nl> input_size = 5 <nl> batch_size = 2 <nl> def _testProjSharding ( self , use_gpu ) : <nl> num_proj_shards = 3 <nl> num_unit_shards = 2 <nl> max_length = 8 <nl> - with self . test_session ( use_gpu = use_gpu , graph = ops_lib . Graph ( ) ) as sess : <nl> + with self . test_session ( use_gpu = True , graph = ops_lib . Graph ( ) ) as sess : <nl> initializer = init_ops . random_uniform_initializer ( <nl> - 0 . 01 , 0 . 01 , seed = self . _seed ) <nl> <nl> def _testProjSharding ( self , use_gpu ) : <nl> input_value = np . random . randn ( batch_size , input_size ) <nl> sess . run ( outputs , feed_dict = { inputs [ 0 ] : input_value } ) <nl> <nl> - def _testDoubleInput ( self , use_gpu ) : <nl> + def testDoubleInput ( self ) : <nl> num_units = 3 <nl> input_size = 5 <nl> batch_size = 2 <nl> def _testDoubleInput ( self , use_gpu ) : <nl> num_proj_shards = 3 <nl> num_unit_shards = 2 <nl> max_length = 8 <nl> - with self . test_session ( use_gpu = use_gpu , graph = ops_lib . Graph ( ) ) as sess : <nl> + with self . test_session ( use_gpu = True , graph = ops_lib . Graph ( ) ) as sess : <nl> initializer = init_ops . random_uniform_initializer ( - 1 , 1 , seed = self . _seed ) <nl> inputs = max_length * [ <nl> array_ops . placeholder ( <nl> def _testDoubleInput ( self , use_gpu ) : <nl> values = sess . run ( outputs , feed_dict = { inputs [ 0 ] : input_value } ) <nl> self . assertEqual ( values [ 0 ] . dtype , input_value . dtype ) <nl> <nl> - def _testShardNoShardEquivalentOutput ( self , use_gpu ) : <nl> + def testShardNoShardEquivalentOutput ( self ) : <nl> num_units = 3 <nl> input_size = 5 <nl> batch_size = 2 <nl> def _testShardNoShardEquivalentOutput ( self , use_gpu ) : <nl> num_proj_shards = 3 <nl> num_unit_shards = 2 <nl> max_length = 8 <nl> - with self . test_session ( use_gpu = use_gpu , graph = ops_lib . Graph ( ) ) as sess : <nl> + with self . test_session ( use_gpu = True , graph = ops_lib . Graph ( ) ) as sess : <nl> inputs = max_length * [ <nl> array_ops . placeholder ( <nl> dtypes . float32 , shape = ( None , input_size ) ) <nl> def _testShardNoShardEquivalentOutput ( self , use_gpu ) : <nl> for ( s_noshard , s_shard ) in zip ( state_values_noshard , state_values_shard ) : <nl> self . assertAllClose ( s_noshard , s_shard , atol = 1e - 3 ) <nl> <nl> - def _testDoubleInputWithDropoutAndDynamicCalculation ( self , use_gpu ) : <nl> + def testDoubleInputWithDropoutAndDynamicCalculation ( self ) : <nl> " " " Smoke test for using LSTM with doubles , dropout , dynamic calculation . " " " <nl> <nl> num_units = 3 <nl> def _testDoubleInputWithDropoutAndDynamicCalculation ( self , use_gpu ) : <nl> num_proj_shards = 3 <nl> num_unit_shards = 2 <nl> max_length = 8 <nl> - with self . test_session ( use_gpu = use_gpu , graph = ops_lib . Graph ( ) ) as sess : <nl> + with self . test_session ( use_gpu = True , graph = ops_lib . Graph ( ) ) as sess : <nl> sequence_length = array_ops . placeholder ( dtypes . int64 ) <nl> initializer = init_ops . random_uniform_initializer ( <nl> - 0 . 01 , 0 . 01 , seed = self . _seed ) <nl> def testSharingWeightsWithDifferentNamescope ( self ) : <nl> for out0 , out1 in zip ( outputs0_values , outputs1_values ) : <nl> self . assertAllEqual ( out0 , out1 ) <nl> <nl> - def testNoProjNoShardingSimpleStateSaver ( self ) : <nl> - self . _testNoProjNoShardingSimpleStateSaver ( use_gpu = False ) <nl> - self . 
_testNoProjNoShardingSimpleStateSaver ( use_gpu = True ) <nl> - <nl> - def testNoProjNoSharding ( self ) : <nl> - self . _testNoProjNoSharding ( use_gpu = False ) <nl> - self . _testNoProjNoSharding ( use_gpu = True ) <nl> - <nl> - def testCellClipping ( self ) : <nl> - self . _testCellClipping ( use_gpu = False ) <nl> - self . _testCellClipping ( use_gpu = True ) <nl> - <nl> - def testProjNoSharding ( self ) : <nl> - self . _testProjNoSharding ( use_gpu = False ) <nl> - self . _testProjNoSharding ( use_gpu = True ) <nl> - <nl> - def testProjSharding ( self ) : <nl> - self . _testProjSharding ( use_gpu = False ) <nl> - self . _testProjSharding ( use_gpu = True ) <nl> - <nl> - def testShardNoShardEquivalentOutput ( self ) : <nl> - self . _testShardNoShardEquivalentOutput ( use_gpu = False ) <nl> - self . _testShardNoShardEquivalentOutput ( use_gpu = True ) <nl> - <nl> - def testDoubleInput ( self ) : <nl> - self . _testDoubleInput ( use_gpu = False ) <nl> - self . _testDoubleInput ( use_gpu = True ) <nl> - <nl> - def testDoubleInputWithDropoutAndDynamicCalculation ( self ) : <nl> - self . _testDoubleInputWithDropoutAndDynamicCalculation ( use_gpu = False ) <nl> - self . _testDoubleInputWithDropoutAndDynamicCalculation ( use_gpu = True ) <nl> - <nl> def testDynamicRNNAllowsUnknownTimeDimension ( self ) : <nl> inputs = array_ops . placeholder ( dtypes . float32 , shape = [ 1 , None , 20 ] ) <nl> cell = rnn_cell . GRUCell ( 30 ) <nl> def _cell ( i ) : <nl> state_dynamic = [ s . numpy ( ) for s in nest . flatten ( state_dynamic ) ] <nl> self . assertAllEqual ( np . hstack ( state_static ) , np . hstack ( state_dynamic ) ) <nl> <nl> - def _testDynamicEquivalentToStaticRNN ( self , use_gpu , use_sequence_length ) : <nl> + def _testDynamicEquivalentToStaticRNN ( self , use_sequence_length ) : <nl> time_steps = 8 <nl> num_units = 3 <nl> num_proj = 4 <nl> def _testDynamicEquivalentToStaticRNN ( self , use_gpu , use_sequence_length ) : <nl> state_is_tuple = False ) <nl> <nl> # # # # # # # # # # # Step 1 : Run static graph and generate readouts <nl> - with self . test_session ( use_gpu = use_gpu , graph = ops_lib . Graph ( ) ) as sess : <nl> + with self . test_session ( use_gpu = True , graph = ops_lib . Graph ( ) ) as sess : <nl> if in_graph_mode : <nl> concat_inputs = array_ops . placeholder ( <nl> dtypes . float32 , shape = ( time_steps , batch_size , input_size ) ) <nl> def _testDynamicEquivalentToStaticRNN ( self , use_gpu , use_sequence_length ) : <nl> static_individual_variable_gradients , feed_dict = feeds ) <nl> <nl> # # # # # # # # # # Step 2 : Run dynamic graph and generate readouts <nl> - with self . test_session ( use_gpu = use_gpu , graph = ops_lib . Graph ( ) ) as sess : <nl> + with self . test_session ( use_gpu = True , graph = ops_lib . Graph ( ) ) as sess : <nl> if in_graph_mode : <nl> concat_inputs = array_ops . placeholder ( <nl> dtypes . float32 , shape = ( time_steps , batch_size , input_size ) ) <nl> def _testDynamicEquivalentToStaticRNN ( self , use_gpu , use_sequence_length ) : <nl> <nl> @ test_util . run_in_graph_and_eager_modes ( ) <nl> def testDynamicEquivalentToStaticRNN ( self ) : <nl> - self . _testDynamicEquivalentToStaticRNN ( <nl> - use_gpu = False , use_sequence_length = False ) <nl> - self . _testDynamicEquivalentToStaticRNN ( <nl> - use_gpu = True , use_sequence_length = False ) <nl> - self . _testDynamicEquivalentToStaticRNN ( <nl> - use_gpu = False , use_sequence_length = True ) <nl> - self . 
_testDynamicEquivalentToStaticRNN ( <nl> - use_gpu = True , use_sequence_length = True ) <nl> + self . _testDynamicEquivalentToStaticRNN ( use_sequence_length = False ) <nl> + self . _testDynamicEquivalentToStaticRNN ( use_sequence_length = True ) <nl> <nl> <nl> class BidirectionalRNNTest ( test . TestCase ) : <nl> def setUp ( self ) : <nl> np . random . seed ( self . _seed ) <nl> <nl> def _createBidirectionalRNN ( self , <nl> - use_gpu , <nl> use_shape , <nl> use_sequence_length , <nl> scope = None ) : <nl> def _createBidirectionalRNN ( self , <nl> <nl> return input_value , inputs , outputs , state_fw , state_bw , sequence_length <nl> <nl> - def _testBidirectionalRNN ( self , use_gpu , use_shape ) : <nl> - with self . test_session ( use_gpu = use_gpu , graph = ops_lib . Graph ( ) ) as sess : <nl> + def _testBidirectionalRNN ( self , use_shape ) : <nl> + with self . test_session ( use_gpu = True , graph = ops_lib . Graph ( ) ) as sess : <nl> input_value , inputs , outputs , state_fw , state_bw , sequence_length = ( <nl> - self . _createBidirectionalRNN ( use_gpu , use_shape , True ) ) <nl> + self . _createBidirectionalRNN ( use_shape , True ) ) <nl> variables_lib . global_variables_initializer ( ) . run ( ) <nl> # Run with pre - specified sequence length of 2 , 3 <nl> out , s_fw , s_bw = sess . run ( <nl> def _testBidirectionalRNN ( self , use_gpu , use_shape ) : <nl> # exactly the same <nl> self . assertAllClose ( s_fw , s_bw ) <nl> <nl> - def _testBidirectionalRNNWithoutSequenceLength ( self , use_gpu , use_shape ) : <nl> - with self . test_session ( use_gpu = use_gpu , graph = ops_lib . Graph ( ) ) as sess : <nl> + def _testBidirectionalRNNWithoutSequenceLength ( self , use_shape ) : <nl> + with self . test_session ( use_gpu = True , graph = ops_lib . Graph ( ) ) as sess : <nl> input_value , inputs , outputs , state_fw , state_bw , _ = ( <nl> - self . _createBidirectionalRNN ( use_gpu , use_shape , False ) ) <nl> + self . _createBidirectionalRNN ( use_shape , False ) ) <nl> variables_lib . global_variables_initializer ( ) . run ( ) <nl> out , s_fw , s_bw = sess . run ( [ outputs , state_fw , state_bw ] , <nl> feed_dict = { inputs [ 0 ] : input_value } ) <nl> def _testBidirectionalRNNWithoutSequenceLength ( self , use_gpu , use_shape ) : <nl> self . assertAllClose ( s_fw , s_bw ) <nl> <nl> def testBidirectionalRNN ( self ) : <nl> - self . _testBidirectionalRNN ( use_gpu = False , use_shape = False ) <nl> - self . _testBidirectionalRNN ( use_gpu = True , use_shape = False ) <nl> - self . _testBidirectionalRNN ( use_gpu = False , use_shape = True ) <nl> - self . _testBidirectionalRNN ( use_gpu = True , use_shape = True ) <nl> + self . _testBidirectionalRNN ( use_shape = False ) <nl> + self . _testBidirectionalRNN ( use_shape = True ) <nl> <nl> def testBidirectionalRNNWithoutSequenceLength ( self ) : <nl> - self . _testBidirectionalRNNWithoutSequenceLength ( <nl> - use_gpu = False , use_shape = False ) <nl> - self . _testBidirectionalRNNWithoutSequenceLength ( <nl> - use_gpu = True , use_shape = False ) <nl> - self . _testBidirectionalRNNWithoutSequenceLength ( <nl> - use_gpu = False , use_shape = True ) <nl> - self . _testBidirectionalRNNWithoutSequenceLength ( <nl> - use_gpu = True , use_shape = True ) <nl> + self . _testBidirectionalRNNWithoutSequenceLength ( use_shape = False ) <nl> + self . 
_testBidirectionalRNNWithoutSequenceLength ( use_shape = True ) <nl> <nl> def _createBidirectionalDynamicRNN ( self , <nl> - use_gpu , <nl> use_shape , <nl> use_state_tuple , <nl> use_time_major , <nl> def _createBidirectionalDynamicRNN ( self , <nl> <nl> return input_value , inputs , outputs , state_fw , state_bw , sequence_length <nl> <nl> - def _testBidirectionalDynamicRNN ( self , use_gpu , use_shape , use_state_tuple , <nl> + def _testBidirectionalDynamicRNN ( self , use_shape , use_state_tuple , <nl> use_time_major , use_sequence_length ) : <nl> - with self . test_session ( use_gpu = use_gpu , graph = ops_lib . Graph ( ) ) as sess : <nl> + with self . test_session ( use_gpu = True , graph = ops_lib . Graph ( ) ) as sess : <nl> input_value , inputs , outputs , state_fw , state_bw , sequence_length = ( <nl> - self . _createBidirectionalDynamicRNN ( use_gpu , use_shape , <nl> + self . _createBidirectionalDynamicRNN ( use_shape , <nl> use_state_tuple , use_time_major , <nl> use_sequence_length ) ) <nl> variables_lib . global_variables_initializer ( ) . run ( ) <nl> def _testBidirectionalDynamicRNN ( self , use_gpu , use_shape , use_state_tuple , <nl> def testBidirectionalDynamicRNN ( self ) : <nl> - # Generate 2 ^ 5 option values <nl> - # from [ True , True , True , True , True ] to [ False , False , False , False , False ] <nl> - options = itertools . product ( [ True , False ] , repeat = 5 ) <nl> + # Generate 2 ^ 4 option values <nl> + # from [ True , True , True , True ] to [ False , False , False , False ] <nl> + options = itertools . product ( [ True , False ] , repeat = 4 ) <nl> for option in options : <nl> self . _testBidirectionalDynamicRNN ( <nl> - use_gpu = option [ 0 ] , <nl> - use_shape = option [ 1 ] , <nl> - use_state_tuple = option [ 2 ] , <nl> - use_time_major = option [ 3 ] , <nl> - use_sequence_length = option [ 4 ] ) <nl> + use_shape = option [ 0 ] , <nl> + use_state_tuple = option [ 1 ] , <nl> + use_time_major = option [ 2 ] , <nl> + use_sequence_length = option [ 3 ] ) <nl> <nl> def _testScope ( self , factory , prefix = " prefix " , use_outer_scope = True ) : <nl> # REMARKS : factory ( scope ) is a function accepting a scope <nl> def testBidirectionalRNNScope ( self ) : <nl> <nl> def factory ( scope ) : <nl> return self . _createBidirectionalRNN ( <nl> - use_gpu = True , use_shape = True , use_sequence_length = True , scope = scope ) <nl> + use_shape = True , use_sequence_length = True , scope = scope ) <nl> <nl> self . _testScope ( factory , use_outer_scope = True ) <nl> self . _testScope ( factory , use_outer_scope = False ) <nl> def get_factory ( use_time_major ) : <nl> <nl> def factory ( scope ) : <nl> return self . _createBidirectionalDynamicRNN ( <nl> - use_gpu = True , <nl> use_shape = True , <nl> use_state_tuple = True , <nl> use_sequence_length = True , <nl> def setUp ( self ) : <nl> self . _seed = 23489 <nl> np . random . seed ( self . _seed ) <nl> <nl> - def _testDynamic ( self , use_gpu ) : <nl> + def testDynamic ( self ) : <nl> time_steps = 8 <nl> num_units = 3 <nl> input_size = 5 <nl> def _testDynamic ( self , use_gpu ) : <nl> <nl> sequence_length = np . random . randint ( 0 , time_steps , size = batch_size ) <nl> <nl> - with self . test_session ( use_gpu = use_gpu , graph = ops_lib . Graph ( ) ) as sess : <nl> + with self . test_session ( use_gpu = True , graph = ops_lib . Graph ( ) ) as sess : <nl> concat_inputs = array_ops . placeholder ( <nl> dtypes . float32 , shape = ( time_steps , batch_size , input_size ) ) <nl> <nl> def _testDynamic ( self , use_gpu ) : <nl> <nl> sess . 
run ( [ outputs_dynamic , state_dynamic ] , feed_dict = feeds ) <nl> <nl> - def testDynamic ( self ) : <nl> - self . _testDynamic ( use_gpu = False ) <nl> - self . _testDynamic ( use_gpu = True ) <nl> - <nl> def _testScope ( self , factory , prefix = " prefix " , use_outer_scope = True ) : <nl> with self . test_session ( use_gpu = True , graph = ops_lib . Graph ( ) ) : <nl> if use_outer_scope : <nl> mmm a / tensorflow / contrib / summary / summary_ops . py <nl> ppp b / tensorflow / contrib / summary / summary_ops . py <nl> <nl> from tensorflow . python . framework import ops <nl> from tensorflow . python . layers import utils <nl> from tensorflow . python . ops import array_ops <nl> + from tensorflow . python . ops import control_flow_ops <nl> + from tensorflow . python . ops import math_ops <nl> from tensorflow . python . ops import resource_variable_ops <nl> from tensorflow . python . ops import summary_op_util <nl> from tensorflow . python . training import training_util <nl> def record_summaries_every_n_global_steps ( n ) : <nl> " " " Sets the should_record_summaries Tensor to true if global_step % n = = 0 . " " " <nl> collection_ref = ops . get_collection_ref ( _SHOULD_RECORD_SUMMARIES_NAME ) <nl> old = collection_ref [ : ] <nl> - collection_ref [ : ] = [ training_util . get_global_step ( ) % n = = 0 ] <nl> + with ops . device ( " cpu : 0 " ) : <nl> + collection_ref [ : ] = [ math_ops . equal ( training_util . get_global_step ( ) % n , 0 ) ] <nl> yield <nl> collection_ref [ : ] = old <nl> <nl> def set_as_default ( self ) : <nl> <nl> @ tf_contextlib . contextmanager <nl> def as_default ( self ) : <nl> - old = context . context ( ) . summary_writer_resource <nl> - context . context ( ) . summary_writer_resource = self . _resource <nl> - yield <nl> - # Flushes the summary writer in eager mode or in graph functions , but not in <nl> - # legacy graph mode ( you ' re on your own there ) . <nl> - gen_summary_ops . flush_summary_writer ( self . _resource ) <nl> - context . context ( ) . summary_writer_resource = old <nl> + if self . _resource is None : <nl> + yield <nl> + else : <nl> + old = context . context ( ) . summary_writer_resource <nl> + context . context ( ) . summary_writer_resource = self . _resource <nl> + yield <nl> + # Flushes the summary writer in eager mode or in graph functions , but not <nl> + # in legacy graph mode ( you ' re on your own there ) . <nl> + with ops . device ( " cpu : 0 " ) : <nl> + gen_summary_ops . flush_summary_writer ( self . _resource ) <nl> + context . context ( ) . summary_writer_resource = old <nl> <nl> <nl> def create_summary_file_writer ( logdir , <nl> def create_summary_file_writer ( logdir , <nl> flush_secs = None , <nl> filename_suffix = None , <nl> name = None ) : <nl> - " " " Creates a summary file writer in the current context . " " " <nl> - if max_queue is None : <nl> - max_queue = constant_op . constant ( 10 ) <nl> - if flush_secs is None : <nl> - flush_secs = constant_op . constant ( 120 ) <nl> - if filename_suffix is None : <nl> - filename_suffix = constant_op . constant ( " " ) <nl> - resource = gen_summary_ops . summary_writer ( shared_name = name ) <nl> - # TODO ( apassos ) ensure the initialization op runs when in graph mode ; consider <nl> - # calling session . run here . <nl> - ops . add_to_collection ( <nl> - _SUMMARY_WRITER_INIT_COLLECTION_NAME , <nl> - gen_summary_ops . 
create_summary_file_writer ( resource , logdir , max_queue , <nl> - flush_secs , filename_suffix ) ) <nl> - return SummaryWriter ( resource ) <nl> + " " " Creates a summary file writer in the current context . <nl> + <nl> + Args : <nl> + logdir : a string , or None . If a string , creates a summary file writer <nl> + which writes to the directory named by the string . If None , returns <nl> + a mock object which acts like a summary writer but does nothing , <nl> + useful to use as a context manager . <nl> + max_queue : the largest number of summaries to keep in a queue ; will <nl> + flush once the queue gets bigger than this . <nl> + flush_secs : the largest interval ( in seconds ) between flushes . <nl> + filename_suffix : optional suffix for the event file name . <nl> + name : name for the summary writer . <nl> + <nl> + Returns : <nl> + Either a summary writer or an empty object which can be used as a <nl> + summary writer . <nl> + " " " <nl> + if logdir is None : <nl> + return SummaryWriter ( None ) <nl> + with ops . device ( " cpu : 0 " ) : <nl> + if max_queue is None : <nl> + max_queue = constant_op . constant ( 10 ) <nl> + if flush_secs is None : <nl> + flush_secs = constant_op . constant ( 120 ) <nl> + if filename_suffix is None : <nl> + filename_suffix = constant_op . constant ( " " ) <nl> + resource = gen_summary_ops . summary_writer ( shared_name = name ) <nl> + # TODO ( apassos ) ensure the initialization op runs when in graph mode ; <nl> + # consider calling session . run here . <nl> + ops . add_to_collection ( <nl> + _SUMMARY_WRITER_INIT_COLLECTION_NAME , <nl> + gen_summary_ops . create_summary_file_writer ( resource , logdir , max_queue , <nl> + flush_secs , filename_suffix ) ) <nl> + return SummaryWriter ( resource ) <nl> <nl> <nl> def _nothing ( ) : <nl> def record ( ) : <nl> with ops . control_dependencies ( [ function ( tag , scope ) ] ) : <nl> return constant_op . constant ( True ) <nl> <nl> + if context . context ( ) . summary_writer_resource is None : <nl> + return control_flow_ops . no_op ( ) <nl> with ops . device ( " cpu : 0 " ) : <nl> op = utils . smart_cond ( <nl> should_record_summaries ( ) , record , _nothing , name = " " ) <nl> new file mode 100644 <nl> index 0000000000000 . . f0566322958a6 <nl> mmm / dev / null <nl> ppp b / tensorflow / contrib / tensorboard / db / BUILD <nl> <nl> + # Description : <nl> + # TensorBoard database code . <nl> + <nl> + package ( default_visibility = [ " / / tensorflow : internal " ] ) <nl> + <nl> + licenses ( [ " notice " ] ) # Apache 2 . 0 <nl> + <nl> + load ( " / / tensorflow : tensorflow . bzl " , " tf_cc_test " ) <nl> + <nl> + cc_library ( <nl> + name = " schema " , <nl> + srcs = [ " schema . cc " ] , <nl> + hdrs = [ " schema . h " ] , <nl> + deps = [ <nl> + " / / tensorflow / core : lib " , <nl> + " / / tensorflow / core / lib / db : sqlite " , <nl> + ] , <nl> + ) <nl> + <nl> + tf_cc_test ( <nl> + name = " schema_test " , <nl> + srcs = [ " schema_test . cc " ] , <nl> + deps = [ <nl> + " : schema " , <nl> + " / / tensorflow / core : lib " , <nl> + " / / tensorflow / core : test " , <nl> + " / / tensorflow / core : test_main " , <nl> + " / / tensorflow / core / lib / db : sqlite " , <nl> + ] , <nl> + ) <nl> + <nl> + filegroup ( <nl> + name = " all_files " , <nl> + srcs = glob ( [ " * " ] ) , <nl> + visibility = [ " / / tensorflow : __pkg__ " ] , <nl> + ) <nl> new file mode 100644 <nl> index 0000000000000 . . f5a8e02a9bb2d <nl> mmm / dev / null <nl> ppp b / tensorflow / contrib / tensorboard / db / schema . 
cc <nl> <nl> + / * Copyright 2017 The TensorFlow Authors . All Rights Reserved . <nl> + <nl> + Licensed under the Apache License , Version 2 . 0 ( the " License " ) ; <nl> + you may not use this file except in compliance with the License . <nl> + You may obtain a copy of the License at <nl> + <nl> + http : / / www . apache . org / licenses / LICENSE - 2 . 0 <nl> + <nl> + Unless required by applicable law or agreed to in writing , software <nl> + distributed under the License is distributed on an " AS IS " BASIS , <nl> + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND , either express or implied . <nl> + See the License for the specific language governing permissions and <nl> + limitations under the License . <nl> + = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = * / <nl> + # include " tensorflow / contrib / tensorboard / db / schema . h " <nl> + <nl> + namespace tensorflow { <nl> + namespace db { <nl> + namespace { <nl> + <nl> + class SqliteSchema { <nl> + public : <nl> + explicit SqliteSchema ( Sqlite * db ) : db_ ( db ) { } <nl> + ~ SqliteSchema ( ) { db_ = nullptr ; } <nl> + <nl> + / / / \ brief Creates Tensors table . <nl> + / / / <nl> + / / / Fields : <nl> + / / / rowid : Ephemeral b - tree ID dictating locality . <nl> + / / / tag_id : ID of associated Tag . <nl> + / / / computed_time : Float UNIX timestamp with microsecond precision . <nl> + / / / In the old summaries system that uses FileWriter , this is the <nl> + / / / wall time around when tf . Session . run finished . In the new <nl> + / / / summaries system , it is the wall time of when the tensor was <nl> + / / / computed . On systems with monotonic clocks , it is calculated <nl> + / / / by adding the monotonic run duration to Run . started_time . <nl> + / / / This field is not indexed because , in practice , it should be <nl> + / / / ordered the same or nearly the same as TensorIndex , so local <nl> + / / / insertion sort might be more suitable . <nl> + / / / step : User - supplied number , ordering this tensor in Tag . <nl> + / / / If NULL then the Tag must have only one Tensor . <nl> + / / / tensor : Can be an INTEGER ( DT_INT64 ) , FLOAT ( DT_DOUBLE ) , or <nl> + / / / BLOB . The structure of a BLOB is currently undefined , but in <nl> + / / / essence it is a Snappy tf . TensorProto that spills over into <nl> + / / / TensorChunks . <nl> + Status CreateTensorsTable ( ) { <nl> + return Run ( R " sql ( <nl> + CREATE TABLE IF NOT EXISTS Tensors ( <nl> + rowid INTEGER PRIMARY KEY , <nl> + tag_id INTEGER NOT NULL , <nl> + computed_time REAL , <nl> + step INTEGER , <nl> + tensor BLOB <nl> + ) <nl> + ) sql " ) ; <nl> + } <nl> + <nl> + / / / \ brief Creates TensorChunks table . <nl> + / / / <nl> + / / / This table can be used to split up a tensor across many rows , <nl> + / / / which has the advantage of not slowing down table scans on the <nl> + / / / main table , allowing asynchronous fetching , minimizing copying , <nl> + / / / and preventing large buffers from being allocated . <nl> + / / / <nl> + / / / Fields : <nl> + / / / rowid : Ephemeral b - tree ID dictating locality . <nl> + / / / tag_id : ID of associated Tag . <nl> + / / / step : Same as corresponding Tensors . step . <nl> + / / / sequence : 1 - indexed sequence number for ordering chunks . Please <nl> + / / / note that the 0th index is Tensors . tensor . <nl> + / / / chunk : Bytes of next chunk in tensor . 
<nl> + Status CreateTensorChunksTable ( ) { <nl> + return Run ( R " sql ( <nl> + CREATE TABLE IF NOT EXISTS TensorChunks ( <nl> + rowid INTEGER PRIMARY KEY , <nl> + tag_id INTEGER NOT NULL , <nl> + step INTEGER , <nl> + sequence INTEGER , <nl> + chunk BLOB <nl> + ) <nl> + ) sql " ) ; <nl> + } <nl> + <nl> + / / / \ brief Creates Tags table . <nl> + / / / <nl> + / / / Fields : <nl> + / / / rowid : Ephemeral b - tree ID dictating locality . <nl> + / / / tag_id : Permanent > 0 unique ID . <nl> + / / / run_id : Optional ID of associated Run . <nl> + / / / tag_name : The tag field in summary . proto , unique across Run . <nl> + / / / inserted_time : Float UNIX timestamp with µs precision . This is <nl> + / / / always the wall time of when the row was inserted into the <nl> + / / / DB . It may be used as a hint for an archival job . <nl> + / / / metadata : Optional BLOB of SummaryMetadata proto . <nl> + / / / display_name : Optional for GUI and defaults to tag_name . <nl> + / / / description : Optional markdown information . <nl> + Status CreateTagsTable ( ) { <nl> + return Run ( R " sql ( <nl> + CREATE TABLE IF NOT EXISTS Tags ( <nl> + rowid INTEGER PRIMARY KEY , <nl> + run_id INTEGER , <nl> + tag_id INTEGER NOT NULL , <nl> + tag_name TEXT , <nl> + inserted_time REAL , <nl> + metadata BLOB , <nl> + display_name TEXT , <nl> + description TEXT <nl> + ) <nl> + ) sql " ) ; <nl> + }
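Aside: the doc comments above specify how Tags rows join to Tensors rows (Tensors.tag_id references Tags.tag_id, with one row per step). As an illustrative sketch, not part of this commit, the same relationship in plain Python sqlite3; the table definitions are trimmed to the columns used here, and all IDs, timestamps, and values are arbitrary examples:

import sqlite3

conn = sqlite3.connect(":memory:")
# Trimmed-down versions of the Tags and Tensors tables defined in this file.
conn.execute("CREATE TABLE Tags (rowid INTEGER PRIMARY KEY, run_id INTEGER, "
             "tag_id INTEGER NOT NULL, tag_name TEXT, inserted_time REAL, "
             "metadata BLOB, display_name TEXT, description TEXT)")
conn.execute("CREATE TABLE Tensors (rowid INTEGER PRIMARY KEY, "
             "tag_id INTEGER NOT NULL, computed_time REAL, step INTEGER, "
             "tensor BLOB)")
# One tag; per the comments above, Tensors.tensor may hold an INTEGER,
# FLOAT, or BLOB, so a scalar loss can be stored as a plain float.
conn.execute("INSERT INTO Tags (tag_id, run_id, tag_name) VALUES (1, 1, 'loss')")
conn.execute("INSERT INTO Tensors (tag_id, computed_time, step, tensor) "
             "VALUES (1, 1500000000.0, 0, 0.5)")
conn.execute("INSERT INTO Tensors (tag_id, computed_time, step, tensor) "
             "VALUES (1, 1500000060.0, 1, 0.25)")
# Read the tag's time series back in step order, as a reader like
# TensorBoard would.
for step, value in conn.execute(
    "SELECT step, tensor FROM Tensors WHERE tag_id = 1 ORDER BY step"):
  print(step, value)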
<nl> + <nl> + / / / \ brief Creates Runs table . <nl> + / / / <nl> + / / / This table stores information about runs . Each row usually <nl> + / / / represents a single attempt at training or testing a TensorFlow <nl> + / / / model , with a given set of hyper - parameters , whose summaries are <nl> + / / / written out to a single event logs directory with a monotonic step <nl> + / / / counter . <nl> + / / / <nl> + / / / When a run is deleted from this table , TensorBoard should treat all <nl> + / / / information associated with it as deleted , even if those rows in <nl> + / / / different tables still exist . <nl> + / / / <nl> + / / / Fields : <nl> + / / / rowid : Ephemeral b - tree ID dictating locality . <nl> + / / / run_id : Permanent > 0 unique ID . <nl> + / / / experiment_id : Optional ID of associated Experiment . <nl> + / / / run_name : User - supplied string , unique across Experiment . <nl> + / / / inserted_time : Float UNIX timestamp with µs precision . This is <nl> + / / / always the time the row was inserted into the database . It <nl> + / / / does not change . <nl> + / / / started_time : Float UNIX timestamp with µs precision . In the <nl> + / / / old summaries system that uses FileWriter , this is <nl> + / / / approximated as the first tf . Event . wall_time . In the new <nl> + / / / summaries system , it is the wall time of when summary writing <nl> + / / / started , from the perspective of whichever machine talks to <nl> + / / / the database . This field will be mutated if the run is <nl> + / / / restarted . <nl> + / / / description : Optional markdown information . <nl> + / / / graph : Snappy tf . GraphDef proto with node field cleared . That <nl> + / / / field can be recreated using GraphNodes and NodeDefs . <nl> + Status CreateRunsTable ( ) { <nl> + return Run ( R " sql ( <nl> + CREATE TABLE IF NOT EXISTS Runs ( <nl> + rowid INTEGER PRIMARY KEY , <nl> + experiment_id INTEGER , <nl> + run_id INTEGER NOT NULL , <nl> + run_name TEXT , <nl> + inserted_time REAL , <nl> + started_time REAL , <nl> + description TEXT , <nl> + graph BLOB <nl> + ) <nl> + ) sql " ) ; <nl> + } <nl> + <nl> + / / / \ brief Creates Experiments table . <nl> + / / / <nl> + / / / This table stores information about experiments , which are sets of <nl> + / / / runs . <nl> + / / / <nl> + / / / Fields : <nl> + / / / rowid : Ephemeral b - tree ID dictating locality . <nl> + / / / user_id : Optional ID of associated User . <nl> + / / / experiment_id : Permanent > 0 unique ID . <nl> + / / / experiment_name : User - supplied string , unique across User . <nl> + / / / inserted_time : Float UNIX timestamp with µs precision . This is <nl> + / / / always the time the row was inserted into the database . It <nl> + / / / does not change . <nl> + / / / started_time : Float UNIX timestamp with µs precision . This is <nl> + / / / the MIN ( experiment . started_time , run . started_time ) of each <nl> + / / / Run added to the database . <nl> + / / / description : Optional markdown information . <nl> + Status CreateExperimentsTable ( ) { <nl> + return Run ( R " sql ( <nl> + CREATE TABLE IF NOT EXISTS Experiments ( <nl> + rowid INTEGER PRIMARY KEY , <nl> + user_id INTEGER , <nl> + experiment_id INTEGER NOT NULL , <nl> + experiment_name TEXT , <nl> + inserted_time REAL , <nl> + started_time REAL , <nl> + description TEXT <nl> + ) <nl> + ) sql " ) ; <nl> + } <nl> + <nl> + / / / \ brief Creates Users table . <nl> + / / / <nl> + / / / Fields : <nl> + / / / rowid : Ephemeral b - tree ID dictating locality . <nl> + / / / user_id : Permanent > 0 unique ID . <nl> + / / / user_name : Unique user name . <nl> + / / / email : Optional unique email address . <nl> + / / / inserted_time : Float UNIX timestamp with µs precision . This is <nl> + / / / always the time the row was inserted into the database . It <nl> + / / / does not change . <nl> + Status CreateUsersTable ( ) { <nl> + return Run ( R " sql ( <nl> + CREATE TABLE IF NOT EXISTS Users ( <nl> + rowid INTEGER PRIMARY KEY , <nl> + user_id INTEGER NOT NULL , <nl> + user_name TEXT , <nl> + email TEXT , <nl> + inserted_time REAL <nl> + ) <nl> + ) sql " ) ; <nl> + } <nl> + <nl> + / / / \ brief Creates NodeDefs table . <nl> + / / / <nl> + / / / This table stores NodeDef protos which define the GraphDef for a <nl> + / / / Run . This functions like a hash table so rows can be shared by <nl> + / / / multiple Runs in an Experiment . <nl> + / / / <nl> + / / / Fields : <nl> + / / / rowid : Ephemeral b - tree ID dictating locality . <nl> + / / / experiment_id : Optional int64 for grouping rows . <nl> + / / / node_def_id : Permanent > 0 unique ID . <nl> + / / / fingerprint : Optional farmhash : : Fingerprint64 ( ) of uncompressed <nl> + / / / node_def bytes , coerced to int64 . <nl> + / / / node_def : BLOB containing a Snappy tf . NodeDef proto . <nl> + Status CreateNodeDefsTable ( ) { <nl> + return Run ( R " sql ( <nl> + CREATE TABLE IF NOT EXISTS NodeDefs ( <nl> + rowid INTEGER PRIMARY KEY , <nl> + experiment_id INTEGER , <nl> + node_def_id INTEGER NOT NULL , <nl> + fingerprint INTEGER , <nl> + node_def BLOB <nl> + ) <nl> + ) sql " ) ; <nl> + } <nl> + <nl> + / / / \ brief Creates RunNodeDefs table . <nl> + / / / <nl> + / / / Table mapping Runs to NodeDefs . 
This is used to recreate the node <nl> + / / / field of the GraphDef proto . <nl> + / / / <nl> + / / / Fields : <nl> + / / / rowid : Ephemeral b - tree ID dictating locality . <nl> + / / / run_id : Mandatory ID of associated Run . <nl> + / / / node_def_id : Mandatory ID of associated NodeDef . <nl> + Status CreateRunNodeDefsTable ( ) { <nl> + return Run ( R " sql ( <nl> + CREATE TABLE IF NOT EXISTS RunNodeDefs ( <nl> + rowid INTEGER PRIMARY KEY , <nl> + run_id INTEGER NOT NULL , <nl> + node_def_id INTEGER NOT NULL <nl> + ) <nl> + ) sql " ) ; <nl> + } <nl> + <nl> + / / / \ brief Uniquely indexes ( tag_id , step ) on Tensors table . <nl> + Status CreateTensorIndex ( ) { <nl> + return Run ( R " sql ( <nl> + CREATE UNIQUE INDEX IF NOT EXISTS TensorIndex <nl> + ON Tensors ( tag_id , step ) <nl> + ) sql " ) ; <nl> + } <nl> + <nl> + / / / \ brief Uniquely indexes ( tag_id , step , sequence ) on TensorChunks table . <nl> + Status CreateTensorChunkIndex ( ) { <nl> + return Run ( R " sql ( <nl> + CREATE UNIQUE INDEX IF NOT EXISTS TensorChunkIndex <nl> + ON TensorChunks ( tag_id , step , sequence ) <nl> + ) sql " ) ; <nl> + } <nl> + <nl> + / / / \ brief Uniquely indexes tag_id on Tags table . <nl> + Status CreateTagIdIndex ( ) { <nl> + return Run ( R " sql ( <nl> + CREATE UNIQUE INDEX IF NOT EXISTS TagIdIndex <nl> + ON Tags ( tag_id ) <nl> + ) sql " ) ; <nl> + } <nl> + <nl> + / / / \ brief Uniquely indexes run_id on Runs table . <nl> + Status CreateRunIdIndex ( ) { <nl> + return Run ( R " sql ( <nl> + CREATE UNIQUE INDEX IF NOT EXISTS RunIdIndex <nl> + ON Runs ( run_id ) <nl> + ) sql " ) ; <nl> + } <nl> + <nl> + / / / \ brief Uniquely indexes experiment_id on Experiments table . <nl> + Status CreateExperimentIdIndex ( ) { <nl> + return Run ( R " sql ( <nl> + CREATE UNIQUE INDEX IF NOT EXISTS ExperimentIdIndex <nl> + ON Experiments ( experiment_id ) <nl> + ) sql " ) ; <nl> + } <nl> + <nl> + / / / \ brief Uniquely indexes user_id on Users table . <nl> + Status CreateUserIdIndex ( ) { <nl> + return Run ( R " sql ( <nl> + CREATE UNIQUE INDEX IF NOT EXISTS UserIdIndex <nl> + ON Users ( user_id ) <nl> + ) sql " ) ; <nl> + } <nl> + <nl> + / / / \ brief Uniquely indexes node_def_id on NodeDefs table . <nl> + Status CreateNodeDefIdIndex ( ) { <nl> + return Run ( R " sql ( <nl> + CREATE UNIQUE INDEX IF NOT EXISTS NodeDefIdIndex <nl> + ON NodeDefs ( node_def_id ) <nl> + ) sql " ) ; <nl> + } <nl> + <nl> + / / / \ brief Uniquely indexes ( run_id , tag_name ) on Tags table . <nl> + Status CreateTagNameIndex ( ) { <nl> + return Run ( R " sql ( <nl> + CREATE UNIQUE INDEX IF NOT EXISTS TagNameIndex <nl> + ON Tags ( run_id , tag_name ) <nl> + WHERE tag_name IS NOT NULL <nl> + ) sql " ) ; <nl> + } <nl> + <nl> + / / / \ brief Uniquely indexes ( experiment_id , run_name ) on Runs table . <nl> + Status CreateRunNameIndex ( ) { <nl> + return Run ( R " sql ( <nl> + CREATE UNIQUE INDEX IF NOT EXISTS RunNameIndex <nl> + ON Runs ( experiment_id , run_name ) <nl> + WHERE run_name IS NOT NULL <nl> + ) sql " ) ; <nl> + } <nl> + <nl> + / / / \ brief Uniquely indexes ( user_id , experiment_name ) on Experiments table . <nl> + Status CreateExperimentNameIndex ( ) { <nl> + return Run ( R " sql ( <nl> + CREATE UNIQUE INDEX IF NOT EXISTS ExperimentNameIndex <nl> + ON Experiments ( user_id , experiment_name ) <nl> + WHERE experiment_name IS NOT NULL <nl> + ) sql " ) ; <nl> + } <nl> + <nl> + / / / \ brief Uniquely indexes user_name on Users table . 
<nl> + Status CreateUserNameIndex ( ) { <nl> + return Run ( R " sql ( <nl> + CREATE UNIQUE INDEX IF NOT EXISTS UserNameIndex <nl> + ON Users ( user_name ) <nl> + WHERE user_name IS NOT NULL <nl> + ) sql " ) ; <nl> + } <nl> + <nl> + / / / \ brief Uniquely indexes email on Users table . <nl> + Status CreateUserEmailIndex ( ) { <nl> + return Run ( R " sql ( <nl> + CREATE UNIQUE INDEX IF NOT EXISTS UserEmailIndex <nl> + ON Users ( email ) <nl> + WHERE email IS NOT NULL <nl> + ) sql " ) ; <nl> + } <nl> + <nl> + / / / \ brief Indexes ( experiment_id , fingerprint ) on NodeDefs table . <nl> + Status CreateNodeDefFingerprintIndex ( ) { <nl> + return Run ( R " sql ( <nl> + CREATE INDEX IF NOT EXISTS NodeDefFingerprintIndex <nl> + ON NodeDefs ( experiment_id , fingerprint ) <nl> + WHERE fingerprint IS NOT NULL <nl> + ) sql " ) ; <nl> + } <nl> + <nl> + / / / \ brief Uniquely indexes ( run_id , node_def_id ) on RunNodeDefs table . <nl> + Status CreateRunNodeDefIndex ( ) { <nl> + return Run ( R " sql ( <nl> + CREATE UNIQUE INDEX IF NOT EXISTS RunNodeDefIndex <nl> + ON RunNodeDefs ( run_id , node_def_id ) <nl> + ) sql " ) ; <nl> + } <nl> + <nl> + Status Run ( const char * sql ) { <nl> + auto stmt = db_ - > Prepare ( sql ) ; <nl> + TF_RETURN_WITH_CONTEXT_IF_ERROR ( stmt - > StepAndReset ( ) , sql ) ; <nl> + return Status : : OK ( ) ; <nl> + } <nl> + <nl> + private : <nl> + Sqlite * db_ ; <nl> + } ; <nl> + <nl> + } / / namespace <nl> + <nl> + Status SetupTensorboardSqliteDb ( Sqlite * db ) { <nl> + SqliteSchema s ( db ) ; <nl> + TF_RETURN_IF_ERROR ( s . CreateTensorsTable ( ) ) ; <nl> + TF_RETURN_IF_ERROR ( s . CreateTensorChunksTable ( ) ) ; <nl> + TF_RETURN_IF_ERROR ( s . CreateTagsTable ( ) ) ; <nl> + TF_RETURN_IF_ERROR ( s . CreateRunsTable ( ) ) ; <nl> + TF_RETURN_IF_ERROR ( s . CreateExperimentsTable ( ) ) ; <nl> + TF_RETURN_IF_ERROR ( s . CreateUsersTable ( ) ) ; <nl> + TF_RETURN_IF_ERROR ( s . CreateNodeDefsTable ( ) ) ; <nl> + TF_RETURN_IF_ERROR ( s . CreateRunNodeDefsTable ( ) ) ; <nl> + TF_RETURN_IF_ERROR ( s . CreateTensorIndex ( ) ) ; <nl> + TF_RETURN_IF_ERROR ( s . CreateTensorChunkIndex ( ) ) ; <nl> + TF_RETURN_IF_ERROR ( s . CreateTagIdIndex ( ) ) ; <nl> + TF_RETURN_IF_ERROR ( s . CreateRunIdIndex ( ) ) ; <nl> + TF_RETURN_IF_ERROR ( s . CreateExperimentIdIndex ( ) ) ; <nl> + TF_RETURN_IF_ERROR ( s . CreateUserIdIndex ( ) ) ; <nl> + TF_RETURN_IF_ERROR ( s . CreateNodeDefIdIndex ( ) ) ; <nl> + TF_RETURN_IF_ERROR ( s . CreateTagNameIndex ( ) ) ; <nl> + TF_RETURN_IF_ERROR ( s . CreateRunNameIndex ( ) ) ; <nl> + TF_RETURN_IF_ERROR ( s . CreateExperimentNameIndex ( ) ) ; <nl> + TF_RETURN_IF_ERROR ( s . CreateUserNameIndex ( ) ) ; <nl> + TF_RETURN_IF_ERROR ( s . CreateUserEmailIndex ( ) ) ; <nl> + TF_RETURN_IF_ERROR ( s . CreateNodeDefFingerprintIndex ( ) ) ; <nl> + TF_RETURN_IF_ERROR ( s . CreateRunNodeDefIndex ( ) ) ; <nl> + return Status : : OK ( ) ; <nl> + } <nl> + <nl> + } / / namespace db <nl> + } / / namespace tensorflow <nl> new file mode 100644 <nl> index 0000000000000 . . d3a6922d94a50 <nl> mmm / dev / null <nl> ppp b / tensorflow / contrib / tensorboard / db / schema . h <nl> <nl> + / * Copyright 2017 The TensorFlow Authors . All Rights Reserved . <nl> + <nl> + Licensed under the Apache License , Version 2 . 0 ( the " License " ) ; <nl> + you may not use this file except in compliance with the License . <nl> + You may obtain a copy of the License at <nl> + <nl> + http : / / www . apache . org / licenses / LICENSE - 2 . 
0 <nl> + <nl> + Unless required by applicable law or agreed to in writing , software <nl> + distributed under the License is distributed on an " AS IS " BASIS , <nl> + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND , either express or implied . <nl> + See the License for the specific language governing permissions and <nl> + limitations under the License . <nl> + = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = * / <nl> + # ifndef TENSORFLOW_CONTRIB_TENSORBOARD_DB_SCHEMA_H_ <nl> + # define TENSORFLOW_CONTRIB_TENSORBOARD_DB_SCHEMA_H_ <nl> + <nl> + # include " tensorflow / core / lib / core / status . h " <nl> + # include " tensorflow / core / lib / db / sqlite . h " <nl> + <nl> + namespace tensorflow { <nl> + namespace db { <nl> + <nl> + / / / \ brief Creates TensorBoard SQLite tables and indexes . <nl> + / / / <nl> + / / / If they are already created , this has no effect . If schema <nl> + / / / migrations are necessary , they will be performed with logging . <nl> + Status SetupTensorboardSqliteDb ( Sqlite * db ) ; <nl> + <nl> + } / / namespace db <nl> + } / / namespace tensorflow <nl> + <nl> + # endif / / TENSORFLOW_CONTRIB_TENSORBOARD_DB_SCHEMA_H_ <nl> new file mode 100644 <nl> index 0000000000000 . . a4302dda44764 <nl> mmm / dev / null <nl> ppp b / tensorflow / contrib / tensorboard / db / schema_test . cc <nl> <nl> + / * Copyright 2017 The TensorFlow Authors . All Rights Reserved . <nl> + <nl> + Licensed under the Apache License , Version 2 . 0 ( the " License " ) ; <nl> + you may not use this file except in compliance with the License . <nl> + You may obtain a copy of the License at <nl> + <nl> + http : / / www . apache . org / licenses / LICENSE - 2 . 0 <nl> + <nl> + Unless required by applicable law or agreed to in writing , software <nl> + distributed under the License is distributed on an " AS IS " BASIS , <nl> + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND , either express or implied . <nl> + See the License for the specific language governing permissions and <nl> + limitations under the License . <nl> + = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = * / <nl> + # include " tensorflow / contrib / tensorboard / db / schema . h " <nl> + <nl> + # include < memory > <nl> + <nl> + # include " tensorflow / core / lib / core / status_test_util . h " <nl> + # include " tensorflow / core / platform / test . h " <nl> + <nl> + namespace tensorflow { <nl> + namespace db { <nl> + namespace { <nl> + <nl> + TEST ( SchemaTest , SmokeTestTensorboardSchema ) { <nl> + std : : unique_ptr < Sqlite > db ; <nl> + TF_ASSERT_OK ( Sqlite : : Open ( " : memory : " , & db ) ) ; <nl> + TF_ASSERT_OK ( SetupTensorboardSqliteDb ( db . get ( ) ) ) ; <nl> + } <nl> + <nl> + } / / namespace <nl> + } / / namespace db <nl> + } / / namespace tensorflow <nl> mmm a / tensorflow / contrib / tpu / BUILD <nl> ppp b / tensorflow / contrib / tpu / BUILD <nl> cc_library ( <nl> ] , <nl> ) <nl> <nl> + py_library ( <nl> + name = " tpu_test_util " , <nl> + srcs = [ <nl> + " python / tpu / test_util . py " , <nl> + ] , <nl> + srcs_version = " PY2AND3 " , <nl> + deps = [ <nl> + " : tpu_lib " , <nl> + " : tpu_py " , <nl> + ] , <nl> + ) <nl> + <nl> py_library ( <nl> name = " tpu_estimator " , <nl> srcs = [ <nl> new file mode 100644 <nl> index 0000000000000 . . 
f30c27f1298e2 <nl> mmm / dev / null <nl> ppp b / tensorflow / contrib / tpu / python / tpu / test_util . py <nl> <nl> + # Copyright 2017 The TensorFlow Authors . All Rights Reserved . <nl> + # <nl> + # Licensed under the Apache License , Version 2 . 0 ( the " License " ) ; <nl> + # you may not use this file except in compliance with the License . <nl> + # You may obtain a copy of the License at <nl> + # <nl> + # http : / / www . apache . org / licenses / LICENSE - 2 . 0 <nl> + # <nl> + # Unless required by applicable law or agreed to in writing , software <nl> + # distributed under the License is distributed on an " AS IS " BASIS , <nl> + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND , either express or implied . <nl> + # See the License for the specific language governing permissions and <nl> + # limitations under the License . <nl> + # = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = <nl> + " " " Utilities to ease testing on TPU devices . " " " <nl> + <nl> + from __future__ import absolute_import <nl> + from __future__ import division <nl> + from __future__ import print_function <nl> + <nl> + from tensorflow . contrib . tpu . python . tpu import tpu <nl> + <nl> + from tensorflow . python . client import session <nl> + from tensorflow . python . framework import errors <nl> + from tensorflow . python . framework import ops <nl> + from tensorflow . python . framework import test_util <nl> + from tensorflow . python . ops import gen_array_ops <nl> + from tensorflow . python . ops import variables <nl> + <nl> + <nl> + def has_tpu ( ) : <nl> + " " " Check if a TPU device is available . <nl> + <nl> + Device enumeration via ` device_lib ` currently fails for TPU systems . <nl> + ( http : / / b / 68333779 ) . To work around this , we determine the existence of a <nl> + TPU by a successful call to ` initialize_system ` . <nl> + <nl> + Returns : <nl> + boolean , True if a TPU device is available , otherwise False . <nl> + " " " <nl> + def _check ( ) : <nl> + with session . Session ( ) as sess : <nl> + sess . run ( tpu . initialize_system ( ) ) <nl> + sess . run ( tpu . shutdown_system ( ) ) <nl> + <nl> + try : <nl> + _check ( ) <nl> + return True <nl> + except errors . OpError : <nl> + return False <nl> + <nl> + <nl> + def _available_devices ( ) : <nl> + devices = [ " cpu " ] <nl> + if test_util . gpu_device_name ( ) : <nl> + devices . append ( " gpu " ) <nl> + <nl> + if has_tpu ( ) : <nl> + devices . append ( " tpu " ) <nl> + <nl> + return tuple ( devices ) <nl> + <nl> + <nl> + class TPUTestCase ( test_util . TensorFlowTestCase ) : <nl> + " " " Adds helpers for testing on TPU devices to ` TensorFlowTestCase ` . <nl> + <nl> + Example usage : <nl> + <nl> + ` ` ` <nl> + def model_fn ( features ) : <nl> + return tf . reduce_sum ( features * 2 ) <nl> + <nl> + class ModelTests ( test_util . TPUTestCase ) : <nl> + def test_sum ( self ) : <nl> + v = np . random . randn ( 10 , 10 ) . astype ( " float32 " ) <nl> + self . assert_device_output ( model_fn , [ v ] , ( v * 2 ) . sum ( ) , <nl> + devices = ( " cpu " , " tpu " ) ) <nl> + ` ` ` <nl> + " " " <nl> + <nl> + def __init__ ( self , methodName = " runTest " ) : # pylint : disable = invalid - name <nl> + super ( TPUTestCase , self ) . __init__ ( methodName ) <nl> + self . _available_devices = _available_devices ( ) <nl> + <nl> + def run_on_device ( self , model_fn , model_inputs , device ) : <nl> + " " " Runs ` model_fn ` on the given device . 
<nl> + <nl> + Raises an exception if no such device is available . ` model_fn ` should <nl> + return one or more tensors as a list or tuple . <nl> + <nl> + Args : <nl> + model_fn : Function returning one or more tensors . <nl> + model_inputs : An iterable of Numpy arrays or scalars . <nl> + These will be passed as arguments to ` model_fn ` . <nl> + device : Device to run on . One of ( " tpu " , " gpu " , " cpu " ) . <nl> + <nl> + Returns : <nl> + Output from the model function . <nl> + " " " <nl> + def _make_placeholders ( ) : <nl> + return dict ( <nl> + [ ( gen_array_ops . placeholder_with_default ( v , v . shape ) , v ) <nl> + for v in model_inputs ] ) <nl> + <nl> + if device = = " tpu " : <nl> + with self . test_session ( graph = ops . Graph ( ) ) as sess : <nl> + placeholders = _make_placeholders ( ) <nl> + tpu_computation = tpu . rewrite ( model_fn , placeholders . keys ( ) ) <nl> + sess . run ( tpu . initialize_system ( ) ) <nl> + sess . run ( variables . global_variables_initializer ( ) ) <nl> + result = sess . run ( tpu_computation , placeholders ) <nl> + sess . run ( tpu . shutdown_system ( ) ) <nl> + # TODO ( b / 36891278 ) : supports non - flat returns lists in tpu . rewrite ( ) . <nl> + if len ( result ) = = 1 : <nl> + return result [ 0 ] <nl> + return result <nl> + elif device = = " gpu " : <nl> + with self . test_session ( graph = ops . Graph ( ) , use_gpu = True ) as sess : <nl> + placeholders = _make_placeholders ( ) <nl> + sess . run ( variables . global_variables_initializer ( ) ) <nl> + return sess . run ( model_fn ( placeholders . keys ( ) ) , placeholders ) <nl> + elif device = = " cpu " : <nl> + # TODO ( power ) - - will this interact poorly with cached GPU sessions ? <nl> + with self . test_session ( graph = ops . Graph ( ) , use_gpu = False ) as sess : <nl> + placeholders = _make_placeholders ( ) <nl> + sess . run ( variables . global_variables_initializer ( ) ) <nl> + return sess . run ( model_fn ( placeholders . keys ( ) ) , placeholders ) <nl> + <nl> + def _compare_values ( self , actual_outputs , expected_outputs ) : <nl> + if isinstance ( expected_outputs , ( list , tuple ) ) : <nl> + for a , b in zip ( actual_outputs , expected_outputs ) : <nl> + self . assertAllCloseAccordingToType ( a , b ) <nl> + else : <nl> + self . assertAllCloseAccordingToType ( actual_outputs , expected_outputs ) <nl> + <nl> + def assert_device_output ( self , model_fn , model_inputs , expected_outputs , <nl> + devices = ( " cpu " , " gpu " , " tpu " ) ) : <nl> + " " " Run ` model_fn ` on the given devices . <nl> + <nl> + Results are compared via ` assertAllCloseAccordingToType ` . <nl> + <nl> + Args : <nl> + model_fn : Function returning one or more tensors <nl> + model_inputs : Numpy arrays or scalars passed as arguments to model_fn <nl> + expected_outputs : Numpy arrays or scalars to compare against . <nl> + devices : Set of devices to run on . If a device is not available , tests <nl> + will be skipped for that device . <nl> + " " " <nl> + devices = set ( devices ) . intersection ( self . _available_devices ) <nl> + <nl> + for device in devices : <nl> + device_out = self . run_on_device ( model_fn , model_inputs , device = device ) <nl> + self . _compare_values ( device_out , expected_outputs ) <nl> mmm a / tensorflow / contrib / tpu / python / tpu / tpu . py <nl> ppp b / tensorflow / contrib / tpu / python / tpu / tpu . py <nl> def AddInnerOp ( self , op ) : <nl> if self . _outer_context : <nl> self . _outer_context . 
AddInnerOp ( op ) <nl> <nl> + @ property <nl> + def grad_state ( self ) : <nl> + # Define the gradient loop state associated with the TPUReplicateContext to <nl> + # be None as the TPUReplicateContext does not get nested nor does the <nl> + # grad_state outside the TPUReplicateContext affect the graph inside so the <nl> + # grad_state should be as if this is the top - level gradient state . <nl> + return None <nl> + <nl> <nl> def replicate ( computation , <nl> inputs = None , <nl> mmm a / tensorflow / contrib / tpu / python / tpu / tpu_estimator . py <nl> ppp b / tensorflow / contrib / tpu / python / tpu / tpu_estimator . py <nl> def validate ( eval_metrics ) : <nl> <nl> if isinstance ( eval_metrics [ 1 ] , ( tuple , list ) ) : <nl> fn_args = util . fn_args ( eval_metrics [ 0 ] ) <nl> - if ' self ' in fn_args : <nl> - fn_args = tuple ( [ arg for arg in fn_args if arg ! = ' self ' ] ) <nl> if len ( eval_metrics [ 1 ] ) ! = len ( fn_args ) : <nl> raise RuntimeError ( <nl> ' In TPUEstimatorSpec . eval_metrics , length of tensors does not ' <nl> mmm a / tensorflow / contrib / training / python / training / hparam . py <nl> ppp b / tensorflow / contrib / training / python / training / hparam . py <nl> def values ( self ) : <nl> " " " <nl> return { n : getattr ( self , n ) for n in self . _hparam_types . keys ( ) } <nl> <nl> + def __contains__ ( self , key ) : <nl> + return key in self . _hparam_types <nl> + <nl> def __str__ ( self ) : <nl> return str ( sorted ( self . values ( ) . items ( ) ) ) <nl> <nl> mmm a / tensorflow / contrib / training / python / training / hparam_test . py <nl> ppp b / tensorflow / contrib / training / python / training / hparam_test . py <nl> def testEmpty ( self ) : <nl> with self . assertRaisesRegexp ( ValueError , ' Unknown hyperparameter ' ) : <nl> hparams . parse ( ' xyz = 123 ' ) <nl> <nl> + def testContains ( self ) : <nl> + hparams = hparam . HParams ( foo = 1 ) <nl> + self . assertTrue ( ' foo ' in hparams ) <nl> + self . assertFalse ( ' bar ' in hparams ) <nl> + <nl> def testSomeValues ( self ) : <nl> hparams = hparam . HParams ( aaa = 1 , b = 2 . 0 , c_c = ' relu6 ' ) <nl> self . assertDictEqual ( { ' aaa ' : 1 , ' b ' : 2 . 0 , ' c_c ' : ' relu6 ' } , hparams . values ( ) ) <nl> mmm a / tensorflow / core / common_runtime / constant_folding . cc <nl> ppp b / tensorflow / core / common_runtime / constant_folding . cc <nl> Graph * GetConstantGraph ( <nl> / / new constant node . <nl> bool ReplaceTensorWithConstant ( Graph * graph , Device * partition_device , <nl> NodeAndOutput tensor , const Tensor & constant , <nl> - const gtl : : FlatSet < Node * > & control_deps ) { <nl> + const gtl : : FlatSet < Node * > & control_deps , <nl> + int64 max_constant_size_in_bytes ) { <nl> / / Be conservative when replacing a tensor with a constant , when not <nl> / / running on CPU . <nl> / / 1 ) If the destination tensor is not an int32 tensor , and has HOST_MEMORY <nl> bool ReplaceTensorWithConstant ( Graph * graph , Device * partition_device , <nl> / / constraint , do not replace it . <nl> / / 3 ) If the constant op created does not have a kernel implementation <nl> / / for the device , do not use it . <nl> - / / 4 ) If the size of the constant in bytes is too large ( > 10M ) , do not <nl> - / / replace it . This prevents the size of the Graph from growing too large . <nl> + / / 4 ) If the size of the constant in bytes is too large ( > <nl> + / / max_constant_size_in_bytes ) , do not replace it . This prevents the size of <nl> + / / the Graph from growing too large . 
<nl> / / TODO ( keveman ) : Consider adding a new constant op that has a kernel <nl> / / implementation for all types , but with HostMemory constraint on it ' s <nl> / / output . <nl> bool ReplaceTensorWithConstant ( Graph * graph , Device * partition_device , <nl> return false ; <nl> } <nl> } <nl> - if ( constant . TotalBytes ( ) > 10 * 1024 * 1024 ) { <nl> + if ( constant . TotalBytes ( ) > max_constant_size_in_bytes ) { <nl> return false ; <nl> } <nl> <nl> Status ConstantFold ( const ConstantFoldingOptions & opts , <nl> for ( size_t c = 0 ; c < outputs . size ( ) ; + + c ) { <nl> const gtl : : FlatSet < Node * > & control_deps = <nl> constant_control_deps [ tensors_to_replace [ c ] . first ] ; <nl> - if ( ReplaceTensorWithConstant ( graph , partition_device , <nl> - tensors_to_replace [ c ] , outputs [ c ] , <nl> - control_deps ) ) { <nl> + if ( ReplaceTensorWithConstant ( <nl> + graph , partition_device , tensors_to_replace [ c ] , outputs [ c ] , <nl> + control_deps , opts . max_constant_size_in_bytes ) ) { <nl> + + num_nodes_replaced ; <nl> } <nl> } <nl> mmm a / tensorflow / core / common_runtime / constant_folding . h <nl> ppp b / tensorflow / core / common_runtime / constant_folding . h <nl> struct ConstantFoldingOptions { <nl> / / outputs . <nl> const std : : unordered_map < string , std : : vector < PartialTensorShape > > * shape_map = <nl> nullptr ; / / not owned <nl> + / / The maximum size of each constant created during constant folding <nl> + / / optimization . <nl> + int64 max_constant_size_in_bytes = 10 * 1024 * 1024 ; <nl> } ; <nl> <nl> / / Perform constant folding optimization on " graph " . <nl> mmm a / tensorflow / core / common_runtime / constant_folding_test . cc <nl> ppp b / tensorflow / core / common_runtime / constant_folding_test . cc <nl> TEST_F ( ConstantFoldingTest , TestNoReplaceLargeConstant ) { <nl> TF_EXPECT_OK ( ConstantFold ( ConstantFoldingOptions { } , nullptr , Env : : Default ( ) , <nl> nullptr , & g , & was_mutated ) ) ; <nl> EXPECT_FALSE ( was_mutated ) ; <nl> + <nl> + / / Increase the limit and the concat should now be constant folded . <nl> + ConstantFoldingOptions opt ; <nl> + opt . max_constant_size_in_bytes = 10 * 1024 * 1024 + 4 ; <nl> + TF_EXPECT_OK ( <nl> + ConstantFold ( opt , nullptr , Env : : Default ( ) , nullptr , & g , & was_mutated ) ) ; <nl> + EXPECT_TRUE ( was_mutated ) ; <nl> } <nl> <nl> TEST_F ( ConstantFoldingTest , TestNoReplaceFunctionCall ) { <nl> mmm a / tensorflow / core / common_runtime / graph_optimizer . cc <nl> ppp b / tensorflow / core / common_runtime / graph_optimizer . cc <nl> void GraphOptimizer : : Optimize ( <nl> if ( opts_ . do_constant_folding ( ) ) { <nl> ConstantFoldingOptions cf_opts ; <nl> cf_opts . shape_map = shape_map ; <nl> + if ( opts_ . max_folded_constant_in_bytes ( ) > 0 ) { <nl> + cf_opts . max_constant_size_in_bytes = <nl> + opts_ . max_folded_constant_in_bytes ( ) ; <nl> + } <nl> bool was_mutated ; <nl> ConstantFold ( cf_opts , runtime , env , device , g , & was_mutated ) <nl> . IgnoreError ( ) ; <nl> mmm a / tensorflow / core / kernels / BUILD <nl> ppp b / tensorflow / core / kernels / BUILD <nl> tf_cc_tests ( <nl> ] , <nl> ) <nl> <nl> + tf_kernel_library ( <nl> + name = " eye_functor " , <nl> + hdrs = [ " eye_functor . h " ] , <nl> + gpu_srcs = [ <nl> + " eye_functor_gpu . cu . cc " , <nl> + " eye_functor . 
h " , <nl> + ] , <nl> + visibility = [ " : friends " ] , <nl> + deps = [ <nl> + " / / tensorflow / core : framework " , <nl> + " / / third_party / eigen3 " , <nl> + ] , <nl> + alwayslink = 0 , <nl> + ) <nl> + <nl> cc_library ( <nl> name = " fifo_queue " , <nl> srcs = [ " fifo_queue . cc " ] , <nl> tf_kernel_library ( <nl> name = " cuda_solvers " , <nl> srcs = [ " cuda_solvers . cc " ] , <nl> hdrs = [ " cuda_solvers . h " ] , <nl> - gpu_srcs = [ <nl> - " cuda_solvers . h " , <nl> - " cuda_solvers_gpu . cu . cc " , <nl> - ] , <nl> # @ local_config_cuda / / cuda : cusolver , / / third_party / eigen3 : blas , <nl> # and / / third_party / libf2c all contain various parts of BLAS , LAPACK , <nl> # and f2c helper functions in global namespace . Tell the compiler to <nl> tf_kernel_library ( <nl> tf_kernel_library ( <nl> name = " matrix_inverse_op " , <nl> prefix = " matrix_inverse_op " , <nl> - deps = LINALG_DEPS , <nl> + deps = LINALG_DEPS + if_cuda ( [ " : eye_functor " ] ) , <nl> ) <nl> <nl> tf_kernel_library ( <nl> tf_kernel_library ( <nl> prefix = " qr_op " , <nl> deps = LINALG_DEPS + if_cuda ( [ <nl> " : cwise_op " , <nl> + " : eye_functor " , <nl> " : matrix_band_part_op " , <nl> ] ) , <nl> ) <nl> STATE_DEPS = [ <nl> tf_kernel_library ( <nl> name = " count_up_to_op " , <nl> prefix = " count_up_to_op " , <nl> - deps = STATE_DEPS , <nl> + deps = STATE_DEPS + [ " : variable_ops " ] , <nl> ) <nl> <nl> tf_kernel_library ( <nl> tf_kernel_library ( <nl> ) <nl> <nl> tf_kernel_library ( <nl> - name = " sloppy_interleave_dataset_op " , <nl> - srcs = [ " sloppy_interleave_dataset_op . cc " ] , <nl> + name = " parallel_interleave_dataset_op " , <nl> + srcs = [ " parallel_interleave_dataset_op . cc " ] , <nl> deps = [ <nl> " : captured_function " , <nl> " : dataset " , <nl> tf_kernel_library ( <nl> " : map_and_batch_dataset_op " , <nl> " : map_dataset_op " , <nl> " : padded_batch_dataset_op " , <nl> + " : parallel_interleave_dataset_op " , <nl> " : parallel_map_dataset_op " , <nl> " : prefetch_dataset_op " , <nl> " : range_dataset_op " , <nl> tf_kernel_library ( <nl> " : scan_dataset_op " , <nl> " : shuffle_dataset_op " , <nl> " : skip_dataset_op " , <nl> - " : sloppy_interleave_dataset_op " , <nl> " : sparse_tensor_slice_dataset_op " , <nl> " : sql_dataset_ops " , <nl> " : take_dataset_op " , <nl> mmm a / tensorflow / core / kernels / count_up_to_op . cc <nl> ppp b / tensorflow / core / kernels / count_up_to_op . cc <nl> limitations under the License . <nl> = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = * / <nl> <nl> # include " tensorflow / core / framework / op_kernel . h " <nl> + # include " tensorflow / core / kernels / variable_ops . h " <nl> # include " tensorflow / core / lib / core / errors . h " <nl> # include " tensorflow / core / platform / mutex . h " <nl> # include " tensorflow / core / platform / types . h " <nl> class CountUpToOp : public OpKernel { <nl> T limit_ ; <nl> } ; <nl> <nl> - # define REGISTER ( TYPE ) \ <nl> - REGISTER_KERNEL_BUILDER ( \ <nl> - Name ( " CountUpTo " ) . TypeConstraint < TYPE > ( " T " ) . 
Device ( DEVICE_CPU ) , \ <nl> - CountUpToOp < TYPE > ) <nl> + template < class T > <nl> + class ResourceCountUpToOp : public OpKernel { <nl> + public : <nl> + explicit ResourceCountUpToOp ( OpKernelConstruction * context ) <nl> + : OpKernel ( context ) { <nl> + OP_REQUIRES_OK ( context , context - > GetAttr ( " limit " , & limit_ ) ) ; <nl> + OP_REQUIRES_OK ( context , context - > GetAttr ( " T " , & dtype_ ) ) ; <nl> + } <nl> + <nl> + void Compute ( OpKernelContext * context ) override { <nl> + Var * variable = nullptr ; <nl> + OP_REQUIRES_OK ( <nl> + context , <nl> + LookupResource < Var > ( context , HandleFromInput ( context , 0 ) , & variable ) ) ; <nl> + core : : ScopedUnref s ( variable ) ; <nl> + mutex_lock l ( * variable - > mu ( ) ) ; <nl> + Tensor before_increment = * variable - > tensor ( ) ; <nl> + OP_REQUIRES ( <nl> + context , TensorShapeUtils : : IsScalar ( before_increment . shape ( ) ) , <nl> + errors : : InvalidArgument ( " input is not a scalar : " , <nl> + before_increment . shape ( ) . DebugString ( ) ) ) ; <nl> + if ( before_increment . scalar < T > ( ) ( ) > = limit_ ) { <nl> + context - > SetStatus ( errors : : OutOfRange ( " Reached limit of " , limit_ ) ) ; <nl> + return ; <nl> + } <nl> + / / Allocate new buffer <nl> + AllocatorAttributes attr ; <nl> + attr . set_gpu_compatible ( true ) ; <nl> + attr . set_nic_compatible ( true ) ; <nl> + PersistentTensor unused ; <nl> + Tensor * tmp ; <nl> + OP_REQUIRES_OK ( context , context - > allocate_persistent ( <nl> + dtype_ , TensorShape ( { } ) , & unused , & tmp , attr ) ) ; <nl> + * variable - > tensor ( ) = * tmp ; <nl> + tmp - > scalar < T > ( ) ( ) = before_increment . scalar < T > ( ) ( ) + 1 ; <nl> + context - > set_output ( 0 , before_increment ) ; <nl> + } <nl> + <nl> + private : <nl> + T limit_ ; <nl> + DataType dtype_ ; <nl> + } ; <nl> + <nl> + # define REGISTER ( TYPE ) \ <nl> + REGISTER_KERNEL_BUILDER ( \ <nl> + Name ( " CountUpTo " ) . TypeConstraint < TYPE > ( " T " ) . Device ( DEVICE_CPU ) , \ <nl> + CountUpToOp < TYPE > ) \ <nl> + REGISTER_KERNEL_BUILDER ( \ <nl> + Name ( " ResourceCountUpTo " ) . TypeConstraint < TYPE > ( " T " ) . Device ( DEVICE_CPU ) , \ <nl> + ResourceCountUpToOp < TYPE > ) <nl> <nl> REGISTER ( int32 ) ; <nl> REGISTER ( int64 ) ; <nl> mmm a / tensorflow / core / kernels / cuda_solvers . h <nl> ppp b / tensorflow / core / kernels / cuda_solvers . h <nl> limitations under the License . <nl> # include " cuda / include / cusolverDn . h " <nl> # include " tensorflow / core / framework / op_kernel . h " <nl> # include " tensorflow / core / framework / tensor . h " <nl> - # include " tensorflow / core / framework / tensor_types . h " <nl> # include " tensorflow / core / lib / core / status . h " <nl> # include " tensorflow / core / platform / stream_executor . h " <nl> <nl> class DeviceLapackInfo : public ScratchSpace < int > { <nl> } <nl> } ; <nl> <nl> - namespace functor { <nl> - <nl> - / / Helper functor to set a batch of matrices to the identity . <nl> - / / TODO ( rmlarsen ) : Use this kernel to replace the horribly inefficient tf . eye <nl> - / / op . 
<nl> - template < typename Device , typename Scalar > <nl> - struct EyeFunctor { <nl> - void operator ( ) ( const Device & device , <nl> - typename TTypes < Scalar , 3 > : : Tensor matrix_batch ) ; <nl> - } ; <nl> - <nl> - } / / namespace functor <nl> - <nl> template < typename Scalar > <nl> ScratchSpace < Scalar > CudaSolver : : GetScratchSpace ( const TensorShape & shape , <nl> const string & debug_info , <nl> new file mode 100644 <nl> index 0000000000000 . . 70f093f81366e <nl> mmm / dev / null <nl> ppp b / tensorflow / core / kernels / eye_functor . h <nl> <nl> + / * Copyright 2017 The TensorFlow Authors . All Rights Reserved . <nl> + <nl> + Licensed under the Apache License , Version 2 . 0 ( the " License " ) ; <nl> + you may not use this file except in compliance with the License . <nl> + You may obtain a copy of the License at <nl> + <nl> + http : / / www . apache . org / licenses / LICENSE - 2 . 0 <nl> + <nl> + Unless required by applicable law or agreed to in writing , software <nl> + distributed under the License is distributed on an " AS IS " BASIS , <nl> + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND , either express or implied . <nl> + See the License for the specific language governing permissions and <nl> + limitations under the License . <nl> + = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = * / <nl> + # ifndef THIRD_PARTY_TENSORFLOW_CORE_KERNELS_EYE_FUNCTOR_H_ <nl> + # define THIRD_PARTY_TENSORFLOW_CORE_KERNELS_EYE_FUNCTOR_H_ <nl> + <nl> + # include " tensorflow / core / framework / tensor_types . h " <nl> + <nl> + namespace tensorflow { <nl> + namespace functor { <nl> + <nl> + template < typename Device , typename Scalar > <nl> + struct EyeFunctor { <nl> + void operator ( ) ( const Device & device , <nl> + typename TTypes < Scalar , 3 > : : Tensor matrix_batch ) ; <nl> + } ; <nl> + <nl> + } / / namespace functor <nl> + } / / namespace tensorflow <nl> + <nl> + # endif / / THIRD_PARTY_TENSORFLOW_CORE_KERNELS_EYE_FUNCTOR_H_ <nl> similarity index 62 % <nl> rename from tensorflow / core / kernels / cuda_solvers_gpu . cu . cc <nl> rename to tensorflow / core / kernels / eye_functor_gpu . cu . cc <nl> mmm a / tensorflow / core / kernels / cuda_solvers_gpu . cu . cc <nl> ppp b / tensorflow / core / kernels / eye_functor_gpu . cu . cc <nl> limitations under the License . <nl> <nl> # define EIGEN_USE_GPU <nl> <nl> - # include " tensorflow / core / kernels / cuda_solvers . h " <nl> + # include " tensorflow / core / kernels / eye_functor . h " <nl> <nl> - # include < complex > <nl> # include " third_party / eigen3 / unsupported / Eigen / CXX11 / Tensor " <nl> # include " tensorflow / core / framework / tensor_types . h " <nl> + # include " tensorflow / core / platform / types . h " <nl> # include " tensorflow / core / util / cuda_kernel_helper . h " <nl> <nl> namespace tensorflow { <nl> namespace functor { <nl> typedef Eigen : : GpuDevice GPUDevice ; <nl> <nl> template < typename Scalar > <nl> - __global__ void EyeKernel ( Cuda3DLaunchConfig config , int batch_size , int m , <nl> - int n , Scalar * matrix_batch_ptr ) { <nl> - const int matrix_size = m * n ; <nl> + __global__ void EyeKernel ( int num_threads , int batch_size , int m , int n , <nl> + Scalar * output_ptr ) { <nl> const Scalar one = Scalar ( 1 ) ; <nl> - CUDA_AXIS_KERNEL_LOOP ( batch , config . 
virtual_thread_count , x ) { <nl> - if ( batch > = batch_size ) { <nl> - break ; <nl> - } <nl> - CUDA_AXIS_KERNEL_LOOP ( row , config . virtual_thread_count , y ) { <nl> - if ( row > = m ) { <nl> - break ; <nl> - } <nl> - const int row_start = batch * matrix_size + row * n ; <nl> - CUDA_AXIS_KERNEL_LOOP ( col , config . virtual_thread_count , z ) { <nl> - if ( col > = n ) { <nl> - break ; <nl> - } <nl> - matrix_batch_ptr [ row_start + col ] = row = = col ? one : Scalar ( ) ; <nl> - } <nl> - } <nl> + const Scalar zero = Scalar ( 0 ) ; <nl> + CUDA_1D_KERNEL_LOOP ( index , num_threads ) { <nl> + / / TODO ( rmlarsen ) : Benchmark to see if it ' s just as fast to use mod ( % ) , <nl> + / / since it ' s easier to read . <nl> + const int global_row = index / n ; <nl> + const int col = index - global_row * n ; <nl> + const int batch = global_row / m ; <nl> + const int row = global_row - batch * m ; <nl> + output_ptr [ index ] = col = = row ? one : zero ; <nl> } <nl> } <nl> <nl> struct EyeFunctor < GPUDevice , Scalar > { <nl> const int batch_size = matrix_batch . dimension ( 0 ) ; <nl> const int m = matrix_batch . dimension ( 1 ) ; <nl> const int n = matrix_batch . dimension ( 2 ) ; <nl> - Cuda3DLaunchConfig config = GetCuda3DLaunchConfig ( batch_size , m , n , device , <nl> - EyeKernel < Scalar > , 0 , 0 ) ; <nl> + CudaLaunchConfig config = GetCudaLaunchConfig ( batch_size * m * n , device ) ; <nl> EyeKernel < < < config . block_count , config . thread_per_block , 0 , <nl> - device . stream ( ) > > > ( config , batch_size , m , n , <nl> - matrix_batch . data ( ) ) ; <nl> + device . stream ( ) > > > ( config . virtual_thread_count , batch_size , m , <nl> + n , matrix_batch . data ( ) ) ; <nl> } <nl> } ; <nl> <nl> mmm a / tensorflow / core / kernels / map_and_batch_dataset_op . cc <nl> ppp b / tensorflow / core / kernels / map_and_batch_dataset_op . cc <nl> class MapAndBatchDatasetOp : public UnaryDatasetOpKernel { <nl> const DataTypeVector output_types_ ; <nl> const std : : vector < PartialTensorShape > output_shapes_ ; <nl> const std : : unique_ptr < CapturedFunction > captured_func_ ; <nl> - const Eigen : : ThreadPoolDevice * device_ ; / / not owned <nl> + const Eigen : : ThreadPoolDevice * device_ ; / / not owned <nl> } ; <nl> <nl> const int graph_def_version_ ; <nl> mmm a / tensorflow / core / kernels / matrix_inverse_op . cc <nl> ppp b / tensorflow / core / kernels / matrix_inverse_op . cc <nl> limitations under the License . <nl> # if GOOGLE_CUDA <nl> # include " third_party / eigen3 / unsupported / Eigen / CXX11 / Tensor " <nl> # include " tensorflow / core / kernels / cuda_solvers . h " <nl> + # include " tensorflow / core / kernels / eye_functor . h " <nl> # include " tensorflow / core / kernels / transpose_functor . h " <nl> # endif <nl> <nl> similarity index 84 % <nl> rename from tensorflow / core / kernels / sloppy_interleave_dataset_op . cc <nl> rename to tensorflow / core / kernels / parallel_interleave_dataset_op . cc <nl> mmm a / tensorflow / core / kernels / sloppy_interleave_dataset_op . cc <nl> ppp b / tensorflow / core / kernels / parallel_interleave_dataset_op . cc <nl> limitations under the License . <nl> # include " tensorflow / core / common_runtime / function . h " <nl> # include " tensorflow / core / framework / partial_tensor_shape . h " <nl> # include " tensorflow / core / framework / tensor . h " <nl> + # include " tensorflow / core / kernels / captured_function . h " <nl> # include " tensorflow / core / kernels / dataset_utils . 
h " <nl> # include " tensorflow / core / lib / gtl / cleanup . h " <nl> # include " tensorflow / core / lib / random / random . h " <nl> <nl> - # include " tensorflow / core / kernels / captured_function . h " <nl> - <nl> namespace tensorflow { <nl> <nl> namespace { <nl> namespace { <nl> / / See documentation in . . / ops / dataset_ops . cc for a high - level <nl> / / description of the following op . <nl> <nl> - class SloppyInterleaveDatasetOp : public UnaryDatasetOpKernel { <nl> + class ParallelInterleaveDatasetOp : public UnaryDatasetOpKernel { <nl> public : <nl> - explicit SloppyInterleaveDatasetOp ( OpKernelConstruction * ctx ) <nl> + explicit ParallelInterleaveDatasetOp ( OpKernelConstruction * ctx ) <nl> : UnaryDatasetOpKernel ( ctx ) , <nl> graph_def_version_ ( ctx - > graph_def_version ( ) ) { <nl> OP_REQUIRES_OK ( ctx , ctx - > GetAttr ( " f " , & func_ ) ) ; <nl> class SloppyInterleaveDatasetOp : public UnaryDatasetOpKernel { <nl> OP_REQUIRES ( ctx , block_length > 0 , <nl> errors : : InvalidArgument ( " ` block_length ` must be > 0 " ) ) ; <nl> <nl> + bool sloppy ; <nl> + OP_REQUIRES_OK ( ctx , ParseScalarArgument ( ctx , " sloppy " , & sloppy ) ) ; <nl> + <nl> std : : unique_ptr < CapturedFunction > captured_func ; <nl> OP_REQUIRES_OK ( ctx , CapturedFunction : : Create ( ctx , func_ , graph_def_version_ , <nl> std : : move ( other_arguments ) , <nl> & captured_func ) ) ; <nl> <nl> * output = new Dataset ( input , std : : move ( captured_func ) , cycle_length , <nl> - block_length , output_types_ , output_shapes_ ) ; <nl> + block_length , sloppy , output_types_ , output_shapes_ ) ; <nl> } <nl> <nl> private : <nl> class SloppyInterleaveDatasetOp : public UnaryDatasetOpKernel { <nl> public : <nl> Dataset ( const DatasetBase * input , <nl> std : : unique_ptr < CapturedFunction > captured_func , int64 cycle_length , <nl> - int64 block_length , const DataTypeVector & output_types , <nl> + int64 block_length , bool sloppy , const DataTypeVector & output_types , <nl> const std : : vector < PartialTensorShape > & output_shapes ) <nl> : input_ ( input ) , <nl> captured_func_ ( std : : move ( captured_func ) ) , <nl> cycle_length_ ( cycle_length ) , <nl> block_length_ ( block_length ) , <nl> + sloppy_ ( sloppy ) , <nl> output_types_ ( output_types ) , <nl> output_shapes_ ( output_shapes ) { <nl> input_ - > Ref ( ) ; <nl> class SloppyInterleaveDatasetOp : public UnaryDatasetOpKernel { <nl> <nl> std : : unique_ptr < IteratorBase > MakeIterator ( <nl> const string & prefix ) const override { <nl> - return std : : unique_ptr < IteratorBase > ( <nl> - new Iterator ( { this , strings : : StrCat ( prefix , " : : SloppyInterleave " ) } ) ) ; <nl> + return std : : unique_ptr < IteratorBase > ( new Iterator ( <nl> + { this , strings : : StrCat ( prefix , " : : ParallelInterleave " ) } ) ) ; <nl> } <nl> <nl> const DataTypeVector & output_dtypes ( ) const override { <nl> class SloppyInterleaveDatasetOp : public UnaryDatasetOpKernel { <nl> } <nl> <nl> string DebugString ( ) override { <nl> - return " SloppyInterleaveDatasetOp : : Dataset " ; <nl> + return " ParallelInterleaveDatasetOp : : Dataset " ; <nl> } <nl> <nl> private : <nl> class SloppyInterleaveDatasetOp : public UnaryDatasetOpKernel { <nl> bool * end_of_sequence ) override { <nl> mutex_lock l ( mu_ ) ; <nl> TF_RETURN_IF_ERROR ( EnsureWorkerThreadsStarted ( ctx ) ) ; <nl> - / / Search for available items , blocking if necessary . <nl> + const int64 num_workers = worker_threads_ . 
size ( ) ; <nl> + if ( num_workers = = 0 ) { <nl> + * end_of_sequence = true ; <nl> + return Status : : OK ( ) ; <nl> + } <nl> while ( ! cancelled_ ) { <nl> - for ( size_t i = 0 ; i < dataset ( ) - > cycle_length_ ; + + i ) { <nl> - size_t index = ( next_index_ + i ) % dataset ( ) - > cycle_length_ ; <nl> + / / Wait for an item to become available , blocking if necessary . If we <nl> + / / are allowed to be sloppy , we can skip over input datasets that do <nl> + / / not have an item readily available . <nl> + const int64 n = dataset ( ) - > sloppy_ ? num_workers : 1LL ; <nl> + for ( int64 i = 0 ; i < n ; + + i ) { <nl> + int64 index = ( next_index_ + i ) % num_workers ; <nl> if ( output_elements_ [ index ] . is_produced ) { <nl> next_index_ = index ; <nl> if ( i = = 0 ) { <nl> block_count_ + + ; <nl> if ( block_count_ = = dataset ( ) - > block_length_ ) { <nl> - next_index_ = ( index + 1 ) % dataset ( ) - > cycle_length_ ; <nl> + next_index_ = ( index + 1 ) % num_workers ; <nl> block_count_ = 0 ; <nl> } <nl> } else { <nl> class SloppyInterleaveDatasetOp : public UnaryDatasetOpKernel { <nl> if ( output_elements_ [ index ] . end_of_sequence ) { <nl> output_elements_ [ index ] . is_produced = false ; <nl> output_elements_ [ index ] . cond_var . notify_one ( ) ; <nl> - next_index_ = ( index + 1 ) % dataset ( ) - > cycle_length_ ; <nl> + next_index_ = ( index + 1 ) % num_workers ; <nl> block_count_ = 0 ; <nl> i = - 1 ; / / Restart the inner loop <nl> continue ; <nl> class SloppyInterleaveDatasetOp : public UnaryDatasetOpKernel { <nl> * end_of_sequence = true ; <nl> return Status : : OK ( ) ; <nl> } <nl> + <nl> + / / If we are not allowed to be sloppy and <nl> + / / ` worker_threads_ [ next_index ] ` has finished , advance ` next_index ` . <nl> + if ( ! dataset ( ) - > sloppy_ & & worker_threads_ [ next_index_ ] . finished ) { <nl> + next_index_ = ( next_index_ + 1 ) % num_workers ; <nl> + continue ; <nl> + } <nl> + <nl> / / No values available ; wait until woken up . <nl> + / / TODO ( jsimsa ) : Use slot - specific condition variable for <nl> + / / coordination of elements consumption . <nl> cond_var_ . wait ( l ) ; <nl> } <nl> return errors : : Cancelled ( <nl> - " SloppyInterleaveDatasetOp : : Dataset : : Iterator : : GetNext " ) ; <nl> + " ParallelInterleaveDatasetOp : : Dataset : : Iterator : : GetNext " ) ; <nl> } <nl> <nl> private : <nl> class SloppyInterleaveDatasetOp : public UnaryDatasetOpKernel { <nl> condition_variable cond_var ; <nl> } ; <nl> <nl> + struct ThreadStatus { <nl> + / / The underlying thread uses ` finished ` to communicate to the producer <nl> + / / that it has finished . <nl> + bool finished = false ; <nl> + / / The underlying thread object . <nl> + std : : unique_ptr < Thread > thread ; <nl> + <nl> + explicit ThreadStatus ( Thread * thread ) : thread ( thread ) { } <nl> + } ; <nl> + <nl> Status EnsureWorkerThreadsStarted ( IteratorContext * ctx ) <nl> EXCLUSIVE_LOCKS_REQUIRED ( mu_ ) { <nl> if ( worker_threads_ . empty ( ) ) { <nl> class SloppyInterleaveDatasetOp : public UnaryDatasetOpKernel { <nl> std : : unique_ptr < IteratorBase > itr ; <nl> TF_RETURN_IF_ERROR ( dataset : : MakeIteratorFromInputElement ( <nl> ctx , args , i , dataset ( ) - > captured_func_ . get ( ) , prefix ( ) , & itr ) ) ; <nl> - worker_threads_ . emplace_back ( <nl> - std : : unique_ptr < Thread > ( ctx - > env ( ) - > StartThread ( <nl> - { } , " worker_thread " , <nl> - std : : bind ( & Iterator : : WorkerThread , this , <nl> - new IteratorContext ( * ctx ) , i , itr . 
release ( ) ) ) ) ) ; <nl> + worker_threads_ . emplace_back ( ctx - > env ( ) - > StartThread ( <nl> + { } , " worker_thread " , <nl> + std : : bind ( & Iterator : : WorkerThread , this , <nl> + new IteratorContext ( * ctx ) , i , itr . release ( ) ) ) ) ; <nl> num_active_threads_ = i + 1 ; <nl> } <nl> } <nl> class SloppyInterleaveDatasetOp : public UnaryDatasetOpKernel { <nl> std : : unique_ptr < IteratorBase > out_iterator ( out_iterator_ptr ) ; <nl> auto cleanup = gtl : : MakeCleanup ( [ this , thread_index ] { <nl> mutex_lock l ( mu_ ) ; <nl> + worker_threads_ [ thread_index ] . finished = true ; <nl> num_active_threads_ - - ; <nl> cond_var_ . notify_all ( ) ; <nl> } ) ; <nl> class SloppyInterleaveDatasetOp : public UnaryDatasetOpKernel { <nl> / / Pointers to the worker threads . This must be last to ensure the <nl> / / threads have exited before any other members are deallocated . <nl> / / TODO ( b / 65178177 ) : Avoid allocating additional threads . <nl> - std : : vector < std : : unique_ptr < Thread > > worker_threads_ GUARDED_BY ( mu_ ) ; <nl> + std : : vector < ThreadStatus > worker_threads_ GUARDED_BY ( mu_ ) ; <nl> } ; <nl> <nl> const DatasetBase * const input_ ; <nl> const std : : unique_ptr < CapturedFunction > captured_func_ ; <nl> const int64 cycle_length_ ; <nl> const int64 block_length_ ; <nl> + const bool sloppy_ ; <nl> const DataTypeVector output_types_ ; <nl> const std : : vector < PartialTensorShape > output_shapes_ ; <nl> } ; <nl> class SloppyInterleaveDatasetOp : public UnaryDatasetOpKernel { <nl> NameAttrList func_ ; <nl> } ; <nl> <nl> - REGISTER_KERNEL_BUILDER ( Name ( " SloppyInterleaveDataset " ) . Device ( DEVICE_CPU ) , <nl> - SloppyInterleaveDatasetOp ) ; <nl> + REGISTER_KERNEL_BUILDER ( Name ( " ParallelInterleaveDataset " ) . Device ( DEVICE_CPU ) , <nl> + ParallelInterleaveDatasetOp ) ; <nl> <nl> } / / namespace <nl> <nl> mmm a / tensorflow / core / kernels / prefetch_dataset_op . cc <nl> ppp b / tensorflow / core / kernels / prefetch_dataset_op . cc <nl> class PrefetchDatasetOp : public UnaryDatasetOpKernel { <nl> Dataset ( const DatasetBase * input , int64 buffer_size , <nl> IteratorContext : : Params ctx_params ) <nl> : input_ ( input ) , <nl> - <nl> buffer_size_ ( buffer_size ) , <nl> ctx_params_ ( std : : move ( ctx_params ) ) { <nl> input_ - > Ref ( ) ; <nl> mmm a / tensorflow / core / kernels / qr_op_impl . h <nl> ppp b / tensorflow / core / kernels / qr_op_impl . h <nl> limitations under the License . <nl> # include " third_party / eigen3 / unsupported / Eigen / CXX11 / Tensor " <nl> # include " tensorflow / core / kernels / cuda_solvers . h " <nl> # include " tensorflow / core / kernels / cwise_ops . h " <nl> + # include " tensorflow / core / kernels / eye_functor . h " <nl> # include " tensorflow / core / kernels / matrix_band_part_op . h " <nl> # include " tensorflow / core / kernels / transpose_functor . h " <nl> # endif <nl> mmm a / tensorflow / core / kernels / reader_dataset_ops . cc <nl> ppp b / tensorflow / core / kernels / reader_dataset_ops . cc <nl> class TextLineDatasetOp : public DatasetOpKernel { <nl> <nl> io : : ZlibCompressionOptions zlib_compression_options = <nl> io : : ZlibCompressionOptions : : DEFAULT ( ) ; <nl> - bool use_compression = false ; <nl> - if ( compression_type . 
empty ( ) ) { <nl> - use_compression = false ; <nl> - } else if ( compression_type = = " ZLIB " ) { <nl> - use_compression = true ; <nl> + if ( compression_type = = " ZLIB " ) { <nl> zlib_compression_options = io : : ZlibCompressionOptions : : DEFAULT ( ) ; <nl> } else if ( compression_type = = " GZIP " ) { <nl> - use_compression = true ; <nl> zlib_compression_options = io : : ZlibCompressionOptions : : GZIP ( ) ; <nl> } else { <nl> OP_REQUIRES ( ctx , compression_type . empty ( ) , <nl> class TextLineDatasetOp : public DatasetOpKernel { <nl> filenames . push_back ( filenames_tensor - > flat < string > ( ) ( i ) ) ; <nl> } <nl> <nl> - * output = new Dataset ( std : : move ( filenames ) , use_compression , <nl> + * output = new Dataset ( ctx , std : : move ( filenames ) , compression_type , <nl> zlib_compression_options ) ; <nl> } <nl> <nl> private : <nl> - class Dataset : public DatasetBase { <nl> + class Dataset : public GraphDatasetBase { <nl> public : <nl> - Dataset ( std : : vector < string > filenames , bool use_compression , <nl> + Dataset ( OpKernelContext * ctx , std : : vector < string > filenames , <nl> + const string & compression_type , <nl> const io : : ZlibCompressionOptions & options ) <nl> - : filenames_ ( std : : move ( filenames ) ) , <nl> - use_compression_ ( use_compression ) , <nl> + : GraphDatasetBase ( ctx ) , <nl> + filenames_ ( std : : move ( filenames ) ) , <nl> + compression_type_ ( compression_type ) , <nl> + use_compression_ ( ! compression_type . empty ( ) ) , <nl> options_ ( options ) { } <nl> <nl> std : : unique_ptr < IteratorBase > MakeIterator ( <nl> class TextLineDatasetOp : public DatasetOpKernel { <nl> <nl> string DebugString ( ) override { return " TextLineDatasetOp : : Dataset " ; } <nl> <nl> + protected : <nl> + Status AsGraphDefInternal ( DatasetGraphDefBuilder * b , <nl> + Node * * output ) const override { <nl> + Node * filenames = nullptr ; <nl> + Node * compression_type = nullptr ; <nl> + Node * buffer_size = nullptr ; <nl> + TF_RETURN_IF_ERROR ( b - > AddVector ( filenames_ , & filenames ) ) ; <nl> + TF_RETURN_IF_ERROR ( b - > AddScalar ( compression_type_ , & compression_type ) ) ; <nl> + TF_RETURN_IF_ERROR ( <nl> + b - > AddScalar ( options_ . input_buffer_size , & buffer_size ) ) ; <nl> + TF_RETURN_IF_ERROR ( b - > AddDataset ( <nl> + this , { filenames , compression_type , buffer_size } , output ) ) ; <nl> + return Status : : OK ( ) ; <nl> + } <nl> + <nl> private : <nl> class Iterator : public DatasetIterator < Dataset > { <nl> public : <nl> class TextLineDatasetOp : public DatasetOpKernel { <nl> mutex_lock l ( mu_ ) ; <nl> do { <nl> / / We are currently processing a file , so try to read the next line . <nl> - if ( processing_file_ ) { <nl> + if ( buffered_input_stream_ ) { <nl> string line_contents ; <nl> Status s = buffered_input_stream_ - > ReadLine ( & line_contents ) ; <nl> <nl> class TextLineDatasetOp : public DatasetOpKernel { <nl> / / Report non - EOF errors to the caller . <nl> return s ; <nl> } <nl> - <nl> / / We have reached the end of the current file , so maybe <nl> / / move on to next file . <nl> - processing_file_ = false ; <nl> - input_stream_ . reset ( ) ; <nl> - zlib_input_stream_ . reset ( ) ; <nl> - buffered_input_stream_ . reset ( ) ; <nl> - file_ . reset ( ) ; <nl> + ResetStreamsLocked ( ) ; <nl> + + current_file_index_ ; <nl> } <nl> <nl> class TextLineDatasetOp : public DatasetOpKernel { <nl> return Status : : OK ( ) ; <nl> } <nl> <nl> - / / Actually move on to next file . 
<nl> - TF_RETURN_IF_ERROR ( ctx - > env ( ) - > NewRandomAccessFile ( <nl> - dataset ( ) - > filenames_ [ current_file_index_ ] , & file_ ) ) ; <nl> - processing_file_ = true ; <nl> - input_stream_ . reset ( <nl> - new io : : RandomAccessInputStream ( file_ . get ( ) , false ) ) ; <nl> - if ( dataset ( ) - > use_compression_ ) { <nl> - zlib_input_stream_ . reset ( new io : : ZlibInputStream ( <nl> - input_stream_ . get ( ) , dataset ( ) - > options_ . input_buffer_size , <nl> - dataset ( ) - > options_ . input_buffer_size , dataset ( ) - > options_ ) ) ; <nl> - buffered_input_stream_ . reset ( new io : : BufferedInputStream ( <nl> - zlib_input_stream_ . get ( ) , dataset ( ) - > options_ . input_buffer_size , <nl> - false ) ) ; <nl> - } else { <nl> - buffered_input_stream_ . reset ( new io : : BufferedInputStream ( <nl> - input_stream_ . get ( ) , dataset ( ) - > options_ . input_buffer_size , <nl> - false ) ) ; <nl> - } <nl> + TF_RETURN_IF_ERROR ( SetupStreamsLocked ( ctx - > env ( ) ) ) ; <nl> } while ( true ) ; <nl> } <nl> <nl> + protected : <nl> + Status SaveInternal ( IteratorStateWriter * writer ) override { <nl> + mutex_lock l ( mu_ ) ; <nl> + TF_RETURN_IF_ERROR ( writer - > WriteScalar ( full_name ( " current_file_index " ) , <nl> + current_file_index_ ) ) ; <nl> + <nl> + / / ` buffered_input_stream_ ` is empty if <nl> + / / 1 . GetNext has not been called even once . <nl> + / / 2 . All files have been read and iterator has been exhausted . <nl> + if ( buffered_input_stream_ ) { <nl> + TF_RETURN_IF_ERROR ( writer - > WriteScalar ( <nl> + full_name ( " current_pos " ) , buffered_input_stream_ - > Tell ( ) ) ) ; <nl> + } <nl> + return Status : : OK ( ) ; <nl> + } <nl> + <nl> + Status RestoreInternal ( OpKernelContext * ctx , <nl> + IteratorStateReader * reader ) override { <nl> + mutex_lock l ( mu_ ) ; <nl> + ResetStreamsLocked ( ) ; <nl> + int64 current_file_index ; <nl> + TF_RETURN_IF_ERROR ( reader - > ReadScalar ( full_name ( " current_file_index " ) , <nl> + & current_file_index ) ) ; <nl> + current_file_index_ = size_t ( current_file_index ) ; <nl> + / / The key " current_pos " is written only if the iterator was saved <nl> + / / with an open file . <nl> + if ( reader - > Contains ( full_name ( " current_pos " ) ) ) { <nl> + int64 current_pos ; <nl> + TF_RETURN_IF_ERROR ( <nl> + reader - > ReadScalar ( full_name ( " current_pos " ) , & current_pos ) ) ; <nl> + <nl> + TF_RETURN_IF_ERROR ( SetupStreamsLocked ( ctx - > env ( ) ) ) ; <nl> + TF_RETURN_IF_ERROR ( buffered_input_stream_ - > Seek ( current_pos ) ) ; <nl> + } <nl> + return Status : : OK ( ) ; <nl> + } <nl> + <nl> private : <nl> + / / Sets up reader streams to read from the file at ` current_file_index_ ` . <nl> + Status SetupStreamsLocked ( Env * env ) EXCLUSIVE_LOCKS_REQUIRED ( mu_ ) { <nl> + if ( current_file_index_ > = dataset ( ) - > filenames_ . size ( ) ) { <nl> + return errors : : InvalidArgument ( <nl> + " current_file_index_ : " , current_file_index_ , <nl> + " > = filenames_ . size ( ) : " , dataset ( ) - > filenames_ . size ( ) ) ; <nl> + } <nl> + <nl> + / / Actually move on to next file . <nl> + TF_RETURN_IF_ERROR ( env - > NewRandomAccessFile ( <nl> + dataset ( ) - > filenames_ [ current_file_index_ ] , & file_ ) ) ; <nl> + input_stream_ . reset ( <nl> + new io : : RandomAccessInputStream ( file_ . get ( ) , false ) ) ; <nl> + <nl> + if ( dataset ( ) - > use_compression_ ) { <nl> + zlib_input_stream_ . reset ( new io : : ZlibInputStream ( <nl> + input_stream_ . get ( ) , dataset ( ) - > options_ . 
input_buffer_size , <nl> + dataset ( ) - > options_ . input_buffer_size , dataset ( ) - > options_ ) ) ; <nl> + buffered_input_stream_ . reset ( new io : : BufferedInputStream ( <nl> + zlib_input_stream_ . get ( ) , dataset ( ) - > options_ . input_buffer_size , <nl> + false ) ) ; <nl> + } else { <nl> + buffered_input_stream_ . reset ( new io : : BufferedInputStream ( <nl> + input_stream_ . get ( ) , dataset ( ) - > options_ . input_buffer_size , <nl> + false ) ) ; <nl> + } <nl> + return Status : : OK ( ) ; <nl> + } <nl> + <nl> + / / Resets all reader streams . <nl> + void ResetStreamsLocked ( ) EXCLUSIVE_LOCKS_REQUIRED ( mu_ ) { <nl> + input_stream_ . reset ( ) ; <nl> + zlib_input_stream_ . reset ( ) ; <nl> + buffered_input_stream_ . reset ( ) ; <nl> + file_ . reset ( ) ; <nl> + } <nl> + <nl> mutex mu_ ; <nl> - bool processing_file_ GUARDED_BY ( mu_ ) = false ; <nl> std : : unique_ptr < io : : RandomAccessInputStream > input_stream_ <nl> GUARDED_BY ( mu_ ) ; <nl> std : : unique_ptr < io : : ZlibInputStream > zlib_input_stream_ GUARDED_BY ( mu_ ) ; <nl> class TextLineDatasetOp : public DatasetOpKernel { <nl> } ; <nl> <nl> const std : : vector < string > filenames_ ; <nl> + const string compression_type_ ; <nl> const bool use_compression_ ; <nl> const io : : ZlibCompressionOptions options_ ; <nl> } ; <nl> mmm a / tensorflow / core / kernels / summary_interface . cc <nl> ppp b / tensorflow / core / kernels / summary_interface . cc <nl> WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND , either express or implied . <nl> See the License for the specific language governing permissions and <nl> limitations under the License . <nl> = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = * / <nl> + # include " tensorflow / core / kernels / summary_interface . h " <nl> + <nl> + # include < utility > <nl> <nl> # include " tensorflow / compiler / xla / ptr_util . h " <nl> # include " tensorflow / core / framework / op_kernel . h " <nl> limitations under the License . <nl> # include " tensorflow / core / framework / summary . pb . h " <nl> # include " tensorflow / core / framework / types . h " <nl> # include " tensorflow / core / framework / types . pb . h " <nl> - # include " tensorflow / core / kernels / summary_interface . h " <nl> # include " tensorflow / core / lib / histogram / histogram . h " <nl> # include " tensorflow / core / lib / io / path . h " <nl> # include " tensorflow / core / lib / png / png_io . h " <nl> # include " tensorflow / core / lib / wav / wav_io . h " <nl> - # include " tensorflow / core / util / event . pb . h " <nl> # include " tensorflow / core / util / events_writer . h " <nl> <nl> namespace tensorflow { <nl> class SummaryWriterImpl : public SummaryWriterInterface { <nl> <nl> Status WriteTensor ( int64 global_step , Tensor t , const string & tag , <nl> const string & serialized_metadata ) override { <nl> - Summary s ; <nl> - Summary : : Value * v = s . add_value ( ) ; <nl> + std : : unique_ptr < Event > e { new Event } ; <nl> + e - > set_step ( global_step ) ; <nl> + e - > set_wall_time ( GetWallTime ( ) ) ; <nl> + Summary : : Value * v = e - > mutable_summary ( ) - > add_value ( ) ; <nl> t . 
AsProtoTensorContent ( v - > mutable_tensor ( ) ) ; <nl> v - > set_tag ( tag ) ; <nl> v - > mutable_metadata ( ) - > ParseFromString ( serialized_metadata ) ; <nl> - return Enqueue ( global_step , s ) ; <nl> + return WriteEvent ( std : : move ( e ) ) ; <nl> } <nl> <nl> Status WriteScalar ( int64 global_step , Tensor t , const string & tag ) override { <nl> - Summary s ; <nl> - Summary : : Value * v = s . add_value ( ) ; <nl> + std : : unique_ptr < Event > e { new Event } ; <nl> + e - > set_step ( global_step ) ; <nl> + e - > set_wall_time ( GetWallTime ( ) ) ; <nl> + Summary : : Value * v = e - > mutable_summary ( ) - > add_value ( ) ; <nl> v - > set_tag ( tag ) ; <nl> float value ; <nl> TF_RETURN_IF_ERROR ( TensorValueAt < float > ( t , 0 , & value ) ) ; <nl> v - > set_simple_value ( value ) ; <nl> - return Enqueue ( global_step , s ) ; <nl> + return WriteEvent ( std : : move ( e ) ) ; <nl> } <nl> <nl> Status WriteHistogram ( int64 global_step , Tensor t , <nl> const string & tag ) override { <nl> - Summary s ; <nl> - Summary : : Value * v = s . add_value ( ) ; <nl> + std : : unique_ptr < Event > e { new Event } ; <nl> + e - > set_step ( global_step ) ; <nl> + e - > set_wall_time ( GetWallTime ( ) ) ; <nl> + Summary : : Value * v = e - > mutable_summary ( ) - > add_value ( ) ; <nl> v - > set_tag ( tag ) ; <nl> histogram : : Histogram histo ; <nl> for ( int64 i = 0 ; i < t . NumElements ( ) ; i + + ) { <nl> class SummaryWriterImpl : public SummaryWriterInterface { <nl> } <nl> <nl> histo . EncodeToProto ( v - > mutable_histo ( ) , false / * Drop zero buckets * / ) ; <nl> - return Enqueue ( global_step , s ) ; <nl> + return WriteEvent ( std : : move ( e ) ) ; <nl> } <nl> <nl> Status WriteImage ( int64 global_step , Tensor tensor , const string & tag , <nl> class SummaryWriterImpl : public SummaryWriterInterface { <nl> return errors : : InvalidArgument ( " Tensor too large for summary " , <nl> tensor . shape ( ) . DebugString ( ) ) ; <nl> } <nl> - Summary s ; <nl> + std : : unique_ptr < Event > e { new Event } ; <nl> + e - > set_step ( global_step ) ; <nl> + e - > set_wall_time ( GetWallTime ( ) ) ; <nl> + Summary * s = e - > mutable_summary ( ) ; <nl> / / The casts and h * w cannot overflow because of the limits above . <nl> const int batch_size = static_cast < int > ( tensor . dim_size ( 0 ) ) ; <nl> const int h = static_cast < int > ( tensor . dim_size ( 1 ) ) ; <nl> class SummaryWriterImpl : public SummaryWriterInterface { <nl> & values ( i , 0 , 0 ) , Eigen : : DSizes < Eigen : : DenseIndex , 2 > ( hw , depth ) ) ; <nl> } ; <nl> TF_RETURN_IF_ERROR ( <nl> - AddImages ( tag , max_images , batch_size , w , h , depth , ith_image , & s ) ) ; <nl> + AddImages ( tag , max_images , batch_size , w , h , depth , ith_image , s ) ) ; <nl> } else if ( tensor . dtype ( ) = = DT_HALF ) { <nl> TF_RETURN_IF_ERROR ( NormalizeAndAddImages < Eigen : : half > ( <nl> - tensor , max_images , h , w , hw , depth , batch_size , tag , bad_color , & s ) ) ; <nl> + tensor , max_images , h , w , hw , depth , batch_size , tag , bad_color , s ) ) ; <nl> } else if ( tensor . dtype ( ) = = DT_FLOAT ) { <nl> TF_RETURN_IF_ERROR ( NormalizeAndAddImages < float > ( <nl> - tensor , max_images , h , w , hw , depth , batch_size , tag , bad_color , & s ) ) ; <nl> + tensor , max_images , h , w , hw , depth , batch_size , tag , bad_color , s ) ) ; <nl> } else { <nl> return errors : : InvalidArgument ( <nl> " Only DT_INT8 , DT_HALF , and DT_FLOAT images are supported . Got " , <nl> DataTypeString ( tensor . 
dtype ( ) ) ) ; <nl> } <nl> <nl> - return Enqueue ( global_step , s ) ; <nl> + return WriteEvent ( std : : move ( e ) ) ; <nl> } <nl> <nl> Status WriteAudio ( int64 global_step , Tensor tensor , const string & tag , <nl> class SummaryWriterImpl : public SummaryWriterInterface { <nl> const int64 length_frames = tensor . dim_size ( 1 ) ; <nl> const int64 num_channels = <nl> tensor . dims ( ) = = 2 ? 1 : tensor . dim_size ( tensor . dims ( ) - 1 ) ; <nl> - Summary s ; <nl> + std : : unique_ptr < Event > e { new Event } ; <nl> + e - > set_step ( global_step ) ; <nl> + e - > set_wall_time ( GetWallTime ( ) ) ; <nl> + Summary * s = e - > mutable_summary ( ) ; <nl> const int N = std : : min < int > ( max_outputs , batch_size ) ; <nl> for ( int i = 0 ; i < N ; + + i ) { <nl> - Summary : : Value * v = s . add_value ( ) ; <nl> + Summary : : Value * v = s - > add_value ( ) ; <nl> if ( max_outputs > 1 ) { <nl> v - > set_tag ( strings : : StrCat ( tag , " / audio / " , i ) ) ; <nl> } else { <nl> class SummaryWriterImpl : public SummaryWriterInterface { <nl> channels_by_frames . data ( ) , sample_rate_truncated , num_channels , <nl> length_frames , sa - > mutable_encoded_audio_string ( ) ) ) ; <nl> } <nl> - <nl> - return Enqueue ( global_step , s ) ; <nl> + return WriteEvent ( std : : move ( e ) ) ; <nl> } <nl> <nl> - string DebugString ( ) override { return " SummaryWriterImpl " ; } <nl> - <nl> - private : <nl> - Status Enqueue ( int64 global_step , const Summary & summary ) { <nl> + Status WriteEvent ( std : : unique_ptr < Event > event ) override { <nl> mutex_lock ml ( mu_ ) ; <nl> - queue_ . emplace_back ( global_step , summary , env_ - > NowMicros ( ) ) ; <nl> + queue_ . emplace_back ( std : : move ( event ) ) ; <nl> if ( queue_ . size ( ) > = max_queue_ | | <nl> env_ - > NowMicros ( ) - last_flush_ > 1000 * flush_millis_ ) { <nl> return InternalFlush ( ) ; <nl> class SummaryWriterImpl : public SummaryWriterInterface { <nl> return Status : : OK ( ) ; <nl> } <nl> <nl> + string DebugString ( ) override { return " SummaryWriterImpl " ; } <nl> + <nl> + private : <nl> + double GetWallTime ( ) { <nl> + return static_cast < double > ( env_ - > NowMicros ( ) ) / 1 . 0e6 ; <nl> + } <nl> + <nl> Status InternalFlush ( ) EXCLUSIVE_LOCKS_REQUIRED ( mu_ ) { <nl> - for ( const EventInfo & e : queue_ ) { <nl> - Event event ; <nl> - event . set_step ( std : : get < 0 > ( e ) ) ; <nl> - * event . mutable_summary ( ) = std : : get < 1 > ( e ) ; <nl> - event . set_wall_time ( static_cast < double > ( std : : get < 2 > ( e ) ) / 1 . 0e6 ) ; <nl> - events_writer_ - > WriteEvent ( event ) ; <nl> + for ( const std : : unique_ptr < Event > & e : queue_ ) { <nl> + events_writer_ - > WriteEvent ( * e ) ; <nl> } <nl> queue_ . clear ( ) ; <nl> if ( ! events_writer_ - > Flush ( ) ) { <nl> class SummaryWriterImpl : public SummaryWriterInterface { <nl> const int flush_millis_ ; <nl> uint64 last_flush_ ; <nl> Env * env_ ; <nl> - using EventInfo = std : : tuple < int64 , Summary , int64 > ; <nl> mutex mu_ ; <nl> - std : : vector < EventInfo > queue_ GUARDED_BY ( mu_ ) ; <nl> + std : : vector < std : : unique_ptr < Event > > queue_ GUARDED_BY ( mu_ ) ; <nl> / / A pointer to allow deferred construction . <nl> std : : unique_ptr < EventsWriter > events_writer_ GUARDED_BY ( mu_ ) ; <nl> std : : vector < std : : pair < string , SummaryMetadata > > registered_summaries_ <nl> mmm a / tensorflow / core / kernels / summary_interface . h <nl> ppp b / tensorflow / core / kernels / summary_interface . h <nl> limitations under the License . 
<nl> # ifndef TENSORFLOW_CORE_KERNELS_SUMMARY_INTERFACE_H_ <nl> # define TENSORFLOW_CORE_KERNELS_SUMMARY_INTERFACE_H_ <nl> <nl> + # include < memory > <nl> <nl> # include " tensorflow / core / framework / resource_mgr . h " <nl> + # include " tensorflow / core / util / event . pb . h " <nl> <nl> namespace tensorflow { <nl> <nl> class SummaryWriterInterface : public ResourceBase { <nl> <nl> virtual Status WriteAudio ( int64 global_step , Tensor t , const string & tag , <nl> int max_outputs_ , float sample_rate ) = 0 ; <nl> + <nl> + virtual Status WriteEvent ( std : : unique_ptr < Event > e ) = 0 ; <nl> } ; <nl> <nl> / / Creates a SummaryWriterInterface instance which writes to a file . It will <nl> mmm a / tensorflow / core / kernels / summary_interface_test . cc <nl> ppp b / tensorflow / core / kernels / summary_interface_test . cc <nl> WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND , either express or implied . <nl> See the License for the specific language governing permissions and <nl> limitations under the License . <nl> = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = * / <nl> - <nl> - # include < vector > <nl> + # include " tensorflow / core / kernels / summary_interface . h " <nl> <nl> # include " tensorflow / core / framework / summary . pb . h " <nl> - # include " tensorflow / core / kernels / summary_interface . h " <nl> # include " tensorflow / core / lib / core / errors . h " <nl> # include " tensorflow / core / lib / core / refcount . h " <nl> # include " tensorflow / core / lib / io / path . h " <nl> class SummaryInterfaceTest : public : : testing : : Test { <nl> protected : <nl> Status SummaryTestHelper ( <nl> const string & test_name , <nl> - std : : function < Status ( SummaryWriterInterface * ) > writer_fn , <nl> - std : : function < void ( const Event & ) > test_fn ) { <nl> + const std : : function < Status ( SummaryWriterInterface * ) > & writer_fn , <nl> + const std : : function < void ( const Event & ) > & test_fn ) { <nl> static std : : set < string > * tests = new std : : set < string > ( ) ; <nl> CHECK ( tests - > insert ( test_name ) . second ) < < " : " < < test_name ; <nl> <nl> TEST_F ( SummaryInterfaceTest , WriteAudio ) { <nl> } ) ) ; <nl> } <nl> <nl> + TEST_F ( SummaryInterfaceTest , WriteEvent ) { <nl> + TF_CHECK_OK ( <nl> + SummaryTestHelper ( " event_test " , <nl> + [ ] ( SummaryWriterInterface * writer ) { <nl> + std : : unique_ptr < Event > e { new Event } ; <nl> + e - > set_step ( 7 ) ; <nl> + e - > mutable_summary ( ) - > add_value ( ) - > set_tag ( " hi " ) ; <nl> + TF_RETURN_IF_ERROR ( writer - > WriteEvent ( std : : move ( e ) ) ) ; <nl> + TF_RETURN_IF_ERROR ( writer - > Flush ( ) ) ; <nl> + return Status : : OK ( ) ; <nl> + } , <nl> + [ ] ( const Event & e ) { <nl> + EXPECT_EQ ( e . step ( ) , 7 ) ; <nl> + CHECK_EQ ( e . summary ( ) . value_size ( ) , 1 ) ; <nl> + EXPECT_EQ ( e . summary ( ) . value ( 0 ) . tag ( ) , " hi " ) ; <nl> + } ) ) ; <nl> + } <nl> + <nl> TEST_F ( SummaryInterfaceTest , WallTime ) { <nl> env_ . AdvanceByMillis ( 7023 ) ; <nl> TF_CHECK_OK ( SummaryTestHelper ( <nl> mmm a / tensorflow / core / ops / compat / ops_history . v1 . pbtxt <nl> ppp b / tensorflow / core / ops / compat / ops_history . v1 . 
pbtxt <nl> op { <nl> type : " type " <nl> } <nl> } <nl> + op { <nl> + name : " ParallelInterleaveDataset " <nl> + input_arg { <nl> + name : " input_dataset " <nl> + type : DT_VARIANT <nl> + } <nl> + input_arg { <nl> + name : " other_arguments " <nl> + type_list_attr : " Targuments " <nl> + } <nl> + input_arg { <nl> + name : " cycle_length " <nl> + type : DT_INT64 <nl> + } <nl> + input_arg { <nl> + name : " block_length " <nl> + type : DT_INT64 <nl> + } <nl> + input_arg { <nl> + name : " sloppy " <nl> + type : DT_BOOL <nl> + } <nl> + output_arg { <nl> + name : " handle " <nl> + type : DT_VARIANT <nl> + } <nl> + attr { <nl> + name : " f " <nl> + type : " func " <nl> + } <nl> + attr { <nl> + name : " Targuments " <nl> + type : " list ( type ) " <nl> + has_minimum : true <nl> + } <nl> + attr { <nl> + name : " output_types " <nl> + type : " list ( type ) " <nl> + has_minimum : true <nl> + minimum : 1 <nl> + } <nl> + attr { <nl> + name : " output_shapes " <nl> + type : " list ( shape ) " <nl> + has_minimum : true <nl> + minimum : 1 <nl> + } <nl> + } <nl> op { <nl> name : " ParallelMapDataset " <nl> input_arg { <nl> op { <nl> } <nl> is_stateful : true <nl> } <nl> + op { <nl> + name : " ResourceCountUpTo " <nl> + input_arg { <nl> + name : " resource " <nl> + type : DT_RESOURCE <nl> + } <nl> + output_arg { <nl> + name : " output " <nl> + type_attr : " T " <nl> + } <nl> + attr { <nl> + name : " limit " <nl> + type : " int " <nl> + } <nl> + attr { <nl> + name : " T " <nl> + type : " type " <nl> + allowed_values { <nl> + list { <nl> + type : DT_INT32 <nl> + type : DT_INT64 <nl> + } <nl> + } <nl> + } <nl> + is_stateful : true <nl> + } <nl> op { <nl> name : " ResourceGather " <nl> input_arg { <nl> op { <nl> } <nl> } <nl> } <nl> - op { <nl> - name : " SloppyInterleaveDataset " <nl> - input_arg { <nl> - name : " input_dataset " <nl> - type : DT_VARIANT <nl> - } <nl> - input_arg { <nl> - name : " other_arguments " <nl> - type_list_attr : " Targuments " <nl> - } <nl> - input_arg { <nl> - name : " cycle_length " <nl> - type : DT_INT64 <nl> - } <nl> - input_arg { <nl> - name : " block_length " <nl> - type : DT_INT64 <nl> - } <nl> - output_arg { <nl> - name : " handle " <nl> - type : DT_VARIANT <nl> - } <nl> - attr { <nl> - name : " f " <nl> - type : " func " <nl> - } <nl> - attr { <nl> - name : " Targuments " <nl> - type : " list ( type ) " <nl> - has_minimum : true <nl> - } <nl> - attr { <nl> - name : " output_types " <nl> - type : " list ( type ) " <nl> - has_minimum : true <nl> - minimum : 1 <nl> - } <nl> - attr { <nl> - name : " output_shapes " <nl> - type : " list ( shape ) " <nl> - has_minimum : true <nl> - minimum : 1 <nl> - } <nl> - is_stateful : true <nl> - } <nl> - op { <nl> - name : " SloppyInterleaveDataset " <nl> - input_arg { <nl> - name : " input_dataset " <nl> - type : DT_VARIANT <nl> - } <nl> - input_arg { <nl> - name : " other_arguments " <nl> - type_list_attr : " Targuments " <nl> - } <nl> - input_arg { <nl> - name : " cycle_length " <nl> - type : DT_INT64 <nl> - } <nl> - input_arg { <nl> - name : " block_length " <nl> - type : DT_INT64 <nl> - } <nl> - output_arg { <nl> - name : " handle " <nl> - type : DT_VARIANT <nl> - } <nl> - attr { <nl> - name : " f " <nl> - type : " func " <nl> - } <nl> - attr { <nl> - name : " Targuments " <nl> - type : " list ( type ) " <nl> - has_minimum : true <nl> - } <nl> - attr { <nl> - name : " output_types " <nl> - type : " list ( type ) " <nl> - has_minimum : true <nl> - minimum : 1 <nl> - } <nl> - attr { <nl> - name : " output_shapes " <nl> - type : " list 
( shape ) " <nl> - has_minimum : true <nl> - minimum : 1 <nl> - } <nl> - } <nl> op { <nl> name : " Softmax " <nl> input_arg { <nl> mmm a / tensorflow / core / ops / dataset_ops . cc <nl> ppp b / tensorflow / core / ops / dataset_ops . cc <nl> f : A function mapping elements of ` input_dataset ` , concatenated with <nl> ` output_types ` and ` output_shapes ` . <nl> ) doc " ) ; <nl> <nl> - REGISTER_OP ( " SloppyInterleaveDataset " ) <nl> + REGISTER_OP ( " ParallelInterleaveDataset " ) <nl> . Input ( " input_dataset : variant " ) <nl> . Input ( " other_arguments : Targuments " ) <nl> . Input ( " cycle_length : int64 " ) <nl> . Input ( " block_length : int64 " ) <nl> + . Input ( " sloppy : bool " ) <nl> . Output ( " handle : variant " ) <nl> . Attr ( " f : func " ) <nl> . Attr ( " Targuments : list ( type ) > = 0 " ) <nl> mmm a / tensorflow / core / ops / ops . pbtxt <nl> ppp b / tensorflow / core / ops / ops . pbtxt <nl> op { <nl> summary : " Interleave the values from the ` data ` tensors into a single tensor . " <nl> description : " Builds a merged tensor such that \ n \ n ` ` ` python \ n merged [ indices [ m ] [ i , . . . , j ] , . . . ] = data [ m ] [ i , . . . , j , . . . ] \ n ` ` ` \ n \ nFor example , if each ` indices [ m ] ` is scalar or vector , we have \ n \ n ` ` ` python \ n # Scalar indices : \ n merged [ indices [ m ] , . . . ] = data [ m ] [ . . . ] \ n \ n # Vector indices : \ n merged [ indices [ m ] [ i ] , . . . ] = data [ m ] [ i , . . . ] \ n ` ` ` \ n \ nEach ` data [ i ] . shape ` must start with the corresponding ` indices [ i ] . shape ` , \ nand the rest of ` data [ i ] . shape ` must be constant w . r . t . ` i ` . That is , we \ nmust have ` data [ i ] . shape = indices [ i ] . shape + constant ` . In terms of this \ n ` constant ` , the output shape is \ n \ n merged . shape = [ max ( indices ) ] + constant \ n \ nValues may be merged in parallel , so if an index appears in both ` indices [ m ] [ i ] ` \ nand ` indices [ n ] [ j ] ` , the result may be invalid . This differs from the normal \ nDynamicStitch operator that defines the behavior in that case . \ n \ nFor example : \ n \ n ` ` ` python \ n indices [ 0 ] = 6 \ n indices [ 1 ] = [ 4 , 1 ] \ n indices [ 2 ] = [ [ 5 , 2 ] , [ 0 , 3 ] ] \ n data [ 0 ] = [ 61 , 62 ] \ n data [ 1 ] = [ [ 41 , 42 ] , [ 11 , 12 ] ] \ n data [ 2 ] = [ [ [ 51 , 52 ] , [ 21 , 22 ] ] , [ [ 1 , 2 ] , [ 31 , 32 ] ] ] \ n merged = [ [ 1 , 2 ] , [ 11 , 12 ] , [ 21 , 22 ] , [ 31 , 32 ] , [ 41 , 42 ] , \ n [ 51 , 52 ] , [ 61 , 62 ] ] \ n ` ` ` \ n \ nThis method can be used to merge partitions created by ` dynamic_partition ` \ nas illustrated on the following example : \ n \ n ` ` ` python \ n # Apply function ( increments x_i ) on elements for which a certain condition \ n # apply ( x_i ! = - 1 in this example ) . \ n x = tf . constant ( [ 0 . 1 , - 1 . , 5 . 2 , 4 . 3 , - 1 . , 7 . 4 ] ) \ n condition_mask = tf . not_equal ( x , tf . constant ( - 1 . ) ) \ n partitioned_data = tf . dynamic_partition ( \ n x , tf . cast ( condition_mask , tf . int32 ) , 2 ) \ n partitioned_data [ 1 ] = partitioned_data [ 1 ] + 1 . 0 \ n condition_indices = tf . dynamic_partition ( \ n tf . range ( tf . shape ( x ) [ 0 ] ) , tf . cast ( condition_mask , tf . int32 ) , 2 ) \ n x = tf . dynamic_stitch ( condition_indices , partitioned_data ) \ n # Here x = [ 1 . 1 , - 1 . , 6 . 2 , 5 . 3 , - 1 , 8 . 4 ] , the - 1 . values remain \ n # unchanged . 
\ n ` ` ` \ n \ n < div style = \ " width : 70 % ; margin : auto ; margin - bottom : 10px ; margin - top : 20px ; \ " > \ n < img style = \ " width : 100 % \ " src = \ " https : / / www . tensorflow . org / images / DynamicStitch . png \ " alt > \ n < / div > " <nl> } <nl> + op { <nl> + name : " ParallelInterleaveDataset " <nl> + input_arg { <nl> + name : " input_dataset " <nl> + type : DT_VARIANT <nl> + } <nl> + input_arg { <nl> + name : " other_arguments " <nl> + type_list_attr : " Targuments " <nl> + } <nl> + input_arg { <nl> + name : " cycle_length " <nl> + type : DT_INT64 <nl> + } <nl> + input_arg { <nl> + name : " block_length " <nl> + type : DT_INT64 <nl> + } <nl> + input_arg { <nl> + name : " sloppy " <nl> + type : DT_BOOL <nl> + } <nl> + output_arg { <nl> + name : " handle " <nl> + type : DT_VARIANT <nl> + } <nl> + attr { <nl> + name : " f " <nl> + type : " func " <nl> + description : " A function mapping elements of ` input_dataset ` , concatenated with \ n ` other_arguments ` , to a Dataset variant that contains elements matching \ n ` output_types ` and ` output_shapes ` . " <nl> + } <nl> + attr { <nl> + name : " Targuments " <nl> + type : " list ( type ) " <nl> + has_minimum : true <nl> + } <nl> + attr { <nl> + name : " output_types " <nl> + type : " list ( type ) " <nl> + has_minimum : true <nl> + minimum : 1 <nl> + } <nl> + attr { <nl> + name : " output_shapes " <nl> + type : " list ( shape ) " <nl> + has_minimum : true <nl> + minimum : 1 <nl> + } <nl> + summary : " Creates a dataset that applies ` f ` to the outputs of ` input_dataset ` . " <nl> + description : " The resulting dataset is similar to the ` InterleaveDataset ` , with the exception \ nthat if retrieving the next value from a dataset would cause the requester to \ nblock , it will skip that input dataset . This dataset is especially useful \ nwhen loading data from a variable - latency datastores ( e . g . HDFS , GCS ) , as it \ nallows the training step to proceed so long as some data is available . \ n \ n ! ! WARNING ! ! This dataset is not deterministic ! " <nl> + } <nl> op { <nl> name : " ParallelMapDataset " <nl> input_arg { <nl> op { <nl> description : " Note that in dense implementation of this algorithm , ms and mom will \ nupdate even if the grad is zero , but in this sparse implementation , ms \ nand mom will not update in iterations during which the grad is zero . \ n \ nmean_square = decay * mean_square + ( 1 - decay ) * gradient * * 2 \ nDelta = learning_rate * gradient / sqrt ( mean_square + epsilon ) \ n \ nms < - rho * ms_ { t - 1 } + ( 1 - rho ) * grad * grad \ nmom < - momentum * mom_ { t - 1 } + lr * grad / sqrt ( ms + epsilon ) \ nvar < - var - mom " <nl> is_stateful : true <nl> } <nl> + op { <nl> + name : " ResourceCountUpTo " <nl> + input_arg { <nl> + name : " resource " <nl> + description : " Should be from a scalar ` Variable ` node . " <nl> + type : DT_RESOURCE <nl> + } <nl> + output_arg { <nl> + name : " output " <nl> + description : " A copy of the input before increment . If nothing else modifies the \ ninput , the values produced will all be distinct . " <nl> + type_attr : " T " <nl> + } <nl> + attr { <nl> + name : " limit " <nl> + type : " int " <nl> + description : " If incrementing ref would bring it above limit , instead generates an \ n \ ' OutOfRange \ ' error . 
" <nl> + } <nl> + attr { <nl> + name : " T " <nl> + type : " type " <nl> + allowed_values { <nl> + list { <nl> + type : DT_INT32 <nl> + type : DT_INT64 <nl> + } <nl> + } <nl> + } <nl> + summary : " Increments variable pointed to by \ ' resource \ ' until it reaches \ ' limit \ ' . " <nl> + is_stateful : true <nl> + } <nl> op { <nl> name : " ResourceGather " <nl> input_arg { <nl> op { <nl> summary : " Return a slice from \ ' input \ ' . " <nl> description : " The output tensor is a tensor with dimensions described by \ ' size \ ' \ nwhose values are extracted from \ ' input \ ' starting at the offsets in \ n \ ' begin \ ' . \ n \ n * Requirements * : \ n 0 < = begin [ i ] < = begin [ i ] + size [ i ] < = Di for i in [ 0 , n ) " <nl> } <nl> - op { <nl> - name : " SloppyInterleaveDataset " <nl> - input_arg { <nl> - name : " input_dataset " <nl> - type : DT_VARIANT <nl> - } <nl> - input_arg { <nl> - name : " other_arguments " <nl> - type_list_attr : " Targuments " <nl> - } <nl> - input_arg { <nl> - name : " cycle_length " <nl> - type : DT_INT64 <nl> - } <nl> - input_arg { <nl> - name : " block_length " <nl> - type : DT_INT64 <nl> - } <nl> - output_arg { <nl> - name : " handle " <nl> - type : DT_VARIANT <nl> - } <nl> - attr { <nl> - name : " f " <nl> - type : " func " <nl> - description : " A function mapping elements of ` input_dataset ` , concatenated with \ n ` other_arguments ` , to a Dataset variant that contains elements matching \ n ` output_types ` and ` output_shapes ` . " <nl> - } <nl> - attr { <nl> - name : " Targuments " <nl> - type : " list ( type ) " <nl> - has_minimum : true <nl> - } <nl> - attr { <nl> - name : " output_types " <nl> - type : " list ( type ) " <nl> - has_minimum : true <nl> - minimum : 1 <nl> - } <nl> - attr { <nl> - name : " output_shapes " <nl> - type : " list ( shape ) " <nl> - has_minimum : true <nl> - minimum : 1 <nl> - } <nl> - summary : " Creates a dataset that applies ` f ` to the outputs of ` input_dataset ` . " <nl> - description : " The resulting dataset is similar to the ` InterleaveDataset ` , with the exception \ nthat if retrieving the next value from a dataset would cause the requester to \ nblock , it will skip that input dataset . This dataset is especially useful \ nwhen loading data from a variable - latency datastores ( e . g . HDFS , GCS ) , as it \ nallows the training step to proceed so long as some data is available . \ n \ n ! ! WARNING ! ! This dataset is not deterministic ! " <nl> - } <nl> op { <nl> name : " Softmax " <nl> input_arg { <nl> mmm a / tensorflow / core / ops / state_ops . cc <nl> ppp b / tensorflow / core / ops / state_ops . cc <nl> output : A copy of the input before increment . If nothing else modifies the <nl> input , the values produced will all be distinct . <nl> ) doc " ) ; <nl> <nl> + REGISTER_OP ( " ResourceCountUpTo " ) <nl> + . Input ( " resource : resource " ) <nl> + . Output ( " output : T " ) <nl> + . Attr ( " limit : int " ) <nl> + . Attr ( " T : { int32 , int64 } " ) <nl> + . SetShapeFn ( [ ] ( InferenceContext * c ) { <nl> + auto * handle_data = c - > input_handle_shapes_and_types ( 0 ) ; <nl> + if ( handle_data = = nullptr | | handle_data - > empty ( ) ) { <nl> + return errors : : InvalidArgument ( " Handle has no shape / type information . " ) ; <nl> + } <nl> + shape_inference : : ShapeAndType shape_and_type = ( * handle_data ) [ 0 ] ; <nl> + DataType value_dtype ; <nl> + TF_RETURN_IF_ERROR ( c - > GetAttr ( " T " , & value_dtype ) ) ; <nl> + if ( value_dtype ! = shape_and_type . 
dtype ) { <nl> + return errors : : InvalidArgument ( <nl> + " Data types do not match : " , DataTypeString ( value_dtype ) , " and " , <nl> + DataTypeString ( shape_and_type . dtype ) ) ; <nl> + } <nl> + ShapeHandle output ; <nl> + TF_RETURN_IF_ERROR ( c - > WithRank ( shape_and_type . shape , 0 , & output ) ) ; <nl> + c - > set_output ( 0 , output ) ; <nl> + return Status : : OK ( ) ; <nl> + } ) <nl> + . Doc ( R " doc ( <nl> + Increments variable pointed to by ' resource ' until it reaches ' limit ' . <nl> + <nl> + resource : Should be from a scalar ` Variable ` node . <nl> + limit : If incrementing ref would bring it above limit , instead generates an <nl> + ' OutOfRange ' error . <nl> + output : A copy of the input before increment . If nothing else modifies the <nl> + input , the values produced will all be distinct . <nl> + ) doc " ) ; <nl> + <nl> } / / namespace tensorflow <nl> mmm a / tensorflow / core / platform / cloud / curl_http_request . cc <nl> ppp b / tensorflow / core / platform / cloud / curl_http_request . cc <nl> int CurlHttpRequest : : ProgressCallback ( void * this_object , curl_off_t dltotal , <nl> } <nl> <nl> if ( now - that - > last_progress_timestamp_ > kInactivityTimeoutSeconds ) { <nl> - LOG ( ERROR ) < < " The transmission has been stuck at " < < current_progress <nl> - < < " bytes for " < < now - that - > last_progress_timestamp_ <nl> + LOG ( ERROR ) < < " The transmission of request " < < this_object <nl> + < < " has been stuck at " < < current_progress < < " of " <nl> + < < dltotal + ultotal < < " bytes for " <nl> + < < now - that - > last_progress_timestamp_ <nl> < < " seconds and will be aborted . " ; <nl> return 1 ; / / Will abort the request . <nl> } <nl> mmm a / tensorflow / core / protobuf / config . proto <nl> ppp b / tensorflow / core / protobuf / config . proto <nl> message OptimizerOptions { <nl> / / If true , perform constant folding optimization on the graph . <nl> bool do_constant_folding = 2 ; <nl> <nl> + / / Constant folding optimization replaces tensors whose values can be <nl> + / / predetermined , with constant nodes . To avoid inserting too large constants , <nl> + / / the size of each constant created can be limited . If this value is zero , a <nl> + / / default limit of 10 MiB will be applied . If constant folding optimization <nl> + / / is disabled , this value is ignored . <nl> + int64 max_folded_constant_in_bytes = 6 ; <nl> + <nl> / / If true , perform function inlining on the graph . <nl> bool do_function_inlining = 4 ; <nl> <nl> mmm a / tensorflow / docs_src / get_started / get_started . md <nl> ppp b / tensorflow / docs_src / get_started / get_started . md <nl> input_fn = tf . estimator . inputs . numpy_input_fn ( <nl> train_input_fn = tf . estimator . inputs . numpy_input_fn ( <nl> { " x " : x_train } , y_train , batch_size = 4 , num_epochs = 1000 , shuffle = False ) <nl> eval_input_fn = tf . estimator . inputs . numpy_input_fn ( <nl> - { " x " : x_eval } , y_eval , batch_size = 4 , num_epochs = 1000 , shuffle = False ) <nl> + { " x " : x_eval } , y_eval , batch_size = 4 , num_epochs = 1 , shuffle = False ) <nl> <nl> # train <nl> estimator . train ( input_fn = input_fn , steps = 1000 ) <nl> mmm a / tensorflow / docs_src / get_started / linear_regression . md <nl> ppp b / tensorflow / docs_src / get_started / linear_regression . 
md <nl> This unit provides the following short examples demonstrating how <nl> to implement regression in Estimators : <nl> <nl> < table > <nl> - < tr > < th > Example < / th > < th > Data Set < / th > < th > Demonstrates How To . . . < / th > < / tr > <nl> + < tr > < th > Example < / th > < th > Demonstrates How To . . . < / th > < / tr > <nl> <nl> < tr > <nl> < td > < a href = " https : / / www . tensorflow . org / code / tensorflow / examples / get_started / regression / linear_regression . py " > linear_regression . py < / a > < / td > <nl> - < td > [ imports85 ] ( https : / / archive . ics . uci . edu / ml / datasets / automobile ) < / td > <nl> < td > Use the @ { tf . estimator . LinearRegressor } Estimator to train a <nl> regression model on numeric data . < / td > <nl> < / tr > <nl> <nl> < tr > <nl> < td > < a href = " https : / / www . tensorflow . org / code / tensorflow / examples / get_started / regression / linear_regression_categorical . py " > linear_regression_categorical . py < / a > < / td > <nl> - < td > [ imports85 ] ( https : / / archive . ics . uci . edu / ml / datasets / automobile ) < / td > <nl> < td > Use the @ { tf . estimator . LinearRegressor } Estimator to train a <nl> regression model on categorical data . < / td > <nl> < / tr > <nl> <nl> < tr > <nl> < td > < a href = " https : / / www . tensorflow . org / code / tensorflow / examples / get_started / regression / dnn_regression . py " > dnn_regression . py < / a > < / td > <nl> - < td > [ imports85 ] ( https : / / archive . ics . uci . edu / ml / datasets / automobile ) < / td > <nl> < td > Use the @ { tf . estimator . DNNRegressor } Estimator to train a <nl> regression model on discrete data with a deep neural network . < / td > <nl> < / tr > <nl> <nl> < tr > <nl> < td > < a href = " https : / / www . tensorflow . org / code / tensorflow / examples / get_started / regression / custom_regression . py " > custom_regression . py < / a > < / td > <nl> - < td > [ imports85 ] ( https : / / archive . ics . uci . edu / ml / datasets / automobile ) < / td > <nl> < td > Use @ { tf . estimator . Estimator } to train a customized dnn <nl> regression model . < / td > <nl> < / tr > <nl> During training , all three programs output the following information : <nl> For example , here ' s some possible output for the ` linear_regressor . py ` <nl> program : <nl> <nl> - ` ` ` bsh <nl> + ` ` ` None <nl> INFO : tensorflow : Saving checkpoints for 1 into / tmp / tmpAObiz9 / model . ckpt . <nl> INFO : tensorflow : loss = 161 . 308 , step = 1 <nl> INFO : tensorflow : global_step / sec : 1557 . 24 <nl> mmm a / tensorflow / go / op / wrappers . go <nl> ppp b / tensorflow / go / op / wrappers . go <nl> func MergeSummary ( scope * Scope , inputs [ ] tf . Output ) ( summary tf . Output ) { <nl> return op . Output ( 0 ) <nl> } <nl> <nl> - / / Encode audio data using the WAV file format . <nl> - / / <nl> - / / This operation will generate a string suitable to be saved out to create a . wav <nl> - / / audio file . It will be encoded in the 16 - bit PCM format . It takes in float <nl> - / / values in the range - 1 . 0f to 1 . 0f , and any outside that value will be clamped to <nl> - / / that range . <nl> - / / <nl> - / / ` audio ` is a 2 - D float Tensor of shape ` [ length , channels ] ` . <nl> - / / ` sample_rate ` is a scalar Tensor holding the rate to use ( e . g . 44100 ) . <nl> - / / <nl> - / / Arguments : <nl> - / / audio : 2 - D with shape ` [ length , channels ] ` . <nl> - / / sample_rate : Scalar containing the sample frequency . 
<nl> - / / <nl> - / / Returns 0 - D . WAV - encoded file contents . <nl> - func EncodeWav ( scope * Scope , audio tf . Output , sample_rate tf . Output ) ( contents tf . Output ) { <nl> - if scope . Err ( ) ! = nil { <nl> - return <nl> - } <nl> - opspec : = tf . OpSpec { <nl> - Type : " EncodeWav " , <nl> - Input : [ ] tf . Input { <nl> - audio , sample_rate , <nl> - } , <nl> - } <nl> - op : = scope . AddOperation ( opspec ) <nl> - return op . Output ( 0 ) <nl> - } <nl> - <nl> - / / The gradient operator for the SparseAdd op . <nl> - / / <nl> - / / The SparseAdd op calculates A + B , where A , B , and the sum are all represented <nl> - / / as ` SparseTensor ` objects . This op takes in the upstream gradient w . r . t . <nl> - / / non - empty values of the sum , and outputs the gradients w . r . t . the non - empty <nl> - / / values of A and B . <nl> - / / <nl> - / / Arguments : <nl> - / / backprop_val_grad : 1 - D with shape ` [ nnz ( sum ) ] ` . The gradient with respect to <nl> - / / the non - empty values of the sum . <nl> - / / a_indices : 2 - D . The ` indices ` of the ` SparseTensor ` A , size ` [ nnz ( A ) , ndims ] ` . <nl> - / / b_indices : 2 - D . The ` indices ` of the ` SparseTensor ` B , size ` [ nnz ( B ) , ndims ] ` . <nl> - / / sum_indices : 2 - D . The ` indices ` of the sum ` SparseTensor ` , size <nl> - / / ` [ nnz ( sum ) , ndims ] ` . <nl> - / / <nl> - / / Returns 1 - D with shape ` [ nnz ( A ) ] ` . The gradient with respect to the <nl> - / / non - empty values of A . 1 - D with shape ` [ nnz ( B ) ] ` . The gradient with respect to the <nl> - / / non - empty values of B . <nl> - func SparseAddGrad ( scope * Scope , backprop_val_grad tf . Output , a_indices tf . Output , b_indices tf . Output , sum_indices tf . Output ) ( a_val_grad tf . Output , b_val_grad tf . Output ) { <nl> - if scope . Err ( ) ! = nil { <nl> - return <nl> - } <nl> - opspec : = tf . OpSpec { <nl> - Type : " SparseAddGrad " , <nl> - Input : [ ] tf . Input { <nl> - backprop_val_grad , a_indices , b_indices , sum_indices , <nl> - } , <nl> - } <nl> - op : = scope . AddOperation ( opspec ) <nl> - return op . Output ( 0 ) , op . Output ( 1 ) <nl> - } <nl> - <nl> / / Read an element from the TensorArray into output ` value ` . <nl> / / <nl> / / Arguments : <nl> func IFFT3D ( scope * Scope , input tf . Output ) ( output tf . Output ) { <nl> return op . Output ( 0 ) <nl> } <nl> <nl> + / / Increments variable pointed to by ' resource ' until it reaches ' limit ' . <nl> + / / <nl> + / / Arguments : <nl> + / / resource : Should be from a scalar ` Variable ` node . <nl> + / / limit : If incrementing ref would bring it above limit , instead generates an <nl> + / / ' OutOfRange ' error . <nl> + / / <nl> + / / <nl> + / / Returns A copy of the input before increment . If nothing else modifies the <nl> + / / input , the values produced will all be distinct . <nl> + func ResourceCountUpTo ( scope * Scope , resource tf . Output , limit int64 , T tf . DataType ) ( output tf . Output ) { <nl> + if scope . Err ( ) ! = nil { <nl> + return <nl> + } <nl> + attrs : = map [ string ] interface { } { " limit " : limit , " T " : T } <nl> + opspec : = tf . OpSpec { <nl> + Type : " ResourceCountUpTo " , <nl> + Input : [ ] tf . Input { <nl> + resource , <nl> + } , <nl> + Attrs : attrs , <nl> + } <nl> + op : = scope . AddOperation ( opspec ) <nl> + return op . Output ( 0 ) <nl> + } <nl> + <nl> / / Looks up keys in a table , outputs the corresponding values . 
<nl> / / <nl> / / The tensor ` keys ` must of the same type as the keys of the table . <nl> func RFFT2D ( scope * Scope , input tf . Output , fft_length tf . Output ) ( output tf . Outp <nl> return op . Output ( 0 ) <nl> } <nl> <nl> + / / ResourceSparseApplyAdagradAttr is an optional argument to ResourceSparseApplyAdagrad . <nl> + type ResourceSparseApplyAdagradAttr func ( optionalAttr ) <nl> + <nl> + / / ResourceSparseApplyAdagradUseLocking sets the optional use_locking attribute to value . <nl> + / / <nl> + / / value : If ` True ` , updating of the var and accum tensors will be protected <nl> + / / by a lock ; otherwise the behavior is undefined , but may exhibit less <nl> + / / contention . <nl> + / / If not specified , defaults to false <nl> + func ResourceSparseApplyAdagradUseLocking ( value bool ) ResourceSparseApplyAdagradAttr { <nl> + return func ( m optionalAttr ) { <nl> + m [ " use_locking " ] = value <nl> + } <nl> + } <nl> + <nl> + / / Update relevant entries in ' * var ' and ' * accum ' according to the adagrad scheme . <nl> + / / <nl> + / / That is for rows we have grad for , we update var and accum as follows : <nl> + / / accum + = grad * grad <nl> + / / var - = lr * grad * ( 1 / sqrt ( accum ) ) <nl> + / / <nl> + / / Arguments : <nl> + / / var_ : Should be from a Variable ( ) . <nl> + / / accum : Should be from a Variable ( ) . <nl> + / / lr : Learning rate . Must be a scalar . <nl> + / / grad : The gradient . <nl> + / / indices : A vector of indices into the first dimension of var and accum . <nl> + / / <nl> + / / Returns the created operation . <nl> + func ResourceSparseApplyAdagrad ( scope * Scope , var_ tf . Output , accum tf . Output , lr tf . Output , grad tf . Output , indices tf . Output , optional . . . ResourceSparseApplyAdagradAttr ) ( o * tf . Operation ) { <nl> + if scope . Err ( ) ! = nil { <nl> + return <nl> + } <nl> + attrs : = map [ string ] interface { } { } <nl> + for _ , a : = range optional { <nl> + a ( attrs ) <nl> + } <nl> + opspec : = tf . OpSpec { <nl> + Type : " ResourceSparseApplyAdagrad " , <nl> + Input : [ ] tf . Input { <nl> + var_ , accum , lr , grad , indices , <nl> + } , <nl> + Attrs : attrs , <nl> + } <nl> + return scope . AddOperation ( opspec ) <nl> + } <nl> + <nl> / / Creates a dataset that zips together ` input_datasets ` . <nl> func ZipDataset ( scope * Scope , input_datasets [ ] tf . Output , output_types [ ] tf . DataType , output_shapes [ ] tf . Shape ) ( handle tf . Output ) { <nl> if scope . Err ( ) ! = nil { <nl> func LRN ( scope * Scope , input tf . Output , optional . . . LRNAttr ) ( output tf . Output ) <nl> return op . Output ( 0 ) <nl> } <nl> <nl> - / / ResourceSparseApplyAdagradAttr is an optional argument to ResourceSparseApplyAdagrad . <nl> - type ResourceSparseApplyAdagradAttr func ( optionalAttr ) <nl> - <nl> - / / ResourceSparseApplyAdagradUseLocking sets the optional use_locking attribute to value . <nl> - / / <nl> - / / value : If ` True ` , updating of the var and accum tensors will be protected <nl> - / / by a lock ; otherwise the behavior is undefined , but may exhibit less <nl> - / / contention . <nl> - / / If not specified , defaults to false <nl> - func ResourceSparseApplyAdagradUseLocking ( value bool ) ResourceSparseApplyAdagradAttr { <nl> - return func ( m optionalAttr ) { <nl> - m [ " use_locking " ] = value <nl> - } <nl> - } <nl> - <nl> - / / Update relevant entries in ' * var ' and ' * accum ' according to the adagrad scheme . 
<nl> - / / <nl> - / / That is for rows we have grad for , we update var and accum as follows : <nl> - / / accum + = grad * grad <nl> - / / var - = lr * grad * ( 1 / sqrt ( accum ) ) <nl> - / / <nl> - / / Arguments : <nl> - / / var_ : Should be from a Variable ( ) . <nl> - / / accum : Should be from a Variable ( ) . <nl> - / / lr : Learning rate . Must be a scalar . <nl> - / / grad : The gradient . <nl> - / / indices : A vector of indices into the first dimension of var and accum . <nl> - / / <nl> - / / Returns the created operation . <nl> - func ResourceSparseApplyAdagrad ( scope * Scope , var_ tf . Output , accum tf . Output , lr tf . Output , grad tf . Output , indices tf . Output , optional . . . ResourceSparseApplyAdagradAttr ) ( o * tf . Operation ) { <nl> - if scope . Err ( ) ! = nil { <nl> - return <nl> - } <nl> - attrs : = map [ string ] interface { } { } <nl> - for _ , a : = range optional { <nl> - a ( attrs ) <nl> - } <nl> - opspec : = tf . OpSpec { <nl> - Type : " ResourceSparseApplyAdagrad " , <nl> - Input : [ ] tf . Input { <nl> - var_ , accum , lr , grad , indices , <nl> - } , <nl> - Attrs : attrs , <nl> - } <nl> - return scope . AddOperation ( opspec ) <nl> - } <nl> - <nl> / / AvgPool3DGradAttr is an optional argument to AvgPool3DGrad . <nl> type AvgPool3DGradAttr func ( optionalAttr ) <nl> <nl> func StringToHashBucketStrong ( scope * Scope , input tf . Output , num_buckets int64 , <nl> return op . Output ( 0 ) <nl> } <nl> <nl> + / / Encode audio data using the WAV file format . <nl> + / / <nl> + / / This operation will generate a string suitable to be saved out to create a . wav <nl> + / / audio file . It will be encoded in the 16 - bit PCM format . It takes in float <nl> + / / values in the range - 1 . 0f to 1 . 0f , and any outside that value will be clamped to <nl> + / / that range . <nl> + / / <nl> + / / ` audio ` is a 2 - D float Tensor of shape ` [ length , channels ] ` . <nl> + / / ` sample_rate ` is a scalar Tensor holding the rate to use ( e . g . 44100 ) . <nl> + / / <nl> + / / Arguments : <nl> + / / audio : 2 - D with shape ` [ length , channels ] ` . <nl> + / / sample_rate : Scalar containing the sample frequency . <nl> + / / <nl> + / / Returns 0 - D . WAV - encoded file contents . <nl> + func EncodeWav ( scope * Scope , audio tf . Output , sample_rate tf . Output ) ( contents tf . Output ) { <nl> + if scope . Err ( ) ! = nil { <nl> + return <nl> + } <nl> + opspec : = tf . OpSpec { <nl> + Type : " EncodeWav " , <nl> + Input : [ ] tf . Input { <nl> + audio , sample_rate , <nl> + } , <nl> + } <nl> + op : = scope . AddOperation ( opspec ) <nl> + return op . Output ( 0 ) <nl> + } <nl> + <nl> + / / The gradient operator for the SparseAdd op . <nl> + / / <nl> + / / The SparseAdd op calculates A + B , where A , B , and the sum are all represented <nl> + / / as ` SparseTensor ` objects . This op takes in the upstream gradient w . r . t . <nl> + / / non - empty values of the sum , and outputs the gradients w . r . t . the non - empty <nl> + / / values of A and B . <nl> + / / <nl> + / / Arguments : <nl> + / / backprop_val_grad : 1 - D with shape ` [ nnz ( sum ) ] ` . The gradient with respect to <nl> + / / the non - empty values of the sum . <nl> + / / a_indices : 2 - D . The ` indices ` of the ` SparseTensor ` A , size ` [ nnz ( A ) , ndims ] ` . <nl> + / / b_indices : 2 - D . The ` indices ` of the ` SparseTensor ` B , size ` [ nnz ( B ) , ndims ] ` . <nl> + / / sum_indices : 2 - D . 
The ` indices ` of the sum ` SparseTensor ` , size <nl> + / / ` [ nnz ( sum ) , ndims ] ` . <nl> + / / <nl> + / / Returns 1 - D with shape ` [ nnz ( A ) ] ` . The gradient with respect to the <nl> + / / non - empty values of A . 1 - D with shape ` [ nnz ( B ) ] ` . The gradient with respect to the <nl> + / / non - empty values of B . <nl> + func SparseAddGrad ( scope * Scope , backprop_val_grad tf . Output , a_indices tf . Output , b_indices tf . Output , sum_indices tf . Output ) ( a_val_grad tf . Output , b_val_grad tf . Output ) { <nl> + if scope . Err ( ) ! = nil { <nl> + return <nl> + } <nl> + opspec : = tf . OpSpec { <nl> + Type : " SparseAddGrad " , <nl> + Input : [ ] tf . Input { <nl> + backprop_val_grad , a_indices , b_indices , sum_indices , <nl> + } , <nl> + } <nl> + op : = scope . AddOperation ( opspec ) <nl> + return op . Output ( 0 ) , op . Output ( 1 ) <nl> + } <nl> + <nl> / / FixedLengthRecordReaderV2Attr is an optional argument to FixedLengthRecordReaderV2 . <nl> type FixedLengthRecordReaderV2Attr func ( optionalAttr ) <nl> <nl> mmm a / tensorflow / python / BUILD <nl> ppp b / tensorflow / python / BUILD <nl> py_library ( <nl> srcs_version = " PY2AND3 " , <nl> deps = [ <nl> " : array_ops " , <nl> + " : constant_op " , <nl> " : data_flow_ops_gen " , <nl> + " : dtypes " , <nl> + " : errors " , <nl> " : framework_ops " , <nl> " : math_ops " , <nl> " : tensor_shape " , <nl> mmm a / tensorflow / python / eager / graph_callable . py <nl> ppp b / tensorflow / python / eager / graph_callable . py <nl> def _custom_getter ( getter = None , name = None , shape = None , dtype = dtypes . float32 , # <nl> graph_mode_resource = v . variable . handle <nl> if initializer is None : <nl> initializer = _default_initializer ( name , shape , dtype ) <nl> - resource_variable_ops . assign_variable_op ( <nl> - graph_mode_resource , initializer ( shape , dtype ) ) <nl> + resource_variable_ops . shape_safe_assign_variable_handle ( <nl> + graph_mode_resource , v . variable . shape , initializer ( shape , dtype ) ) <nl> return v . variable <nl> <nl> scope = variable_scope . get_variable_scope ( ) <nl> mmm a / tensorflow / python / eager / ops_test . py <nl> ppp b / tensorflow / python / eager / ops_test . py <nl> def testCompatibleSetShape ( self ) : <nl> x . set_shape ( tensor_shape . TensorShape ( [ None , 2 ] ) ) <nl> self . assertEqual ( x . get_shape ( ) , ( 1 , 2 ) ) <nl> <nl> + def testCastScalarToPrimitiveTypes ( self ) : <nl> + x = constant_op . constant ( 1 . 3 ) <nl> + self . assertIsInstance ( int ( x ) , int ) <nl> + self . assertEqual ( int ( x ) , 1 ) <nl> + self . assertIsInstance ( float ( x ) , float ) <nl> + self . assertAllClose ( float ( x ) , 1 . 3 ) <nl> + <nl> + def testCastNonScalarToPrimitiveTypesFails ( self ) : <nl> + x = constant_op . constant ( [ 1 . 3 , 2 ] ) <nl> + with self . assertRaises ( TypeError ) : <nl> + int ( x ) <nl> + with self . assertRaises ( TypeError ) : <nl> + float ( x ) <nl> + <nl> <nl> if __name__ = = ' __main__ ' : <nl> test . main ( ) <nl> mmm a / tensorflow / python / eager / pywrap_tensor . cc <nl> ppp b / tensorflow / python / eager / pywrap_tensor . cc <nl> static PyObject * EagerTensor_shape_tuple ( EagerTensor * self ) { <nl> return shape ; <nl> } <nl> <nl> + / / Getter for ` _rank ` . 
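<nl> + / /
<nl> + / / In effect this exposes TFE_TensorHandleNumDims to Python as a plain int .
<nl> + / / As an illustrative sketch ( t being an EagerTensor , a name assumed here ,
<nl> + / / not taken from this diff ) : t . _rank ( ) returns 2 for a matrix , without
<nl> + / / materializing the shape tuple first .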
<nl> + static PyObject * EagerTensor_rank ( EagerTensor * self ) { <nl> + # if PY_MAJOR_VERSION < 3 <nl> + return PyInt_FromLong ( TFE_TensorHandleNumDims ( self - > handle ) ) ; <nl> + # else <nl> + return PyLong_FromLong ( TFE_TensorHandleNumDims ( self - > handle ) ) ; <nl> + # endif <nl> + } <nl> + <nl> static PyObject * EagerTensor_tensor_handle ( EagerTensor * self , void * unused ) { <nl> Py_INCREF ( self - > handle_data ) ; <nl> return self - > handle_data ; <nl> static PyMethodDef EagerTensor_methods [ ] = { <nl> PyDoc_STR ( " _datatype_enum " ) } , <nl> { " _shape_tuple " , ( PyCFunction ) EagerTensor_shape_tuple , METH_NOARGS , <nl> PyDoc_STR ( " _shape_tuple " ) } , <nl> + { " _rank " , ( PyCFunction ) EagerTensor_rank , METH_NOARGS , PyDoc_STR ( " _rank " ) } , <nl> { " _copy_to_device " , ( PyCFunction ) EagerTensor_copy_to_device , <nl> METH_VARARGS | METH_KEYWORDS , PyDoc_STR ( " _copy_to_device " ) } , <nl> { nullptr , nullptr } , <nl> mmm a / tensorflow / python / estimator / canned / head . py <nl> ppp b / tensorflow / python / estimator / canned / head . py <nl> def _multi_class_head_with_softmax_cross_entropy_loss ( n_classes , <nl> ` tf . feature_column . numeric_column ` defining feature column representing <nl> weights . It is used to down weight or boost examples during training . It <nl> will be multiplied by the loss of the example . <nl> - label_vocabulary : A list of strings represents possible label values . If it <nl> - is not given , that means labels are already encoded as integer within <nl> - [ 0 , n_classes ) . If given , labels must be string type and have any value in <nl> - ` label_vocabulary ` . Also there will be errors if vocabulary is not <nl> - provided and labels are string . <nl> + label_vocabulary : A list or tuple of strings representing possible label <nl> + values . If it is not given , that means labels are already encoded as an <nl> + integer within [ 0 , n_classes ) . If given , labels must be of string type and <nl> + have any value in ` label_vocabulary ` . Note that errors will be raised if <nl> + ` label_vocabulary ` is not provided but labels are strings . <nl> name : name of the head . If provided , summary and metrics keys will be <nl> suffixed by ` " / " + name ` . Also used as ` name_scope ` when creating ops . <nl> <nl> def _multi_class_head_with_softmax_cross_entropy_loss ( n_classes , <nl> " " " <nl> if label_vocabulary is not None and not isinstance ( label_vocabulary , <nl> ( list , tuple ) ) : <nl> - raise ValueError ( ' label_vocabulary should be a list . Given type : { } ' . format ( <nl> - type ( label_vocabulary ) ) ) <nl> + raise ValueError ( <nl> + ' label_vocabulary should be a list or a tuple . Given type : { } ' . format ( <nl> + type ( label_vocabulary ) ) ) <nl> <nl> return _MultiClassHeadWithSoftmaxCrossEntropyLoss ( n_classes , weight_column , <nl> label_vocabulary , name ) <nl> def _label_ids ( self , labels ) : <nl> " " " Converts labels to integer id space . " " " <nl> if self . _label_vocabulary is None : <nl> if not labels . dtype . is_integer : <nl> - raise ValueError ( ' Labels dtype should be integer ' <nl> - ' Instead got % s . ' % labels . dtype ) <nl> + raise ValueError ( ' Labels dtype should be integer . Instead got { } . ' . <nl> + format ( labels . dtype ) ) <nl> label_ids = labels <nl> else : <nl> if labels . dtype ! = dtypes . string : <nl> def create_estimator_spec ( <nl> <nl> # Train . <nl> if train_op_fn is None : <nl> - raise ValueError ( ' train_op_fn can not be None . 
' ) <nl> + raise ValueError ( ' train_op_fn cannot be None . ' ) <nl> with ops . name_scope ( ' ' ) : <nl> summary . scalar ( <nl> _summary_key ( self . _name , metric_keys . MetricKeys . LOSS ) , <nl> def _binary_logistic_head_with_sigmoid_cross_entropy_loss ( <nl> generated for each threshold value . This threshold is applied to the <nl> logistic values to determine the binary classification ( i . e . , above the <nl> threshold is ` true ` , below is ` false ` . <nl> - label_vocabulary : A list of strings represents possible label values . If it <nl> - is not given , that means labels are already encoded within [ 0 , 1 ] . If <nl> - given , labels must be string type and have any value in <nl> - ` label_vocabulary ` . Also there will be errors if vocabulary is not <nl> - provided and labels are string . <nl> + label_vocabulary : A list or tuple of strings representing possible label <nl> + values . If it is not given , that means labels are already encoded within <nl> + [ 0 , 1 ] . If given , labels must be string type and have any value in <nl> + ` label_vocabulary ` . Note that errors will be raised if ` label_vocabulary ` <nl> + is not provided but labels are strings . <nl> name : name of the head . If provided , summary and metrics keys will be <nl> suffixed by ` " / " + name ` . Also used as ` name_scope ` when creating ops . <nl> <nl> def _binary_logistic_head_with_sigmoid_cross_entropy_loss ( <nl> thresholds = tuple ( thresholds ) if thresholds else tuple ( ) <nl> if label_vocabulary is not None and not isinstance ( label_vocabulary , <nl> ( list , tuple ) ) : <nl> - raise ValueError ( ' label_vocabulary should be a list . Given type : { } ' . format ( <nl> - type ( label_vocabulary ) ) ) <nl> + raise ValueError ( <nl> + ' label_vocabulary should be a list or tuple . Given type : { } ' . format ( <nl> + type ( label_vocabulary ) ) ) <nl> <nl> for threshold in thresholds : <nl> if ( threshold < = 0 . 0 ) or ( threshold > = 1 . 0 ) : <nl> - raise ValueError ( ' thresholds not in ( 0 , 1 ) : % s . ' % ( thresholds , ) ) <nl> + raise ValueError ( ' thresholds not in ( 0 , 1 ) : { } . ' . format ( ( thresholds , ) ) ) <nl> return _BinaryLogisticHeadWithSigmoidCrossEntropyLoss ( <nl> weight_column = weight_column , <nl> thresholds = thresholds , <nl> mmm a / tensorflow / python / estimator / estimator . py <nl> ppp b / tensorflow / python / estimator / estimator . py <nl> <nl> from tensorflow . python . training import training_util <nl> from tensorflow . python . util import compat <nl> from tensorflow . python . util import nest <nl> - from tensorflow . python . util import tf_inspect <nl> <nl> <nl> _VALID_MODEL_FN_ARGS = set ( <nl> def _verify_model_fn_args ( model_fn , params ) : <nl> logging . warning ( ' Estimator \ ' s model_fn ( % s ) includes params ' <nl> ' argument , but params are not passed to Estimator . ' , <nl> model_fn ) <nl> - if tf_inspect . ismethod ( model_fn ) : <nl> - if ' self ' in args : <nl> - args . remove ( ' self ' ) <nl> non_valid_args = list ( args - _VALID_MODEL_FN_ARGS ) <nl> if non_valid_args : <nl> raise ValueError ( ' model_fn ( % s ) has following not expected args : % s ' % <nl> mmm a / tensorflow / python / estimator / util . py <nl> ppp b / tensorflow / python / estimator / util . py <nl> <nl> from __future__ import division <nl> from __future__ import print_function <nl> <nl> + import functools <nl> + <nl> from tensorflow . python . util import tf_decorator <nl> from tensorflow . python . 
util import tf_inspect <nl> <nl> <nl> + def _is_bounded_method ( fn ) : <nl> + return tf_inspect . ismethod ( fn ) and ( fn . __self__ is not None ) <nl> + <nl> + <nl> + def _is_callable_object ( obj ) : <nl> + return hasattr ( obj , ' __call__ ' ) and tf_inspect . ismethod ( obj . __call__ ) <nl> + <nl> + <nl> def fn_args ( fn ) : <nl> " " " Get argument names for function - like object . <nl> <nl> def fn_args ( fn ) : <nl> ValueError : if partial function has positionally bound arguments <nl> " " " <nl> _ , fn = tf_decorator . unwrap ( fn ) <nl> - <nl> - # Handle callables . <nl> - if hasattr ( fn , ' __call__ ' ) and tf_inspect . ismethod ( fn . __call__ ) : <nl> - return tuple ( tf_inspect . getargspec ( fn . __call__ ) . args ) <nl> - <nl> - # Handle functools . partial and similar objects . <nl> - if hasattr ( fn , ' func ' ) and hasattr ( fn , ' keywords ' ) and hasattr ( fn , ' args ' ) : <nl> - # Handle nested partial . <nl> - original_args = fn_args ( fn . func ) <nl> - if not original_args : <nl> - return tuple ( ) <nl> - <nl> - return tuple ( [ <nl> - arg for arg in original_args [ len ( fn . args ) : ] <nl> - if arg not in set ( ( fn . keywords or { } ) . keys ( ) ) <nl> - ] ) <nl> - <nl> - # Handle function . <nl> - return tuple ( tf_inspect . getargspec ( fn ) . args ) <nl> + if isinstance ( fn , functools . partial ) : <nl> + args = fn_args ( fn . func ) <nl> + args = [ a for a in args [ len ( fn . args ) : ] if a not in ( fn . keywords or [ ] ) ] <nl> + else : <nl> + if _is_callable_object ( fn ) : <nl> + fn = fn . __call__ <nl> + args = tf_inspect . getargspec ( fn ) . args <nl> + if _is_bounded_method ( fn ) : <nl> + args . remove ( ' self ' ) <nl> + return tuple ( args ) <nl> mmm a / tensorflow / python / estimator / util_test . py <nl> ppp b / tensorflow / python / estimator / util_test . py <nl> class Foo ( object ) : <nl> def __call__ ( self , a , b ) : <nl> return a + b <nl> <nl> - self . assertEqual ( ( ' self ' , ' a ' , ' b ' ) , util . fn_args ( Foo ( ) ) ) <nl> + self . assertEqual ( ( ' a ' , ' b ' ) , util . fn_args ( Foo ( ) ) ) <nl> + <nl> + def test_bounded_method ( self ) : <nl> + <nl> + class Foo ( object ) : <nl> + <nl> + def bar ( self , a , b ) : <nl> + return a + b <nl> + <nl> + self . assertEqual ( ( ' a ' , ' b ' ) , util . fn_args ( Foo ( ) . bar ) ) <nl> <nl> def test_partial_function ( self ) : <nl> expected_test_arg = 123 <nl> mmm a / tensorflow / python / framework / ops . py <nl> ppp b / tensorflow / python / framework / ops . py <nl> def _shape_tuple ( self ) : <nl> return None <nl> return tuple ( shape ) <nl> <nl> + def _rank ( self ) : <nl> + " " " Integer rank of this Tensor , if known , else None . <nl> + <nl> + Returns : <nl> + Integer rank or None <nl> + " " " <nl> + return self . _shape . ndims <nl> + <nl> def get_shape ( self ) : <nl> " " " Alias of Tensor . shape . " " " <nl> return self . shape <nl> def numpy ( self ) : <nl> raise ValueError ( " Resource handles are not convertible to numpy . " ) <nl> return self . cpu ( ) . _numpy ( ) # pylint : disable = protected - access <nl> <nl> + # __int__ and __float__ may copy the tensor to CPU and <nl> + # only work for scalars ; values are cast as per numpy . <nl> + def __int__ ( self ) : <nl> + return int ( self . numpy ( ) ) <nl> + <nl> + def __float__ ( self ) : <nl> + return float ( self . numpy ( ) ) <nl> + <nl> def __array__ ( self ) : <nl> return np . array ( self . 
numpy ( ) ) <nl> <nl> def _shape_tuple ( self ) : <nl> " " " <nl> raise NotImplementedError ( ) <nl> <nl> + def _rank ( self ) : <nl> + " " " Integer rank of this Tensor . <nl> + <nl> + Unlike regular Tensors , the rank is always known for EagerTensors . <nl> + <nl> + This is more performant than len ( self . _shape_tuple ( ) ) <nl> + <nl> + Returns : <nl> + Integer rank <nl> + " " " <nl> + raise NotImplementedError ( ) <nl> + <nl> def _copy_to_device ( self , context , device ) : # pylint : disable = redefined - outer - name <nl> raise NotImplementedError ( ) <nl> <nl> class GraphKeys ( object ) : <nl> <nl> @ decorator_utils . classproperty <nl> def VARIABLES ( cls ) : # pylint : disable = no - self - argument <nl> - logging . warning ( " VARIABLES collection name is deprecated , " <nl> - " please use GLOBAL_VARIABLES instead ; " <nl> - " VARIABLES will be removed after 2017 - 03 - 02 . " ) <nl> + logging . log_first_n ( logging . WARN , <nl> + " VARIABLES collection name is deprecated , please use " <nl> + " GLOBAL_VARIABLES instead ; VARIABLES will be removed " <nl> + " after 2017 - 03 - 02 . " , 1 ) <nl> return cls . GLOBAL_VARIABLES <nl> <nl> <nl> mmm a / tensorflow / python / kernel_tests / BUILD <nl> ppp b / tensorflow / python / kernel_tests / BUILD <nl> cuda_py_test ( <nl> " / / tensorflow / python : data_flow_ops_gen " , <nl> " / / tensorflow / python : distributed_framework_test_lib " , <nl> " / / tensorflow / python : framework_for_generated_wrappers " , <nl> + " / / tensorflow / python : framework_test_lib " , <nl> " / / tensorflow / python : gradients " , <nl> + " / / tensorflow / python : init_ops " , <nl> " / / tensorflow / python : math_ops " , <nl> " / / tensorflow / python : nn_grad " , <nl> " / / tensorflow / python : training " , <nl> " / / tensorflow / python : tensor_array_grad " , <nl> " / / tensorflow / python : tensor_array_ops " , <nl> " / / tensorflow / python : variables " , <nl> + " / / tensorflow / python : variable_scope " , <nl> + " / / tensorflow / python / eager : backprop " , <nl> + " / / tensorflow / python / eager : context " , <nl> ] , <nl> flaky = 1 , # create_local_cluster sometimes times out . <nl> ) <nl> mmm a / tensorflow / python / kernel_tests / resource_variable_ops_test . py <nl> ppp b / tensorflow / python / kernel_tests / resource_variable_ops_test . py <nl> <nl> from tensorflow . python . ops import init_ops <nl> from tensorflow . python . ops import math_ops <nl> from tensorflow . python . ops import resource_variable_ops <nl> + from tensorflow . python . ops import state_ops <nl> from tensorflow . python . ops import variable_scope <nl> from tensorflow . python . ops import variables <nl> from tensorflow . python . platform import test <nl> def testHandleNumpy ( self ) : <nl> resource_variable_ops . ResourceVariable ( <nl> 1 . 0 , name = " handle - numpy " ) . handle . numpy ( ) <nl> <nl> + def testCountUpTo ( self ) : <nl> + with context . eager_mode ( ) : <nl> + v = resource_variable_ops . ResourceVariable ( 0 , name = " upto " ) <nl> + self . assertAllEqual ( v . count_up_to ( 1 ) , 0 ) <nl> + with self . assertRaises ( errors . OutOfRangeError ) : <nl> + v . count_up_to ( 1 ) <nl> + <nl> + def testCountUpToFunction ( self ) : <nl> + with context . eager_mode ( ) : <nl> + v = resource_variable_ops . ResourceVariable ( 0 , name = " upto " ) <nl> + self . assertAllEqual ( state_ops . count_up_to ( v , 1 ) , 0 ) <nl> + with self . assertRaises ( errors . OutOfRangeError ) : <nl> + state_ops . 
count_up_to ( v , 1 ) <nl> + <nl> @ test_util . run_in_graph_and_eager_modes ( ) <nl> def testInitFnDtype ( self ) : <nl> v = resource_variable_ops . ResourceVariable ( <nl> def testAssignDifferentShapesEager ( self ) : <nl> with variable_scope . variable_scope ( " foo " ) : <nl> var = variable_scope . get_variable ( " x " , shape = [ 1 , 1 ] , <nl> dtype = dtypes . float32 ) <nl> - assign = var . assign ( np . zeros ( shape = [ 2 , 2 ] ) ) <nl> - self . evaluate ( assign ) <nl> + with self . assertRaisesRegexp ( ValueError , <nl> + " Shapes . * and . * are incompatible " ) : <nl> + assign = var . assign ( np . zeros ( shape = [ 2 , 2 ] ) ) <nl> + self . evaluate ( assign ) <nl> <nl> def testDtypeAfterFromProto ( self ) : <nl> v = resource_variable_ops . ResourceVariable ( 2 . 0 ) <nl> mmm a / tensorflow / python / kernel_tests / tensor_array_ops_test . py <nl> ppp b / tensorflow / python / kernel_tests / tensor_array_ops_test . py <nl> <nl> <nl> from tensorflow . core . protobuf import config_pb2 <nl> from tensorflow . python . client import session as session_lib <nl> + from tensorflow . python . eager import backprop <nl> + from tensorflow . python . eager import context <nl> from tensorflow . python . framework import constant_op <nl> from tensorflow . python . framework import dtypes <nl> from tensorflow . python . framework import ops <nl> from tensorflow . python . framework import tensor_shape <nl> + from tensorflow . python . framework import test_util <nl> from tensorflow . python . ops import array_ops <nl> from tensorflow . python . ops import control_flow_ops <nl> from tensorflow . python . ops import gen_data_flow_ops <nl> from tensorflow . python . ops import gradients_impl <nl> + from tensorflow . python . ops import init_ops <nl> from tensorflow . python . ops import math_ops <nl> from tensorflow . python . ops import tensor_array_grad <nl> from tensorflow . python . ops import tensor_array_ops <nl> + from tensorflow . python . ops import variable_scope <nl> from tensorflow . python . ops import variables <nl> import tensorflow . python . ops . nn_grad # pylint : disable = unused - import <nl> from tensorflow . python . platform import test <nl> def _converter ( x ) : <nl> return _converter <nl> <nl> <nl> + def _make_ta ( size , name , dtype = dtypes . float32 , infer_shape = False ) : <nl> + return tensor_array_ops . TensorArray ( <nl> + dtype = dtype , tensor_array_name = name , size = size , infer_shape = infer_shape ) <nl> + <nl> + <nl> class TensorArrayTest ( test . TestCase ) : <nl> <nl> @ classmethod <nl> def tearDownClass ( cls ) : <nl> super ( TensorArrayTest , cls ) . tearDownClass ( ) <nl> session_lib . Session . reset ( cls . _workers [ 0 ] . target ) <nl> <nl> + @ test_util . run_in_graph_and_eager_modes ( ) <nl> def testTensorArrayWriteRead ( self ) : <nl> - with self . test_session ( use_gpu = True ) as session : <nl> + with self . test_session ( use_gpu = True ) : <nl> ta = tensor_array_ops . TensorArray ( <nl> dtype = dtypes . float32 , <nl> tensor_array_name = " foo " , <nl> def testTensorArrayWriteRead ( self ) : <nl> r1 = w2 . read ( 1 ) <nl> r2 = w2 . read ( 2 ) <nl> <nl> - d0 , d1 , d2 = session . run ( [ r0 , r1 , r2 ] ) <nl> + d0 , d1 , d2 = self . evaluate ( [ r0 , r1 , r2 ] ) <nl> self . assertAllEqual ( [ [ 4 . 0 , 5 . 0 ] ] , d0 ) <nl> self . assertAllEqual ( [ [ 1 . 0 ] ] , d1 ) <nl> self . assertAllEqual ( - 3 . 0 , d2 ) <nl> def _testTensorArrayWritePack ( self , tf_dtype ) : <nl> <nl> c0 = w2 . stack ( ) <nl> <nl> + c0 = self . 
evaluate ( c0 ) <nl> self . assertAllEqual ( <nl> - convert ( [ [ [ 4 . 0 , 5 . 0 ] ] , [ [ 6 . 0 , 7 . 0 ] ] , [ [ 8 . 0 , 9 . 0 ] ] ] ) , c0 . eval ( ) ) <nl> + convert ( [ [ [ 4 . 0 , 5 . 0 ] ] , [ [ 6 . 0 , 7 . 0 ] ] , [ [ 8 . 0 , 9 . 0 ] ] ] ) , c0 ) <nl> <nl> def _testTensorArrayWritePackMaybeLegacy ( self ) : <nl> self . _testTensorArrayWritePack ( dtypes . float32 ) <nl> def _testTensorArrayWritePackMaybeLegacy ( self ) : <nl> self . _testTensorArrayWritePack ( dtypes . complex128 ) <nl> self . _testTensorArrayWritePack ( dtypes . string ) <nl> <nl> + @ test_util . run_in_graph_and_eager_modes ( ) <nl> def testTensorArrayWritePack ( self ) : <nl> self . _testTensorArrayWritePackMaybeLegacy ( ) <nl> <nl> + @ test_util . run_in_graph_and_eager_modes ( ) <nl> def testEmptyTensorArrayPack ( self ) : <nl> with self . test_session ( use_gpu = True ) : <nl> ta = tensor_array_ops . TensorArray ( <nl> def testEmptyTensorArrayPack ( self ) : <nl> <nl> c0 = w2 . stack ( ) <nl> <nl> - self . assertAllEqual ( [ 3 , 0 , 1 ] , c0 . eval ( ) . shape ) <nl> + c0 = self . evaluate ( c0 ) <nl> + self . assertAllEqual ( [ 3 , 0 , 1 ] , c0 . shape ) <nl> <nl> def _testTensorArrayWriteConcat ( self , tf_dtype ) : <nl> with self . test_session ( use_gpu = True ) : <nl> def _testTensorArrayWriteConcat ( self , tf_dtype ) : <nl> <nl> c0 = w2 . concat ( ) <nl> <nl> + c0 = self . evaluate ( c0 ) <nl> self . assertAllEqual ( <nl> convert ( [ [ 4 . 0 , 5 . 0 ] , [ 104 . 0 , 105 . 0 ] , [ 204 . 0 , 205 . 0 ] , [ 6 . 0 , 7 . 0 ] , <nl> - [ 106 . 0 , 107 . 0 ] , [ 8 . 0 , 9 . 0 ] ] ) , c0 . eval ( ) ) <nl> + [ 106 . 0 , 107 . 0 ] , [ 8 . 0 , 9 . 0 ] ] ) , c0 ) <nl> <nl> + @ test_util . run_in_graph_and_eager_modes ( ) <nl> def testTensorArrayWriteConcat ( self ) : <nl> self . _testTensorArrayWriteConcat ( dtypes . float32 ) <nl> self . _testTensorArrayWriteConcat ( dtypes . float64 ) <nl> def _testTensorArrayPackNotAllValuesAvailableFails ( self ) : <nl> <nl> with self . assertRaisesOpError ( " Could not read from TensorArray index 1 " <nl> " because it has not yet been written to . " ) : <nl> - ta . write ( 0 , [ [ 4 . 0 , 5 . 0 ] ] ) . stack ( ) . eval ( ) <nl> + self . evaluate ( ta . write ( 0 , [ [ 4 . 0 , 5 . 0 ] ] ) . stack ( ) ) <nl> <nl> + @ test_util . run_in_graph_and_eager_modes ( ) <nl> def testTensorArrayPackNotAllValuesAvailableFails ( self ) : <nl> self . _testTensorArrayPackNotAllValuesAvailableFails ( ) <nl> <nl> def _testTensorArrayUnpackRead ( self , tf_dtype ) : <nl> - with self . test_session ( use_gpu = True ) as session : <nl> - ta = tensor_array_ops . TensorArray ( <nl> - dtype = tf_dtype , tensor_array_name = " foo " , size = 3 ) <nl> - <nl> + with self . test_session ( use_gpu = True ) : <nl> convert = _make_converter ( tf_dtype ) <nl> <nl> + ta = _make_ta ( 3 , " foo " , dtype = tf_dtype ) <nl> # Unpack a vector into scalars <nl> w0 = ta . unstack ( convert ( [ 1 . 0 , 2 . 0 , 3 . 0 ] ) ) <nl> r0 = w0 . read ( 0 ) <nl> r1 = w0 . read ( 1 ) <nl> r2 = w0 . read ( 2 ) <nl> <nl> - d0 , d1 , d2 = session . run ( [ r0 , r1 , r2 ] ) <nl> + d0 , d1 , d2 = self . evaluate ( [ r0 , r1 , r2 ] ) <nl> self . assertAllEqual ( convert ( 1 . 0 ) , d0 ) <nl> self . assertAllEqual ( convert ( 2 . 0 ) , d1 ) <nl> self . assertAllEqual ( convert ( 3 . 0 ) , d2 ) <nl> <nl> - ta = tensor_array_ops . TensorArray ( <nl> - dtype = tf_dtype , tensor_array_name = " foo " , size = 3 ) <nl> - <nl> # Unpack a matrix into vectors <nl> w1 = ta . unstack ( convert ( [ [ 1 . 0 , 1 . 1 ] , [ 2 . 0 , 2 . 1 ] , [ 3 . 
0 , 3 . 1 ] ] ) ) <nl> r0 = w1 . read ( 0 ) <nl> r1 = w1 . read ( 1 ) <nl> r2 = w1 . read ( 2 ) <nl> <nl> - d0 , d1 , d2 = session . run ( [ r0 , r1 , r2 ] ) <nl> + d0 , d1 , d2 = self . evaluate ( [ r0 , r1 , r2 ] ) <nl> self . assertAllEqual ( convert ( [ 1 . 0 , 1 . 1 ] ) , d0 ) <nl> self . assertAllEqual ( convert ( [ 2 . 0 , 2 . 1 ] ) , d1 ) <nl> self . assertAllEqual ( convert ( [ 3 . 0 , 3 . 1 ] ) , d2 ) <nl> <nl> - # Reset ta because we ' re going to change the shape , else shape <nl> - # inference will throw an error . <nl> - ta = tensor_array_ops . TensorArray ( <nl> - dtype = tf_dtype , tensor_array_name = " foo " , size = 3 ) <nl> - <nl> # Try unpacking an empty matrix , which should not cause an error . <nl> w2 = ta . unstack ( convert ( [ [ ] , [ ] , [ ] ] ) ) <nl> r0 = w2 . read ( 0 ) <nl> r1 = w2 . read ( 1 ) <nl> r2 = w2 . read ( 2 ) <nl> <nl> - d0 , d1 , d2 = session . run ( [ r0 , r1 , r2 ] ) <nl> + d0 , d1 , d2 = self . evaluate ( [ r0 , r1 , r2 ] ) <nl> self . assertAllEqual ( convert ( [ ] ) , d0 ) <nl> self . assertAllEqual ( convert ( [ ] ) , d1 ) <nl> self . assertAllEqual ( convert ( [ ] ) , d2 ) <nl> def _testTensorArrayUnpackReadMaybeLegacy ( self ) : <nl> self . _testTensorArrayUnpackRead ( dtypes . complex128 ) <nl> self . _testTensorArrayUnpackRead ( dtypes . string ) <nl> <nl> + @ test_util . run_in_graph_and_eager_modes ( ) <nl> def testTensorArrayUnpackRead ( self ) : <nl> self . _testTensorArrayUnpackReadMaybeLegacy ( ) <nl> <nl> def _testTensorArraySplitRead ( self , tf_dtype ) : <nl> - with self . test_session ( use_gpu = True ) as session : <nl> - ta = tensor_array_ops . TensorArray ( <nl> - dtype = tf_dtype , tensor_array_name = " foo " , size = 3 , infer_shape = False ) <nl> - <nl> + with self . test_session ( use_gpu = True ) : <nl> convert = _make_converter ( tf_dtype ) <nl> <nl> # Split an empty vector <nl> + ta = _make_ta ( 3 , " foo " , dtype = tf_dtype ) <nl> lengths = constant_op . constant ( [ 0 , 0 , 0 ] ) <nl> w0 = ta . split ( convert ( [ ] ) , lengths = lengths ) <nl> r0 = w0 . read ( 0 ) <nl> r1 = w0 . read ( 1 ) <nl> r2 = w0 . read ( 2 ) <nl> <nl> - d0 , d1 , d2 = session . run ( [ r0 , r1 , r2 ] ) <nl> + d0 , d1 , d2 = self . evaluate ( [ r0 , r1 , r2 ] ) <nl> self . assertAllEqual ( convert ( [ ] ) , d0 ) <nl> self . assertAllEqual ( convert ( [ ] ) , d1 ) <nl> self . assertAllEqual ( convert ( [ ] ) , d2 ) <nl> def _testTensorArraySplitRead ( self , tf_dtype ) : <nl> r1 = w0 . read ( 1 ) <nl> r2 = w0 . read ( 2 ) <nl> <nl> - d0 , d1 , d2 = session . run ( [ r0 , r1 , r2 ] ) <nl> + d0 , d1 , d2 = self . evaluate ( [ r0 , r1 , r2 ] ) <nl> self . assertAllEqual ( convert ( [ 1 . 0 , 2 . 0 ] ) , d0 ) <nl> self . assertAllEqual ( convert ( [ ] ) , d1 ) <nl> self . assertAllEqual ( convert ( [ 3 . 0 ] ) , d2 ) <nl> def _testTensorArraySplitRead ( self , tf_dtype ) : <nl> r1 = w0 . read ( 1 ) <nl> r2 = w0 . read ( 2 ) <nl> <nl> - d0 , d1 , d2 = session . run ( [ r0 , r1 , r2 ] ) <nl> + d0 , d1 , d2 = self . evaluate ( [ r0 , r1 , r2 ] ) <nl> self . assertAllEqual ( convert ( [ [ 1 . 0 , 101 . 0 ] , [ 2 . 0 , 201 . 0 ] ] ) , d0 ) <nl> self . assertAllEqual ( convert ( [ ] ) . reshape ( 0 , 2 ) , d1 ) <nl> self . assertAllEqual ( convert ( [ [ 3 . 0 , 301 . 0 ] ] ) , d2 ) <nl> <nl> + @ test_util . run_in_graph_and_eager_modes ( ) <nl> def testTensorArraySplitRead ( self ) : <nl> self . _testTensorArraySplitRead ( dtypes . float32 ) <nl> self . _testTensorArraySplitRead ( dtypes . 
float64 ) <nl> def testTensorGradAccessTwiceReceiveSameObject ( self ) : <nl> self . assertAllEqual ( t_g_ta_0 , t_g_ta_1 ) <nl> self . assertAllEqual ( [ [ 4 . 0 , 5 . 0 ] ] , d_r1_0 ) <nl> <nl> + @ test_util . run_in_graph_and_eager_modes ( ) <nl> def testTensorArrayWriteWrongIndexOrDataTypeFails ( self ) : <nl> with self . test_session ( use_gpu = True ) : <nl> - ta = tensor_array_ops . TensorArray ( <nl> - dtype = dtypes . float32 , tensor_array_name = " foo " , size = 3 ) <nl> - <nl> + ta = _make_ta ( 3 , " foo " , dtype = dtypes . float32 ) <nl> + in_graph_mode = context . in_graph_mode ( ) <nl> # Test writing the wrong datatype <nl> - with self . assertRaisesOpError ( <nl> - " TensorArray dtype is float but Op is trying to write dtype string " ) : <nl> - ta . write ( - 1 , " wrong_type_scalar " ) . flow . eval ( ) <nl> - <nl> - # Test writing to a negative index <nl> - with self . assertRaisesOpError ( <nl> - " Tried to write to index - 1 but array is not " <nl> - " resizeable and size is : 3 " ) : <nl> - ta . write ( - 1 , 3 . 0 ) . flow . eval ( ) <nl> + if in_graph_mode : <nl> + with self . assertRaisesOpError ( <nl> + " TensorArray dtype is float but Op is trying to write " <nl> + " dtype string " ) : <nl> + self . evaluate ( ta . write ( 0 , " wrong_type_scalar " ) . flow ) <nl> + else : <nl> + with self . assertRaisesOpError ( <nl> + " TensorArray dtype is float32 but Op is trying to write " <nl> + " dtype string " ) : <nl> + self . evaluate ( ta . write ( 0 , " wrong_type_scalar " ) . flow ) <nl> + <nl> + if context . in_graph_mode ( ) : <nl> + with self . assertRaisesOpError ( <nl> + " Tried to write to index - 1 but array is not " <nl> + " resizeable and size is : 3 " ) : <nl> + self . evaluate ( ta . write ( - 1 , 3 . 0 ) . flow ) <nl> + else : <nl> + with self . assertRaisesOpError ( <nl> + r " Writing to negative indices \ ( index - 1 \ ) is not allowed . " ) : <nl> + self . evaluate ( ta . write ( - 1 , 3 . 0 ) . flow ) <nl> <nl> # Test reading from too large an index <nl> with self . assertRaisesOpError ( <nl> " Tried to write to index 3 but array is not " <nl> " resizeable and size is : 3 " ) : <nl> - ta . write ( 3 , 3 . 0 ) . flow . eval ( ) <nl> + self . evaluate ( ta . write ( 3 , 3 . 0 ) . flow ) <nl> <nl> + @ test_util . run_in_graph_and_eager_modes ( ) <nl> def testTensorArrayReadWrongIndexOrDataTypeFails ( self ) : <nl> with self . test_session ( use_gpu = True ) : <nl> - ta = tensor_array_ops . TensorArray ( <nl> - dtype = dtypes . float32 , tensor_array_name = " foo " , size = 3 ) <nl> + ta = _make_ta ( 3 , " foo " , dtype = dtypes . float32 ) <nl> <nl> w0 = ta . write ( 0 , [ [ 4 . 0 , 5 . 0 ] ] ) <nl> <nl> - # Test reading wrong datatype <nl> - r0_bad = gen_data_flow_ops . _tensor_array_read_v3 ( <nl> - handle = w0 . handle , index = 0 , dtype = dtypes . float64 , flow_in = w0 . flow ) <nl> - with self . assertRaisesOpError ( <nl> - " TensorArray dtype is float but Op requested dtype double . " ) : <nl> - r0_bad . eval ( ) <nl> + # Test reading wrong datatype , which is only possible in graph mode <nl> + if context . in_graph_mode ( ) : <nl> + r0_bad = gen_data_flow_ops . _tensor_array_read_v3 ( <nl> + handle = w0 . handle , index = 0 , dtype = dtypes . float64 , flow_in = w0 . flow ) <nl> + with self . assertRaisesOpError ( <nl> + " TensorArray dtype is float but Op requested dtype double . " ) : <nl> + r0_bad . eval ( ) <nl> <nl> # Test reading from a different index than the one we wrote to <nl> - r1 = w0 . read ( 1 ) <nl> with self . 
assertRaisesOpError ( <nl> " Could not read from TensorArray index 1 because " <nl> " it has not yet been written to . " ) : <nl> - r1 . eval ( ) <nl> + self . evaluate ( w0 . read ( 1 ) ) <nl> <nl> - # Test reading from a negative index <nl> - with self . assertRaisesOpError ( <nl> - r " Tried to read from index - 1 but array size is : 3 " ) : <nl> - ta . read ( - 1 ) . eval ( ) <nl> + # Test reading from a negative index , which is not allowed <nl> + if context . in_graph_mode ( ) : <nl> + with self . assertRaisesOpError ( <nl> + r " Tried to read from index - 1 but array size is : 3 " ) : <nl> + self . evaluate ( ta . read ( - 1 ) ) <nl> + else : <nl> + with self . assertRaisesOpError ( <nl> + r " Reading from negative indices \ ( index - 1 \ ) is not allowed . " ) : <nl> + self . evaluate ( ta . read ( - 1 ) ) <nl> <nl> # Test reading from too large an index <nl> with self . assertRaisesOpError ( <nl> " Tried to read from index 3 but array size is : 3 " ) : <nl> - ta . read ( 3 ) . eval ( ) <nl> + self . evaluate ( ta . read ( 3 ) ) <nl> <nl> + @ test_util . run_in_graph_and_eager_modes ( ) <nl> def testTensorArrayWriteMultipleFails ( self ) : <nl> with self . test_session ( use_gpu = True ) : <nl> ta = tensor_array_ops . TensorArray ( <nl> def testTensorArrayWriteMultipleFails ( self ) : <nl> with self . assertRaisesOpError ( <nl> " Could not write to TensorArray index 2 because " <nl> " it has already been written to . " ) : <nl> - ta . write ( 2 , 3 . 0 ) . write ( 2 , 3 . 0 ) . flow . eval ( ) <nl> + if context . in_graph_mode ( ) : <nl> + self . evaluate ( ta . write ( 2 , 3 . 0 ) . write ( 2 , 3 . 0 ) . flow ) <nl> + else : <nl> + self . evaluate ( ta . write ( 2 , 3 . 0 ) . write ( 2 , 3 . 0 ) ) <nl> <nl> + @ test_util . run_in_graph_and_eager_modes ( ) <nl> def testTensorArrayConcatIncompatibleShapesFails ( self ) : <nl> with self . test_session ( use_gpu = True ) : <nl> ta = tensor_array_ops . TensorArray ( <nl> def testTensorArrayConcatIncompatibleShapesFails ( self ) : <nl> <nl> with self . assertRaisesOpError ( <nl> " Concat saw a scalar shape at index 0 but requires at least vectors " ) : <nl> - w3 . concat ( ) . eval ( ) <nl> + self . evaluate ( w3 . concat ( ) ) <nl> <nl> ta = tensor_array_ops . TensorArray ( <nl> dtype = dtypes . float32 , <nl> def testTensorArrayConcatIncompatibleShapesFails ( self ) : <nl> w2 = w1 . write ( 1 , [ 4 . 0 ] ) <nl> w3 = w2 . write ( 2 , [ [ 3 . 0 ] ] ) <nl> <nl> - with self . assertRaisesOpError ( <nl> - r " TensorArray has inconsistent shapes . Index 0 has " <nl> - r " \ ( excepting dimension 0 \ ) shape : \ [ \ ] but index 2 has \ ( excepting " <nl> - r " dimension 0 \ ) shape : \ [ 1 \ ] " ) : <nl> - w3 . concat ( ) . eval ( ) <nl> + # The eager - mode implementation just passes up array_op . concat ' s error <nl> + # message . <nl> + if context . in_graph_mode ( ) : <nl> + with self . assertRaisesOpError ( <nl> + r " TensorArray has inconsistent shapes . Index 0 has " <nl> + r " \ ( excepting dimension 0 \ ) shape : \ [ \ ] but index 2 has " <nl> + r " \ ( excepting dimension 0 \ ) shape : \ [ 1 \ ] " ) : <nl> + self . evaluate ( w3 . concat ( ) ) <nl> + else : <nl> + with self . assertRaisesOpError ( <nl> + r " . * Ranks of all input tensors should match : shape \ [ 0 \ ] " <nl> + r " = \ [ 1 \ ] vs \ . shape \ [ 2 \ ] = \ [ 1 , 1 \ ] . * " ) : <nl> + self . evaluate ( w3 . concat ( ) ) <nl> <nl> + @ test_util . run_in_graph_and_eager_modes ( ) <nl> def testTensorArraySplitIncompatibleShapesFails ( self ) : <nl> with self . 
test_session ( use_gpu = True ) : <nl> - ta = tensor_array_ops . TensorArray ( <nl> - dtype = dtypes . float32 , <nl> - tensor_array_name = " foo " , <nl> - size = 3 , <nl> - infer_shape = False ) <nl> - <nl> + in_graph_mode = context . in_graph_mode ( ) <nl> + ta = _make_ta ( 3 , " foo " ) <nl> with self . assertRaisesOpError ( <nl> r " Expected lengths to be a vector , received shape : \ [ \ ] " ) : <nl> - lengths = array_ops . placeholder ( dtypes . int64 ) <nl> - ta . split ( [ 1 . 0 , 2 . 0 , 3 . 0 ] , lengths ) . flow . eval ( feed_dict = { lengths : 1 } ) <nl> + if in_graph_mode : <nl> + lengths = array_ops . placeholder ( dtypes . int64 ) <nl> + ta . split ( [ 1 . 0 , 2 . 0 , 3 . 0 ] , lengths ) . flow . eval ( feed_dict = { lengths : 1 } ) <nl> + else : <nl> + self . evaluate ( ta . split ( [ 1 . 0 , 2 . 0 , 3 . 0 ] , 1 ) ) <nl> <nl> with self . assertRaisesOpError ( <nl> r " Expected sum of lengths to be equal to values . shape \ [ 0 \ ] , " <nl> r " but sum of lengths is 1 and value ' s shape is : \ [ 3 \ ] " ) : <nl> - ta . split ( [ 1 . 0 , 2 . 0 , 3 . 0 ] , [ 1 ] ) . flow . eval ( ) <nl> + if in_graph_mode : <nl> + self . evaluate ( ta . split ( [ 1 . 0 , 2 . 0 , 3 . 0 ] , [ 1 ] ) . flow ) <nl> + else : <nl> + self . evaluate ( ta . split ( [ 1 . 0 , 2 . 0 , 3 . 0 ] , [ 1 ] ) ) <nl> <nl> + ta = _make_ta ( 1 , " baz " ) <nl> with self . assertRaisesOpError ( <nl> r " Expected value to be at least a vector , but received shape : \ [ \ ] " ) : <nl> - ta . split ( 1 . 0 , [ 1 ] ) . flow . eval ( ) <nl> - <nl> - ta = tensor_array_ops . TensorArray ( <nl> - dtype = dtypes . float32 , <nl> - tensor_array_name = " foo " , <nl> - size = 2 , <nl> - infer_shape = False ) <nl> + if in_graph_mode : <nl> + self . evaluate ( ta . split ( 1 . 0 , [ 1 ] ) . flow ) <nl> + else : <nl> + self . evaluate ( ta . split ( 1 . 0 , [ 1 ] ) ) <nl> <nl> + ta = _make_ta ( 2 , " buz " ) <nl> with self . assertRaisesOpError ( <nl> r " TensorArray ' s size is not equal to the size of lengths " <nl> r " \ ( 2 vs . 1 \ ) , and the TensorArray is not marked as " <nl> r " dynamically resizeable " ) : <nl> - ta . split ( [ 1 . 0 ] , [ 1 ] ) . flow . eval ( ) <nl> + if in_graph_mode : <nl> + self . evaluate ( ta . split ( [ 1 . 0 ] , [ 1 ] ) . flow ) <nl> + else : <nl> + self . evaluate ( ta . split ( [ 1 . 0 ] , [ 1 ] ) ) <nl> <nl> def _testTensorArrayWriteGradientAddMultipleAdds ( self , dtype ) : <nl> with self . test_session ( use_gpu = True ) : <nl> def testTensorArrayWriteGradientAddMultipleAdds ( self ) : <nl> dtypes . complex64 , dtypes . complex128 ) : <nl> self . _testTensorArrayWriteGradientAddMultipleAdds ( dtype ) <nl> <nl> + @ test_util . run_in_graph_and_eager_modes ( ) <nl> def testMultiTensorArray ( self ) : <nl> with self . test_session ( use_gpu = True ) : <nl> h1 = tensor_array_ops . TensorArray ( <nl> def testMultiTensorArray ( self ) : <nl> w2 = h2 . write ( 0 , 5 . 0 ) <nl> r2 = w2 . read ( 0 ) <nl> r = r1 + r2 <nl> - self . assertAllClose ( 9 . 0 , r . eval ( ) ) <nl> + val = self . evaluate ( r ) <nl> + self . assertAllClose ( 9 . 0 , val ) <nl> <nl> def _testTensorArrayGradientWriteReadType ( self , dtype ) : <nl> with self . test_session ( use_gpu = True ) as session : <nl> def _testTensorArrayGradientWritePackConcatAndRead ( self ) : <nl> def testTensorArrayGradientWritePackConcatAndRead ( self ) : <nl> self . _testTensorArrayGradientWritePackConcatAndRead ( ) <nl> <nl> + @ test_util . run_in_graph_and_eager_modes ( ) <nl> def testTensorArrayReadTwice ( self ) : <nl> with self . 
test_session ( use_gpu = True ) : <nl> value = constant_op . constant ( [ [ 1 . 0 , - 1 . 0 ] , [ 10 . 0 , - 10 . 0 ] ] ) <nl> def testTensorArrayReadTwice ( self ) : <nl> <nl> w_readonce = ta_readonce . unstack ( value ) <nl> r0_readonce = w_readonce . read ( 0 ) <nl> - with ops . control_dependencies ( [ r0_readonce ] ) : <nl> - r1_readonce = w_readonce . read ( 0 ) <nl> <nl> with self . assertRaisesOpError ( <nl> r " Could not read index 0 twice because it was cleared after a " <nl> r " previous read \ ( perhaps try setting clear_after_read = false \ ? \ ) " ) : <nl> - r1_readonce . eval ( ) <nl> + with ops . control_dependencies ( [ r0_readonce ] ) : <nl> + self . evaluate ( w_readonce . read ( 0 ) ) <nl> <nl> ta_readtwice = tensor_array_ops . TensorArray ( <nl> dtype = dtypes . float32 , <nl> def testTensorArrayReadTwice ( self ) : <nl> with ops . control_dependencies ( [ r0_readtwice ] ) : <nl> r1_readtwice = w_readtwice . read ( 0 ) <nl> <nl> - self . assertAllEqual ( [ 1 . 0 , - 1 . 0 ] , r1_readtwice . eval ( ) ) <nl> + self . assertAllEqual ( [ 1 . 0 , - 1 . 0 ] , self . evaluate ( r1_readtwice ) ) <nl> <nl> def _testTensorArrayGradientUnpackRead ( self ) : <nl> with self . test_session ( use_gpu = True ) as session : <nl> def _testTensorArrayGradientDynamicUnpackRead ( self ) : <nl> def testTensorArrayGradientDynamicUnpackRead ( self ) : <nl> self . _testTensorArrayGradientDynamicUnpackRead ( ) <nl> <nl> + @ test_util . run_in_graph_and_eager_modes ( ) <nl> def testCloseTensorArray ( self ) : <nl> - with self . test_session ( use_gpu = True ) as session : <nl> + with self . test_session ( use_gpu = True ) : <nl> ta = tensor_array_ops . TensorArray ( <nl> dtype = dtypes . float32 , tensor_array_name = " foo " , size = 3 ) <nl> - c1 = ta . close ( ) <nl> - session . run ( c1 ) <nl> + self . evaluate ( ta . close ( ) ) <nl> <nl> + @ test_util . run_in_graph_and_eager_modes ( ) <nl> def testSizeTensorArray ( self ) : <nl> with self . test_session ( use_gpu = True ) : <nl> ta = tensor_array_ops . TensorArray ( <nl> dtype = dtypes . float32 , tensor_array_name = " foo " , size = 3 ) <nl> s = ta . size ( ) <nl> - self . assertAllEqual ( 3 , s . eval ( ) ) <nl> + self . assertAllEqual ( 3 , self . evaluate ( s ) ) <nl> <nl> + @ test_util . run_in_graph_and_eager_modes ( ) <nl> def testWriteCloseTensorArray ( self ) : <nl> with self . test_session ( use_gpu = True ) : <nl> ta = tensor_array_ops . TensorArray ( <nl> def testWriteCloseTensorArray ( self ) : <nl> infer_shape = False ) <nl> w0 = ta . write ( 0 , [ [ 4 . 0 , 5 . 0 ] ] ) <nl> w1 = w0 . write ( 1 , [ 3 . 0 ] ) <nl> - w1 . close ( ) . run ( ) # Expected to run without problems <nl> + self . evaluate ( w1 . close ( ) ) # Expected to run without problems <nl> <nl> def _testWhileLoopWritePackGradients ( self , dynamic_size , dtype ) : <nl> np_dtype = dtype . as_numpy_dtype <nl> - with self . test_session ( use_gpu = True ) as session : <nl> + with self . test_session ( use_gpu = True ) : <nl> + def func ( v0 , state0 , var ) : <nl> + ta = tensor_array_ops . TensorArray ( <nl> + dtype = dtype , <nl> + tensor_array_name = " foo " , <nl> + size = 0 if dynamic_size else 3 , <nl> + dynamic_size = dynamic_size ) <nl> + time_0 = array_ops . identity ( 0 ) <nl> + <nl> + def body ( time , ta_t , state ) : <nl> + sliced = array_ops . slice ( <nl> + v0 , begin = array_ops . stack ( [ time , 0 ] ) , size = [ 1 , - 1 ] ) <nl> + sliced = array_ops . squeeze ( sliced ) <nl> + out = sliced + var + state <nl> + state + = sliced <nl> + ta_t = ta_t . 
write ( time , out ) <nl> + return ( time + 1 , ta_t , state ) <nl> + <nl> + ( unused_0 , h_final , unused_2 ) = control_flow_ops . while_loop ( <nl> + cond = lambda time , unused_1 , unused_2 : time < 3 , <nl> + body = body , <nl> + loop_vars = ( time_0 , ta , state0 ) , <nl> + shape_invariants = ( time_0 . get_shape ( ) , tensor_shape . unknown_shape ( ) , <nl> + tensor_shape . unknown_shape ( ) ) , <nl> + parallel_iterations = 3 ) <nl> + vout = h_final . stack ( ) <nl> + return vout <nl> + <nl> v0 = array_ops . identity ( np . arange ( 3 * 5 , dtype = np_dtype ) . reshape ( 3 , 5 ) ) <nl> - var = variables . Variable ( np . arange ( 100 , 105 , dtype = np_dtype ) ) <nl> state0 = array_ops . identity ( np . array ( [ 1 ] * 5 , dtype = np_dtype ) ) <nl> - ta = tensor_array_ops . TensorArray ( <nl> - dtype = dtype , <nl> - tensor_array_name = " foo " , <nl> - size = 0 if dynamic_size else 3 , <nl> - dynamic_size = dynamic_size ) <nl> - time_0 = array_ops . identity ( 0 ) <nl> - <nl> - def body ( time , ta_t , state ) : <nl> - sliced = array_ops . slice ( <nl> - v0 , begin = array_ops . stack ( [ time , 0 ] ) , size = [ 1 , - 1 ] ) <nl> - sliced = array_ops . squeeze ( sliced ) <nl> - out = sliced + var + state <nl> - state + = sliced <nl> - ta_t = ta_t . write ( time , out ) <nl> - return ( time + 1 , ta_t , state ) <nl> - <nl> - ( unused_0 , h_final , unused_2 ) = control_flow_ops . while_loop ( <nl> - cond = lambda time , unused_1 , unused_2 : time < 3 , <nl> - body = body , <nl> - loop_vars = ( time_0 , ta , state0 ) , <nl> - shape_invariants = ( time_0 . get_shape ( ) , tensor_shape . unknown_shape ( ) , <nl> - tensor_shape . unknown_shape ( ) ) , <nl> - parallel_iterations = 3 ) <nl> - vout = h_final . stack ( ) <nl> - <nl> + init_val = np . arange ( 100 , 105 , dtype = np_dtype ) <nl> + var = variable_scope . get_variable ( <nl> + " var " , <nl> + shape = init_val . shape , <nl> + dtype = np_dtype , <nl> + initializer = init_ops . constant_initializer ( init_val ) ) <nl> + <nl> + vout = func ( v0 , state0 , var ) <nl> grad_val = - np . arange ( 3 * 5 , dtype = np_dtype ) . reshape ( 3 , 5 ) <nl> - v0_grad = gradients_impl . gradients ( [ vout ] , [ v0 ] , [ grad_val ] ) [ 0 ] <nl> - state0_grad = gradients_impl . gradients ( [ vout ] , [ state0 ] , [ grad_val ] ) [ 0 ] <nl> - var_grad = gradients_impl . gradients ( [ vout ] , [ var ] , [ grad_val ] ) [ 0 ] <nl> + if context . in_graph_mode ( ) : <nl> + v0_grad = gradients_impl . gradients ( [ vout ] , [ v0 ] , [ grad_val ] ) [ 0 ] <nl> + state0_grad = gradients_impl . gradients ( [ vout ] , [ state0 ] , [ grad_val ] ) [ 0 ] <nl> + var_grad = gradients_impl . gradients ( [ vout ] , [ var ] , [ grad_val ] ) [ 0 ] <nl> + variables . global_variables_initializer ( ) . run ( ) <nl> + else : <nl> + grad_fn = backprop . gradients_function ( func ) <nl> + v0_grad , state0_grad , var_grad = grad_fn ( v0 , state0 , var , dy = grad_val ) <nl> <nl> - variables . global_variables_initializer ( ) . run ( ) <nl> state0_t , var_t , v0_t , vout_t , v0_grad_t , var_grad_t , state0_grad_t = ( <nl> - session . run ( [ state0 , var , v0 , vout , v0_grad , var_grad , state0_grad ] ) ) <nl> - just_v0_grad_t , = session . run ( [ v0_grad ] ) <nl> + self . evaluate ( <nl> + ( [ state0 , var , v0 , vout , v0_grad , var_grad , state0_grad ] ) ) ) <nl> + just_v0_grad_t = self . 
evaluate ( v0_grad ) <nl> <nl> # state = [ state0 | state0 + v0 [ 0 ] | state0 + v0 [ 0 ] + v0 [ 1 ] ] <nl> # vout = [ v0 [ 0 ] + var + state [ 0 ] | <nl> def body ( time , ta_t , state ) : <nl> self . assertAllClose ( grad_val . sum ( axis = 0 ) , var_grad_t ) <nl> self . assertAllClose ( grad_val . sum ( axis = 0 ) , state0_grad_t ) <nl> <nl> + @ test_util . run_in_graph_and_eager_modes ( ) <nl> def testWhileLoopWritePackGradients ( self ) : <nl> self . _testWhileLoopWritePackGradients ( <nl> dynamic_size = False , dtype = dtypes . float32 ) <nl> def testWhileLoopDynamicWritePackGradients ( self ) : <nl> self . _testWhileLoopWritePackGradients ( <nl> dynamic_size = True , dtype = dtypes . float32 ) <nl> <nl> + @ test_util . run_in_graph_and_eager_modes ( ) <nl> def testGradSerialTwoLoops ( self ) : <nl> with self . test_session ( use_gpu = True ) : <nl> - num_steps = 100 <nl> - acc = tensor_array_ops . TensorArray ( <nl> - dtype = dtypes . float32 , <nl> - size = num_steps , <nl> - clear_after_read = False , <nl> - element_shape = tensor_shape . scalar ( ) ) <nl> - i = constant_op . constant ( 0 , name = " i " ) <nl> - x = constant_op . constant ( 2 . 0 , name = " x " ) <nl> + def loop ( x ) : <nl> + num_steps = 100 <nl> + acc = tensor_array_ops . TensorArray ( <nl> + dtype = dtypes . float32 , <nl> + size = num_steps , <nl> + clear_after_read = False , <nl> + element_shape = tensor_shape . scalar ( ) ) <nl> + i = constant_op . constant ( 0 , name = " i " ) <nl> + <nl> + c = lambda i , acc : i < 5 <nl> <nl> - c = lambda i , acc : i < 5 <nl> + def b ( i , acc ) : <nl> + x1 = control_flow_ops . cond ( <nl> + math_ops . equal ( i , 0 ) , lambda : x , <nl> + lambda : math_ops . multiply ( acc . read ( i - 1 ) , 2 . 0 ) ) <nl> + return i + 1 , acc . write ( i , x1 ) <nl> <nl> - def b ( i , acc ) : <nl> - x1 = control_flow_ops . cond ( <nl> - math_ops . equal ( i , 0 ) , lambda : x , <nl> - lambda : math_ops . multiply ( acc . read ( i - 1 ) , 2 . 0 ) ) <nl> - return i + 1 , acc . write ( i , x1 ) <nl> + i1 , acc1 = control_flow_ops . while_loop ( c , b , [ i , acc ] ) <nl> <nl> - i1 , acc1 = control_flow_ops . while_loop ( c , b , [ i , acc ] ) <nl> + z = constant_op . constant ( 0 . 0 ) <nl> <nl> - z = constant_op . constant ( 0 . 0 ) <nl> + def fn ( i , acc ) : <nl> + return i + 1 , acc . write ( i , z ) <nl> <nl> - def fn ( i , acc ) : <nl> - return i + 1 , acc . write ( i , z ) <nl> + _ , acc2 = control_flow_ops . while_loop ( lambda i , acc : i < num_steps , fn , <nl> + [ i1 , acc1 ] ) <nl> <nl> - _ , acc2 = control_flow_ops . while_loop ( lambda i , acc : i < num_steps , fn , <nl> - [ i1 , acc1 ] ) <nl> + r = acc2 . stack ( ) <nl> + return r <nl> <nl> - r = acc2 . stack ( ) <nl> - grad = gradients_impl . gradients ( r , [ x ] ) [ 0 ] <nl> - self . assertAllClose ( 31 . 0 , grad . eval ( ) ) <nl> + x = constant_op . constant ( 2 . 0 , name = " x " ) <nl> + if context . in_graph_mode ( ) : <nl> + grad = gradients_impl . gradients ( loop ( x ) , [ x ] ) [ 0 ] <nl> + else : <nl> + grad = backprop . gradients_function ( loop ) ( x ) [ 0 ] <nl> + self . assertAllClose ( 31 . 0 , self . evaluate ( grad ) ) <nl> <nl> def testSumOfTwoReadVariablesWithoutRepeatGrad ( self ) : <nl> with self . test_session ( use_gpu = True ) as session : <nl> def testPartlyUnknownShape ( self ) : <nl> r5 = w5 . read ( 0 ) <nl> self . assertAllEqual ( [ 5 , 4 , 2 , 3 ] , r5 . get_shape ( ) . as_list ( ) ) <nl> <nl> + @ test_util . 
run_in_graph_and_eager_modes ( ) <nl> def _testUnpackShape ( self ) : <nl> with self . test_session ( use_gpu = True ) : <nl> ta = tensor_array_ops . TensorArray ( <nl> def _testUnpackShape ( self ) : <nl> <nl> c1 = constant_op . constant ( [ 4 . 0 , 5 . 0 ] ) <nl> w1 = w0 . write ( 3 , c1 ) <nl> - r1 = w1 . read ( 0 ) <nl> - self . assertAllEqual ( c1 . get_shape ( ) , r1 . get_shape ( ) ) <nl> + <nl> + with self . assertRaisesOpError ( <nl> + r " Could not read index 0 twice because it was cleared after a " <nl> + r " previous read \ ( perhaps try setting clear_after_read = false \ ? \ ) " ) : <nl> + with ops . control_dependencies ( [ r0 ] ) : <nl> + self . evaluate ( w1 . read ( 0 ) ) <nl> + <nl> + r1 = w1 . read ( 1 ) <nl> + self . assertAllEqual ( c1 . get_shape ( ) , r1 . shape ) <nl> <nl> c2 = constant_op . constant ( [ 4 . 0 , 5 . 0 , 6 . 0 ] ) <nl> with self . assertRaises ( ValueError ) : <nl> def _testUnpackShape ( self ) : <nl> def testUnpackShape ( self ) : <nl> self . _testUnpackShape ( ) <nl> <nl> + @ test_util . run_in_graph_and_eager_modes ( ) <nl> def testSplitShape ( self ) : <nl> with self . test_session ( use_gpu = True ) : <nl> ta = tensor_array_ops . TensorArray ( <nl> def testSplitShape ( self ) : <nl> infer_shape = True ) <nl> w0 = ta1 . split ( value , [ 1 , 2 ] ) <nl> r0 = w0 . read ( 0 ) <nl> - self . assertEqual ( r0 . get_shape ( ) . ndims , None ) <nl> - self . assertEqual ( <nl> - tensor_shape . TensorShape ( <nl> - ta1 . handle . op . get_attr ( " element_shape " ) ) . ndims , None ) <nl> + if context . in_graph_mode ( ) : <nl> + self . assertEqual ( r0 . get_shape ( ) . ndims , None ) <nl> + self . assertEqual ( <nl> + tensor_shape . TensorShape ( <nl> + ta1 . handle . op . get_attr ( " element_shape " ) ) . ndims , None ) <nl> + else : <nl> + self . assertEqual ( ( 1 , 2 ) , r0 . get_shape ( ) ) <nl> + self . assertEqual ( ( 2 , 2 ) , w0 . read ( 1 ) . get_shape ( ) ) <nl> <nl> def testWriteUnknownShape ( self ) : <nl> with self . test_session ( use_gpu = True ) : <nl> def _testTensorArrayEvalEmpty ( self ) : <nl> def testTensorArrayEvalEmpty ( self ) : <nl> self . _testTensorArrayEvalEmpty ( ) <nl> <nl> + # this test is ill - defined for Eager mode mmm unpacking an empty tensor <nl> + # gives an empty list / there is not equivalent of " mark_used " in Eager <nl> def _testTensorArrayEvalEmptyWithDefault ( self ) : <nl> with self . test_session ( use_gpu = True ) : <nl> ta = tensor_array_ops . TensorArray ( <nl> def testTensorArrayScatterReadAndGradients ( self ) : <nl> self . assertAllEqual ( [ 10 . 0 , - 10 . 0 ] , read_vals [ 1 ] ) <nl> self . assertAllEqual ( [ [ 2 . 0 , 3 . 0 ] , [ 4 . 0 , 5 . 0 ] ] , grad_vals [ 0 ] ) <nl> <nl> + @ test_util . run_in_graph_and_eager_modes ( ) <nl> def testTensorArrayWriteGatherAndGradients ( self ) : <nl> with self . test_session ( use_gpu = True ) as session : <nl> ta = tensor_array_ops . TensorArray ( <nl> def testTensorArrayWriteGatherAndGradients ( self ) : <nl> size = 0 , <nl> dynamic_size = True ) <nl> <nl> - values = constant_op . constant ( [ [ 1 . 0 * x , - 1 . 0 * x ] for x in range ( 10 ) ] ) <nl> - indices = constant_op . constant ( [ 1 , 8 ] ) <nl> - <nl> - w = ta . unstack ( values ) <nl> - g = w . gather ( indices ) <nl> + def func ( values ) : <nl> + indices = constant_op . constant ( [ 1 , 8 ] ) <nl> + w = ta . unstack ( values ) <nl> + g = w . gather ( indices ) <nl> + return g <nl> <nl> + values = constant_op . constant ( [ [ 1 . 0 * x , - 1 . 
0 * x ] for x in range ( 10 ) ] ) <nl> + g = func ( values ) <nl> + grad_ys = [ [ [ 2 . 0 , 3 . 0 ] , [ 4 . 0 , 5 . 0 ] ] ] <nl> # Test combined gradients + aggregation of read ( 0 ) <nl> - grad = gradients_impl . gradients ( <nl> - ys = [ g ] , xs = [ values ] , grad_ys = [ [ [ 2 . 0 , 3 . 0 ] , [ 4 . 0 , 5 . 0 ] ] ] ) <nl> - g_vals , grad_vals = session . run ( [ [ g ] , grad ] ) <nl> + if context . in_graph_mode ( ) : <nl> + grad = gradients_impl . gradients ( ys = [ g ] , xs = [ values ] , grad_ys = grad_ys ) <nl> + g_vals , grad_vals = session . run ( [ [ g ] , grad ] ) <nl> + else : <nl> + g_vals = [ g ] <nl> + grad_vals = backprop . gradients_function ( func ) ( <nl> + values , dy = constant_op . constant ( grad_ys [ 0 ] , dtype = dtypes . float32 ) ) <nl> <nl> # Gradients for 8 of the 10 unread components are zero . <nl> expected_grad = np . zeros ( ( 10 , 2 ) ) <nl> def _body ( i , ta_i ) : <nl> self . assertFalse ( <nl> [ s for s in dev_stats [ d ] if " / TensorArray " in s . node_name ] ) <nl> <nl> + @ test_util . run_in_graph_and_eager_modes ( ) <nl> def testTensorArrayIdentity ( self ) : <nl> - with self . test_session ( use_gpu = True ) as session : <nl> + with self . test_session ( use_gpu = True ) : <nl> ta0 = tensor_array_ops . TensorArray ( dtype = dtypes . float32 , size = 2 , <nl> infer_shape = False ) <nl> ta1 = tensor_array_ops . TensorArray ( dtype = dtypes . int32 , size = 4 , <nl> def testTensorArrayIdentity ( self ) : <nl> ta0 = ta0 . write ( 0 , 0 . ) <nl> ta1 = ta1 . write ( 0 , 1 ) <nl> <nl> - v0 = variables . Variable ( 0 ) <nl> - v1 = variables . Variable ( 0 ) <nl> + v0 = variable_scope . get_variable ( <nl> + " v0 " , shape = ( ) , initializer = init_ops . zeros_initializer ( ) ) <nl> + v1 = variable_scope . get_variable ( <nl> + " v1 " , shape = ( ) , initializer = init_ops . zeros_initializer ( ) ) <nl> <nl> with ops . control_dependencies ( [ v0 . assign_add ( 1 ) ] ) : <nl> ta0 = ta0 . identity ( ) <nl> def testTensorArrayIdentity ( self ) : <nl> # Tests correct properties on new TensorArrays . <nl> self . assertEqual ( dtypes . float32 , ta0 . dtype ) <nl> self . assertEqual ( dtypes . int32 , ta1 . dtype ) <nl> - self . assertEqual ( tensor_shape . unknown_shape ( ) , read0 . get_shape ( ) ) <nl> + if context . in_graph_mode ( ) : <nl> + self . assertEqual ( tensor_shape . unknown_shape ( ) , read0 . get_shape ( ) ) <nl> + else : <nl> + self . assertEqual ( tensor_shape . scalar ( ) , read1 . get_shape ( ) ) <nl> self . assertEqual ( tensor_shape . scalar ( ) , read1 . get_shape ( ) ) <nl> <nl> - variables . global_variables_initializer ( ) . run ( ) <nl> + if context . in_graph_mode ( ) : <nl> + variables . global_variables_initializer ( ) . run ( ) <nl> <nl> - read0_v , read1_v , size0_v , size1_v = session . run ( <nl> - ( read0 , read1 , size0 , size1 ) ) <nl> + read0_v , read1_v , size0_v , size1_v = self . evaluate ( ( read0 , read1 , size0 , <nl> + size1 ) ) <nl> <nl> # Tests that the control dependencies was added and executed . <nl> - self . assertEqual ( 1 , v0 . eval ( ) ) <nl> - self . assertEqual ( 1 , v1 . eval ( ) ) <nl> + self . assertEqual ( 1 , self . evaluate ( v0 ) ) <nl> + self . assertEqual ( 1 , self . evaluate ( v1 ) ) <nl> <nl> # Tests correct TensorArray . <nl> self . assertEqual ( read0_v , 0 ) <nl> mmm a / tensorflow / python / layers / base . py <nl> ppp b / tensorflow / python / layers / base . py <nl> def __init__ ( self , trainable = True , name = None , dtype = None , <nl> self . _inbound_nodes = [ ] <nl> self . 
_outbound_nodes = [ ] <nl> <nl> - # Determine layer name ( non - unique ) . <nl> - if isinstance ( name , vs . VariableScope ) : <nl> - base_name = name . name <nl> - else : <nl> - base_name = name <nl> - self . _name = name <nl> - if not name : <nl> - base_name = _to_snake_case ( self . __class__ . __name__ ) <nl> - self . _name = _unique_layer_name ( base_name ) <nl> - self . _base_name = base_name <nl> + self . _init_set_name ( name ) <nl> <nl> # Determine variable scope . <nl> scope = kwargs . get ( ' _scope ' ) <nl> def __init__ ( self , trainable = True , name = None , dtype = None , <nl> batch_size = kwargs . get ( ' batch_size ' ) <nl> self . _batch_input_shape = ( batch_size , ) + tuple ( kwargs [ ' input_shape ' ] ) <nl> <nl> + def _init_set_name ( self , name ) : <nl> + # Determine layer name ( non - unique ) . <nl> + if isinstance ( name , vs . VariableScope ) : <nl> + base_name = name . name <nl> + else : <nl> + base_name = name <nl> + self . _name = name <nl> + if not name : <nl> + self . _name , base_name = self . _make_unique_name ( ) <nl> + self . _base_name = base_name <nl> + <nl> @ property <nl> def dtype ( self ) : <nl> return self . _dtype <nl> def _compute_output_shape ( self , input_shape ) : <nl> " " " <nl> return input_shape <nl> <nl> + def _make_unique_name ( self , name_uid_map = None , avoid_names = None ) : <nl> + base_name = _to_snake_case ( self . __class__ . __name__ ) <nl> + name = _unique_layer_name ( base_name , name_uid_map = name_uid_map , <nl> + avoid_names = avoid_names ) <nl> + return ( name , base_name ) <nl> + <nl> def _set_scope ( self , scope = None ) : <nl> if self . _scope is None : <nl> # If constructed with _scope = None , lazy setting of scope . <nl> def _set_scope ( self , scope = None ) : <nl> <nl> def add_variable ( self , name , shape , dtype = None , <nl> initializer = None , regularizer = None , <nl> - trainable = True , constraint = None ) : <nl> + trainable = True , constraint = None , <nl> + partitioner = None ) : <nl> " " " Adds a new variable to the layer , or gets an existing one ; returns it . <nl> <nl> Arguments : <nl> def add_variable ( self , name , shape , dtype = None , <nl> " trainable_variables " ( e . g . variables , biases ) <nl> or " non_trainable_variables " ( e . g . BatchNorm mean , stddev ) . <nl> constraint : constraint instance ( callable ) . <nl> + partitioner : ( optional ) partitioner instance ( callable ) . If <nl> + provided , when the requested variable is created it will be split <nl> + into multiple partitions according to ` partitioner ` . In this case , <nl> + an instance of ` PartitionedVariable ` is returned . Available <nl> + partitioners include ` tf . fixed_size_partitioner ` and <nl> + ` tf . variable_axis_size_partitioner ` . For more details , see the <nl> + documentation of ` tf . get_variable ` and the " Variable Partitioners <nl> + and Sharding " section of the API guide . <nl> <nl> Returns : <nl> - The created variable . <nl> + The created variable . Usually either a ` Variable ` or ` ResourceVariable ` <nl> + instance . If ` partitioner ` is not ` None ` , a ` PartitionedVariable ` <nl> + instance is returned . <nl> <nl> Raises : <nl> RuntimeError : If called in Eager mode with regularizers . <nl> def add_variable ( self , name , shape , dtype = None , <nl> initializer = initializer , <nl> dtype = dtypes . as_dtype ( dtype ) , <nl> constraint = constraint , <nl> - trainable = trainable and self . trainable ) <nl> + trainable = trainable and self . 
trainable , <nl> + partitioner = partitioner ) <nl> if variable in existing_variables : <nl> return variable <nl> if regularizer : <nl> def __init__ ( self , inputs , outputs , name = None ) : # pylint : disable = super - init - no <nl> # TODO ( fchollet ) : check that all inputs and outputs are DeferredTensors . <nl> pass <nl> <nl> - # Set layer name and scope <nl> - if isinstance ( name , vs . VariableScope ) : <nl> - base_name = name . name <nl> - else : <nl> - base_name = name <nl> - self . _name = name <nl> - if not name : <nl> - base_name = _to_snake_case ( self . __class__ . __name__ ) <nl> - self . _name = _unique_layer_name ( base_name ) <nl> + self . _init_set_name ( name ) <nl> self . _activity_regularizer = None <nl> - with vs . variable_scope ( None , default_name = base_name ) as captured_scope : <nl> + with vs . variable_scope ( <nl> + None , default_name = self . _base_name ) as captured_scope : <nl> self . _scope = captured_scope <nl> - self . _base_name = base_name <nl> call_fn_args = estimator_util . fn_args ( self . call ) <nl> self . _compute_previous_mask = ( ' mask ' in call_fn_args or <nl> hasattr ( self , ' compute_mask ' ) ) <nl> def _collect_previous_mask ( input_tensors ) : <nl> PER_GRAPH_LAYER_NAME_UIDS = weakref . WeakKeyDictionary ( ) <nl> <nl> <nl> - def _unique_layer_name ( name ) : <nl> + def _get_default_graph_uid_map ( ) : <nl> + graph = ops . get_default_graph ( ) <nl> + name_uid_map = PER_GRAPH_LAYER_NAME_UIDS . get ( graph , None ) <nl> + if name_uid_map is None : <nl> + name_uid_map = collections . defaultdict ( int ) <nl> + PER_GRAPH_LAYER_NAME_UIDS [ graph ] = name_uid_map <nl> + return name_uid_map <nl> + <nl> + <nl> + def _unique_layer_name ( name , name_uid_map = None , avoid_names = None ) : <nl> " " " Makes a layer name ( or arbitrary string ) unique within a TensorFlow graph . <nl> <nl> Arguments : <nl> name : String name to make unique . <nl> + name_uid_map : An optional defaultdict ( int ) to use when creating unique <nl> + names . If None ( default ) , uses a per - Graph dictionary . <nl> + avoid_names : An optional set or dict with names which should not be used . If <nl> + None ( default ) does not avoid any names . <nl> <nl> Returns : <nl> Unique string name . <nl> def _unique_layer_name ( name ) : <nl> _unique_layer_name ( ' dense ' ) # dense_2 <nl> ` ` ` <nl> " " " <nl> - graph = ops . get_default_graph ( ) <nl> - if graph not in PER_GRAPH_LAYER_NAME_UIDS : <nl> - PER_GRAPH_LAYER_NAME_UIDS [ graph ] = collections . defaultdict ( int ) <nl> - layer_name_uids = PER_GRAPH_LAYER_NAME_UIDS [ graph ] <nl> - layer_name_uids [ name ] + = 1 <nl> - return name + ' _ ' + str ( layer_name_uids [ name ] ) <nl> + if name_uid_map is None : <nl> + name_uid_map = _get_default_graph_uid_map ( ) <nl> + if avoid_names is None : <nl> + avoid_names = set ( ) <nl> + proposed_name = None <nl> + while proposed_name is None or proposed_name in avoid_names : <nl> + name_uid_map [ name ] + = 1 <nl> + proposed_name = name + ' _ ' + str ( name_uid_map [ name ] ) <nl> + return proposed_name <nl> mmm a / tensorflow / python / layers / convolutional . py <nl> ppp b / tensorflow / python / layers / convolutional . py <nl> def conv1d ( inputs , <nl> <nl> Raises : <nl> ValueError : if eager execution is enabled . <nl> + <nl> + @ compatibility ( eager ) <nl> + Not compatible with eager execution . Use ` tf . layers . Conv1D ` instead . <nl> + @ end_compatibility <nl> " " " <nl> if context . 
in_eager_mode ( ) : <nl> raise ValueError ( <nl> def conv2d ( inputs , <nl> <nl> Raises : <nl> ValueError : if eager execution is enabled . <nl> + <nl> + @ compatibility ( eager ) <nl> + Not compatible with eager execution . Use ` tf . layers . Conv2D ` instead . <nl> + @ end_compatibility <nl> " " " <nl> if context . in_eager_mode ( ) : <nl> raise ValueError ( <nl> def conv3d ( inputs , <nl> <nl> Raises : <nl> ValueError : if eager execution is enabled . <nl> + <nl> + @ compatibility ( eager ) <nl> + Not compatible with eager execution . Use ` tf . layers . Conv3D ` instead . <nl> + @ end_compatibility <nl> " " " <nl> if context . in_eager_mode ( ) : <nl> raise ValueError ( <nl> def separable_conv2d ( inputs , <nl> <nl> Raises : <nl> ValueError : if eager execution is enabled . <nl> + <nl> + @ compatibility ( eager ) <nl> + Not compatible with eager execution . Use ` tf . layers . SeparableConv2d ` instead . <nl> + @ end_compatibility <nl> " " " <nl> if context . in_eager_mode ( ) : <nl> raise ValueError ( <nl> def conv2d_transpose ( inputs , <nl> <nl> Raises : <nl> ValueError : if eager execution is enabled . <nl> + <nl> + @ compatibility ( eager ) <nl> + Not compatible with eager execution . Use ` tf . layers . Conv2DTranspose ` instead . <nl> + @ end_compatibility <nl> " " " <nl> if context . in_eager_mode ( ) : <nl> raise ValueError ( <nl> def conv3d_transpose ( inputs , <nl> <nl> Raises : <nl> ValueError : if eager execution is enabled . <nl> + <nl> + @ compatibility ( eager ) <nl> + Not compatible with eager execution . Use ` tf . layers . Conv3DTranspose ` instead . <nl> + @ end_compatibility <nl> " " " <nl> if context . in_eager_mode ( ) : <nl> raise ValueError ( <nl> mmm a / tensorflow / python / layers / core . py <nl> ppp b / tensorflow / python / layers / core . py <nl> def dense ( <nl> <nl> Raises : <nl> ValueError : if eager execution is enabled . <nl> + <nl> + @ compatibility ( eager ) <nl> + Not compatible with eager execution . Use ` tf . layers . Dense ` instead . <nl> + @ end_compatibility <nl> " " " <nl> if context . in_eager_mode ( ) : <nl> raise ValueError ( <nl> def dropout ( inputs , <nl> <nl> Raises : <nl> ValueError : if eager execution is enabled . <nl> + <nl> + @ compatibility ( eager ) <nl> + Not compatible with eager execution . Use ` tf . layers . Dropout ` instead . <nl> + @ end_compatibility <nl> " " " <nl> if context . in_eager_mode ( ) : <nl> raise ValueError ( <nl> mmm a / tensorflow / python / layers / maxout . py <nl> ppp b / tensorflow / python / layers / maxout . py <nl> def maxout ( inputs , num_units , axis = - 1 , name = None ) : <nl> <nl> Raises : <nl> ValueError : if num_units is not multiple of number of features . <nl> + <nl> + @ compatibility ( eager ) <nl> + Not compatible with eager execution . Use ` tf . layers . MaxOut ` instead . <nl> + @ end_compatibility <nl> " " " <nl> if context . in_eager_mode ( ) : <nl> raise ValueError ( <nl> mmm a / tensorflow / python / layers / normalization . py <nl> ppp b / tensorflow / python / layers / normalization . py <nl> def _renorm_correction_and_moments ( self , mean , variance , training ) : <nl> if dmax is not None : <nl> d = math_ops . maximum ( d , - dmax ) <nl> d = math_ops . minimum ( d , dmax ) <nl> - # When not training , use r = 1 , d = 0 , and decay = 1 meaning no updates . <nl> + # When not training , use r = 1 , d = 0 . <nl> r = utils . smart_cond ( training , lambda : r , lambda : array_ops . ones_like ( r ) ) <nl> d = utils . smart_cond ( training , lambda : d , lambda : array_ops . 
zeros_like ( d ) ) <nl> - decay = utils . smart_cond ( training , lambda : self . renorm_momentum , lambda : 1 . ) <nl> <nl> def _update_renorm_variable ( var , weight , value ) : <nl> " " " Updates a moving average and weight , returns the unbiased value . " " " <nl> - # Update the variables without zero debiasing . The debiasing will be <nl> - # accomplished by dividing the exponential moving average by the weight . <nl> - # For example , after a single update , the moving average would be <nl> - # ( 1 - decay ) * value . and the weight will be 1 - decay , with their ratio <nl> - # giving value . <nl> - # Make sure the weight is not updated until before r and d computation . <nl> value = array_ops . identity ( value ) <nl> - with ops . control_dependencies ( [ value ] ) : <nl> - weight_value = array_ops . constant ( 1 . , dtype = weight . dtype ) <nl> - new_var = moving_averages . assign_moving_average ( <nl> - var , value , decay , zero_debias = False ) <nl> - new_weight = moving_averages . assign_moving_average ( <nl> - weight , weight_value , decay , zero_debias = False ) <nl> - return new_var / new_weight <nl> + def _do_update ( ) : <nl> + # Update the variables without zero debiasing . The debiasing will be <nl> + # accomplished by dividing the exponential moving average by the weight . <nl> + # For example , after a single update , the moving average would be <nl> + # ( 1 - decay ) * value . and the weight will be 1 - decay , with their ratio <nl> + # giving the value . <nl> + # Make sure the weight is not updated until before r and d computation . <nl> + with ops . control_dependencies ( [ value ] ) : <nl> + weight_value = array_ops . constant ( 1 . , dtype = weight . dtype ) <nl> + new_var = moving_averages . assign_moving_average ( <nl> + var , value , self . renorm_momentum , zero_debias = False ) <nl> + new_weight = moving_averages . assign_moving_average ( <nl> + weight , weight_value , self . renorm_momentum , zero_debias = False ) <nl> + return new_var / new_weight <nl> + def _fake_update ( ) : <nl> + return array_ops . identity ( var ) <nl> + return utils . smart_cond ( training , _do_update , _fake_update ) <nl> <nl> with ops . colocate_with ( self . moving_mean ) : <nl> new_mean = _update_renorm_variable ( self . renorm_mean , <nl> def _compose_transforms ( scale , offset , then_scale , then_offset ) : <nl> else : <nl> new_mean , new_variance = mean , variance <nl> <nl> - # Update moving averages when training , and prevent updates otherwise . <nl> - decay = utils . smart_cond ( training , lambda : self . momentum , lambda : 1 . ) <nl> if self . virtual_batch_size is not None : <nl> # This isn ' t strictly correct since in ghost batch norm , you are <nl> # supposed to sequentially update the moving_mean and moving_variance <nl> def _compose_transforms ( scale , offset , then_scale , then_offset ) : <nl> new_variance = math_ops . reduce_mean ( new_variance , <nl> axis = 1 , keep_dims = True ) <nl> <nl> - mean_update = moving_averages . assign_moving_average ( <nl> - self . moving_mean , new_mean , decay , zero_debias = False ) <nl> - variance_update = moving_averages . assign_moving_average ( <nl> - self . moving_variance , new_variance , decay , zero_debias = False ) <nl> + def _do_update ( var , value ) : <nl> + return moving_averages . assign_moving_average ( <nl> + var , value , self . momentum , zero_debias = False ) <nl> + <nl> + mean_update = utils . smart_cond ( <nl> + training , <nl> + lambda : _do_update ( self . moving_mean , new_mean ) , <nl> + lambda : self . 
moving_mean ) <nl> + variance_update = utils . smart_cond ( <nl> + training , <nl> + lambda : _do_update ( self . moving_variance , new_variance ) , <nl> + lambda : self . moving_variance ) <nl> if context . in_graph_mode ( ) : <nl> self . add_update ( mean_update , inputs = inputs ) <nl> self . add_update ( variance_update , inputs = inputs ) <nl> def batch_normalization ( inputs , <nl> <nl> Raises : <nl> ValueError : if eager execution is enabled . <nl> + <nl> + @ compatibility ( eager ) <nl> + Not compatible with eager execution . Use ` tf . layers . BatchNormalization ` <nl> + instead . <nl> + @ end_compatibility <nl> " " " <nl> if context . in_eager_mode ( ) : <nl> raise ValueError ( <nl> mmm a / tensorflow / python / layers / pooling . py <nl> ppp b / tensorflow / python / layers / pooling . py <nl> def average_pooling1d ( inputs , pool_size , strides , <nl> <nl> Raises : <nl> ValueError : if eager execution is enabled . <nl> + <nl> + @ compatibility ( eager ) <nl> + Not compatible with eager execution . Use ` tf . layers . AveragePooling1D ` instead . <nl> + @ end_compatibility <nl> " " " <nl> if context . in_eager_mode ( ) : <nl> raise ValueError ( <nl> def max_pooling1d ( inputs , pool_size , strides , <nl> <nl> Raises : <nl> ValueError : if eager execution is enabled . <nl> + <nl> + @ compatibility ( eager ) <nl> + Not compatible with eager execution . Use ` tf . layers . MaxPooling1D ` instead . <nl> + @ end_compatibility <nl> " " " <nl> if context . in_eager_mode ( ) : <nl> raise ValueError ( <nl> def average_pooling2d ( inputs , <nl> <nl> Raises : <nl> ValueError : if eager execution is enabled . <nl> + <nl> + @ compatibility ( eager ) <nl> + Not compatible with eager execution . Use ` tf . layers . AveragePooling2D ` instead . <nl> + @ end_compatibility <nl> " " " <nl> if context . in_eager_mode ( ) : <nl> raise ValueError ( <nl> def max_pooling2d ( inputs , <nl> <nl> Raises : <nl> ValueError : if eager execution is enabled . <nl> + <nl> + @ compatibility ( eager ) <nl> + Not compatible with eager execution . Use ` tf . layers . MaxPooling2D ` instead . <nl> + @ end_compatibility <nl> " " " <nl> if context . in_eager_mode ( ) : <nl> raise ValueError ( <nl> def average_pooling3d ( inputs , <nl> <nl> Raises : <nl> ValueError : if eager execution is enabled . <nl> + <nl> + @ compatibility ( eager ) <nl> + Not compatible with eager execution . Use ` tf . layers . AveragePooling3D ` instead . <nl> + @ end_compatibility <nl> " " " <nl> if context . in_eager_mode ( ) : <nl> raise ValueError ( <nl> def max_pooling3d ( inputs , <nl> <nl> Raises : <nl> ValueError : if eager execution is enabled . <nl> + <nl> + @ compatibility ( eager ) <nl> + Not compatible with eager execution . Use ` tf . layers . MaxPooling3D ` instead . <nl> + @ end_compatibility <nl> " " " <nl> if context . in_eager_mode ( ) : <nl> raise ValueError ( <nl> mmm a / tensorflow / python / ops / array_ops . py <nl> ppp b / tensorflow / python / ops / array_ops . py <nl> def where ( condition , x = None , y = None , name = None ) : <nl> " " " <nl> if x is None and y is None : <nl> with ops . name_scope ( name , " Where " , [ condition ] ) as name : <nl> - # Temporarily create an old style WhereOp nodedef + Operation without the <nl> - # attribute " T " . <nl> - # TODO ( b / 67720963 ) : Roll this back when the issue is resolved . <nl> - condition = gen_math_ops . cast ( condition , dtypes . bool ) <nl> - output = gen_array_ops . where ( input = condition , name = name ) <nl> - if context . 
in_graph_mode ( ) : <nl> - output . op . _node_def . attr . clear ( ) <nl> - return output <nl> + condition = ops . convert_to_tensor ( <nl> + condition , preferred_dtype = dtypes . bool , name = " condition " ) <nl> + return gen_array_ops . where ( input = condition , name = name ) <nl> elif x is not None and y is not None : <nl> return gen_math_ops . _select ( condition = condition , t = x , e = y , name = name ) <nl> else : <nl> mmm a / tensorflow / python / ops / metrics_impl . py <nl> ppp b / tensorflow / python / ops / metrics_impl . py <nl> def _local_variable ( initial_value , validate_shape = True , name = None ) : <nl> <nl> <nl> def _remove_squeezable_dimensions ( predictions , labels , weights ) : <nl> - " " " Internal version of ` remove_squeezable_dimensions ` which handles weights . <nl> + " " " Squeeze or expand last dim if needed . <nl> <nl> - Squeezes ` predictions ` and ` labels ` if their rank differs by 1 . <nl> - Squeezes ` weights ` if its rank is 1 more than the new rank of ` predictions ` <nl> + Squeezes last dim of ` predictions ` or ` labels ` if their rank differs by 1 <nl> + ( using confusion_matrix . remove_squeezable_dimensions ) . <nl> + Squeezes or expands last dim of ` weights ` if its rank differs by 1 from the <nl> + new rank of ` predictions ` . <nl> + <nl> + If ` weights ` is scalar , it is kept scalar . <nl> <nl> This will use static shape if available . Otherwise , it will add graph <nl> operations , which could result in a performance hit . <nl> def _remove_squeezable_dimensions ( predictions , labels , weights ) : <nl> Args : <nl> predictions : Predicted values , a ` Tensor ` of arbitrary dimensions . <nl> labels : Optional label ` Tensor ` whose dimensions match ` predictions ` . <nl> - weights : Optional weight ` Tensor ` . It will be squeezed if its rank is 1 <nl> - more than the new rank of ` predictions ` <nl> + weights : Optional weight scalar or ` Tensor ` whose dimensions match <nl> + ` predictions ` . <nl> <nl> Returns : <nl> - Tuple of ` predictions ` , ` labels ` and ` weights ` , possibly with the last <nl> - dimension squeezed . <nl> + Tuple of ` predictions ` , ` labels ` and ` weights ` . Each of them possibly has <nl> + the last dimension squeezed , ` weights ` could be extended by one dimension . <nl> " " " <nl> predictions = ops . convert_to_tensor ( predictions ) <nl> if labels is not None : <nl> mmm a / tensorflow / python / ops / resource_variable_ops . py <nl> ppp b / tensorflow / python / ops / resource_variable_ops . py <nl> <nl> from tensorflow . python . ops import array_ops <nl> from tensorflow . python . ops import gen_array_ops <nl> from tensorflow . python . ops import gen_resource_variable_ops <nl> + from tensorflow . python . ops import gen_state_ops <nl> from tensorflow . python . ops import variables <nl> # go / tf - wildcard - import <nl> # pylint : disable = wildcard - import <nl> def _eager_safe_variable_handle ( shape , dtype , shared_name , name , graph_mode ) : <nl> return handle <nl> <nl> <nl> + def shape_safe_assign_variable_handle ( handle , shape , value , name = None ) : <nl> + " " " Helper that checks shape compatibility and assigns variable . " " " <nl> + value_tensor = ops . convert_to_tensor ( value ) <nl> + shape . assert_is_compatible_with ( value_tensor . shape ) <nl> + return gen_resource_variable_ops . assign_variable_op ( handle , <nl> + value_tensor , <nl> + name = name ) <nl> + <nl> + <nl> class ResourceVariable ( variables . Variable ) : <nl> " " " Variable based on resource handles . 
<nl> <nl> def numpy ( self ) : <nl> " numpy ( ) is only available when eager execution is enabled . " ) <nl> return self . read_value ( ) . numpy ( ) <nl> <nl> + def count_up_to ( self , limit ) : <nl> + " " " Increments this variable until it reaches ` limit ` . <nl> + <nl> + When that Op is run it tries to increment the variable by ` 1 ` . If <nl> + incrementing the variable would bring it above ` limit ` then the Op raises <nl> + the exception ` OutOfRangeError ` . <nl> + <nl> + If no error is raised , the Op outputs the value of the variable before <nl> + the increment . <nl> + <nl> + This is essentially a shortcut for ` count_up_to ( self , limit ) ` . <nl> + <nl> + Args : <nl> + limit : value at which incrementing the variable raises an error . <nl> + <nl> + Returns : <nl> + A ` Tensor ` that will hold the variable value before the increment . If no <nl> + other Op modifies this variable , the values produced will all be <nl> + distinct . <nl> + " " " <nl> + return gen_state_ops . resource_count_up_to ( self . handle , limit = limit , <nl> + T = self . dtype ) <nl> + <nl> def _set_save_slice_info ( self , save_slice_info ) : <nl> " " " Sets the slice info for this ` ResourceVariable ` . <nl> <nl> def assign_add ( self , delta , use_locking = None , name = None ) : <nl> return self . read_value ( ) <nl> <nl> def assign ( self , value , use_locking = None , name = None ) : <nl> + value_tensor = ops . convert_to_tensor ( value , dtype = self . dtype ) <nl> + self . _shape . assert_is_compatible_with ( value_tensor . shape ) <nl> with ops . control_dependencies ( [ <nl> gen_resource_variable_ops . assign_variable_op ( <nl> self . handle , <nl> - ops . convert_to_tensor ( value , dtype = self . dtype ) , <nl> + value_tensor , <nl> name = name ) <nl> ] ) : <nl> return self . read_value ( ) <nl> mmm a / tensorflow / python / ops / rnn_cell_impl . py <nl> ppp b / tensorflow / python / ops / rnn_cell_impl . py <nl> def __init__ ( self , num_units , forget_bias = 1 . 0 , <nl> if not state_is_tuple : <nl> logging . warn ( " % s : Using a concatenated state is slower and will soon be " <nl> " deprecated . Use state_is_tuple = True . " , self ) <nl> + <nl> + # Inputs must be 2 - dimensional . <nl> + self . input_spec = base_layer . InputSpec ( ndim = 2 ) <nl> + <nl> self . _num_units = num_units <nl> self . _forget_bias = forget_bias <nl> self . _state_is_tuple = state_is_tuple <nl> def output_size ( self ) : <nl> return self . _num_units <nl> <nl> def build ( self , inputs_shape ) : <nl> - if inputs_shape . ndims ! = 2 : <nl> - raise ValueError ( " Expected inputs . shape to be rank 2 , saw shape : % s " <nl> - % inputs_shape ) <nl> if inputs_shape [ 1 ] . value is None : <nl> raise ValueError ( " Expected inputs . shape [ - 1 ] to be known , saw shape : % s " <nl> % inputs_shape ) <nl> def call ( self , inputs , state ) : <nl> return new_h , new_state <nl> <nl> <nl> - class LSTMCell ( RNNCell ) : <nl> + class LSTMCell ( _LayerRNNCell ) : <nl> " " " Long short - term memory unit ( LSTM ) recurrent network cell . <nl> <nl> The default non - peephole implementation is based on : <nl> def __init__ ( self , num_units , <nl> initializer = None , num_proj = None , proj_clip = None , <nl> num_unit_shards = None , num_proj_shards = None , <nl> forget_bias = 1 . 0 , state_is_tuple = True , <nl> - activation = None , reuse = None ) : <nl> + activation = None , reuse = None , name = None ) : <nl> " " " Initialize the parameters for an LSTM cell . 
<nl> <nl> Args : <nl> def __init__ ( self , num_units , <nl> reuse : ( optional ) Python boolean describing whether to reuse variables <nl> in an existing scope . If not ` True ` , and the existing scope already has <nl> the given variables , an error is raised . <nl> + name : String , the name of the layer . Layers with the same name will <nl> + share weights , but to avoid mistakes we require reuse = True in such <nl> + cases . <nl> <nl> - When restoring from CudnnLSTM - trained checkpoints , must use <nl> - CudnnCompatibleLSTMCell instead . <nl> + When restoring from CudnnLSTM - trained checkpoints , use <nl> + ` CudnnCompatibleLSTMCell ` instead . <nl> " " " <nl> - super ( LSTMCell , self ) . __init__ ( _reuse = reuse ) <nl> + super ( LSTMCell , self ) . __init__ ( _reuse = reuse , name = name ) <nl> if not state_is_tuple : <nl> logging . warn ( " % s : Using a concatenated state is slower and will soon be " <nl> " deprecated . Use state_is_tuple = True . " , self ) <nl> def __init__ ( self , num_units , <nl> " deprecated and will be removed in Jan 2017 . " <nl> " Use a variable scope with a partitioner instead . " , self ) <nl> <nl> + # Inputs must be 2 - dimensional . <nl> + self . input_spec = base_layer . InputSpec ( ndim = 2 ) <nl> + <nl> self . _num_units = num_units <nl> self . _use_peepholes = use_peepholes <nl> self . _cell_clip = cell_clip <nl> def __init__ ( self , num_units , <nl> LSTMStateTuple ( num_units , num_units ) <nl> if state_is_tuple else 2 * num_units ) <nl> self . _output_size = num_units <nl> - self . _linear1 = None <nl> - self . _linear2 = None <nl> - if self . _use_peepholes : <nl> - self . _w_f_diag = None <nl> - self . _w_i_diag = None <nl> - self . _w_o_diag = None <nl> <nl> @ property <nl> def state_size ( self ) : <nl> def state_size ( self ) : <nl> def output_size ( self ) : <nl> return self . _output_size <nl> <nl> + def build ( self , inputs_shape ) : <nl> + if inputs_shape [ 1 ] . value is None : <nl> + raise ValueError ( " Expected inputs . shape [ - 1 ] to be known , saw shape : % s " <nl> + % inputs_shape ) <nl> + <nl> + input_depth = inputs_shape [ 1 ] . value <nl> + h_depth = self . _num_units if self . _num_proj is None else self . _num_proj <nl> + maybe_partitioner = ( <nl> + partitioned_variables . fixed_size_partitioner ( self . _num_unit_shards ) <nl> + if self . _num_unit_shards is not None <nl> + else None ) <nl> + self . _kernel = self . add_variable ( <nl> + _WEIGHTS_VARIABLE_NAME , <nl> + shape = [ input_depth + h_depth , 4 * self . _num_units ] , <nl> + initializer = self . _initializer , <nl> + partitioner = maybe_partitioner ) <nl> + self . _bias = self . add_variable ( <nl> + _BIAS_VARIABLE_NAME , <nl> + shape = [ 4 * self . _num_units ] , <nl> + initializer = init_ops . constant_initializer ( 0 . 0 , dtype = self . dtype ) ) <nl> + if self . _use_peepholes : <nl> + self . _w_f_diag = self . add_variable ( " w_f_diag " , shape = [ self . _num_units ] , <nl> + initializer = self . _initializer ) <nl> + self . _w_i_diag = self . add_variable ( " w_i_diag " , shape = [ self . _num_units ] , <nl> + initializer = self . _initializer ) <nl> + self . _w_o_diag = self . add_variable ( " w_o_diag " , shape = [ self . _num_units ] , <nl> + initializer = self . _initializer ) <nl> + <nl> + if self . _num_proj is not None : <nl> + maybe_proj_partitioner = ( <nl> + partitioned_variables . fixed_size_partitioner ( self . _num_proj_shards ) <nl> + if self . _num_proj_shards is not None <nl> + else None ) <nl> + self . _proj_kernel = self . 
add_variable ( <nl> + " projection / % s " % _WEIGHTS_VARIABLE_NAME , <nl> + shape = [ self . _num_units , self . _num_proj ] , <nl> + initializer = self . _initializer , <nl> + partitioner = maybe_proj_partitioner ) <nl> + <nl> + self . _built = True <nl> + <nl> def call ( self , inputs , state ) : <nl> " " " Run one step of LSTM . <nl> <nl> def call ( self , inputs , state ) : <nl> c_prev = array_ops . slice ( state , [ 0 , 0 ] , [ - 1 , self . _num_units ] ) <nl> m_prev = array_ops . slice ( state , [ 0 , self . _num_units ] , [ - 1 , num_proj ] ) <nl> <nl> - dtype = inputs . dtype <nl> input_size = inputs . get_shape ( ) . with_rank ( 2 ) [ 1 ] <nl> if input_size . value is None : <nl> raise ValueError ( " Could not infer input size from inputs . get_shape ( ) [ - 1 ] " ) <nl> - if self . _linear1 is None : <nl> - scope = vs . get_variable_scope ( ) <nl> - with vs . variable_scope ( <nl> - scope , initializer = self . _initializer ) as unit_scope : <nl> - if self . _num_unit_shards is not None : <nl> - unit_scope . set_partitioner ( <nl> - partitioned_variables . fixed_size_partitioner ( <nl> - self . _num_unit_shards ) ) <nl> - self . _linear1 = _Linear ( [ inputs , m_prev ] , 4 * self . _num_units , True ) <nl> <nl> # i = input_gate , j = new_input , f = forget_gate , o = output_gate <nl> - lstm_matrix = self . _linear1 ( [ inputs , m_prev ] ) <nl> + lstm_matrix = math_ops . matmul ( <nl> + array_ops . concat ( [ inputs , m_prev ] , 1 ) , self . _kernel ) <nl> + lstm_matrix = nn_ops . bias_add ( lstm_matrix , self . _bias ) <nl> + <nl> i , j , f , o = array_ops . split ( <nl> value = lstm_matrix , num_or_size_splits = 4 , axis = 1 ) <nl> # Diagonal connections <nl> - if self . _use_peepholes and self . _w_f_diag is None : <nl> - scope = vs . get_variable_scope ( ) <nl> - with vs . variable_scope ( <nl> - scope , initializer = self . _initializer ) as unit_scope : <nl> - with vs . variable_scope ( unit_scope ) : <nl> - self . _w_f_diag = vs . get_variable ( <nl> - " w_f_diag " , shape = [ self . _num_units ] , dtype = dtype ) <nl> - self . _w_i_diag = vs . get_variable ( <nl> - " w_i_diag " , shape = [ self . _num_units ] , dtype = dtype ) <nl> - self . _w_o_diag = vs . get_variable ( <nl> - " w_o_diag " , shape = [ self . _num_units ] , dtype = dtype ) <nl> - <nl> if self . _use_peepholes : <nl> c = ( sigmoid ( f + self . _forget_bias + self . _w_f_diag * c_prev ) * c_prev + <nl> sigmoid ( i + self . _w_i_diag * c_prev ) * self . _activation ( j ) ) <nl> def call ( self , inputs , state ) : <nl> m = sigmoid ( o ) * self . _activation ( c ) <nl> <nl> if self . _num_proj is not None : <nl> - if self . _linear2 is None : <nl> - scope = vs . get_variable_scope ( ) <nl> - with vs . variable_scope ( scope , initializer = self . _initializer ) : <nl> - with vs . variable_scope ( " projection " ) as proj_scope : <nl> - if self . _num_proj_shards is not None : <nl> - proj_scope . set_partitioner ( <nl> - partitioned_variables . fixed_size_partitioner ( <nl> - self . _num_proj_shards ) ) <nl> - self . _linear2 = _Linear ( m , self . _num_proj , False ) <nl> - m = self . _linear2 ( m ) <nl> + m = math_ops . matmul ( m , self . _proj_kernel ) <nl> <nl> if self . _proj_clip is not None : <nl> # pylint : disable = invalid - unary - operand - type <nl> mmm a / tensorflow / python / ops / state_ops . py <nl> ppp b / tensorflow / python / ops / state_ops . 
py <nl> def assign ( ref , value , validate_shape = None , use_locking = None , name = None ) : <nl> ref , value , use_locking = use_locking , name = name , <nl> validate_shape = validate_shape ) <nl> return ref . assign ( value ) <nl> + <nl> + <nl> + def count_up_to ( ref , limit , name = None ) : <nl> + r " " " Increments ' ref ' until it reaches ' limit ' . <nl> + <nl> + Args : <nl> + ref : A Variable . Must be one of the following types : ` int32 ` , ` int64 ` . <nl> + Should be from a scalar ` Variable ` node . <nl> + limit : An ` int ` . <nl> + If incrementing ref would bring it above limit , instead generates an <nl> + ' OutOfRange ' error . <nl> + name : A name for the operation ( optional ) . <nl> + <nl> + Returns : <nl> + A ` Tensor ` . Has the same type as ` ref ` . <nl> + A copy of the input before increment . If nothing else modifies the <nl> + input , the values produced will all be distinct . <nl> + " " " <nl> + if ref . dtype . _is_ref_dtype : <nl> + return gen_state_ops . count_up_to ( ref , limit = limit , name = name ) <nl> + return gen_state_ops . resource_count_up_to ( <nl> + ref . handle , limit , T = ref . dtype , name = name ) <nl> mmm a / tensorflow / python / ops / tensor_array_ops . py <nl> ppp b / tensorflow / python / ops / tensor_array_ops . py <nl> <nl> import contextlib <nl> <nl> from tensorflow . python . eager import context <nl> + from tensorflow . python . framework import constant_op <nl> + from tensorflow . python . framework import dtypes <nl> + from tensorflow . python . framework import errors_impl <nl> from tensorflow . python . framework import ops <nl> from tensorflow . python . framework import tensor_shape <nl> from tensorflow . python . framework import tensor_util <nl> <nl> from tensorflow . python . util import tf_should_use <nl> <nl> <nl> - # TensorArray object accesses many of the hidden generated ops , but is <nl> - # in fact built to wrap these methods . <nl> + # _GraphTensorArray accesses many of the hidden generated ops , but is in <nl> + # fact built to wrap these methods . <nl> # pylint : disable = protected - access <nl> - class TensorArray ( object ) : <nl> - " " " Class wrapping dynamic - sized , per - time - step , write - once Tensor arrays . <nl> - <nl> - This class is meant to be used with dynamic iteration primitives such as <nl> - ` while_loop ` and ` map_fn ` . It supports gradient back - propagation via special <nl> - " flow " control flow dependencies . <nl> + class _GraphTensorArray ( object ) : <nl> + " " " Graph - mode implementation of TensorArray . <nl> " " " <nl> <nl> def __init__ ( self , <nl> def __init__ ( self , <nl> element_shape = None , <nl> colocate_with_first_write_call = True , <nl> name = None ) : <nl> - " " " Construct a new TensorArray or wrap an existing TensorArray handle . <nl> - <nl> - A note about the parameter ` name ` : <nl> - <nl> - The name of the ` TensorArray ` ( even if passed in ) is uniquified : each time <nl> - a new ` TensorArray ` is created at runtime it is assigned its own name for <nl> - the duration of the run . This avoids name collisions if a ` TensorArray ` <nl> - is created within a ` while_loop ` . <nl> + " " " Constructs a graph mode TensorArray . <nl> <nl> Args : <nl> dtype : ( required ) data type of the TensorArray . <nl> def __init__ ( self , <nl> This is used when creating the TensorArray handle . If this value is <nl> set , handle should be None . <nl> handle : ( optional ) A ` Tensor ` handle to an existing TensorArray . 
If this <nl> - is set , tensor_array_name should be None . <nl> + is set , tensor_array_name should be None . Only supported in graph mode . <nl> flow : ( optional ) A float ` Tensor ` scalar coming from an existing <nl> - ` TensorArray . flow ` . <nl> + ` TensorArray . flow ` . Only supported in graph mode . <nl> infer_shape : ( optional , default : True ) If True , shape inference <nl> is enabled . In this case , all elements must have the same shape . <nl> element_shape : ( optional , default : None ) A ` TensorShape ` object specifying <nl> def create ( ) : <nl> <nl> @ property <nl> def flow ( self ) : <nl> - " " " The flow ` Tensor ` forcing ops leading to this TensorArray state . " " " <nl> return self . _flow <nl> <nl> @ property <nl> def dtype ( self ) : <nl> - " " " The data type of this TensorArray . " " " <nl> return self . _dtype <nl> <nl> @ property <nl> def handle ( self ) : <nl> - " " " The reference to the TensorArray . " " " <nl> return self . _handle <nl> <nl> def _merge_element_shape ( self , shape ) : <nl> def _maybe_colocate_with ( self , value ) : <nl> yield <nl> <nl> def identity ( self ) : <nl> - " " " Returns a TensorArray with the same content and properties . <nl> - <nl> - Returns : <nl> - A new TensorArray object with flow that ensures the control dependencies <nl> - from the contexts will become control dependencies for writes , reads , etc . <nl> - Use this object all for subsequent operations . <nl> - " " " <nl> + " " " See TensorArray . " " " <nl> flow = array_ops . identity ( self . _flow ) <nl> ta = TensorArray ( <nl> dtype = self . _dtype , handle = self . _handle , flow = flow , <nl> def identity ( self ) : <nl> return ta <nl> <nl> def grad ( self , source , flow = None , name = None ) : <nl> + " " " See TensorArray . " " " <nl> # tensor_array_grad requires a flow input when forward <nl> # TensorArrays are dynamically sized . This forces the creation <nl> # of the grad TensorArray only once the final forward array ' s size <nl> def grad ( self , source , flow = None , name = None ) : <nl> return g <nl> <nl> def read ( self , index , name = None ) : <nl> - " " " Read the value at location ` index ` in the TensorArray . <nl> - <nl> - Args : <nl> - index : 0 - D . int32 tensor with the index to read from . <nl> - name : A name for the operation ( optional ) . <nl> - <nl> - Returns : <nl> - The tensor at index ` index ` . <nl> - " " " <nl> + " " " See TensorArray . " " " <nl> value = gen_data_flow_ops . _tensor_array_read_v3 ( <nl> handle = self . _handle , <nl> index = index , <nl> def read ( self , index , name = None ) : <nl> <nl> @ tf_should_use . should_use_result <nl> def write ( self , index , value , name = None ) : <nl> - " " " Write ` value ` into index ` index ` of the TensorArray . <nl> - <nl> - Args : <nl> - index : 0 - D . int32 scalar with the index to write to . <nl> - value : N - D . Tensor of type ` dtype ` . The Tensor to write to this index . <nl> - name : A name for the operation ( optional ) . <nl> - <nl> - Returns : <nl> - A new TensorArray object with flow that ensures the write occurs . <nl> - Use this object all for subsequent operations . <nl> - <nl> - Raises : <nl> - ValueError : if there are more writers than specified . <nl> - " " " <nl> + " " " See TensorArray . " " " <nl> with ops . name_scope ( name , " TensorArrayWrite " , [ self . _handle , index , value ] ) : <nl> value = ops . convert_to_tensor ( value , name = " value " ) <nl> if self . 
_infer_shape : <nl> def write ( self , index , value , name = None ) : <nl> return ta <nl> <nl> def stack ( self , name = None ) : <nl> - " " " Return the values in the TensorArray as a stacked ` Tensor ` . <nl> - <nl> - All of the values must have been written and their shapes must all match . <nl> - If input shapes have rank - ` R ` , then output shape will have rank - ` ( R + 1 ) ` . <nl> - <nl> - Args : <nl> - name : A name for the operation ( optional ) . <nl> - <nl> - Returns : <nl> - All the tensors in the TensorArray stacked into one tensor . <nl> - " " " <nl> + " " " See TensorArray . " " " <nl> with ops . colocate_with ( self . _handle ) : <nl> with ops . name_scope ( name , " TensorArrayStack " , [ self . _handle ] ) : <nl> return self . gather ( math_ops . range ( 0 , self . size ( ) ) , name = name ) <nl> <nl> def gather ( self , indices , name = None ) : <nl> - " " " Return selected values in the TensorArray as a packed ` Tensor ` . <nl> - <nl> - All of selected values must have been written and their shapes <nl> - must all match . <nl> - <nl> - Args : <nl> - indices : A ` 1 - D ` ` Tensor ` taking values in ` [ 0 , max_value ) ` . If <nl> - the ` TensorArray ` is not dynamic , ` max_value = size ( ) ` . <nl> - name : A name for the operation ( optional ) . <nl> - <nl> - Returns : <nl> - The in the ` TensorArray ` selected by ` indices ` , packed into one tensor . <nl> - " " " <nl> + " " " See TensorArray . " " " <nl> if self . _element_shape : <nl> element_shape = self . _element_shape [ 0 ] <nl> else : <nl> def gather ( self , indices , name = None ) : <nl> return value <nl> <nl> def concat ( self , name = None ) : <nl> - " " " Return the values in the TensorArray as a concatenated ` Tensor ` . <nl> - <nl> - All of the values must have been written , their ranks must match , and <nl> - and their shapes must all match for all dimensions except the first . <nl> - <nl> - Args : <nl> - name : A name for the operation ( optional ) . <nl> - <nl> - Returns : <nl> - All the tensors in the TensorArray concatenated into one tensor . <nl> - " " " <nl> + " " " See TensorArray . " " " <nl> if self . _element_shape and self . _element_shape [ 0 ] . dims is not None : <nl> element_shape_except0 = ( <nl> tensor_shape . TensorShape ( self . _element_shape [ 0 ] . dims [ 1 : ] ) ) <nl> def concat ( self , name = None ) : <nl> <nl> @ tf_should_use . should_use_result <nl> def unstack ( self , value , name = None ) : <nl> - " " " Unstack the values of a ` Tensor ` in the TensorArray . <nl> - <nl> - If input value shapes have rank - ` R ` , then the output TensorArray will <nl> - contain elements whose shapes are rank - ` ( R - 1 ) ` . <nl> - <nl> - Args : <nl> - value : ( N + 1 ) - D . Tensor of type ` dtype ` . The Tensor to unstack . <nl> - name : A name for the operation ( optional ) . <nl> - <nl> - Returns : <nl> - A new TensorArray object with flow that ensures the unstack occurs . <nl> - Use this object all for subsequent operations . <nl> - <nl> - Raises : <nl> - ValueError : if the shape inference fails . <nl> - " " " <nl> + " " " See TensorArray . " " " <nl> with ops . name_scope ( name , " TensorArrayUnstack " , [ self . _handle , value ] ) : <nl> num_elements = array_ops . shape ( value ) [ 0 ] <nl> return self . scatter ( <nl> def unstack ( self , value , name = None ) : <nl> <nl> @ tf_should_use . should_use_result <nl> def scatter ( self , indices , value , name = None ) : <nl> - " " " Scatter the values of a ` Tensor ` in specific indices of a ` TensorArray ` . 
<nl> - <nl> - Args : <nl> - indices : A ` 1 - D ` ` Tensor ` taking values in ` [ 0 , max_value ) ` . If <nl> - the ` TensorArray ` is not dynamic , ` max_value = size ( ) ` . <nl> - value : ( N + 1 ) - D . Tensor of type ` dtype ` . The Tensor to unpack . <nl> - name : A name for the operation ( optional ) . <nl> - <nl> - Returns : <nl> - A new TensorArray object with flow that ensures the scatter occurs . <nl> - Use this object all for subsequent operations . <nl> - <nl> - Raises : <nl> - ValueError : if the shape inference fails . <nl> - " " " <nl> + " " " See TensorArray . " " " <nl> with ops . name_scope ( name , " TensorArrayScatter " , <nl> [ self . _handle , value , indices ] ) : <nl> value = ops . convert_to_tensor ( value , name = " value " ) <nl> def scatter ( self , indices , value , name = None ) : <nl> <nl> @ tf_should_use . should_use_result <nl> def split ( self , value , lengths , name = None ) : <nl> - " " " Split the values of a ` Tensor ` into the TensorArray . <nl> - <nl> - Args : <nl> - value : ( N + 1 ) - D . Tensor of type ` dtype ` . The Tensor to split . <nl> - lengths : 1 - D . int32 vector with the lengths to use when splitting <nl> - ` value ` along its first dimension . <nl> - name : A name for the operation ( optional ) . <nl> - <nl> - Returns : <nl> - A new TensorArray object with flow that ensures the split occurs . <nl> - Use this object all for subsequent operations . <nl> - <nl> - Raises : <nl> - ValueError : if the shape inference fails . <nl> - " " " <nl> + " " " See TensorArray . " " " <nl> with ops . name_scope ( name , " TensorArraySplit " , <nl> [ self . _handle , value , lengths ] ) : <nl> value = ops . convert_to_tensor ( value , name = " value " ) <nl> def split ( self , value , lengths , name = None ) : <nl> return ta <nl> <nl> def size ( self , name = None ) : <nl> - " " " Return the size of the TensorArray . " " " <nl> + " " " See TensorArray . " " " <nl> return gen_data_flow_ops . _tensor_array_size_v3 ( <nl> handle = self . _handle , flow_in = self . flow , name = name ) <nl> <nl> @ tf_should_use . should_use_result <nl> def close ( self , name = None ) : <nl> - " " " Close the current TensorArray . " " " <nl> + " " " See TensorArray . " " " <nl> return gen_data_flow_ops . _tensor_array_close_v3 ( <nl> handle = self . _handle , name = name ) <nl> <nl> # pylint : enable = protected - access <nl> + <nl> + <nl> + # pylint : disable = protected - access <nl> + def _eager_write_no_copy ( ta , index , value ) : <nl> + " " " Writes value into an _EagerTensorArray without creating a new TensorArray . <nl> + <nl> + Args : <nl> + ta : _EagerTensorArray into which to write value . <nl> + index : 0 - D . int32 scalar with the index to write to . <nl> + value : N - D . Tensor of type ` dtype ` . The Tensor to write to this index . <nl> + <nl> + Raises : <nl> + errors_impl . AlreadyExistsError : attempting to overwrite an entry . <nl> + errors_impl . InvalidArgumentError : value dtype does not match ` ta ` ' s dtype . <nl> + errors_impl . OutOfRangeError : ` index ` is out of bounds . <nl> + ValueError : shape of ` value ` is not consistent with inferred shape . <nl> + " " " <nl> + <nl> + if isinstance ( index , ops . EagerTensor ) : <nl> + index = index . numpy ( ) <nl> + <nl> + if index < 0 : <nl> + raise errors_impl . OutOfRangeError ( <nl> + None , None , <nl> + " Writing to negative indices ( index % d ) is not allowed . " % index ) <nl> + <nl> + tensor_array = ta . 
_tensor_array <nl> + size = len ( tensor_array ) <nl> + if index > = size : <nl> + if not ta . _dynamic_size : <nl> + raise errors_impl . OutOfRangeError ( <nl> + None , None , <nl> + " Tried to write to index % d but array is not resizeable and size " <nl> + " is : % d " % ( index , size ) ) <nl> + tensor_array . extend ( [ None for _ in range ( index - size + 1 ) ] ) <nl> + <nl> + if not isinstance ( value , ops . EagerTensor ) : <nl> + value = constant_op . constant ( value ) <nl> + <nl> + if ta . _infer_shape : <nl> + if ta . _element_shape is None : <nl> + ta . _element_shape = value . shape <nl> + elif ta . _element_shape ! = value . shape : <nl> + raise ValueError ( " Incompatible shape for value ( % s ) , expected ( % s ) " % <nl> + ( value . shape . as_list ( ) , ta . _element_shape . as_list ( ) ) ) <nl> + <nl> + if ta . _dtype ! = value . dtype : <nl> + raise errors_impl . InvalidArgumentError ( <nl> + None , None , <nl> + " TensorArray dtype is % s but Op is trying to write dtype % s " % <nl> + ( ta . _dtype . name , value . dtype . name ) ) <nl> + <nl> + if ta . _tensor_array [ index ] is not None : <nl> + raise errors_impl . AlreadyExistsError ( <nl> + None , None , <nl> + " Could not write to TensorArray index % d because it has already been " <nl> + " written to . " % index ) <nl> + <nl> + tensor_array [ index ] = value <nl> + <nl> + # pylint : enable = protected - access <nl> + <nl> + <nl> + class _EagerTensorArray ( object ) : <nl> + " " " Eager - mode implementation of TensorArray . <nl> + " " " <nl> + <nl> + def __init__ ( self , <nl> + dtype , <nl> + size = None , <nl> + dynamic_size = None , <nl> + clear_after_read = None , <nl> + tensor_array_name = None , <nl> + handle = None , <nl> + flow = None , <nl> + infer_shape = True , <nl> + element_shape = None , <nl> + colocate_with_first_write_call = True , <nl> + name = None ) : <nl> + " " " Constructs an Eager mode TensorArray . <nl> + <nl> + Args : <nl> + dtype : ( required ) data type of the TensorArray . <nl> + size : ( optional ) int32 scalar ` Tensor ` : the size of the TensorArray . <nl> + Required if handle is not provided . <nl> + dynamic_size : ( optional ) Python bool : If true , writes to the TensorArray <nl> + can grow the TensorArray past its initial size . Default : False . <nl> + clear_after_read : Boolean ( optional , default : True ) . If True , clear <nl> + TensorArray values after reading them . This disables read - many <nl> + semantics , but allows early release of memory . <nl> + tensor_array_name : unused . <nl> + handle : unsupported . <nl> + flow : unsupported . <nl> + infer_shape : used for error checking , same semantics as TensorArray . <nl> + element_shape : used for error checking , same semantics as TensorArray . <nl> + colocate_with_first_write_call : unsupported . <nl> + name : unsupported . <nl> + <nl> + Raises : <nl> + ValueError : handle or flow are supplied , or if size is not supplied . <nl> + " " " <nl> + <nl> + del ( flow , tensor_array_name , name ) # not meaningful in Eager <nl> + <nl> + if handle is not None : <nl> + raise ValueError ( " TensorArray handles are not supported in Eager mode . " ) <nl> + if size is None : <nl> + raise ValueError ( " Size must be declared for TensorArrays in Eager mode . " ) <nl> + <nl> + # These attributes are not meaningful in Eager , but some library functions <nl> + # ( e . g . , those in control_flow_ops . py ) access them to create new tensor <nl> + # arrays ; as such , we define them for the sake of compatibility . <nl> + self . 
_handle = None <nl> + # we assign a dummy value to _flow in case other code assumes it to be <nl> + # a Tensor <nl> + self . _flow = constant_op . constant ( 0 , dtype = dtypes . int32 ) <nl> + self . _infer_shape = infer_shape <nl> + self . _element_shape = element_shape <nl> + self . _colocate_with_first_write_call = colocate_with_first_write_call <nl> + <nl> + self . _dtype = dtype <nl> + self . _dynamic_size = dynamic_size or False <nl> + self . _clear_after_read = ( <nl> + True if clear_after_read is None else clear_after_read ) <nl> + self . _previously_read_indices = [ ] <nl> + <nl> + if isinstance ( size , ops . EagerTensor ) : <nl> + size = size . numpy ( ) <nl> + self . _tensor_array = [ None for _ in range ( size ) ] <nl> + <nl> + @ property <nl> + def flow ( self ) : <nl> + " " " Flows are not meaningful in Eager ; this exists for compatibility . " " " <nl> + return self . _flow <nl> + <nl> + @ property <nl> + def dtype ( self ) : <nl> + return self . _dtype <nl> + <nl> + @ property <nl> + def handle ( self ) : <nl> + " " " Handles are not meaningful in Eager ; this exists for compatibility . " " " <nl> + return self . _handle <nl> + <nl> + def _identity_without_array ( self ) : <nl> + " " " Returns a new TensorArray with the same properties as this Eager one . <nl> + <nl> + NB : Does not set the underlying _tensor_array attribute . <nl> + " " " <nl> + ta = TensorArray ( <nl> + dtype = self . _dtype , <nl> + size = len ( self . _tensor_array ) , <nl> + dynamic_size = self . _dynamic_size , <nl> + clear_after_read = self . _clear_after_read , <nl> + handle = self . _handle , <nl> + flow = self . _flow , <nl> + infer_shape = self . _infer_shape , <nl> + element_shape = self . _element_shape , <nl> + colocate_with_first_write_call = self . _colocate_with_first_write_call ) <nl> + ta . _implementation . _previously_read_indices = self . _previously_read_indices # pylint : disable = protected - access <nl> + return ta <nl> + <nl> + def identity ( self ) : <nl> + " " " See TensorArray . " " " <nl> + ta = self . _identity_without_array ( ) <nl> + ta . _implementation . _tensor_array = [ t for t in self . _tensor_array ] # pylint : disable = protected - access <nl> + return ta <nl> + <nl> + def grad ( self , source , flow = None , name = None ) : <nl> + raise NotImplementedError ( <nl> + " TensorArray . grad is not supported in Eager mode ; Eager ' s gradient " <nl> + " implementation does not use / need this function to compute gradients " <nl> + " of operations that use TensorArrays . " ) <nl> + <nl> + def read ( self , index , name = None ) : <nl> + " " " See TensorArray . " " " <nl> + del name # not meaningful in Eager mode <nl> + <nl> + if isinstance ( index , ops . EagerTensor ) : <nl> + index = index . numpy ( ) <nl> + <nl> + if index < 0 : <nl> + raise errors_impl . OutOfRangeError ( <nl> + None , None , <nl> + " Reading from negative indices ( index % d ) is not allowed . " % index ) <nl> + <nl> + if index > = len ( self . _tensor_array ) : <nl> + raise errors_impl . OutOfRangeError ( <nl> + None , None , " Tried to read from index % d but array size is : % d " % <nl> + ( index , len ( self . _tensor_array ) ) ) <nl> + <nl> + tensor = self . _tensor_array [ index ] <nl> + if tensor is None : <nl> + if index in self . _previously_read_indices : <nl> + raise errors_impl . InvalidArgumentError ( <nl> + None , None , <nl> + " Could not read index % d twice because it was cleared after " <nl> + " a previous read ( perhaps try setting clear_after_read = false ? 
) " % <nl> + index ) <nl> + else : <nl> + raise errors_impl . InvalidArgumentError ( <nl> + None , None , <nl> + " Could not read from TensorArray index % d because it has not yet " <nl> + " been written to . " % index ) <nl> + <nl> + if self . _clear_after_read : <nl> + self . _tensor_array [ index ] = None <nl> + self . _previously_read_indices . append ( index ) <nl> + return tensor <nl> + <nl> + def write ( self , index , value , name = None ) : <nl> + " " " See TensorArray . " " " <nl> + del name # not meaningful in Eager mode <nl> + ta = self . identity ( ) <nl> + _eager_write_no_copy ( ta . _implementation , index , value ) # pylint : disable = protected - access <nl> + return ta <nl> + <nl> + def stack ( self , name = None ) : <nl> + " " " See TensorArray . " " " <nl> + try : <nl> + return array_ops . stack ( self . _tensor_array , name = name ) <nl> + except ValueError : <nl> + if None in self . _tensor_array : <nl> + idx = self . _tensor_array . index ( None ) <nl> + raise errors_impl . InvalidArgumentError ( <nl> + None , None , " Could not read from TensorArray index % d because " <nl> + " it has not yet been written to . " % idx ) <nl> + else : <nl> + raise <nl> + <nl> + def gather ( self , indices , name = None ) : <nl> + " " " See TensorArray . " " " <nl> + del name # not meaningful in Eager mode <nl> + return array_ops . stack ( [ self . _tensor_array [ i ] for i in indices . numpy ( ) ] ) <nl> + <nl> + def concat ( self , name = None ) : <nl> + " " " See TensorArray . " " " <nl> + try : <nl> + return array_ops . concat ( self . _tensor_array , 0 , name = name ) <nl> + except errors_impl . OpError : <nl> + # Reproduce a subset of the error - handling for graph - mode TensorArrays . <nl> + shapes = [ t . shape for t in self . _tensor_array ] <nl> + ndims = [ s . ndims for s in shapes ] <nl> + if None in self . _tensor_array : <nl> + # Concatenating empty TensorArrays is permitted if the element <nl> + # shape is defined ; the output is a tensor with shape <nl> + # [ 0 ] + self . _element_shape [ 1 : ] <nl> + if all ( t is None for t in self . _tensor_array ) : <nl> + if self . _element_shape is not None : <nl> + return constant_op . constant ( [ ] , shape = [ 0 ] + self . _element_shape [ 1 : ] ) <nl> + else : <nl> + raise errors_impl . UnimplementedError ( <nl> + None , None , " TensorArray has size zero , but " <nl> + " element_shape_except0 % s is not fully defined . Currently only " <nl> + " static shapes are supported when concatenating zero - size " <nl> + " TensorArrays . " % self . _element_shape [ 1 : ] ) <nl> + # Concatenating a TensorArray in which some but not all entries have <nl> + # been written to is not allowed . <nl> + idx = self . _tensor_array . index ( None ) <nl> + raise errors_impl . InvalidArgumentError ( <nl> + None , None , " Could not read from TensorArray index % d because " <nl> + " it has not yet been written to . " % idx ) <nl> + elif 0 in ndims : <nl> + idx = ndims . index ( 0 ) <nl> + raise errors_impl . InvalidArgumentError ( <nl> + None , None , " Concat saw a scalar shape at index % d but requires " <nl> + " at least vectors . " % idx ) <nl> + else : <nl> + raise <nl> + <nl> + def unstack ( self , value , name = None ) : <nl> + " " " See TensorArray . " " " <nl> + tensors = array_ops . unstack ( value , name = name ) <nl> + if len ( tensors ) > len ( self . _tensor_array ) and not self . 
_dynamic_size : <nl> + raise ValueError ( <nl> + " Cannot unstack % d tensors into a TensorArray of static size % d " % <nl> + ( len ( tensors ) , len ( self . _tensors ) ) ) <nl> + ta = self . _identity_without_array ( ) <nl> + ta . _implementation . _tensor_array = tensors # pylint : disable = protected - access <nl> + return ta <nl> + <nl> + def scatter ( self , indices , value , name = None ) : <nl> + " " " See TensorArray . " " " <nl> + del name # unused in Eager <nl> + ta = self . identity ( ) <nl> + for index , val in zip ( indices . numpy ( ) , array_ops . unstack ( value ) ) : <nl> + _eager_write_no_copy ( ta . _implementation , index , val ) # pylint : disable = protected - access <nl> + return ta <nl> + <nl> + def split ( self , value , lengths , name = None ) : <nl> + " " " See TensorArray . " " " <nl> + # error checking to match graph - mode errors <nl> + value = constant_op . constant ( value ) <nl> + lengths = constant_op . constant ( lengths ) <nl> + sum_lengths = math_ops . reduce_sum ( lengths ) <nl> + if lengths . shape . ndims ! = 1 : <nl> + raise errors_impl . InvalidArgumentError ( <nl> + None , None , " Expected lengths to be a vector , received shape : % s " % <nl> + lengths . shape . as_list ( ) ) <nl> + elif value . shape . ndims = = 0 : <nl> + raise errors_impl . InvalidArgumentError ( <nl> + None , None , " Expected value to be at least a vector , " <nl> + " but received shape : % s " % value . shape . as_list ( ) ) <nl> + elif sum_lengths . numpy ( ) ! = value . shape . as_list ( ) [ 0 ] : <nl> + raise errors_impl . InvalidArgumentError ( <nl> + None , None , " Expected sum of lengths to be equal to " <nl> + " values . shape [ 0 ] , but sum of lengths is % d and " <nl> + " value ' s shape is : % s " % ( sum_lengths . numpy ( ) , <nl> + value . shape . as_list ( ) ) ) <nl> + elif not self . _dynamic_size and lengths . shape [ 0 ] ! = len ( self . _tensor_array ) : <nl> + raise errors_impl . InvalidArgumentError ( <nl> + None , None , " TensorArray ' s size is not equal to the size of " <nl> + " lengths ( % d vs . % d ) , and the TensorArray is not marked as " <nl> + " dynamically resizeable " % ( len ( self . _tensor_array ) , <nl> + lengths . shape [ 0 ] ) ) <nl> + else : <nl> + ta = self . _identity_without_array ( ) <nl> + tensor_array = array_ops . split ( value , lengths , name = name ) <nl> + ta . _implementation . _tensor_array = tensor_array # pylint : disable = protected - access <nl> + return ta <nl> + <nl> + def size ( self , name = None ) : <nl> + " " " See TensorArray . " " " <nl> + del name # not meaningful in Eager mode <nl> + return constant_op . constant ( len ( self . _tensor_array ) ) <nl> + <nl> + def close ( self , name = None ) : <nl> + del name # not meaningful in Eager mode <nl> + del self . _tensor_array [ : ] <nl> + return <nl> + <nl> + <nl> + # TensorArray is designed to hide an underlying implementation object <nl> + # and as such accesses many of that object ' s hidden fields . <nl> + # pylint : disable = protected - access <nl> + class TensorArray ( object ) : <nl> + " " " Class wrapping dynamic - sized , per - time - step , write - once Tensor arrays . <nl> + <nl> + This class is meant to be used with dynamic iteration primitives such as <nl> + ` while_loop ` and ` map_fn ` . It supports gradient back - propagation via special <nl> + " flow " control flow dependencies . 
<nl> + " " " <nl> + <nl> + def __init__ ( self , <nl> + dtype , <nl> + size = None , <nl> + dynamic_size = None , <nl> + clear_after_read = None , <nl> + tensor_array_name = None , <nl> + handle = None , <nl> + flow = None , <nl> + infer_shape = True , <nl> + element_shape = None , <nl> + colocate_with_first_write_call = True , <nl> + name = None ) : <nl> + " " " Construct a new TensorArray or wrap an existing TensorArray handle . <nl> + <nl> + A note about the parameter ` name ` : <nl> + <nl> + The name of the ` TensorArray ` ( even if passed in ) is uniquified : each time <nl> + a new ` TensorArray ` is created at runtime it is assigned its own name for <nl> + the duration of the run . This avoids name collisions if a ` TensorArray ` <nl> + is created within a ` while_loop ` . <nl> + <nl> + Args : <nl> + dtype : ( required ) data type of the TensorArray . <nl> + size : ( optional ) int32 scalar ` Tensor ` : the size of the TensorArray . <nl> + Required if handle is not provided . <nl> + dynamic_size : ( optional ) Python bool : If true , writes to the TensorArray <nl> + can grow the TensorArray past its initial size . Default : False . <nl> + clear_after_read : Boolean ( optional , default : True ) . If True , clear <nl> + TensorArray values after reading them . This disables read - many <nl> + semantics , but allows early release of memory . <nl> + tensor_array_name : ( optional ) Python string : the name of the TensorArray . <nl> + This is used when creating the TensorArray handle . If this value is <nl> + set , handle should be None . <nl> + handle : ( optional ) A ` Tensor ` handle to an existing TensorArray . If this <nl> + is set , tensor_array_name should be None . Only supported in graph mode . <nl> + flow : ( optional ) A float ` Tensor ` scalar coming from an existing <nl> + ` TensorArray . flow ` . Only supported in graph mode . <nl> + infer_shape : ( optional , default : True ) If True , shape inference <nl> + is enabled . In this case , all elements must have the same shape . <nl> + element_shape : ( optional , default : None ) A ` TensorShape ` object specifying <nl> + the shape constraints of each of the elements of the TensorArray . <nl> + Need not be fully defined . <nl> + colocate_with_first_write_call : If ` True ` , the TensorArray will be <nl> + colocated on the same device as the Tensor used on its first write <nl> + ( write operations include ` write ` , ` unstack ` , and ` split ` ) . If ` False ` , <nl> + the TensorArray will be placed on the device determined by the <nl> + device context available during its initialization . <nl> + name : A name for the operation ( optional ) . <nl> + <nl> + Raises : <nl> + ValueError : if both handle and tensor_array_name are provided . <nl> + TypeError : if handle is provided but is not a Tensor . <nl> + " " " <nl> + if context . in_graph_mode ( ) : <nl> + implementation = _GraphTensorArray <nl> + else : <nl> + implementation = _EagerTensorArray <nl> + <nl> + self . _implementation = implementation ( <nl> + dtype , <nl> + size = size , <nl> + dynamic_size = dynamic_size , <nl> + clear_after_read = clear_after_read , <nl> + tensor_array_name = tensor_array_name , <nl> + handle = handle , <nl> + flow = flow , <nl> + infer_shape = infer_shape , <nl> + element_shape = element_shape , <nl> + colocate_with_first_write_call = colocate_with_first_write_call , <nl> + name = name ) <nl> + <nl> + @ property <nl> + def flow ( self ) : <nl> + " " " The flow ` Tensor ` forcing ops leading to this TensorArray state . 
" " " <nl> + return self . _implementation . _flow <nl> + <nl> + @ property <nl> + def dtype ( self ) : <nl> + " " " The data type of this TensorArray . " " " <nl> + return self . _implementation . _dtype <nl> + <nl> + @ property <nl> + def handle ( self ) : <nl> + " " " The reference to the TensorArray . " " " <nl> + return self . _implementation . _handle <nl> + <nl> + @ property <nl> + def _infer_shape ( self ) : <nl> + return self . _implementation . _infer_shape <nl> + <nl> + @ _infer_shape . setter <nl> + def _infer_shape ( self , infer_shape ) : <nl> + self . _implementation . _infer_shape = infer_shape <nl> + <nl> + @ property <nl> + def _element_shape ( self ) : <nl> + return self . _implementation . _element_shape <nl> + <nl> + @ _element_shape . setter <nl> + def _element_shape ( self , element_shape ) : <nl> + self . _implementation . _element_shape = element_shape <nl> + <nl> + @ property <nl> + def _colocate_with_first_write_call ( self ) : <nl> + return self . _implementation . _colocate_with_first_write_call <nl> + <nl> + @ property <nl> + def _colocate_with ( self ) : <nl> + return self . _implementation . _colocate_with <nl> + <nl> + @ _colocate_with . setter <nl> + def _colocate_with ( self , colocate_with ) : <nl> + self . _implementation . _colocate_with = colocate_with <nl> + <nl> + def identity ( self ) : <nl> + " " " Returns a TensorArray with the same content and properties . <nl> + <nl> + Returns : <nl> + A new TensorArray object with flow that ensures the control dependencies <nl> + from the contexts will become control dependencies for writes , reads , etc . <nl> + Use this object all for subsequent operations . <nl> + " " " <nl> + return self . _implementation . identity ( ) <nl> + <nl> + def grad ( self , source , flow = None , name = None ) : <nl> + return self . _implementation . grad ( source , flow = flow , name = name ) <nl> + <nl> + def read ( self , index , name = None ) : <nl> + " " " Read the value at location ` index ` in the TensorArray . <nl> + <nl> + Args : <nl> + index : 0 - D . int32 tensor with the index to read from . <nl> + name : A name for the operation ( optional ) . <nl> + <nl> + Returns : <nl> + The tensor at index ` index ` . <nl> + " " " <nl> + return self . _implementation . read ( index , name = name ) <nl> + <nl> + @ tf_should_use . should_use_result <nl> + def write ( self , index , value , name = None ) : <nl> + " " " Write ` value ` into index ` index ` of the TensorArray . <nl> + <nl> + Args : <nl> + index : 0 - D . int32 scalar with the index to write to . <nl> + value : N - D . Tensor of type ` dtype ` . The Tensor to write to this index . <nl> + name : A name for the operation ( optional ) . <nl> + <nl> + Returns : <nl> + A new TensorArray object with flow that ensures the write occurs . <nl> + Use this object all for subsequent operations . <nl> + <nl> + Raises : <nl> + ValueError : if there are more writers than specified . <nl> + " " " <nl> + return self . _implementation . write ( index , value , name = name ) <nl> + <nl> + def stack ( self , name = None ) : <nl> + " " " Return the values in the TensorArray as a stacked ` Tensor ` . <nl> + <nl> + All of the values must have been written and their shapes must all match . <nl> + If input shapes have rank - ` R ` , then output shape will have rank - ` ( R + 1 ) ` . <nl> + <nl> + Args : <nl> + name : A name for the operation ( optional ) . <nl> + <nl> + Returns : <nl> + All the tensors in the TensorArray stacked into one tensor . <nl> + " " " <nl> + return self . 
_implementation . stack ( name = name ) <nl> + <nl> + def gather ( self , indices , name = None ) : <nl> + " " " Return selected values in the TensorArray as a packed ` Tensor ` . <nl> + <nl> + All of the selected values must have been written and their shapes <nl> + must all match . <nl> + <nl> + Args : <nl> + indices : A ` 1 - D ` ` Tensor ` taking values in ` [ 0 , max_value ) ` . If <nl> + the ` TensorArray ` is not dynamic , ` max_value = size ( ) ` . <nl> + name : A name for the operation ( optional ) . <nl> + <nl> + Returns : <nl> + The tensors in the ` TensorArray ` selected by ` indices ` , packed into one <nl> + tensor . <nl> + " " " <nl> + return self . _implementation . gather ( indices , name = name ) <nl> + <nl> + def concat ( self , name = None ) : <nl> + " " " Return the values in the TensorArray as a concatenated ` Tensor ` . <nl> + <nl> + All of the values must have been written , their ranks must match , and <nl> + their shapes must all match for all dimensions except the first . <nl> + <nl> + Args : <nl> + name : A name for the operation ( optional ) . <nl> + <nl> + Returns : <nl> + All the tensors in the TensorArray concatenated into one tensor . <nl> + " " " <nl> + return self . _implementation . concat ( name = name ) <nl> + <nl> + @ tf_should_use . should_use_result <nl> + def unstack ( self , value , name = None ) : <nl> + " " " Unstack the values of a ` Tensor ` in the TensorArray . <nl> + <nl> + If input value shapes have rank - ` R ` , then the output TensorArray will <nl> + contain elements whose shapes are rank - ` ( R - 1 ) ` . <nl> + <nl> + Args : <nl> + value : ( N + 1 ) - D . Tensor of type ` dtype ` . The Tensor to unstack . <nl> + name : A name for the operation ( optional ) . <nl> + <nl> + Returns : <nl> + A new TensorArray object with flow that ensures the unstack occurs . <nl> + Use this object for all subsequent operations . <nl> + <nl> + Raises : <nl> + ValueError : if the shape inference fails . <nl> + " " " <nl> + return self . _implementation . unstack ( value , name = name ) <nl> + <nl> + @ tf_should_use . should_use_result <nl> + def scatter ( self , indices , value , name = None ) : <nl> + " " " Scatter the values of a ` Tensor ` in specific indices of a ` TensorArray ` . <nl> + <nl> + Args : <nl> + indices : A ` 1 - D ` ` Tensor ` taking values in ` [ 0 , max_value ) ` . If <nl> + the ` TensorArray ` is not dynamic , ` max_value = size ( ) ` . <nl> + value : ( N + 1 ) - D . Tensor of type ` dtype ` . The Tensor to unpack . <nl> + name : A name for the operation ( optional ) . <nl> + <nl> + Returns : <nl> + A new TensorArray object with flow that ensures the scatter occurs . <nl> + Use this object for all subsequent operations . <nl> + <nl> + Raises : <nl> + ValueError : if the shape inference fails . <nl> + " " " <nl> + return self . _implementation . scatter ( indices , value , name = name ) <nl> + <nl> + @ tf_should_use . should_use_result <nl> + def split ( self , value , lengths , name = None ) : <nl> + " " " Split the values of a ` Tensor ` into the TensorArray . <nl> + <nl> + Args : <nl> + value : ( N + 1 ) - D . Tensor of type ` dtype ` . The Tensor to split . <nl> + lengths : 1 - D . int32 vector with the lengths to use when splitting <nl> + ` value ` along its first dimension . <nl> + name : A name for the operation ( optional ) . <nl> + <nl> + Returns : <nl> + A new TensorArray object with flow that ensures the split occurs . <nl> + Use this object for all subsequent operations . 
<nl> + <nl> + Raises : <nl> + ValueError : if the shape inference fails . <nl> + " " " <nl> + return self . _implementation . split ( value , lengths , name = name ) <nl> + <nl> + def size ( self , name = None ) : <nl> + " " " Return the size of the TensorArray . " " " <nl> + return self . _implementation . size ( name = name ) <nl> + <nl> + @ tf_should_use . should_use_result <nl> + def close ( self , name = None ) : <nl> + " " " Close the current TensorArray . " " " <nl> + return self . _implementation . close ( name = name ) <nl> + <nl> + # pylint : enable = protected - access <nl> mmm a / tensorflow / python / profiler / internal / flops_registry . py <nl> ppp b / tensorflow / python / profiler / internal / flops_registry . py <nl> def _max_pool_grad_flops ( graph , node ) : <nl> kernel_area = _list_product ( kernel_shape ) <nl> orig_out_shape = graph_util . tensor_shape_from_node_def_name ( graph , <nl> node . input [ 1 ] ) <nl> + orig_out_shape . assert_is_fully_defined ( ) <nl> max_pool_ops = kernel_area * orig_out_shape . num_elements ( ) <nl> return ops . OpStats ( " flops " , max_pool_ops + orig_out_shape . num_elements ( ) ) <nl> <nl> mmm a / tensorflow / python / training / input . py <nl> ppp b / tensorflow / python / training / input . py <nl> def input_producer ( input_tensor , <nl> <nl> Raises : <nl> ValueError : If the shape of the input cannot be inferred from the arguments . <nl> + RuntimeError : If called with eager execution enabled . <nl> + <nl> + @ compatibility ( eager ) <nl> + Input pipelines based on Queues are not supported when eager execution is <nl> + enabled . Please use the ` tf . data ` API to ingest data under eager execution . <nl> + @ end_compatibility <nl> " " " <nl> if context . in_eager_mode ( ) : <nl> - raise ValueError ( <nl> - " Queue - using input pipelines are not supported when eager execution is " <nl> - " enabled . Please use tf . data to ingest data into your model instead . " ) <nl> + raise RuntimeError ( <nl> + " Input pipelines based on Queues are not supported when eager execution " <nl> + " is enabled . Please use tf . data to ingest data into your model " <nl> + " instead . " ) <nl> with ops . name_scope ( name , " input_producer " , [ input_tensor ] ) : <nl> input_tensor = ops . convert_to_tensor ( input_tensor , name = " input_tensor " ) <nl> element_shape = input_tensor . shape [ 1 : ] . merge_with ( element_shape ) <nl> def string_input_producer ( string_tensor , <nl> Raises : <nl> ValueError : If the string_tensor is a null Python list . At runtime , <nl> will fail with an assertion if string_tensor becomes a null tensor . <nl> + <nl> + @ compatibility ( eager ) <nl> + Input pipelines based on Queues are not supported when eager execution is <nl> + enabled . Please use the ` tf . data ` API to ingest data under eager execution . <nl> + @ end_compatibility <nl> " " " <nl> not_null_err = " string_input_producer requires a non - null input tensor " <nl> if not isinstance ( string_tensor , ops . Tensor ) and not string_tensor : <nl> def range_input_producer ( limit , num_epochs = None , shuffle = True , seed = None , <nl> Returns : <nl> A Queue with the output integers . A ` QueueRunner ` for the Queue <nl> is added to the current ` Graph ` ' s ` QUEUE_RUNNER ` collection . <nl> + <nl> + @ compatibility ( eager ) <nl> + Input pipelines based on Queues are not supported when eager execution is <nl> + enabled . Please use the ` tf . data ` API to ingest data under eager execution . <nl> + @ end_compatibility <nl> " " " <nl> with ops . 
name_scope ( name , " input_producer " , [ limit ] ) as name : <nl> range_tensor = math_ops . range ( limit ) <nl> def slice_input_producer ( tensor_list , num_epochs = None , shuffle = True , seed = None , <nl> <nl> Raises : <nl> ValueError : if ` slice_input_producer ` produces nothing from ` tensor_list ` . <nl> + <nl> + @ compatibility ( eager ) <nl> + Input pipelines based on Queues are not supported when eager execution is <nl> + enabled . Please use the ` tf . data ` API to ingest data under eager execution . <nl> + @ end_compatibility <nl> " " " <nl> with ops . name_scope ( name , " input_producer " , tensor_list ) : <nl> tensor_list = ops . convert_n_to_tensor_or_indexed_slices ( tensor_list ) <nl> def _batch ( tensors , batch_size , keep_input , num_threads = 1 , capacity = 32 , <nl> " " " Helper function for ` batch ` and ` maybe_batch ` . " " " <nl> if context . in_eager_mode ( ) : <nl> raise ValueError ( <nl> - " Queue - using input pipelines are not supported when eager execution is " <nl> - " enabled . Please use tf . data to ingest data into your model instead . " ) <nl> + " Input pipelines based on Queues are not supported when eager execution " <nl> + " is enabled . Please use tf . data to ingest data into your model " <nl> + " instead . " ) <nl> tensor_list = _as_tensor_list ( tensors ) <nl> with ops . name_scope ( name , " batch " , list ( tensor_list ) + [ keep_input ] ) as name : <nl> tensor_list = _validate ( tensor_list ) <nl> def _batch_join ( tensors_list , batch_size , keep_input , capacity = 32 , <nl> " " " Helper function for ` batch_join ` and ` maybe_batch_join ` . " " " <nl> if context . in_eager_mode ( ) : <nl> raise ValueError ( <nl> - " Queue - using input pipelines are not supported when eager execution is " <nl> - " enabled . Please use tf . data to ingest data into your model instead . " ) <nl> + " Input pipelines based on Queues are not supported when eager execution " <nl> + " is enabled . Please use tf . data to ingest data into your model " <nl> + " instead . " ) <nl> tensor_list_list = _as_tensor_list_list ( tensors_list ) <nl> with ops . name_scope ( name , " batch_join " , <nl> _flatten ( tensor_list_list ) + [ keep_input ] ) as name : <nl> def _shuffle_batch ( tensors , batch_size , capacity , min_after_dequeue , <nl> " " " Helper function for ` shuffle_batch ` and ` maybe_shuffle_batch ` . " " " <nl> if context . in_eager_mode ( ) : <nl> raise ValueError ( <nl> - " Queue - using input pipelines are not supported when eager execution is " <nl> - " enabled . Please use tf . data to ingest data into your model instead . " ) <nl> + " Input pipelines based on Queues are not supported when eager execution " <nl> + " is enabled . Please use tf . data to ingest data into your model " <nl> + " instead . " ) <nl> tensor_list = _as_tensor_list ( tensors ) <nl> with ops . name_scope ( name , " shuffle_batch " , <nl> list ( tensor_list ) + [ keep_input ] ) as name : <nl> def _shuffle_batch_join ( tensors_list , batch_size , capacity , <nl> " " " Helper function for ` shuffle_batch_join ` and ` maybe_shuffle_batch_join ` . " " " <nl> if context . in_eager_mode ( ) : <nl> raise ValueError ( <nl> - " Queue - using input pipelines are not supported when eager execution is " <nl> - " enabled . Please use tf . data to ingest data into your model instead . " ) <nl> + " Input pipelines based on Queues are not supported when eager execution " <nl> + " is enabled . Please use tf . data to ingest data into your model " <nl> + " instead . 
" ) <nl> tensor_list_list = _as_tensor_list_list ( tensors_list ) <nl> with ops . name_scope ( name , " shuffle_batch_join " , <nl> _flatten ( tensor_list_list ) + [ keep_input ] ) as name : <nl> def batch ( tensors , batch_size , num_threads = 1 , capacity = 32 , <nl> Raises : <nl> ValueError : If the ` shapes ` are not specified , and cannot be <nl> inferred from the elements of ` tensors ` . <nl> + <nl> + @ compatibility ( eager ) <nl> + Input pipelines based on Queues are not supported when eager execution is <nl> + enabled . Please use the ` tf . data ` API to ingest data under eager execution . <nl> + @ end_compatibility <nl> " " " <nl> return _batch ( <nl> tensors , <nl> def batch_join ( tensors_list , batch_size , capacity = 32 , enqueue_many = False , <nl> Raises : <nl> ValueError : If the ` shapes ` are not specified , and cannot be <nl> inferred from the elements of ` tensor_list_list ` . <nl> + <nl> + @ compatibility ( eager ) <nl> + Input pipelines based on Queues are not supported when eager execution is <nl> + enabled . Please use the ` tf . data ` API to ingest data under eager execution . <nl> + @ end_compatibility <nl> " " " <nl> return _batch_join ( <nl> tensors_list , <nl> def shuffle_batch ( tensors , batch_size , capacity , min_after_dequeue , <nl> Raises : <nl> ValueError : If the ` shapes ` are not specified , and cannot be <nl> inferred from the elements of ` tensors ` . <nl> + <nl> + @ compatibility ( eager ) <nl> + Input pipelines based on Queues are not supported when eager execution is <nl> + enabled . Please use the ` tf . data ` API to ingest data under eager execution . <nl> + @ end_compatibility <nl> " " " <nl> return _shuffle_batch ( <nl> tensors , <nl> def maybe_shuffle_batch ( tensors , batch_size , capacity , min_after_dequeue , <nl> Raises : <nl> ValueError : If the ` shapes ` are not specified , and cannot be <nl> inferred from the elements of ` tensors ` . <nl> + <nl> + @ compatibility ( eager ) <nl> + Input pipelines based on Queues are not supported when eager execution is <nl> + enabled . Please use the ` tf . data ` API to ingest data under eager execution . <nl> + @ end_compatibility <nl> " " " <nl> return _shuffle_batch ( <nl> tensors , <nl> def shuffle_batch_join ( tensors_list , batch_size , capacity , <nl> Raises : <nl> ValueError : If the ` shapes ` are not specified , and cannot be <nl> inferred from the elements of ` tensors_list ` . <nl> + <nl> + @ compatibility ( eager ) <nl> + Input pipelines based on Queues are not supported when eager execution is <nl> + enabled . Please use the ` tf . data ` API to ingest data under eager execution . <nl> + @ end_compatibility <nl> " " " <nl> return _shuffle_batch_join ( <nl> tensors_list , <nl> def maybe_shuffle_batch_join ( tensors_list , batch_size , capacity , <nl> Raises : <nl> ValueError : If the ` shapes ` are not specified , and cannot be <nl> inferred from the elements of ` tensors_list ` . <nl> + <nl> + @ compatibility ( eager ) <nl> + Input pipelines based on Queues are not supported when eager execution is <nl> + enabled . Please use the ` tf . data ` API to ingest data under eager execution . <nl> + @ end_compatibility <nl> " " " <nl> return _shuffle_batch_join ( <nl> tensors_list , <nl> mmm a / tensorflow / python / training / optimizer . py <nl> ppp b / tensorflow / python / training / optimizer . py <nl> def compute_gradients ( self , loss , var_list = None , <nl> Raises : <nl> TypeError : If ` var_list ` contains anything else than ` Variable ` objects . 
<nl> ValueError : If some arguments are invalid . <nl> + RuntimeError : If called with eager execution enabled and if ` grad_loss ` <nl> + is not ` None ` or ` loss ` is not callable . <nl> <nl> @ compatibility ( eager ) <nl> When eager execution is enabled , ` loss ` should be a Python function that <nl> def compute_gradients ( self , loss , var_list = None , <nl> " " " <nl> if context . in_eager_mode ( ) : <nl> if grad_loss is not None : <nl> - raise ValueError ( " ` grad_loss ` argument to Optimizer . compute_gradients " <nl> - " not supported when eager execution is enabled . " ) <nl> + raise RuntimeError ( <nl> + " ` grad_loss ` argument to Optimizer . compute_gradients " <nl> + " not supported when eager execution is enabled . " ) <nl> if not callable ( loss ) : <nl> - raise ValueError ( " ` loss ` passed to Optimizer . compute_gradients should " <nl> - " be a function when eager execution is enabled . " ) <nl> + raise RuntimeError ( <nl> + " ` loss ` passed to Optimizer . compute_gradients should " <nl> + " be a function when eager execution is enabled . " ) <nl> # TODO ( agarwal ) : consider passing parameters to the ` loss ` function . <nl> if var_list is None : <nl> return backprop . implicit_grad ( loss ) ( ) <nl> mmm a / tensorflow / python / training / queue_runner_impl . py <nl> ppp b / tensorflow / python / training / queue_runner_impl . py <nl> <nl> <nl> from tensorflow . core . protobuf import queue_runner_pb2 <nl> from tensorflow . python . client import session <nl> + from tensorflow . python . eager import context <nl> from tensorflow . python . framework import errors <nl> from tensorflow . python . framework import ops <nl> from tensorflow . python . platform import tf_logging as logging <nl> def start_queue_runners ( sess = None , coord = None , daemon = True , start = True , <nl> <nl> Returns : <nl> A list of threads . <nl> + <nl> + Raises : <nl> + RuntimeError : If called with eager execution enabled . <nl> + ValueError : If called without a default ` tf . Session ` registered . <nl> + <nl> + @ compatibility ( eager ) <nl> + Not compatible with eager execution . To ingest data under eager execution , <nl> + use the ` tf . data ` API instead . <nl> + @ end_compatibility <nl> " " " <nl> + if context . in_eager_mode ( ) : <nl> + raise RuntimeError ( " Queues are not compatible with eager execution . " ) <nl> if sess is None : <nl> sess = ops . get_default_session ( ) <nl> if not sess : <nl> mmm a / tensorflow / python / training / saver . py <nl> ppp b / tensorflow / python / training / saver . py <nl> class ResourceVariableSaveable ( SaveableObject ) : <nl> <nl> def __init__ ( self , var , slice_spec , name ) : <nl> self . _var_device = var . device <nl> + self . _var_shape = var . shape <nl> if isinstance ( var , ops . Tensor ) : <nl> self . handle_op = var . op . inputs [ 0 ] <nl> tensor = var <nl> def restore ( self , restored_tensors , restored_shapes ) : <nl> # Copy the restored tensor to the variable ' s device . <nl> with ops . device ( self . _var_device ) : <nl> restored_tensor = array_ops . identity ( restored_tensor ) <nl> - return resource_variable_ops . assign_variable_op ( <nl> - self . handle_op , restored_tensor ) <nl> + return resource_variable_ops . shape_safe_assign_variable_handle ( <nl> + self . handle_op , self . _var_shape , restored_tensor ) <nl> <nl> def __init__ ( self , write_version = saver_pb2 . SaverDef . V2 ) : <nl> self . 
_write_version = write_version <nl> def __init__ ( self , <nl> Raises : <nl> TypeError : If ` var_list ` is invalid . <nl> ValueError : If any of the keys or values in ` var_list ` are not unique . <nl> + RuntimeError : If eager execution is enabled and ` var_list ` does not specify <nl> + a list or dict of variables to save . <nl> + <nl> + @ compatibility ( eager ) <nl> + When eager execution is enabled , ` var_list ` must specify a ` list ` or ` dict ` <nl> + of variables to save . Otherwise , a ` RuntimeError ` will be raised . <nl> + @ end_compatibility <nl> " " " <nl> if defer_build and var_list : <nl> raise ValueError ( <nl> " If ` var_list ` is provided then build cannot be deferred . " <nl> " Either set defer_build = False or var_list = None . " ) <nl> if context . in_eager_mode ( ) and var_list is None : <nl> - raise ValueError ( <nl> - " When eager execution is enabled , ` var_list ` must specify a list of " <nl> - " variables to save " ) <nl> + raise RuntimeError ( <nl> + " When eager execution is enabled , ` var_list ` must specify a list or " <nl> + " dict of variables to save " ) <nl> self . _var_list = var_list <nl> self . _reshape = reshape <nl> self . _sharded = sharded <nl> def __init__ ( self , <nl> <nl> def build ( self ) : <nl> if context . in_eager_mode ( ) : <nl> - raise ValueError ( " Use save / restore instead of build in eager mode . " ) <nl> + raise RuntimeError ( " Use save / restore instead of build in eager mode . " ) <nl> self . _build ( self . _filename , build_save = True , build_restore = True ) <nl> <nl> def _build_eager ( self , checkpoint_path , build_save , build_restore ) : <nl> def save ( self , <nl> It requires a session in which the graph was launched . The variables to <nl> save must also have been initialized . <nl> <nl> - The method returns the path of the newly created checkpoint file . This <nl> - path can be passed directly to a call to ` restore ( ) ` . <nl> + The method returns the path prefix of the newly created checkpoint files . <nl> + This string can be passed directly to a call to ` restore ( ) ` . <nl> <nl> Args : <nl> - sess : A Session to use to save the variables . None in eager mode . <nl> - save_path : String . Path to the checkpoint filename . If the saver is <nl> - ` sharded ` , this is the prefix of the sharded checkpoint filename . <nl> + sess : A Session to use to save the variables . <nl> + save_path : String . Prefix of filenames created for the checkpoint . <nl> global_step : If provided the global step number is appended to <nl> - ` save_path ` to create the checkpoint filename . The optional argument <nl> + ` save_path ` to create the checkpoint filenames . The optional argument <nl> can be a ` Tensor ` , a ` Tensor ` name or an integer . <nl> latest_filename : Optional name for the protocol buffer file that will <nl> - contains the list of most recent checkpoint filenames . That file , <nl> + contain the list of most recent checkpoints . That file , <nl> kept in the same directory as the checkpoint files , is automatically <nl> managed by the saver to keep track of recent checkpoints . Defaults to <nl> ' checkpoint ' . <nl> def save ( self , <nl> ` CheckpointStateProto ` . <nl> <nl> Returns : <nl> - A string : path at which the variables were saved . If the saver is <nl> + A string : path prefix used for the checkpoint files . If the saver is <nl> sharded , this string ends with : ' - ? ? ? ? ? - of - nnnnn ' where ' nnnnn ' <nl> is the number of shards created . <nl> If the saver is empty , returns None . 
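The saver . py hunks above tighten the documented contract of ` Saver . save ( ) ` : it returns a checkpoint path prefix rather than a single filename , and that prefix is exactly what ` restore ( ) ` expects ; under eager execution the constructor additionally requires an explicit ` var_list ` . The following minimal graph - mode sketch illustrates that contract ; the variable , the ` / tmp / tf_ckpt ` scratch directory , and the session setup are illustrative assumptions , not part of this commit .

import tensorflow as tf

v = tf.Variable(0.0, name="v")
inc = v.assign_add(1.0)
# An explicit var_list also satisfies the stricter eager-mode check added
# above (omitting var_list under eager execution now raises RuntimeError).
saver = tf.train.Saver(var_list=[v])
tf.gfile.MakeDirs("/tmp/tf_ckpt")  # assumed scratch directory

with tf.Session() as sess:
  sess.run(tf.global_variables_initializer())
  sess.run(inc)
  # save() returns the path prefix, e.g. "/tmp/tf_ckpt/model-1" for
  # global_step=1; sharded savers instead end it with "-?????-of-nnnnn".
  prefix = saver.save(sess, "/tmp/tf_ckpt/model", global_step=1)
  saver.restore(sess, prefix)  # accepts the returned prefix directly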
<nl> def import_meta_graph ( meta_graph_or_file , clear_devices = False , <nl> <nl> A None value is returned if no variables exist in the ` MetaGraphDef ` <nl> ( i . e . , there are no variables to restore ) . <nl> + <nl> + Raises : <nl> + RuntimeError : If called with eager execution enabled . <nl> + <nl> + @ compatibility ( eager ) <nl> + Exporting / importing meta graphs is not supported . No graph exists when eager <nl> + execution is enabled . <nl> + @ end_compatibility <nl> " " " # pylint : disable = g - doc - exception <nl> if context . in_eager_mode ( ) : <nl> - raise ValueError ( " Exporting / importing meta graphs is not supported when " <nl> - " eager execution is enabled . No graph exists when eager " <nl> - " execution is enabled . " ) <nl> + raise RuntimeError ( " Exporting / importing meta graphs is not supported when " <nl> + " eager execution is enabled . No graph exists when eager " <nl> + " execution is enabled . " ) <nl> if not isinstance ( meta_graph_or_file , meta_graph_pb2 . MetaGraphDef ) : <nl> meta_graph_def = meta_graph . read_meta_graph_file ( meta_graph_or_file ) <nl> else : <nl> def export_meta_graph ( filename = None , <nl> <nl> Raises : <nl> ValueError : When the ` GraphDef ` is larger than 2GB . <nl> + RuntimeError : If called with eager execution enabled . <nl> + <nl> + @ compatibility ( eager ) <nl> + Exporting / importing meta graphs is not supported . No graph exists when eager <nl> + execution is enabled . <nl> + @ end_compatibility <nl> " " " <nl> if context . in_eager_mode ( ) : <nl> - raise ValueError ( " Exporting / importing meta graphs is not supported when " <nl> - " eager execution is enabled . No graph exists when eager " <nl> - " execution is enabled . " ) <nl> + raise RuntimeError ( " Exporting / importing meta graphs is not supported when " <nl> + " eager execution is enabled . No graph exists when eager " <nl> + " execution is enabled . " ) <nl> meta_graph_def , _ = meta_graph . export_scoped_meta_graph ( <nl> filename = filename , <nl> meta_info_def = meta_info_def , <nl> mmm a / tensorflow / python / training / supervisor . py <nl> ppp b / tensorflow / python / training / supervisor . py <nl> <nl> <nl> from tensorflow . core . framework . summary_pb2 import Summary <nl> from tensorflow . core . util . event_pb2 import SessionLog <nl> + from tensorflow . python . eager import context <nl> from tensorflow . python . framework import dtypes <nl> from tensorflow . python . framework import meta_graph <nl> from tensorflow . python . framework import ops <nl> def __init__ ( self , <nl> <nl> Returns : <nl> A ` Supervisor ` . <nl> + <nl> + Raises : <nl> + RuntimeError : If called with eager execution enabled . <nl> + <nl> + @ compatibility ( eager ) <nl> + ` Supervisor ` s are not supported when eager execution is enabled . <nl> + @ end_compatibility <nl> " " " <nl> + if context . in_eager_mode ( ) : <nl> + raise RuntimeError ( " Supervisors are compatible with eager execution . " ) <nl> # Set default values of arguments . <nl> if graph is None : <nl> graph = ops . get_default_graph ( ) <nl> def start_queue_runners ( self , sess , queue_runners = None ) : <nl> <nl> Returns : <nl> The list of threads started for the ` QueueRunners ` . <nl> + <nl> + Raises : <nl> + RuntimeError : If called with eager execution enabled . <nl> + <nl> + @ compatibility ( eager ) <nl> + Queues are not compatible with eager execution . To ingest data when eager <nl> + execution is enabled , use the ` tf . data ` API . 
<nl> + @ end_compatibility <nl> " " " <nl> + if context . in_eager_mode ( ) : <nl> + raise RuntimeError ( " Queues are not compatible with eager execution . " ) <nl> if queue_runners is None : <nl> queue_runners = self . _graph . get_collection ( ops . GraphKeys . QUEUE_RUNNERS ) <nl> threads = [ ] <nl> mmm a / tensorflow / python / training / training_util . py <nl> ppp b / tensorflow / python / training / training_util . py <nl> def create_global_step ( graph = None ) : <nl> raise ValueError ( ' " global_step " already exists . ' ) <nl> # Create in proper graph and base name_scope . <nl> with graph . as_default ( ) as g , g . name_scope ( None ) : <nl> + if context . in_eager_mode ( ) : <nl> + with ops . device ( ' cpu : 0 ' ) : <nl> + return variable_scope . get_variable ( <nl> + ops . GraphKeys . GLOBAL_STEP , <nl> + shape = [ ] , <nl> + dtype = dtypes . int64 , <nl> + initializer = init_ops . zeros_initializer ( ) , <nl> + trainable = False , <nl> + collections = [ ops . GraphKeys . GLOBAL_VARIABLES , <nl> + ops . GraphKeys . GLOBAL_STEP ] ) <nl> return variable_scope . get_variable ( <nl> ops . GraphKeys . GLOBAL_STEP , <nl> shape = [ ] , <nl> dtype = dtypes . int64 , <nl> initializer = init_ops . zeros_initializer ( ) , <nl> trainable = False , <nl> - collections = [ ops . GraphKeys . GLOBAL_VARIABLES , ops . GraphKeys . GLOBAL_STEP ] ) <nl> + collections = [ ops . GraphKeys . GLOBAL_VARIABLES , <nl> + ops . GraphKeys . GLOBAL_STEP ] ) <nl> <nl> <nl> def get_or_create_global_step ( graph = None ) : <nl> mmm a / tensorflow / tools / api / golden / tensorflow . - optimizer - options . pbtxt <nl> ppp b / tensorflow / tools / api / golden / tensorflow . - optimizer - options . pbtxt <nl> tf_class { <nl> name : " Level " <nl> mtype : " < class \ ' google . protobuf . internal . enum_type_wrapper . EnumTypeWrapper \ ' > " <nl> } <nl> + member { <nl> + name : " MAX_FOLDED_CONSTANT_IN_BYTES_FIELD_NUMBER " <nl> + mtype : " < type \ ' int \ ' > " <nl> + } <nl> member { <nl> name : " OFF " <nl> mtype : " < type \ ' int \ ' > " <nl> mmm a / tensorflow / tools / api / golden / tensorflow . keras . layers . - activation . pbtxt <nl> ppp b / tensorflow / tools / api / golden / tensorflow . keras . layers . - activation . pbtxt <nl> tf_class { <nl> } <nl> member_method { <nl> name : " add_variable " <nl> - argspec : " args = [ \ ' self \ ' , \ ' name \ ' , \ ' shape \ ' , \ ' dtype \ ' , \ ' initializer \ ' , \ ' regularizer \ ' , \ ' trainable \ ' , \ ' constraint \ ' ] , varargs = None , keywords = None , defaults = [ \ ' None \ ' , \ ' None \ ' , \ ' None \ ' , \ ' True \ ' , \ ' None \ ' ] , " <nl> + argspec : " args = [ \ ' self \ ' , \ ' name \ ' , \ ' shape \ ' , \ ' dtype \ ' , \ ' initializer \ ' , \ ' regularizer \ ' , \ ' trainable \ ' , \ ' constraint \ ' , \ ' partitioner \ ' ] , varargs = None , keywords = None , defaults = [ \ ' None \ ' , \ ' None \ ' , \ ' None \ ' , \ ' True \ ' , \ ' None \ ' , \ ' None \ ' ] , " <nl> } <nl> member_method { <nl> name : " add_weight " <nl> mmm a / tensorflow / tools / api / golden / tensorflow . keras . layers . - activity - regularization . pbtxt <nl> ppp b / tensorflow / tools / api / golden / tensorflow . keras . layers . - activity - regularization . 
pbtxt <nl> tf_class { <nl> } <nl> member_method { <nl> name : " add_variable " <nl> - argspec : " args = [ \ ' self \ ' , \ ' name \ ' , \ ' shape \ ' , \ ' dtype \ ' , \ ' initializer \ ' , \ ' regularizer \ ' , \ ' trainable \ ' , \ ' constraint \ ' ] , varargs = None , keywords = None , defaults = [ \ ' None \ ' , \ ' None \ ' , \ ' None \ ' , \ ' True \ ' , \ ' None \ ' ] , " <nl> + argspec : " args = [ \ ' self \ ' , \ ' name \ ' , \ ' shape \ ' , \ ' dtype \ ' , \ ' initializer \ ' , \ ' regularizer \ ' , \ ' trainable \ ' , \ ' constraint \ ' , \ ' partitioner \ ' ] , varargs = None , keywords = None , defaults = [ \ ' None \ ' , \ ' None \ ' , \ ' None \ ' , \ ' True \ ' , \ ' None \ ' , \ ' None \ ' ] , " <nl> } <nl> member_method { <nl> name : " add_weight " <nl> mmm a / tensorflow / tools / api / golden / tensorflow . keras . layers . - add . pbtxt <nl> ppp b / tensorflow / tools / api / golden / tensorflow . keras . layers . - add . pbtxt <nl> tf_class { <nl> } <nl> member_method { <nl> name : " add_variable " <nl> - argspec : " args = [ \ ' self \ ' , \ ' name \ ' , \ ' shape \ ' , \ ' dtype \ ' , \ ' initializer \ ' , \ ' regularizer \ ' , \ ' trainable \ ' , \ ' constraint \ ' ] , varargs = None , keywords = None , defaults = [ \ ' None \ ' , \ ' None \ ' , \ ' None \ ' , \ ' True \ ' , \ ' None \ ' ] , " <nl> + argspec : " args = [ \ ' self \ ' , \ ' name \ ' , \ ' shape \ ' , \ ' dtype \ ' , \ ' initializer \ ' , \ ' regularizer \ ' , \ ' trainable \ ' , \ ' constraint \ ' , \ ' partitioner \ ' ] , varargs = None , keywords = None , defaults = [ \ ' None \ ' , \ ' None \ ' , \ ' None \ ' , \ ' True \ ' , \ ' None \ ' , \ ' None \ ' ] , " <nl> } <nl> member_method { <nl> name : " add_weight " <nl> mmm a / tensorflow / tools / api / golden / tensorflow . keras . layers . - alpha - dropout . pbtxt <nl> ppp b / tensorflow / tools / api / golden / tensorflow . keras . layers . - alpha - dropout . pbtxt <nl> tf_class { <nl> } <nl> member_method { <nl> name : " add_variable " <nl> - argspec : " args = [ \ ' self \ ' , \ ' name \ ' , \ ' shape \ ' , \ ' dtype \ ' , \ ' initializer \ ' , \ ' regularizer \ ' , \ ' trainable \ ' , \ ' constraint \ ' ] , varargs = None , keywords = None , defaults = [ \ ' None \ ' , \ ' None \ ' , \ ' None \ ' , \ ' True \ ' , \ ' None \ ' ] , " <nl> + argspec : " args = [ \ ' self \ ' , \ ' name \ ' , \ ' shape \ ' , \ ' dtype \ ' , \ ' initializer \ ' , \ ' regularizer \ ' , \ ' trainable \ ' , \ ' constraint \ ' , \ ' partitioner \ ' ] , varargs = None , keywords = None , defaults = [ \ ' None \ ' , \ ' None \ ' , \ ' None \ ' , \ ' True \ ' , \ ' None \ ' , \ ' None \ ' ] , " <nl> } <nl> member_method { <nl> name : " add_weight " <nl> mmm a / tensorflow / tools / api / golden / tensorflow . keras . layers . - average - pooling1 - d . pbtxt <nl> ppp b / tensorflow / tools / api / golden / tensorflow . keras . layers . - average - pooling1 - d . 
pbtxt <nl> tf_class { <nl> } <nl> member_method { <nl> name : " add_variable " <nl> - argspec : " args = [ \ ' self \ ' , \ ' name \ ' , \ ' shape \ ' , \ ' dtype \ ' , \ ' initializer \ ' , \ ' regularizer \ ' , \ ' trainable \ ' , \ ' constraint \ ' ] , varargs = None , keywords = None , defaults = [ \ ' None \ ' , \ ' None \ ' , \ ' None \ ' , \ ' True \ ' , \ ' None \ ' ] , " <nl> + argspec : " args = [ \ ' self \ ' , \ ' name \ ' , \ ' shape \ ' , \ ' dtype \ ' , \ ' initializer \ ' , \ ' regularizer \ ' , \ ' trainable \ ' , \ ' constraint \ ' , \ ' partitioner \ ' ] , varargs = None , keywords = None , defaults = [ \ ' None \ ' , \ ' None \ ' , \ ' None \ ' , \ ' True \ ' , \ ' None \ ' , \ ' None \ ' ] , " <nl> } <nl> member_method { <nl> name : " add_weight " <nl> mmm a / tensorflow / tools / api / golden / tensorflow . keras . layers . - average - pooling2 - d . pbtxt <nl> ppp b / tensorflow / tools / api / golden / tensorflow . keras . layers . - average - pooling2 - d . pbtxt <nl> tf_class { <nl> } <nl> member_method { <nl> name : " add_variable " <nl> - argspec : " args = [ \ ' self \ ' , \ ' name \ ' , \ ' shape \ ' , \ ' dtype \ ' , \ ' initializer \ ' , \ ' regularizer \ ' , \ ' trainable \ ' , \ ' constraint \ ' ] , varargs = None , keywords = None , defaults = [ \ ' None \ ' , \ ' None \ ' , \ ' None \ ' , \ ' True \ ' , \ ' None \ ' ] , " <nl> + argspec : " args = [ \ ' self \ ' , \ ' name \ ' , \ ' shape \ ' , \ ' dtype \ ' , \ ' initializer \ ' , \ ' regularizer \ ' , \ ' trainable \ ' , \ ' constraint \ ' , \ ' partitioner \ ' ] , varargs = None , keywords = None , defaults = [ \ ' None \ ' , \ ' None \ ' , \ ' None \ ' , \ ' True \ ' , \ ' None \ ' , \ ' None \ ' ] , " <nl> } <nl> member_method { <nl> name : " add_weight " <nl> mmm a / tensorflow / tools / api / golden / tensorflow . keras . layers . - average - pooling3 - d . pbtxt <nl> ppp b / tensorflow / tools / api / golden / tensorflow . keras . layers . - average - pooling3 - d . pbtxt <nl> tf_class { <nl> } <nl> member_method { <nl> name : " add_variable " <nl> - argspec : " args = [ \ ' self \ ' , \ ' name \ ' , \ ' shape \ ' , \ ' dtype \ ' , \ ' initializer \ ' , \ ' regularizer \ ' , \ ' trainable \ ' , \ ' constraint \ ' ] , varargs = None , keywords = None , defaults = [ \ ' None \ ' , \ ' None \ ' , \ ' None \ ' , \ ' True \ ' , \ ' None \ ' ] , " <nl> + argspec : " args = [ \ ' self \ ' , \ ' name \ ' , \ ' shape \ ' , \ ' dtype \ ' , \ ' initializer \ ' , \ ' regularizer \ ' , \ ' trainable \ ' , \ ' constraint \ ' , \ ' partitioner \ ' ] , varargs = None , keywords = None , defaults = [ \ ' None \ ' , \ ' None \ ' , \ ' None \ ' , \ ' True \ ' , \ ' None \ ' , \ ' None \ ' ] , " <nl> } <nl> member_method { <nl> name : " add_weight " <nl> mmm a / tensorflow / tools / api / golden / tensorflow . keras . layers . - average . pbtxt <nl> ppp b / tensorflow / tools / api / golden / tensorflow . keras . layers . - average . 
pbtxt <nl> tf_class { <nl> } <nl> member_method { <nl> name : " add_variable " <nl> - argspec : " args = [ \ ' self \ ' , \ ' name \ ' , \ ' shape \ ' , \ ' dtype \ ' , \ ' initializer \ ' , \ ' regularizer \ ' , \ ' trainable \ ' , \ ' constraint \ ' ] , varargs = None , keywords = None , defaults = [ \ ' None \ ' , \ ' None \ ' , \ ' None \ ' , \ ' True \ ' , \ ' None \ ' ] , " <nl> + argspec : " args = [ \ ' self \ ' , \ ' name \ ' , \ ' shape \ ' , \ ' dtype \ ' , \ ' initializer \ ' , \ ' regularizer \ ' , \ ' trainable \ ' , \ ' constraint \ ' , \ ' partitioner \ ' ] , varargs = None , keywords = None , defaults = [ \ ' None \ ' , \ ' None \ ' , \ ' None \ ' , \ ' True \ ' , \ ' None \ ' , \ ' None \ ' ] , " <nl> } <nl> member_method { <nl> name : " add_weight " <nl> mmm a / tensorflow / tools / api / golden / tensorflow . keras . layers . - avg - pool1 - d . pbtxt <nl> ppp b / tensorflow / tools / api / golden / tensorflow . keras . layers . - avg - pool1 - d . pbtxt <nl> tf_class { <nl> } <nl> member_method { <nl> name : " add_variable " <nl> - argspec : " args = [ \ ' self \ ' , \ ' name \ ' , \ ' shape \ ' , \ ' dtype \ ' , \ ' initializer \ ' , \ ' regularizer \ ' , \ ' trainable \ ' , \ ' constraint \ ' ] , varargs = None , keywords = None , defaults = [ \ ' None \ ' , \ ' None \ ' , \ ' None \ ' , \ ' True \ ' , \ ' None \ ' ] , " <nl> + argspec : " args = [ \ ' self \ ' , \ ' name \ ' , \ ' shape \ ' , \ ' dtype \ ' , \ ' initializer \ ' , \ ' regularizer \ ' , \ ' trainable \ ' , \ ' constraint \ ' , \ ' partitioner \ ' ] , varargs = None , keywords = None , defaults = [ \ ' None \ ' , \ ' None \ ' , \ ' None \ ' , \ ' True \ ' , \ ' None \ ' , \ ' None \ ' ] , " <nl> } <nl> member_method { <nl> name : " add_weight " <nl> mmm a / tensorflow / tools / api / golden / tensorflow . keras . layers . - avg - pool2 - d . pbtxt <nl> ppp b / tensorflow / tools / api / golden / tensorflow . keras . layers . - avg - pool2 - d . pbtxt <nl> tf_class { <nl> } <nl> member_method { <nl> name : " add_variable " <nl> - argspec : " args = [ \ ' self \ ' , \ ' name \ ' , \ ' shape \ ' , \ ' dtype \ ' , \ ' initializer \ ' , \ ' regularizer \ ' , \ ' trainable \ ' , \ ' constraint \ ' ] , varargs = None , keywords = None , defaults = [ \ ' None \ ' , \ ' None \ ' , \ ' None \ ' , \ ' True \ ' , \ ' None \ ' ] , " <nl> + argspec : " args = [ \ ' self \ ' , \ ' name \ ' , \ ' shape \ ' , \ ' dtype \ ' , \ ' initializer \ ' , \ ' regularizer \ ' , \ ' trainable \ ' , \ ' constraint \ ' , \ ' partitioner \ ' ] , varargs = None , keywords = None , defaults = [ \ ' None \ ' , \ ' None \ ' , \ ' None \ ' , \ ' True \ ' , \ ' None \ ' , \ ' None \ ' ] , " <nl> } <nl> member_method { <nl> name : " add_weight " <nl> mmm a / tensorflow / tools / api / golden / tensorflow . keras . layers . - avg - pool3 - d . pbtxt <nl> ppp b / tensorflow / tools / api / golden / tensorflow . keras . layers . - avg - pool3 - d . 
pbtxt <nl> tf_class { <nl> } <nl> member_method { <nl> name : " add_variable " <nl> - argspec : " args = [ \ ' self \ ' , \ ' name \ ' , \ ' shape \ ' , \ ' dtype \ ' , \ ' initializer \ ' , \ ' regularizer \ ' , \ ' trainable \ ' , \ ' constraint \ ' ] , varargs = None , keywords = None , defaults = [ \ ' None \ ' , \ ' None \ ' , \ ' None \ ' , \ ' True \ ' , \ ' None \ ' ] , " <nl> + argspec : " args = [ \ ' self \ ' , \ ' name \ ' , \ ' shape \ ' , \ ' dtype \ ' , \ ' initializer \ ' , \ ' regularizer \ ' , \ ' trainable \ ' , \ ' constraint \ ' , \ ' partitioner \ ' ] , varargs = None , keywords = None , defaults = [ \ ' None \ ' , \ ' None \ ' , \ ' None \ ' , \ ' True \ ' , \ ' None \ ' , \ ' None \ ' ] , " <nl> } <nl> member_method { <nl> name : " add_weight " <nl> mmm a / tensorflow / tools / api / golden / tensorflow . keras . layers . - batch - normalization . pbtxt <nl> ppp b / tensorflow / tools / api / golden / tensorflow . keras . layers . - batch - normalization . pbtxt <nl> tf_class { <nl> } <nl> member_method { <nl> name : " add_variable " <nl> - argspec : " args = [ \ ' self \ ' , \ ' name \ ' , \ ' shape \ ' , \ ' dtype \ ' , \ ' initializer \ ' , \ ' regularizer \ ' , \ ' trainable \ ' , \ ' constraint \ ' ] , varargs = None , keywords = None , defaults = [ \ ' None \ ' , \ ' None \ ' , \ ' None \ ' , \ ' True \ ' , \ ' None \ ' ] , " <nl> + argspec : " args = [ \ ' self \ ' , \ ' name \ ' , \ ' shape \ ' , \ ' dtype \ ' , \ ' initializer \ ' , \ ' regularizer \ ' , \ ' trainable \ ' , \ ' constraint \ ' , \ ' partitioner \ ' ] , varargs = None , keywords = None , defaults = [ \ ' None \ ' , \ ' None \ ' , \ ' None \ ' , \ ' True \ ' , \ ' None \ ' , \ ' None \ ' ] , " <nl> } <nl> member_method { <nl> name : " add_weight " <nl> mmm a / tensorflow / tools / api / golden / tensorflow . keras . layers . - bidirectional . pbtxt <nl> ppp b / tensorflow / tools / api / golden / tensorflow . keras . layers . - bidirectional . pbtxt <nl> tf_class { <nl> } <nl> member_method { <nl> name : " add_variable " <nl> - argspec : " args = [ \ ' self \ ' , \ ' name \ ' , \ ' shape \ ' , \ ' dtype \ ' , \ ' initializer \ ' , \ ' regularizer \ ' , \ ' trainable \ ' , \ ' constraint \ ' ] , varargs = None , keywords = None , defaults = [ \ ' None \ ' , \ ' None \ ' , \ ' None \ ' , \ ' True \ ' , \ ' None \ ' ] , " <nl> + argspec : " args = [ \ ' self \ ' , \ ' name \ ' , \ ' shape \ ' , \ ' dtype \ ' , \ ' initializer \ ' , \ ' regularizer \ ' , \ ' trainable \ ' , \ ' constraint \ ' , \ ' partitioner \ ' ] , varargs = None , keywords = None , defaults = [ \ ' None \ ' , \ ' None \ ' , \ ' None \ ' , \ ' True \ ' , \ ' None \ ' , \ ' None \ ' ] , " <nl> } <nl> member_method { <nl> name : " add_weight " <nl> mmm a / tensorflow / tools / api / golden / tensorflow . keras . layers . - concatenate . pbtxt <nl> ppp b / tensorflow / tools / api / golden / tensorflow . keras . layers . - concatenate . 
pbtxt <nl> tf_class { <nl> } <nl> member_method { <nl> name : " add_variable " <nl> - argspec : " args = [ \ ' self \ ' , \ ' name \ ' , \ ' shape \ ' , \ ' dtype \ ' , \ ' initializer \ ' , \ ' regularizer \ ' , \ ' trainable \ ' , \ ' constraint \ ' ] , varargs = None , keywords = None , defaults = [ \ ' None \ ' , \ ' None \ ' , \ ' None \ ' , \ ' True \ ' , \ ' None \ ' ] , " <nl> + argspec : " args = [ \ ' self \ ' , \ ' name \ ' , \ ' shape \ ' , \ ' dtype \ ' , \ ' initializer \ ' , \ ' regularizer \ ' , \ ' trainable \ ' , \ ' constraint \ ' , \ ' partitioner \ ' ] , varargs = None , keywords = None , defaults = [ \ ' None \ ' , \ ' None \ ' , \ ' None \ ' , \ ' True \ ' , \ ' None \ ' , \ ' None \ ' ] , " <nl> } <nl> member_method { <nl> name : " add_weight " <nl> mmm a / tensorflow / tools / api / golden / tensorflow . keras . layers . - conv - l - s - t - m2 - d . pbtxt <nl> ppp b / tensorflow / tools / api / golden / tensorflow . keras . layers . - conv - l - s - t - m2 - d . pbtxt <nl> tf_class { <nl> } <nl> member_method { <nl> name : " add_variable " <nl> - argspec : " args = [ \ ' self \ ' , \ ' name \ ' , \ ' shape \ ' , \ ' dtype \ ' , \ ' initializer \ ' , \ ' regularizer \ ' , \ ' trainable \ ' , \ ' constraint \ ' ] , varargs = None , keywords = None , defaults = [ \ ' None \ ' , \ ' None \ ' , \ ' None \ ' , \ ' True \ ' , \ ' None \ ' ] , " <nl> + argspec : " args = [ \ ' self \ ' , \ ' name \ ' , \ ' shape \ ' , \ ' dtype \ ' , \ ' initializer \ ' , \ ' regularizer \ ' , \ ' trainable \ ' , \ ' constraint \ ' , \ ' partitioner \ ' ] , varargs = None , keywords = None , defaults = [ \ ' None \ ' , \ ' None \ ' , \ ' None \ ' , \ ' True \ ' , \ ' None \ ' , \ ' None \ ' ] , " <nl> } <nl> member_method { <nl> name : " add_weight " <nl> mmm a / tensorflow / tools / api / golden / tensorflow . keras . layers . - conv1 - d . pbtxt <nl> ppp b / tensorflow / tools / api / golden / tensorflow . keras . layers . - conv1 - d . pbtxt <nl> tf_class { <nl> } <nl> member_method { <nl> name : " add_variable " <nl> - argspec : " args = [ \ ' self \ ' , \ ' name \ ' , \ ' shape \ ' , \ ' dtype \ ' , \ ' initializer \ ' , \ ' regularizer \ ' , \ ' trainable \ ' , \ ' constraint \ ' ] , varargs = None , keywords = None , defaults = [ \ ' None \ ' , \ ' None \ ' , \ ' None \ ' , \ ' True \ ' , \ ' None \ ' ] , " <nl> + argspec : " args = [ \ ' self \ ' , \ ' name \ ' , \ ' shape \ ' , \ ' dtype \ ' , \ ' initializer \ ' , \ ' regularizer \ ' , \ ' trainable \ ' , \ ' constraint \ ' , \ ' partitioner \ ' ] , varargs = None , keywords = None , defaults = [ \ ' None \ ' , \ ' None \ ' , \ ' None \ ' , \ ' True \ ' , \ ' None \ ' , \ ' None \ ' ] , " <nl> } <nl> member_method { <nl> name : " add_weight " <nl> mmm a / tensorflow / tools / api / golden / tensorflow . keras . layers . - conv2 - d - transpose . pbtxt <nl> ppp b / tensorflow / tools / api / golden / tensorflow . keras . layers . - conv2 - d - transpose . 
pbtxt <nl> tf_class { <nl> } <nl> member_method { <nl> name : " add_variable " <nl> - argspec : " args = [ \ ' self \ ' , \ ' name \ ' , \ ' shape \ ' , \ ' dtype \ ' , \ ' initializer \ ' , \ ' regularizer \ ' , \ ' trainable \ ' , \ ' constraint \ ' ] , varargs = None , keywords = None , defaults = [ \ ' None \ ' , \ ' None \ ' , \ ' None \ ' , \ ' True \ ' , \ ' None \ ' ] , " <nl> + argspec : " args = [ \ ' self \ ' , \ ' name \ ' , \ ' shape \ ' , \ ' dtype \ ' , \ ' initializer \ ' , \ ' regularizer \ ' , \ ' trainable \ ' , \ ' constraint \ ' , \ ' partitioner \ ' ] , varargs = None , keywords = None , defaults = [ \ ' None \ ' , \ ' None \ ' , \ ' None \ ' , \ ' True \ ' , \ ' None \ ' , \ ' None \ ' ] , " <nl> } <nl> member_method { <nl> name : " add_weight " <nl> mmm a / tensorflow / tools / api / golden / tensorflow . keras . layers . - conv2 - d . pbtxt <nl> ppp b / tensorflow / tools / api / golden / tensorflow . keras . layers . - conv2 - d . pbtxt <nl> tf_class { <nl> } <nl> member_method { <nl> name : " add_variable " <nl> - argspec : " args = [ \ ' self \ ' , \ ' name \ ' , \ ' shape \ ' , \ ' dtype \ ' , \ ' initializer \ ' , \ ' regularizer \ ' , \ ' trainable \ ' , \ ' constraint \ ' ] , varargs = None , keywords = None , defaults = [ \ ' None \ ' , \ ' None \ ' , \ ' None \ ' , \ ' True \ ' , \ ' None \ ' ] , " <nl> + argspec : " args = [ \ ' self \ ' , \ ' name \ ' , \ ' shape \ ' , \ ' dtype \ ' , \ ' initializer \ ' , \ ' regularizer \ ' , \ ' trainable \ ' , \ ' constraint \ ' , \ ' partitioner \ ' ] , varargs = None , keywords = None , defaults = [ \ ' None \ ' , \ ' None \ ' , \ ' None \ ' , \ ' True \ ' , \ ' None \ ' , \ ' None \ ' ] , " <nl> } <nl> member_method { <nl> name : " add_weight " <nl> mmm a / tensorflow / tools / api / golden / tensorflow . keras . layers . - conv3 - d - transpose . pbtxt <nl> ppp b / tensorflow / tools / api / golden / tensorflow . keras . layers . - conv3 - d - transpose . pbtxt <nl> tf_class { <nl> } <nl> member_method { <nl> name : " add_variable " <nl> - argspec : " args = [ \ ' self \ ' , \ ' name \ ' , \ ' shape \ ' , \ ' dtype \ ' , \ ' initializer \ ' , \ ' regularizer \ ' , \ ' trainable \ ' , \ ' constraint \ ' ] , varargs = None , keywords = None , defaults = [ \ ' None \ ' , \ ' None \ ' , \ ' None \ ' , \ ' True \ ' , \ ' None \ ' ] , " <nl> + argspec : " args = [ \ ' self \ ' , \ ' name \ ' , \ ' shape \ ' , \ ' dtype \ ' , \ ' initializer \ ' , \ ' regularizer \ ' , \ ' trainable \ ' , \ ' constraint \ ' , \ ' partitioner \ ' ] , varargs = None , keywords = None , defaults = [ \ ' None \ ' , \ ' None \ ' , \ ' None \ ' , \ ' True \ ' , \ ' None \ ' , \ ' None \ ' ] , " <nl> } <nl> member_method { <nl> name : " add_weight " <nl> mmm a / tensorflow / tools / api / golden / tensorflow . keras . layers . - conv3 - d . pbtxt <nl> ppp b / tensorflow / tools / api / golden / tensorflow . keras . layers . - conv3 - d . 
pbtxt <nl> tf_class { <nl> } <nl> member_method { <nl> name : " add_variable " <nl> - argspec : " args = [ \ ' self \ ' , \ ' name \ ' , \ ' shape \ ' , \ ' dtype \ ' , \ ' initializer \ ' , \ ' regularizer \ ' , \ ' trainable \ ' , \ ' constraint \ ' ] , varargs = None , keywords = None , defaults = [ \ ' None \ ' , \ ' None \ ' , \ ' None \ ' , \ ' True \ ' , \ ' None \ ' ] , " <nl> + argspec : " args = [ \ ' self \ ' , \ ' name \ ' , \ ' shape \ ' , \ ' dtype \ ' , \ ' initializer \ ' , \ ' regularizer \ ' , \ ' trainable \ ' , \ ' constraint \ ' , \ ' partitioner \ ' ] , varargs = None , keywords = None , defaults = [ \ ' None \ ' , \ ' None \ ' , \ ' None \ ' , \ ' True \ ' , \ ' None \ ' , \ ' None \ ' ] , " <nl> } <nl> member_method { <nl> name : " add_weight " <nl> mmm a / tensorflow / tools / api / golden / tensorflow . keras . layers . - convolution1 - d . pbtxt <nl> ppp b / tensorflow / tools / api / golden / tensorflow . keras . layers . - convolution1 - d . pbtxt <nl> tf_class { <nl> } <nl> member_method { <nl> name : " add_variable " <nl> - argspec : " args = [ \ ' self \ ' , \ ' name \ ' , \ ' shape \ ' , \ ' dtype \ ' , \ ' initializer \ ' , \ ' regularizer \ ' , \ ' trainable \ ' , \ ' constraint \ ' ] , varargs = None , keywords = None , defaults = [ \ ' None \ ' , \ ' None \ ' , \ ' None \ ' , \ ' True \ ' , \ ' None \ ' ] , " <nl> + argspec : " args = [ \ ' self \ ' , \ ' name \ ' , \ ' shape \ ' , \ ' dtype \ ' , \ ' initializer \ ' , \ ' regularizer \ ' , \ ' trainable \ ' , \ ' constraint \ ' , \ ' partitioner \ ' ] , varargs = None , keywords = None , defaults = [ \ ' None \ ' , \ ' None \ ' , \ ' None \ ' , \ ' True \ ' , \ ' None \ ' , \ ' None \ ' ] , " <nl> } <nl> member_method { <nl> name : " add_weight " <nl> mmm a / tensorflow / tools / api / golden / tensorflow . keras . layers . - convolution2 - d - transpose . pbtxt <nl> ppp b / tensorflow / tools / api / golden / tensorflow . keras . layers . - convolution2 - d - transpose . pbtxt <nl> tf_class { <nl> } <nl> member_method { <nl> name : " add_variable " <nl> - argspec : " args = [ \ ' self \ ' , \ ' name \ ' , \ ' shape \ ' , \ ' dtype \ ' , \ ' initializer \ ' , \ ' regularizer \ ' , \ ' trainable \ ' , \ ' constraint \ ' ] , varargs = None , keywords = None , defaults = [ \ ' None \ ' , \ ' None \ ' , \ ' None \ ' , \ ' True \ ' , \ ' None \ ' ] , " <nl> + argspec : " args = [ \ ' self \ ' , \ ' name \ ' , \ ' shape \ ' , \ ' dtype \ ' , \ ' initializer \ ' , \ ' regularizer \ ' , \ ' trainable \ ' , \ ' constraint \ ' , \ ' partitioner \ ' ] , varargs = None , keywords = None , defaults = [ \ ' None \ ' , \ ' None \ ' , \ ' None \ ' , \ ' True \ ' , \ ' None \ ' , \ ' None \ ' ] , " <nl> } <nl> member_method { <nl> name : " add_weight " <nl> mmm a / tensorflow / tools / api / golden / tensorflow . keras . layers . - convolution2 - d . pbtxt <nl> ppp b / tensorflow / tools / api / golden / tensorflow . keras . layers . - convolution2 - d . 
pbtxt <nl> tf_class { <nl> } <nl> member_method { <nl> name : " add_variable " <nl> - argspec : " args = [ \ ' self \ ' , \ ' name \ ' , \ ' shape \ ' , \ ' dtype \ ' , \ ' initializer \ ' , \ ' regularizer \ ' , \ ' trainable \ ' , \ ' constraint \ ' ] , varargs = None , keywords = None , defaults = [ \ ' None \ ' , \ ' None \ ' , \ ' None \ ' , \ ' True \ ' , \ ' None \ ' ] , " <nl> + argspec : " args = [ \ ' self \ ' , \ ' name \ ' , \ ' shape \ ' , \ ' dtype \ ' , \ ' initializer \ ' , \ ' regularizer \ ' , \ ' trainable \ ' , \ ' constraint \ ' , \ ' partitioner \ ' ] , varargs = None , keywords = None , defaults = [ \ ' None \ ' , \ ' None \ ' , \ ' None \ ' , \ ' True \ ' , \ ' None \ ' , \ ' None \ ' ] , " <nl> } <nl> member_method { <nl> name : " add_weight " <nl> mmm a / tensorflow / tools / api / golden / tensorflow . keras . layers . - convolution3 - d - transpose . pbtxt <nl> ppp b / tensorflow / tools / api / golden / tensorflow . keras . layers . - convolution3 - d - transpose . pbtxt <nl> tf_class { <nl> } <nl> member_method { <nl> name : " add_variable " <nl> - argspec : " args = [ \ ' self \ ' , \ ' name \ ' , \ ' shape \ ' , \ ' dtype \ ' , \ ' initializer \ ' , \ ' regularizer \ ' , \ ' trainable \ ' , \ ' constraint \ ' ] , varargs = None , keywords = None , defaults = [ \ ' None \ ' , \ ' None \ ' , \ ' None \ ' , \ ' True \ ' , \ ' None \ ' ] , " <nl> + argspec : " args = [ \ ' self \ ' , \ ' name \ ' , \ ' shape \ ' , \ ' dtype \ ' , \ ' initializer \ ' , \ ' regularizer \ ' , \ ' trainable \ ' , \ ' constraint \ ' , \ ' partitioner \ ' ] , varargs = None , keywords = None , defaults = [ \ ' None \ ' , \ ' None \ ' , \ ' None \ ' , \ ' True \ ' , \ ' None \ ' , \ ' None \ ' ] , " <nl> } <nl> member_method { <nl> name : " add_weight " <nl> mmm a / tensorflow / tools / api / golden / tensorflow . keras . layers . - convolution3 - d . pbtxt <nl> ppp b / tensorflow / tools / api / golden / tensorflow . keras . layers . - convolution3 - d . pbtxt <nl> tf_class { <nl> } <nl> member_method { <nl> name : " add_variable " <nl> - argspec : " args = [ \ ' self \ ' , \ ' name \ ' , \ ' shape \ ' , \ ' dtype \ ' , \ ' initializer \ ' , \ ' regularizer \ ' , \ ' trainable \ ' , \ ' constraint \ ' ] , varargs = None , keywords = None , defaults = [ \ ' None \ ' , \ ' None \ ' , \ ' None \ ' , \ ' True \ ' , \ ' None \ ' ] , " <nl> + argspec : " args = [ \ ' self \ ' , \ ' name \ ' , \ ' shape \ ' , \ ' dtype \ ' , \ ' initializer \ ' , \ ' regularizer \ ' , \ ' trainable \ ' , \ ' constraint \ ' , \ ' partitioner \ ' ] , varargs = None , keywords = None , defaults = [ \ ' None \ ' , \ ' None \ ' , \ ' None \ ' , \ ' True \ ' , \ ' None \ ' , \ ' None \ ' ] , " <nl> } <nl> member_method { <nl> name : " add_weight " <nl> mmm a / tensorflow / tools / api / golden / tensorflow . keras . layers . - cropping1 - d . pbtxt <nl> ppp b / tensorflow / tools / api / golden / tensorflow . keras . layers . - cropping1 - d . 
pbtxt <nl> tf_class { <nl> } <nl> member_method { <nl> name : " add_variable " <nl> - argspec : " args = [ \ ' self \ ' , \ ' name \ ' , \ ' shape \ ' , \ ' dtype \ ' , \ ' initializer \ ' , \ ' regularizer \ ' , \ ' trainable \ ' , \ ' constraint \ ' ] , varargs = None , keywords = None , defaults = [ \ ' None \ ' , \ ' None \ ' , \ ' None \ ' , \ ' True \ ' , \ ' None \ ' ] , " <nl> + argspec : " args = [ \ ' self \ ' , \ ' name \ ' , \ ' shape \ ' , \ ' dtype \ ' , \ ' initializer \ ' , \ ' regularizer \ ' , \ ' trainable \ ' , \ ' constraint \ ' , \ ' partitioner \ ' ] , varargs = None , keywords = None , defaults = [ \ ' None \ ' , \ ' None \ ' , \ ' None \ ' , \ ' True \ ' , \ ' None \ ' , \ ' None \ ' ] , " <nl> } <nl> member_method { <nl> name : " add_weight " <nl> mmm a / tensorflow / tools / api / golden / tensorflow . keras . layers . - cropping2 - d . pbtxt <nl> ppp b / tensorflow / tools / api / golden / tensorflow . keras . layers . - cropping2 - d . pbtxt <nl> tf_class { <nl> } <nl> member_method { <nl> name : " add_variable " <nl> - argspec : " args = [ \ ' self \ ' , \ ' name \ ' , \ ' shape \ ' , \ ' dtype \ ' , \ ' initializer \ ' , \ ' regularizer \ ' , \ ' trainable \ ' , \ ' constraint \ ' ] , varargs = None , keywords = None , defaults = [ \ ' None \ ' , \ ' None \ ' , \ ' None \ ' , \ ' True \ ' , \ ' None \ ' ] , " <nl> + argspec : " args = [ \ ' self \ ' , \ ' name \ ' , \ ' shape \ ' , \ ' dtype \ ' , \ ' initializer \ ' , \ ' regularizer \ ' , \ ' trainable \ ' , \ ' constraint \ ' , \ ' partitioner \ ' ] , varargs = None , keywords = None , defaults = [ \ ' None \ ' , \ ' None \ ' , \ ' None \ ' , \ ' True \ ' , \ ' None \ ' , \ ' None \ ' ] , " <nl> } <nl> member_method { <nl> name : " add_weight " <nl> mmm a / tensorflow / tools / api / golden / tensorflow . keras . layers . - cropping3 - d . pbtxt <nl> ppp b / tensorflow / tools / api / golden / tensorflow . keras . layers . - cropping3 - d . pbtxt <nl> tf_class { <nl> } <nl> member_method { <nl> name : " add_variable " <nl> - argspec : " args = [ \ ' self \ ' , \ ' name \ ' , \ ' shape \ ' , \ ' dtype \ ' , \ ' initializer \ ' , \ ' regularizer \ ' , \ ' trainable \ ' , \ ' constraint \ ' ] , varargs = None , keywords = None , defaults = [ \ ' None \ ' , \ ' None \ ' , \ ' None \ ' , \ ' True \ ' , \ ' None \ ' ] , " <nl> + argspec : " args = [ \ ' self \ ' , \ ' name \ ' , \ ' shape \ ' , \ ' dtype \ ' , \ ' initializer \ ' , \ ' regularizer \ ' , \ ' trainable \ ' , \ ' constraint \ ' , \ ' partitioner \ ' ] , varargs = None , keywords = None , defaults = [ \ ' None \ ' , \ ' None \ ' , \ ' None \ ' , \ ' True \ ' , \ ' None \ ' , \ ' None \ ' ] , " <nl> } <nl> member_method { <nl> name : " add_weight " <nl> mmm a / tensorflow / tools / api / golden / tensorflow . keras . layers . - dense . pbtxt <nl> ppp b / tensorflow / tools / api / golden / tensorflow . keras . layers . - dense . 
pbtxt <nl> tf_class { <nl> } <nl> member_method { <nl> name : " add_variable " <nl> - argspec : " args = [ \ ' self \ ' , \ ' name \ ' , \ ' shape \ ' , \ ' dtype \ ' , \ ' initializer \ ' , \ ' regularizer \ ' , \ ' trainable \ ' , \ ' constraint \ ' ] , varargs = None , keywords = None , defaults = [ \ ' None \ ' , \ ' None \ ' , \ ' None \ ' , \ ' True \ ' , \ ' None \ ' ] , " <nl> + argspec : " args = [ \ ' self \ ' , \ ' name \ ' , \ ' shape \ ' , \ ' dtype \ ' , \ ' initializer \ ' , \ ' regularizer \ ' , \ ' trainable \ ' , \ ' constraint \ ' , \ ' partitioner \ ' ] , varargs = None , keywords = None , defaults = [ \ ' None \ ' , \ ' None \ ' , \ ' None \ ' , \ ' True \ ' , \ ' None \ ' , \ ' None \ ' ] , " <nl> } <nl> member_method { <nl> name : " add_weight " <nl> mmm a / tensorflow / tools / api / golden / tensorflow . keras . layers . - dot . pbtxt <nl> ppp b / tensorflow / tools / api / golden / tensorflow . keras . layers . - dot . pbtxt <nl> tf_class { <nl> } <nl> member_method { <nl> name : " add_variable " <nl> - argspec : " args = [ \ ' self \ ' , \ ' name \ ' , \ ' shape \ ' , \ ' dtype \ ' , \ ' initializer \ ' , \ ' regularizer \ ' , \ ' trainable \ ' , \ ' constraint \ ' ] , varargs = None , keywords = None , defaults = [ \ ' None \ ' , \ ' None \ ' , \ ' None \ ' , \ ' True \ ' , \ ' None \ ' ] , " <nl> + argspec : " args = [ \ ' self \ ' , \ ' name \ ' , \ ' shape \ ' , \ ' dtype \ ' , \ ' initializer \ ' , \ ' regularizer \ ' , \ ' trainable \ ' , \ ' constraint \ ' , \ ' partitioner \ ' ] , varargs = None , keywords = None , defaults = [ \ ' None \ ' , \ ' None \ ' , \ ' None \ ' , \ ' True \ ' , \ ' None \ ' , \ ' None \ ' ] , " <nl> } <nl> member_method { <nl> name : " add_weight " <nl> mmm a / tensorflow / tools / api / golden / tensorflow . keras . layers . - dropout . pbtxt <nl> ppp b / tensorflow / tools / api / golden / tensorflow . keras . layers . - dropout . pbtxt <nl> tf_class { <nl> } <nl> member_method { <nl> name : " add_variable " <nl> - argspec : " args = [ \ ' self \ ' , \ ' name \ ' , \ ' shape \ ' , \ ' dtype \ ' , \ ' initializer \ ' , \ ' regularizer \ ' , \ ' trainable \ ' , \ ' constraint \ ' ] , varargs = None , keywords = None , defaults = [ \ ' None \ ' , \ ' None \ ' , \ ' None \ ' , \ ' True \ ' , \ ' None \ ' ] , " <nl> + argspec : " args = [ \ ' self \ ' , \ ' name \ ' , \ ' shape \ ' , \ ' dtype \ ' , \ ' initializer \ ' , \ ' regularizer \ ' , \ ' trainable \ ' , \ ' constraint \ ' , \ ' partitioner \ ' ] , varargs = None , keywords = None , defaults = [ \ ' None \ ' , \ ' None \ ' , \ ' None \ ' , \ ' True \ ' , \ ' None \ ' , \ ' None \ ' ] , " <nl> } <nl> member_method { <nl> name : " add_weight " <nl> mmm a / tensorflow / tools / api / golden / tensorflow . keras . layers . - e - l - u . pbtxt <nl> ppp b / tensorflow / tools / api / golden / tensorflow . keras . layers . - e - l - u . 
pbtxt <nl> tf_class { <nl> } <nl> member_method { <nl> name : " add_variable " <nl> - argspec : " args = [ \ ' self \ ' , \ ' name \ ' , \ ' shape \ ' , \ ' dtype \ ' , \ ' initializer \ ' , \ ' regularizer \ ' , \ ' trainable \ ' , \ ' constraint \ ' ] , varargs = None , keywords = None , defaults = [ \ ' None \ ' , \ ' None \ ' , \ ' None \ ' , \ ' True \ ' , \ ' None \ ' ] , " <nl> + argspec : " args = [ \ ' self \ ' , \ ' name \ ' , \ ' shape \ ' , \ ' dtype \ ' , \ ' initializer \ ' , \ ' regularizer \ ' , \ ' trainable \ ' , \ ' constraint \ ' , \ ' partitioner \ ' ] , varargs = None , keywords = None , defaults = [ \ ' None \ ' , \ ' None \ ' , \ ' None \ ' , \ ' True \ ' , \ ' None \ ' , \ ' None \ ' ] , " <nl> } <nl> member_method { <nl> name : " add_weight " <nl> mmm a / tensorflow / tools / api / golden / tensorflow . keras . layers . - embedding . pbtxt <nl> ppp b / tensorflow / tools / api / golden / tensorflow . keras . layers . - embedding . pbtxt <nl> tf_class { <nl> } <nl> member_method { <nl> name : " add_variable " <nl> - argspec : " args = [ \ ' self \ ' , \ ' name \ ' , \ ' shape \ ' , \ ' dtype \ ' , \ ' initializer \ ' , \ ' regularizer \ ' , \ ' trainable \ ' , \ ' constraint \ ' ] , varargs = None , keywords = None , defaults = [ \ ' None \ ' , \ ' None \ ' , \ ' None \ ' , \ ' True \ ' , \ ' None \ ' ] , " <nl> + argspec : " args = [ \ ' self \ ' , \ ' name \ ' , \ ' shape \ ' , \ ' dtype \ ' , \ ' initializer \ ' , \ ' regularizer \ ' , \ ' trainable \ ' , \ ' constraint \ ' , \ ' partitioner \ ' ] , varargs = None , keywords = None , defaults = [ \ ' None \ ' , \ ' None \ ' , \ ' None \ ' , \ ' True \ ' , \ ' None \ ' , \ ' None \ ' ] , " <nl> } <nl> member_method { <nl> name : " add_weight " <nl> mmm a / tensorflow / tools / api / golden / tensorflow . keras . layers . - flatten . pbtxt <nl> ppp b / tensorflow / tools / api / golden / tensorflow . keras . layers . - flatten . pbtxt <nl> tf_class { <nl> } <nl> member_method { <nl> name : " add_variable " <nl> - argspec : " args = [ \ ' self \ ' , \ ' name \ ' , \ ' shape \ ' , \ ' dtype \ ' , \ ' initializer \ ' , \ ' regularizer \ ' , \ ' trainable \ ' , \ ' constraint \ ' ] , varargs = None , keywords = None , defaults = [ \ ' None \ ' , \ ' None \ ' , \ ' None \ ' , \ ' True \ ' , \ ' None \ ' ] , " <nl> + argspec : " args = [ \ ' self \ ' , \ ' name \ ' , \ ' shape \ ' , \ ' dtype \ ' , \ ' initializer \ ' , \ ' regularizer \ ' , \ ' trainable \ ' , \ ' constraint \ ' , \ ' partitioner \ ' ] , varargs = None , keywords = None , defaults = [ \ ' None \ ' , \ ' None \ ' , \ ' None \ ' , \ ' True \ ' , \ ' None \ ' , \ ' None \ ' ] , " <nl> } <nl> member_method { <nl> name : " add_weight " <nl> mmm a / tensorflow / tools / api / golden / tensorflow . keras . layers . - g - r - u . pbtxt <nl> ppp b / tensorflow / tools / api / golden / tensorflow . keras . layers . - g - r - u . 
pbtxt <nl> tf_class { <nl> } <nl> member_method { <nl> name : " add_variable " <nl> - argspec : " args = [ \ ' self \ ' , \ ' name \ ' , \ ' shape \ ' , \ ' dtype \ ' , \ ' initializer \ ' , \ ' regularizer \ ' , \ ' trainable \ ' , \ ' constraint \ ' ] , varargs = None , keywords = None , defaults = [ \ ' None \ ' , \ ' None \ ' , \ ' None \ ' , \ ' True \ ' , \ ' None \ ' ] , " <nl> + argspec : " args = [ \ ' self \ ' , \ ' name \ ' , \ ' shape \ ' , \ ' dtype \ ' , \ ' initializer \ ' , \ ' regularizer \ ' , \ ' trainable \ ' , \ ' constraint \ ' , \ ' partitioner \ ' ] , varargs = None , keywords = None , defaults = [ \ ' None \ ' , \ ' None \ ' , \ ' None \ ' , \ ' True \ ' , \ ' None \ ' , \ ' None \ ' ] , " <nl> } <nl> member_method { <nl> name : " add_weight " <nl> mmm a / tensorflow / tools / api / golden / tensorflow . keras . layers . - gaussian - dropout . pbtxt <nl> ppp b / tensorflow / tools / api / golden / tensorflow . keras . layers . - gaussian - dropout . pbtxt <nl> tf_class { <nl> } <nl> member_method { <nl> name : " add_variable " <nl> - argspec : " args = [ \ ' self \ ' , \ ' name \ ' , \ ' shape \ ' , \ ' dtype \ ' , \ ' initializer \ ' , \ ' regularizer \ ' , \ ' trainable \ ' , \ ' constraint \ ' ] , varargs = None , keywords = None , defaults = [ \ ' None \ ' , \ ' None \ ' , \ ' None \ ' , \ ' True \ ' , \ ' None \ ' ] , " <nl> + argspec : " args = [ \ ' self \ ' , \ ' name \ ' , \ ' shape \ ' , \ ' dtype \ ' , \ ' initializer \ ' , \ ' regularizer \ ' , \ ' trainable \ ' , \ ' constraint \ ' , \ ' partitioner \ ' ] , varargs = None , keywords = None , defaults = [ \ ' None \ ' , \ ' None \ ' , \ ' None \ ' , \ ' True \ ' , \ ' None \ ' , \ ' None \ ' ] , " <nl> } <nl> member_method { <nl> name : " add_weight " <nl> mmm a / tensorflow / tools / api / golden / tensorflow . keras . layers . - gaussian - noise . pbtxt <nl> ppp b / tensorflow / tools / api / golden / tensorflow . keras . layers . - gaussian - noise . pbtxt <nl> tf_class { <nl> } <nl> member_method { <nl> name : " add_variable " <nl> - argspec : " args = [ \ ' self \ ' , \ ' name \ ' , \ ' shape \ ' , \ ' dtype \ ' , \ ' initializer \ ' , \ ' regularizer \ ' , \ ' trainable \ ' , \ ' constraint \ ' ] , varargs = None , keywords = None , defaults = [ \ ' None \ ' , \ ' None \ ' , \ ' None \ ' , \ ' True \ ' , \ ' None \ ' ] , " <nl> + argspec : " args = [ \ ' self \ ' , \ ' name \ ' , \ ' shape \ ' , \ ' dtype \ ' , \ ' initializer \ ' , \ ' regularizer \ ' , \ ' trainable \ ' , \ ' constraint \ ' , \ ' partitioner \ ' ] , varargs = None , keywords = None , defaults = [ \ ' None \ ' , \ ' None \ ' , \ ' None \ ' , \ ' True \ ' , \ ' None \ ' , \ ' None \ ' ] , " <nl> } <nl> member_method { <nl> name : " add_weight " <nl> mmm a / tensorflow / tools / api / golden / tensorflow . keras . layers . - global - average - pooling1 - d . pbtxt <nl> ppp b / tensorflow / tools / api / golden / tensorflow . keras . layers . - global - average - pooling1 - d . 
pbtxt <nl> tf_class { <nl> } <nl> member_method { <nl> name : " add_variable " <nl> - argspec : " args = [ \ ' self \ ' , \ ' name \ ' , \ ' shape \ ' , \ ' dtype \ ' , \ ' initializer \ ' , \ ' regularizer \ ' , \ ' trainable \ ' , \ ' constraint \ ' ] , varargs = None , keywords = None , defaults = [ \ ' None \ ' , \ ' None \ ' , \ ' None \ ' , \ ' True \ ' , \ ' None \ ' ] , " <nl> + argspec : " args = [ \ ' self \ ' , \ ' name \ ' , \ ' shape \ ' , \ ' dtype \ ' , \ ' initializer \ ' , \ ' regularizer \ ' , \ ' trainable \ ' , \ ' constraint \ ' , \ ' partitioner \ ' ] , varargs = None , keywords = None , defaults = [ \ ' None \ ' , \ ' None \ ' , \ ' None \ ' , \ ' True \ ' , \ ' None \ ' , \ ' None \ ' ] , " <nl> } <nl> member_method { <nl> name : " add_weight " <nl> mmm a / tensorflow / tools / api / golden / tensorflow . keras . layers . - global - average - pooling2 - d . pbtxt <nl> ppp b / tensorflow / tools / api / golden / tensorflow . keras . layers . - global - average - pooling2 - d . pbtxt <nl> tf_class { <nl> } <nl> member_method { <nl> name : " add_variable " <nl> - argspec : " args = [ \ ' self \ ' , \ ' name \ ' , \ ' shape \ ' , \ ' dtype \ ' , \ ' initializer \ ' , \ ' regularizer \ ' , \ ' trainable \ ' , \ ' constraint \ ' ] , varargs = None , keywords = None , defaults = [ \ ' None \ ' , \ ' None \ ' , \ ' None \ ' , \ ' True \ ' , \ ' None \ ' ] , " <nl> + argspec : " args = [ \ ' self \ ' , \ ' name \ ' , \ ' shape \ ' , \ ' dtype \ ' , \ ' initializer \ ' , \ ' regularizer \ ' , \ ' trainable \ ' , \ ' constraint \ ' , \ ' partitioner \ ' ] , varargs = None , keywords = None , defaults = [ \ ' None \ ' , \ ' None \ ' , \ ' None \ ' , \ ' True \ ' , \ ' None \ ' , \ ' None \ ' ] , " <nl> } <nl> member_method { <nl> name : " add_weight " <nl> mmm a / tensorflow / tools / api / golden / tensorflow . keras . layers . - global - average - pooling3 - d . pbtxt <nl> ppp b / tensorflow / tools / api / golden / tensorflow . keras . layers . - global - average - pooling3 - d . pbtxt <nl> tf_class { <nl> } <nl> member_method { <nl> name : " add_variable " <nl> - argspec : " args = [ \ ' self \ ' , \ ' name \ ' , \ ' shape \ ' , \ ' dtype \ ' , \ ' initializer \ ' , \ ' regularizer \ ' , \ ' trainable \ ' , \ ' constraint \ ' ] , varargs = None , keywords = None , defaults = [ \ ' None \ ' , \ ' None \ ' , \ ' None \ ' , \ ' True \ ' , \ ' None \ ' ] , " <nl> + argspec : " args = [ \ ' self \ ' , \ ' name \ ' , \ ' shape \ ' , \ ' dtype \ ' , \ ' initializer \ ' , \ ' regularizer \ ' , \ ' trainable \ ' , \ ' constraint \ ' , \ ' partitioner \ ' ] , varargs = None , keywords = None , defaults = [ \ ' None \ ' , \ ' None \ ' , \ ' None \ ' , \ ' True \ ' , \ ' None \ ' , \ ' None \ ' ] , " <nl> } <nl> member_method { <nl> name : " add_weight " <nl> mmm a / tensorflow / tools / api / golden / tensorflow . keras . layers . - global - avg - pool1 - d . pbtxt <nl> ppp b / tensorflow / tools / api / golden / tensorflow . keras . layers . - global - avg - pool1 - d . 
pbtxt <nl> tf_class { <nl> } <nl> member_method { <nl> name : " add_variable " <nl> - argspec : " args = [ \ ' self \ ' , \ ' name \ ' , \ ' shape \ ' , \ ' dtype \ ' , \ ' initializer \ ' , \ ' regularizer \ ' , \ ' trainable \ ' , \ ' constraint \ ' ] , varargs = None , keywords = None , defaults = [ \ ' None \ ' , \ ' None \ ' , \ ' None \ ' , \ ' True \ ' , \ ' None \ ' ] , " <nl> + argspec : " args = [ \ ' self \ ' , \ ' name \ ' , \ ' shape \ ' , \ ' dtype \ ' , \ ' initializer \ ' , \ ' regularizer \ ' , \ ' trainable \ ' , \ ' constraint \ ' , \ ' partitioner \ ' ] , varargs = None , keywords = None , defaults = [ \ ' None \ ' , \ ' None \ ' , \ ' None \ ' , \ ' True \ ' , \ ' None \ ' , \ ' None \ ' ] , " <nl> } <nl> member_method { <nl> name : " add_weight " <nl> mmm a / tensorflow / tools / api / golden / tensorflow . keras . layers . - global - avg - pool2 - d . pbtxt <nl> ppp b / tensorflow / tools / api / golden / tensorflow . keras . layers . - global - avg - pool2 - d . pbtxt <nl> tf_class { <nl> } <nl> member_method { <nl> name : " add_variable " <nl> - argspec : " args = [ \ ' self \ ' , \ ' name \ ' , \ ' shape \ ' , \ ' dtype \ ' , \ ' initializer \ ' , \ ' regularizer \ ' , \ ' trainable \ ' , \ ' constraint \ ' ] , varargs = None , keywords = None , defaults = [ \ ' None \ ' , \ ' None \ ' , \ ' None \ ' , \ ' True \ ' , \ ' None \ ' ] , " <nl> + argspec : " args = [ \ ' self \ ' , \ ' name \ ' , \ ' shape \ ' , \ ' dtype \ ' , \ ' initializer \ ' , \ ' regularizer \ ' , \ ' trainable \ ' , \ ' constraint \ ' , \ ' partitioner \ ' ] , varargs = None , keywords = None , defaults = [ \ ' None \ ' , \ ' None \ ' , \ ' None \ ' , \ ' True \ ' , \ ' None \ ' , \ ' None \ ' ] , " <nl> } <nl> member_method { <nl> name : " add_weight " <nl> mmm a / tensorflow / tools / api / golden / tensorflow . keras . layers . - global - avg - pool3 - d . pbtxt <nl> ppp b / tensorflow / tools / api / golden / tensorflow . keras . layers . - global - avg - pool3 - d . pbtxt <nl> tf_class { <nl> } <nl> member_method { <nl> name : " add_variable " <nl> - argspec : " args = [ \ ' self \ ' , \ ' name \ ' , \ ' shape \ ' , \ ' dtype \ ' , \ ' initializer \ ' , \ ' regularizer \ ' , \ ' trainable \ ' , \ ' constraint \ ' ] , varargs = None , keywords = None , defaults = [ \ ' None \ ' , \ ' None \ ' , \ ' None \ ' , \ ' True \ ' , \ ' None \ ' ] , " <nl> + argspec : " args = [ \ ' self \ ' , \ ' name \ ' , \ ' shape \ ' , \ ' dtype \ ' , \ ' initializer \ ' , \ ' regularizer \ ' , \ ' trainable \ ' , \ ' constraint \ ' , \ ' partitioner \ ' ] , varargs = None , keywords = None , defaults = [ \ ' None \ ' , \ ' None \ ' , \ ' None \ ' , \ ' True \ ' , \ ' None \ ' , \ ' None \ ' ] , " <nl> } <nl> member_method { <nl> name : " add_weight " <nl> mmm a / tensorflow / tools / api / golden / tensorflow . keras . layers . - global - max - pool1 - d . pbtxt <nl> ppp b / tensorflow / tools / api / golden / tensorflow . keras . layers . - global - max - pool1 - d . 
pbtxt <nl> tf_class { <nl> } <nl> member_method { <nl> name : " add_variable " <nl> - argspec : " args = [ \ ' self \ ' , \ ' name \ ' , \ ' shape \ ' , \ ' dtype \ ' , \ ' initializer \ ' , \ ' regularizer \ ' , \ ' trainable \ ' , \ ' constraint \ ' ] , varargs = None , keywords = None , defaults = [ \ ' None \ ' , \ ' None \ ' , \ ' None \ ' , \ ' True \ ' , \ ' None \ ' ] , " <nl> + argspec : " args = [ \ ' self \ ' , \ ' name \ ' , \ ' shape \ ' , \ ' dtype \ ' , \ ' initializer \ ' , \ ' regularizer \ ' , \ ' trainable \ ' , \ ' constraint \ ' , \ ' partitioner \ ' ] , varargs = None , keywords = None , defaults = [ \ ' None \ ' , \ ' None \ ' , \ ' None \ ' , \ ' True \ ' , \ ' None \ ' , \ ' None \ ' ] , " <nl> } <nl> member_method { <nl> name : " add_weight " <nl> mmm a / tensorflow / tools / api / golden / tensorflow . keras . layers . - global - max - pool2 - d . pbtxt <nl> ppp b / tensorflow / tools / api / golden / tensorflow . keras . layers . - global - max - pool2 - d . pbtxt <nl> tf_class { <nl> } <nl> member_method { <nl> name : " add_variable " <nl> - argspec : " args = [ \ ' self \ ' , \ ' name \ ' , \ ' shape \ ' , \ ' dtype \ ' , \ ' initializer \ ' , \ ' regularizer \ ' , \ ' trainable \ ' , \ ' constraint \ ' ] , varargs = None , keywords = None , defaults = [ \ ' None \ ' , \ ' None \ ' , \ ' None \ ' , \ ' True \ ' , \ ' None \ ' ] , " <nl> + argspec : " args = [ \ ' self \ ' , \ ' name \ ' , \ ' shape \ ' , \ ' dtype \ ' , \ ' initializer \ ' , \ ' regularizer \ ' , \ ' trainable \ ' , \ ' constraint \ ' , \ ' partitioner \ ' ] , varargs = None , keywords = None , defaults = [ \ ' None \ ' , \ ' None \ ' , \ ' None \ ' , \ ' True \ ' , \ ' None \ ' , \ ' None \ ' ] , " <nl> } <nl> member_method { <nl> name : " add_weight " <nl> mmm a / tensorflow / tools / api / golden / tensorflow . keras . layers . - global - max - pool3 - d . pbtxt <nl> ppp b / tensorflow / tools / api / golden / tensorflow . keras . layers . - global - max - pool3 - d . pbtxt <nl> tf_class { <nl> } <nl> member_method { <nl> name : " add_variable " <nl> - argspec : " args = [ \ ' self \ ' , \ ' name \ ' , \ ' shape \ ' , \ ' dtype \ ' , \ ' initializer \ ' , \ ' regularizer \ ' , \ ' trainable \ ' , \ ' constraint \ ' ] , varargs = None , keywords = None , defaults = [ \ ' None \ ' , \ ' None \ ' , \ ' None \ ' , \ ' True \ ' , \ ' None \ ' ] , " <nl> + argspec : " args = [ \ ' self \ ' , \ ' name \ ' , \ ' shape \ ' , \ ' dtype \ ' , \ ' initializer \ ' , \ ' regularizer \ ' , \ ' trainable \ ' , \ ' constraint \ ' , \ ' partitioner \ ' ] , varargs = None , keywords = None , defaults = [ \ ' None \ ' , \ ' None \ ' , \ ' None \ ' , \ ' True \ ' , \ ' None \ ' , \ ' None \ ' ] , " <nl> } <nl> member_method { <nl> name : " add_weight " <nl> mmm a / tensorflow / tools / api / golden / tensorflow . keras . layers . - global - max - pooling1 - d . pbtxt <nl> ppp b / tensorflow / tools / api / golden / tensorflow . keras . layers . - global - max - pooling1 - d . 
pbtxt <nl> tf_class { <nl> } <nl> member_method { <nl> name : " add_variable " <nl> - argspec : " args = [ \ ' self \ ' , \ ' name \ ' , \ ' shape \ ' , \ ' dtype \ ' , \ ' initializer \ ' , \ ' regularizer \ ' , \ ' trainable \ ' , \ ' constraint \ ' ] , varargs = None , keywords = None , defaults = [ \ ' None \ ' , \ ' None \ ' , \ ' None \ ' , \ ' True \ ' , \ ' None \ ' ] , " <nl> + argspec : " args = [ \ ' self \ ' , \ ' name \ ' , \ ' shape \ ' , \ ' dtype \ ' , \ ' initializer \ ' , \ ' regularizer \ ' , \ ' trainable \ ' , \ ' constraint \ ' , \ ' partitioner \ ' ] , varargs = None , keywords = None , defaults = [ \ ' None \ ' , \ ' None \ ' , \ ' None \ ' , \ ' True \ ' , \ ' None \ ' , \ ' None \ ' ] , " <nl> } <nl> member_method { <nl> name : " add_weight " <nl> mmm a / tensorflow / tools / api / golden / tensorflow . keras . layers . - global - max - pooling2 - d . pbtxt <nl> ppp b / tensorflow / tools / api / golden / tensorflow . keras . layers . - global - max - pooling2 - d . pbtxt <nl> tf_class { <nl> } <nl> member_method { <nl> name : " add_variable " <nl> - argspec : " args = [ \ ' self \ ' , \ ' name \ ' , \ ' shape \ ' , \ ' dtype \ ' , \ ' initializer \ ' , \ ' regularizer \ ' , \ ' trainable \ ' , \ ' constraint \ ' ] , varargs = None , keywords = None , defaults = [ \ ' None \ ' , \ ' None \ ' , \ ' None \ ' , \ ' True \ ' , \ ' None \ ' ] , " <nl> + argspec : " args = [ \ ' self \ ' , \ ' name \ ' , \ ' shape \ ' , \ ' dtype \ ' , \ ' initializer \ ' , \ ' regularizer \ ' , \ ' trainable \ ' , \ ' constraint \ ' , \ ' partitioner \ ' ] , varargs = None , keywords = None , defaults = [ \ ' None \ ' , \ ' None \ ' , \ ' None \ ' , \ ' True \ ' , \ ' None \ ' , \ ' None \ ' ] , " <nl> } <nl> member_method { <nl> name : " add_weight " <nl> mmm a / tensorflow / tools / api / golden / tensorflow . keras . layers . - global - max - pooling3 - d . pbtxt <nl> ppp b / tensorflow / tools / api / golden / tensorflow . keras . layers . - global - max - pooling3 - d . pbtxt <nl> tf_class { <nl> } <nl> member_method { <nl> name : " add_variable " <nl> - argspec : " args = [ \ ' self \ ' , \ ' name \ ' , \ ' shape \ ' , \ ' dtype \ ' , \ ' initializer \ ' , \ ' regularizer \ ' , \ ' trainable \ ' , \ ' constraint \ ' ] , varargs = None , keywords = None , defaults = [ \ ' None \ ' , \ ' None \ ' , \ ' None \ ' , \ ' True \ ' , \ ' None \ ' ] , " <nl> + argspec : " args = [ \ ' self \ ' , \ ' name \ ' , \ ' shape \ ' , \ ' dtype \ ' , \ ' initializer \ ' , \ ' regularizer \ ' , \ ' trainable \ ' , \ ' constraint \ ' , \ ' partitioner \ ' ] , varargs = None , keywords = None , defaults = [ \ ' None \ ' , \ ' None \ ' , \ ' None \ ' , \ ' True \ ' , \ ' None \ ' , \ ' None \ ' ] , " <nl> } <nl> member_method { <nl> name : " add_weight " <nl> mmm a / tensorflow / tools / api / golden / tensorflow . keras . layers . - input - layer . pbtxt <nl> ppp b / tensorflow / tools / api / golden / tensorflow . keras . layers . - input - layer . 
pbtxt <nl> tf_class { <nl> } <nl> member_method { <nl> name : " add_variable " <nl> - argspec : " args = [ \ ' self \ ' , \ ' name \ ' , \ ' shape \ ' , \ ' dtype \ ' , \ ' initializer \ ' , \ ' regularizer \ ' , \ ' trainable \ ' , \ ' constraint \ ' ] , varargs = None , keywords = None , defaults = [ \ ' None \ ' , \ ' None \ ' , \ ' None \ ' , \ ' True \ ' , \ ' None \ ' ] , " <nl> + argspec : " args = [ \ ' self \ ' , \ ' name \ ' , \ ' shape \ ' , \ ' dtype \ ' , \ ' initializer \ ' , \ ' regularizer \ ' , \ ' trainable \ ' , \ ' constraint \ ' , \ ' partitioner \ ' ] , varargs = None , keywords = None , defaults = [ \ ' None \ ' , \ ' None \ ' , \ ' None \ ' , \ ' True \ ' , \ ' None \ ' , \ ' None \ ' ] , " <nl> } <nl> member_method { <nl> name : " add_weight " <nl> mmm a / tensorflow / tools / api / golden / tensorflow . keras . layers . - l - s - t - m . pbtxt <nl> ppp b / tensorflow / tools / api / golden / tensorflow . keras . layers . - l - s - t - m . pbtxt <nl> tf_class { <nl> } <nl> member_method { <nl> name : " add_variable " <nl> - argspec : " args = [ \ ' self \ ' , \ ' name \ ' , \ ' shape \ ' , \ ' dtype \ ' , \ ' initializer \ ' , \ ' regularizer \ ' , \ ' trainable \ ' , \ ' constraint \ ' ] , varargs = None , keywords = None , defaults = [ \ ' None \ ' , \ ' None \ ' , \ ' None \ ' , \ ' True \ ' , \ ' None \ ' ] , " <nl> + argspec : " args = [ \ ' self \ ' , \ ' name \ ' , \ ' shape \ ' , \ ' dtype \ ' , \ ' initializer \ ' , \ ' regularizer \ ' , \ ' trainable \ ' , \ ' constraint \ ' , \ ' partitioner \ ' ] , varargs = None , keywords = None , defaults = [ \ ' None \ ' , \ ' None \ ' , \ ' None \ ' , \ ' True \ ' , \ ' None \ ' , \ ' None \ ' ] , " <nl> } <nl> member_method { <nl> name : " add_weight " <nl> mmm a / tensorflow / tools / api / golden / tensorflow . keras . layers . - lambda . pbtxt <nl> ppp b / tensorflow / tools / api / golden / tensorflow . keras . layers . - lambda . pbtxt <nl> tf_class { <nl> } <nl> member_method { <nl> name : " add_variable " <nl> - argspec : " args = [ \ ' self \ ' , \ ' name \ ' , \ ' shape \ ' , \ ' dtype \ ' , \ ' initializer \ ' , \ ' regularizer \ ' , \ ' trainable \ ' , \ ' constraint \ ' ] , varargs = None , keywords = None , defaults = [ \ ' None \ ' , \ ' None \ ' , \ ' None \ ' , \ ' True \ ' , \ ' None \ ' ] , " <nl> + argspec : " args = [ \ ' self \ ' , \ ' name \ ' , \ ' shape \ ' , \ ' dtype \ ' , \ ' initializer \ ' , \ ' regularizer \ ' , \ ' trainable \ ' , \ ' constraint \ ' , \ ' partitioner \ ' ] , varargs = None , keywords = None , defaults = [ \ ' None \ ' , \ ' None \ ' , \ ' None \ ' , \ ' True \ ' , \ ' None \ ' , \ ' None \ ' ] , " <nl> } <nl> member_method { <nl> name : " add_weight " <nl> mmm a / tensorflow / tools / api / golden / tensorflow . keras . layers . - layer . pbtxt <nl> ppp b / tensorflow / tools / api / golden / tensorflow . keras . layers . - layer . 
pbtxt <nl> tf_class { <nl> } <nl> member_method { <nl> name : " add_variable " <nl> - argspec : " args = [ \ ' self \ ' , \ ' name \ ' , \ ' shape \ ' , \ ' dtype \ ' , \ ' initializer \ ' , \ ' regularizer \ ' , \ ' trainable \ ' , \ ' constraint \ ' ] , varargs = None , keywords = None , defaults = [ \ ' None \ ' , \ ' None \ ' , \ ' None \ ' , \ ' True \ ' , \ ' None \ ' ] , " <nl> + argspec : " args = [ \ ' self \ ' , \ ' name \ ' , \ ' shape \ ' , \ ' dtype \ ' , \ ' initializer \ ' , \ ' regularizer \ ' , \ ' trainable \ ' , \ ' constraint \ ' , \ ' partitioner \ ' ] , varargs = None , keywords = None , defaults = [ \ ' None \ ' , \ ' None \ ' , \ ' None \ ' , \ ' True \ ' , \ ' None \ ' , \ ' None \ ' ] , " <nl> } <nl> member_method { <nl> name : " add_weight " <nl> mmm a / tensorflow / tools / api / golden / tensorflow . keras . layers . - leaky - re - l - u . pbtxt <nl> ppp b / tensorflow / tools / api / golden / tensorflow . keras . layers . - leaky - re - l - u . pbtxt <nl> tf_class { <nl> } <nl> member_method { <nl> name : " add_variable " <nl> - argspec : " args = [ \ ' self \ ' , \ ' name \ ' , \ ' shape \ ' , \ ' dtype \ ' , \ ' initializer \ ' , \ ' regularizer \ ' , \ ' trainable \ ' , \ ' constraint \ ' ] , varargs = None , keywords = None , defaults = [ \ ' None \ ' , \ ' None \ ' , \ ' None \ ' , \ ' True \ ' , \ ' None \ ' ] , " <nl> + argspec : " args = [ \ ' self \ ' , \ ' name \ ' , \ ' shape \ ' , \ ' dtype \ ' , \ ' initializer \ ' , \ ' regularizer \ ' , \ ' trainable \ ' , \ ' constraint \ ' , \ ' partitioner \ ' ] , varargs = None , keywords = None , defaults = [ \ ' None \ ' , \ ' None \ ' , \ ' None \ ' , \ ' True \ ' , \ ' None \ ' , \ ' None \ ' ] , " <nl> } <nl> member_method { <nl> name : " add_weight " <nl> mmm a / tensorflow / tools / api / golden / tensorflow . keras . layers . - locally - connected1 - d . pbtxt <nl> ppp b / tensorflow / tools / api / golden / tensorflow . keras . layers . - locally - connected1 - d . pbtxt <nl> tf_class { <nl> } <nl> member_method { <nl> name : " add_variable " <nl> - argspec : " args = [ \ ' self \ ' , \ ' name \ ' , \ ' shape \ ' , \ ' dtype \ ' , \ ' initializer \ ' , \ ' regularizer \ ' , \ ' trainable \ ' , \ ' constraint \ ' ] , varargs = None , keywords = None , defaults = [ \ ' None \ ' , \ ' None \ ' , \ ' None \ ' , \ ' True \ ' , \ ' None \ ' ] , " <nl> + argspec : " args = [ \ ' self \ ' , \ ' name \ ' , \ ' shape \ ' , \ ' dtype \ ' , \ ' initializer \ ' , \ ' regularizer \ ' , \ ' trainable \ ' , \ ' constraint \ ' , \ ' partitioner \ ' ] , varargs = None , keywords = None , defaults = [ \ ' None \ ' , \ ' None \ ' , \ ' None \ ' , \ ' True \ ' , \ ' None \ ' , \ ' None \ ' ] , " <nl> } <nl> member_method { <nl> name : " add_weight " <nl> mmm a / tensorflow / tools / api / golden / tensorflow . keras . layers . - locally - connected2 - d . pbtxt <nl> ppp b / tensorflow / tools / api / golden / tensorflow . keras . layers . - locally - connected2 - d . 
pbtxt <nl> tf_class { <nl> } <nl> member_method { <nl> name : " add_variable " <nl> - argspec : " args = [ \ ' self \ ' , \ ' name \ ' , \ ' shape \ ' , \ ' dtype \ ' , \ ' initializer \ ' , \ ' regularizer \ ' , \ ' trainable \ ' , \ ' constraint \ ' ] , varargs = None , keywords = None , defaults = [ \ ' None \ ' , \ ' None \ ' , \ ' None \ ' , \ ' True \ ' , \ ' None \ ' ] , " <nl> + argspec : " args = [ \ ' self \ ' , \ ' name \ ' , \ ' shape \ ' , \ ' dtype \ ' , \ ' initializer \ ' , \ ' regularizer \ ' , \ ' trainable \ ' , \ ' constraint \ ' , \ ' partitioner \ ' ] , varargs = None , keywords = None , defaults = [ \ ' None \ ' , \ ' None \ ' , \ ' None \ ' , \ ' True \ ' , \ ' None \ ' , \ ' None \ ' ] , " <nl> } <nl> member_method { <nl> name : " add_weight " <nl> mmm a / tensorflow / tools / api / golden / tensorflow . keras . layers . - masking . pbtxt <nl> ppp b / tensorflow / tools / api / golden / tensorflow . keras . layers . - masking . pbtxt <nl> tf_class { <nl> } <nl> member_method { <nl> name : " add_variable " <nl> - argspec : " args = [ \ ' self \ ' , \ ' name \ ' , \ ' shape \ ' , \ ' dtype \ ' , \ ' initializer \ ' , \ ' regularizer \ ' , \ ' trainable \ ' , \ ' constraint \ ' ] , varargs = None , keywords = None , defaults = [ \ ' None \ ' , \ ' None \ ' , \ ' None \ ' , \ ' True \ ' , \ ' None \ ' ] , " <nl> + argspec : " args = [ \ ' self \ ' , \ ' name \ ' , \ ' shape \ ' , \ ' dtype \ ' , \ ' initializer \ ' , \ ' regularizer \ ' , \ ' trainable \ ' , \ ' constraint \ ' , \ ' partitioner \ ' ] , varargs = None , keywords = None , defaults = [ \ ' None \ ' , \ ' None \ ' , \ ' None \ ' , \ ' True \ ' , \ ' None \ ' , \ ' None \ ' ] , " <nl> } <nl> member_method { <nl> name : " add_weight " <nl> mmm a / tensorflow / tools / api / golden / tensorflow . keras . layers . - max - pool1 - d . pbtxt <nl> ppp b / tensorflow / tools / api / golden / tensorflow . keras . layers . - max - pool1 - d . pbtxt <nl> tf_class { <nl> } <nl> member_method { <nl> name : " add_variable " <nl> - argspec : " args = [ \ ' self \ ' , \ ' name \ ' , \ ' shape \ ' , \ ' dtype \ ' , \ ' initializer \ ' , \ ' regularizer \ ' , \ ' trainable \ ' , \ ' constraint \ ' ] , varargs = None , keywords = None , defaults = [ \ ' None \ ' , \ ' None \ ' , \ ' None \ ' , \ ' True \ ' , \ ' None \ ' ] , " <nl> + argspec : " args = [ \ ' self \ ' , \ ' name \ ' , \ ' shape \ ' , \ ' dtype \ ' , \ ' initializer \ ' , \ ' regularizer \ ' , \ ' trainable \ ' , \ ' constraint \ ' , \ ' partitioner \ ' ] , varargs = None , keywords = None , defaults = [ \ ' None \ ' , \ ' None \ ' , \ ' None \ ' , \ ' True \ ' , \ ' None \ ' , \ ' None \ ' ] , " <nl> } <nl> member_method { <nl> name : " add_weight " <nl> mmm a / tensorflow / tools / api / golden / tensorflow . keras . layers . - max - pool2 - d . pbtxt <nl> ppp b / tensorflow / tools / api / golden / tensorflow . keras . layers . - max - pool2 - d . 
pbtxt <nl> tf_class { <nl> } <nl> member_method { <nl> name : " add_variable " <nl> - argspec : " args = [ \ ' self \ ' , \ ' name \ ' , \ ' shape \ ' , \ ' dtype \ ' , \ ' initializer \ ' , \ ' regularizer \ ' , \ ' trainable \ ' , \ ' constraint \ ' ] , varargs = None , keywords = None , defaults = [ \ ' None \ ' , \ ' None \ ' , \ ' None \ ' , \ ' True \ ' , \ ' None \ ' ] , " <nl> + argspec : " args = [ \ ' self \ ' , \ ' name \ ' , \ ' shape \ ' , \ ' dtype \ ' , \ ' initializer \ ' , \ ' regularizer \ ' , \ ' trainable \ ' , \ ' constraint \ ' , \ ' partitioner \ ' ] , varargs = None , keywords = None , defaults = [ \ ' None \ ' , \ ' None \ ' , \ ' None \ ' , \ ' True \ ' , \ ' None \ ' , \ ' None \ ' ] , " <nl> } <nl> member_method { <nl> name : " add_weight " <nl> mmm a / tensorflow / tools / api / golden / tensorflow . keras . layers . - max - pool3 - d . pbtxt <nl> ppp b / tensorflow / tools / api / golden / tensorflow . keras . layers . - max - pool3 - d . pbtxt <nl> tf_class { <nl> } <nl> member_method { <nl> name : " add_variable " <nl> - argspec : " args = [ \ ' self \ ' , \ ' name \ ' , \ ' shape \ ' , \ ' dtype \ ' , \ ' initializer \ ' , \ ' regularizer \ ' , \ ' trainable \ ' , \ ' constraint \ ' ] , varargs = None , keywords = None , defaults = [ \ ' None \ ' , \ ' None \ ' , \ ' None \ ' , \ ' True \ ' , \ ' None \ ' ] , " <nl> + argspec : " args = [ \ ' self \ ' , \ ' name \ ' , \ ' shape \ ' , \ ' dtype \ ' , \ ' initializer \ ' , \ ' regularizer \ ' , \ ' trainable \ ' , \ ' constraint \ ' , \ ' partitioner \ ' ] , varargs = None , keywords = None , defaults = [ \ ' None \ ' , \ ' None \ ' , \ ' None \ ' , \ ' True \ ' , \ ' None \ ' , \ ' None \ ' ] , " <nl> } <nl> member_method { <nl> name : " add_weight " <nl> mmm a / tensorflow / tools / api / golden / tensorflow . keras . layers . - max - pooling1 - d . pbtxt <nl> ppp b / tensorflow / tools / api / golden / tensorflow . keras . layers . - max - pooling1 - d . pbtxt <nl> tf_class { <nl> } <nl> member_method { <nl> name : " add_variable " <nl> - argspec : " args = [ \ ' self \ ' , \ ' name \ ' , \ ' shape \ ' , \ ' dtype \ ' , \ ' initializer \ ' , \ ' regularizer \ ' , \ ' trainable \ ' , \ ' constraint \ ' ] , varargs = None , keywords = None , defaults = [ \ ' None \ ' , \ ' None \ ' , \ ' None \ ' , \ ' True \ ' , \ ' None \ ' ] , " <nl> + argspec : " args = [ \ ' self \ ' , \ ' name \ ' , \ ' shape \ ' , \ ' dtype \ ' , \ ' initializer \ ' , \ ' regularizer \ ' , \ ' trainable \ ' , \ ' constraint \ ' , \ ' partitioner \ ' ] , varargs = None , keywords = None , defaults = [ \ ' None \ ' , \ ' None \ ' , \ ' None \ ' , \ ' True \ ' , \ ' None \ ' , \ ' None \ ' ] , " <nl> } <nl> member_method { <nl> name : " add_weight " <nl> mmm a / tensorflow / tools / api / golden / tensorflow . keras . layers . - max - pooling2 - d . pbtxt <nl> ppp b / tensorflow / tools / api / golden / tensorflow . keras . layers . - max - pooling2 - d . 
pbtxt <nl> tf_class { <nl> } <nl> member_method { <nl> name : " add_variable " <nl> - argspec : " args = [ \ ' self \ ' , \ ' name \ ' , \ ' shape \ ' , \ ' dtype \ ' , \ ' initializer \ ' , \ ' regularizer \ ' , \ ' trainable \ ' , \ ' constraint \ ' ] , varargs = None , keywords = None , defaults = [ \ ' None \ ' , \ ' None \ ' , \ ' None \ ' , \ ' True \ ' , \ ' None \ ' ] , " <nl> + argspec : " args = [ \ ' self \ ' , \ ' name \ ' , \ ' shape \ ' , \ ' dtype \ ' , \ ' initializer \ ' , \ ' regularizer \ ' , \ ' trainable \ ' , \ ' constraint \ ' , \ ' partitioner \ ' ] , varargs = None , keywords = None , defaults = [ \ ' None \ ' , \ ' None \ ' , \ ' None \ ' , \ ' True \ ' , \ ' None \ ' , \ ' None \ ' ] , " <nl> } <nl> member_method { <nl> name : " add_weight " <nl> mmm a / tensorflow / tools / api / golden / tensorflow . keras . layers . - max - pooling3 - d . pbtxt <nl> ppp b / tensorflow / tools / api / golden / tensorflow . keras . layers . - max - pooling3 - d . pbtxt <nl> tf_class { <nl> } <nl> member_method { <nl> name : " add_variable " <nl> - argspec : " args = [ \ ' self \ ' , \ ' name \ ' , \ ' shape \ ' , \ ' dtype \ ' , \ ' initializer \ ' , \ ' regularizer \ ' , \ ' trainable \ ' , \ ' constraint \ ' ] , varargs = None , keywords = None , defaults = [ \ ' None \ ' , \ ' None \ ' , \ ' None \ ' , \ ' True \ ' , \ ' None \ ' ] , " <nl> + argspec : " args = [ \ ' self \ ' , \ ' name \ ' , \ ' shape \ ' , \ ' dtype \ ' , \ ' initializer \ ' , \ ' regularizer \ ' , \ ' trainable \ ' , \ ' constraint \ ' , \ ' partitioner \ ' ] , varargs = None , keywords = None , defaults = [ \ ' None \ ' , \ ' None \ ' , \ ' None \ ' , \ ' True \ ' , \ ' None \ ' , \ ' None \ ' ] , " <nl> } <nl> member_method { <nl> name : " add_weight " <nl> mmm a / tensorflow / tools / api / golden / tensorflow . keras . layers . - maximum . pbtxt <nl> ppp b / tensorflow / tools / api / golden / tensorflow . keras . layers . - maximum . pbtxt <nl> tf_class { <nl> } <nl> member_method { <nl> name : " add_variable " <nl> - argspec : " args = [ \ ' self \ ' , \ ' name \ ' , \ ' shape \ ' , \ ' dtype \ ' , \ ' initializer \ ' , \ ' regularizer \ ' , \ ' trainable \ ' , \ ' constraint \ ' ] , varargs = None , keywords = None , defaults = [ \ ' None \ ' , \ ' None \ ' , \ ' None \ ' , \ ' True \ ' , \ ' None \ ' ] , " <nl> + argspec : " args = [ \ ' self \ ' , \ ' name \ ' , \ ' shape \ ' , \ ' dtype \ ' , \ ' initializer \ ' , \ ' regularizer \ ' , \ ' trainable \ ' , \ ' constraint \ ' , \ ' partitioner \ ' ] , varargs = None , keywords = None , defaults = [ \ ' None \ ' , \ ' None \ ' , \ ' None \ ' , \ ' True \ ' , \ ' None \ ' , \ ' None \ ' ] , " <nl> } <nl> member_method { <nl> name : " add_weight " <nl> mmm a / tensorflow / tools / api / golden / tensorflow . keras . layers . - multiply . pbtxt <nl> ppp b / tensorflow / tools / api / golden / tensorflow . keras . layers . - multiply . 
pbtxt <nl> tf_class { <nl> } <nl> member_method { <nl> name : " add_variable " <nl> - argspec : " args = [ \ ' self \ ' , \ ' name \ ' , \ ' shape \ ' , \ ' dtype \ ' , \ ' initializer \ ' , \ ' regularizer \ ' , \ ' trainable \ ' , \ ' constraint \ ' ] , varargs = None , keywords = None , defaults = [ \ ' None \ ' , \ ' None \ ' , \ ' None \ ' , \ ' True \ ' , \ ' None \ ' ] , " <nl> + argspec : " args = [ \ ' self \ ' , \ ' name \ ' , \ ' shape \ ' , \ ' dtype \ ' , \ ' initializer \ ' , \ ' regularizer \ ' , \ ' trainable \ ' , \ ' constraint \ ' , \ ' partitioner \ ' ] , varargs = None , keywords = None , defaults = [ \ ' None \ ' , \ ' None \ ' , \ ' None \ ' , \ ' True \ ' , \ ' None \ ' , \ ' None \ ' ] , " <nl> } <nl> member_method { <nl> name : " add_weight " <nl> mmm a / tensorflow / tools / api / golden / tensorflow . keras . layers . - p - re - l - u . pbtxt <nl> ppp b / tensorflow / tools / api / golden / tensorflow . keras . layers . - p - re - l - u . pbtxt <nl> tf_class { <nl> } <nl> member_method { <nl> name : " add_variable " <nl> - argspec : " args = [ \ ' self \ ' , \ ' name \ ' , \ ' shape \ ' , \ ' dtype \ ' , \ ' initializer \ ' , \ ' regularizer \ ' , \ ' trainable \ ' , \ ' constraint \ ' ] , varargs = None , keywords = None , defaults = [ \ ' None \ ' , \ ' None \ ' , \ ' None \ ' , \ ' True \ ' , \ ' None \ ' ] , " <nl> + argspec : " args = [ \ ' self \ ' , \ ' name \ ' , \ ' shape \ ' , \ ' dtype \ ' , \ ' initializer \ ' , \ ' regularizer \ ' , \ ' trainable \ ' , \ ' constraint \ ' , \ ' partitioner \ ' ] , varargs = None , keywords = None , defaults = [ \ ' None \ ' , \ ' None \ ' , \ ' None \ ' , \ ' True \ ' , \ ' None \ ' , \ ' None \ ' ] , " <nl> } <nl> member_method { <nl> name : " add_weight " <nl> mmm a / tensorflow / tools / api / golden / tensorflow . keras . layers . - permute . pbtxt <nl> ppp b / tensorflow / tools / api / golden / tensorflow . keras . layers . - permute . pbtxt <nl> tf_class { <nl> } <nl> member_method { <nl> name : " add_variable " <nl> - argspec : " args = [ \ ' self \ ' , \ ' name \ ' , \ ' shape \ ' , \ ' dtype \ ' , \ ' initializer \ ' , \ ' regularizer \ ' , \ ' trainable \ ' , \ ' constraint \ ' ] , varargs = None , keywords = None , defaults = [ \ ' None \ ' , \ ' None \ ' , \ ' None \ ' , \ ' True \ ' , \ ' None \ ' ] , " <nl> + argspec : " args = [ \ ' self \ ' , \ ' name \ ' , \ ' shape \ ' , \ ' dtype \ ' , \ ' initializer \ ' , \ ' regularizer \ ' , \ ' trainable \ ' , \ ' constraint \ ' , \ ' partitioner \ ' ] , varargs = None , keywords = None , defaults = [ \ ' None \ ' , \ ' None \ ' , \ ' None \ ' , \ ' True \ ' , \ ' None \ ' , \ ' None \ ' ] , " <nl> } <nl> member_method { <nl> name : " add_weight " <nl> mmm a / tensorflow / tools / api / golden / tensorflow . keras . layers . - repeat - vector . pbtxt <nl> ppp b / tensorflow / tools / api / golden / tensorflow . keras . layers . - repeat - vector . 
pbtxt <nl> tf_class { <nl> } <nl> member_method { <nl> name : " add_variable " <nl> - argspec : " args = [ \ ' self \ ' , \ ' name \ ' , \ ' shape \ ' , \ ' dtype \ ' , \ ' initializer \ ' , \ ' regularizer \ ' , \ ' trainable \ ' , \ ' constraint \ ' ] , varargs = None , keywords = None , defaults = [ \ ' None \ ' , \ ' None \ ' , \ ' None \ ' , \ ' True \ ' , \ ' None \ ' ] , " <nl> + argspec : " args = [ \ ' self \ ' , \ ' name \ ' , \ ' shape \ ' , \ ' dtype \ ' , \ ' initializer \ ' , \ ' regularizer \ ' , \ ' trainable \ ' , \ ' constraint \ ' , \ ' partitioner \ ' ] , varargs = None , keywords = None , defaults = [ \ ' None \ ' , \ ' None \ ' , \ ' None \ ' , \ ' True \ ' , \ ' None \ ' , \ ' None \ ' ] , " <nl> } <nl> member_method { <nl> name : " add_weight " <nl> mmm a / tensorflow / tools / api / golden / tensorflow . keras . layers . - reshape . pbtxt <nl> ppp b / tensorflow / tools / api / golden / tensorflow . keras . layers . - reshape . pbtxt <nl> tf_class { <nl> } <nl> member_method { <nl> name : " add_variable " <nl> - argspec : " args = [ \ ' self \ ' , \ ' name \ ' , \ ' shape \ ' , \ ' dtype \ ' , \ ' initializer \ ' , \ ' regularizer \ ' , \ ' trainable \ ' , \ ' constraint \ ' ] , varargs = None , keywords = None , defaults = [ \ ' None \ ' , \ ' None \ ' , \ ' None \ ' , \ ' True \ ' , \ ' None \ ' ] , " <nl> + argspec : " args = [ \ ' self \ ' , \ ' name \ ' , \ ' shape \ ' , \ ' dtype \ ' , \ ' initializer \ ' , \ ' regularizer \ ' , \ ' trainable \ ' , \ ' constraint \ ' , \ ' partitioner \ ' ] , varargs = None , keywords = None , defaults = [ \ ' None \ ' , \ ' None \ ' , \ ' None \ ' , \ ' True \ ' , \ ' None \ ' , \ ' None \ ' ] , " <nl> } <nl> member_method { <nl> name : " add_weight " <nl> mmm a / tensorflow / tools / api / golden / tensorflow . keras . layers . - separable - conv2 - d . pbtxt <nl> ppp b / tensorflow / tools / api / golden / tensorflow . keras . layers . - separable - conv2 - d . pbtxt <nl> tf_class { <nl> } <nl> member_method { <nl> name : " add_variable " <nl> - argspec : " args = [ \ ' self \ ' , \ ' name \ ' , \ ' shape \ ' , \ ' dtype \ ' , \ ' initializer \ ' , \ ' regularizer \ ' , \ ' trainable \ ' , \ ' constraint \ ' ] , varargs = None , keywords = None , defaults = [ \ ' None \ ' , \ ' None \ ' , \ ' None \ ' , \ ' True \ ' , \ ' None \ ' ] , " <nl> + argspec : " args = [ \ ' self \ ' , \ ' name \ ' , \ ' shape \ ' , \ ' dtype \ ' , \ ' initializer \ ' , \ ' regularizer \ ' , \ ' trainable \ ' , \ ' constraint \ ' , \ ' partitioner \ ' ] , varargs = None , keywords = None , defaults = [ \ ' None \ ' , \ ' None \ ' , \ ' None \ ' , \ ' True \ ' , \ ' None \ ' , \ ' None \ ' ] , " <nl> } <nl> member_method { <nl> name : " add_weight " <nl> mmm a / tensorflow / tools / api / golden / tensorflow . keras . layers . - separable - convolution2 - d . pbtxt <nl> ppp b / tensorflow / tools / api / golden / tensorflow . keras . layers . - separable - convolution2 - d . 
pbtxt <nl> tf_class { <nl> } <nl> member_method { <nl> name : " add_variable " <nl> - argspec : " args = [ \ ' self \ ' , \ ' name \ ' , \ ' shape \ ' , \ ' dtype \ ' , \ ' initializer \ ' , \ ' regularizer \ ' , \ ' trainable \ ' , \ ' constraint \ ' ] , varargs = None , keywords = None , defaults = [ \ ' None \ ' , \ ' None \ ' , \ ' None \ ' , \ ' True \ ' , \ ' None \ ' ] , " <nl> + argspec : " args = [ \ ' self \ ' , \ ' name \ ' , \ ' shape \ ' , \ ' dtype \ ' , \ ' initializer \ ' , \ ' regularizer \ ' , \ ' trainable \ ' , \ ' constraint \ ' , \ ' partitioner \ ' ] , varargs = None , keywords = None , defaults = [ \ ' None \ ' , \ ' None \ ' , \ ' None \ ' , \ ' True \ ' , \ ' None \ ' , \ ' None \ ' ] , " <nl> } <nl> member_method { <nl> name : " add_weight " <nl> mmm a / tensorflow / tools / api / golden / tensorflow . keras . layers . - simple - r - n - n . pbtxt <nl> ppp b / tensorflow / tools / api / golden / tensorflow . keras . layers . - simple - r - n - n . pbtxt <nl> tf_class { <nl> } <nl> member_method { <nl> name : " add_variable " <nl> - argspec : " args = [ \ ' self \ ' , \ ' name \ ' , \ ' shape \ ' , \ ' dtype \ ' , \ ' initializer \ ' , \ ' regularizer \ ' , \ ' trainable \ ' , \ ' constraint \ ' ] , varargs = None , keywords = None , defaults = [ \ ' None \ ' , \ ' None \ ' , \ ' None \ ' , \ ' True \ ' , \ ' None \ ' ] , " <nl> + argspec : " args = [ \ ' self \ ' , \ ' name \ ' , \ ' shape \ ' , \ ' dtype \ ' , \ ' initializer \ ' , \ ' regularizer \ ' , \ ' trainable \ ' , \ ' constraint \ ' , \ ' partitioner \ ' ] , varargs = None , keywords = None , defaults = [ \ ' None \ ' , \ ' None \ ' , \ ' None \ ' , \ ' True \ ' , \ ' None \ ' , \ ' None \ ' ] , " <nl> } <nl> member_method { <nl> name : " add_weight " <nl> mmm a / tensorflow / tools / api / golden / tensorflow . keras . layers . - spatial - dropout1 - d . pbtxt <nl> ppp b / tensorflow / tools / api / golden / tensorflow . keras . layers . - spatial - dropout1 - d . pbtxt <nl> tf_class { <nl> } <nl> member_method { <nl> name : " add_variable " <nl> - argspec : " args = [ \ ' self \ ' , \ ' name \ ' , \ ' shape \ ' , \ ' dtype \ ' , \ ' initializer \ ' , \ ' regularizer \ ' , \ ' trainable \ ' , \ ' constraint \ ' ] , varargs = None , keywords = None , defaults = [ \ ' None \ ' , \ ' None \ ' , \ ' None \ ' , \ ' True \ ' , \ ' None \ ' ] , " <nl> + argspec : " args = [ \ ' self \ ' , \ ' name \ ' , \ ' shape \ ' , \ ' dtype \ ' , \ ' initializer \ ' , \ ' regularizer \ ' , \ ' trainable \ ' , \ ' constraint \ ' , \ ' partitioner \ ' ] , varargs = None , keywords = None , defaults = [ \ ' None \ ' , \ ' None \ ' , \ ' None \ ' , \ ' True \ ' , \ ' None \ ' , \ ' None \ ' ] , " <nl> } <nl> member_method { <nl> name : " add_weight " <nl> mmm a / tensorflow / tools / api / golden / tensorflow . keras . layers . - spatial - dropout2 - d . pbtxt <nl> ppp b / tensorflow / tools / api / golden / tensorflow . keras . layers . - spatial - dropout2 - d . 
pbtxt <nl> tf_class { <nl> } <nl> member_method { <nl> name : " add_variable " <nl> - argspec : " args = [ \ ' self \ ' , \ ' name \ ' , \ ' shape \ ' , \ ' dtype \ ' , \ ' initializer \ ' , \ ' regularizer \ ' , \ ' trainable \ ' , \ ' constraint \ ' ] , varargs = None , keywords = None , defaults = [ \ ' None \ ' , \ ' None \ ' , \ ' None \ ' , \ ' True \ ' , \ ' None \ ' ] , " <nl> + argspec : " args = [ \ ' self \ ' , \ ' name \ ' , \ ' shape \ ' , \ ' dtype \ ' , \ ' initializer \ ' , \ ' regularizer \ ' , \ ' trainable \ ' , \ ' constraint \ ' , \ ' partitioner \ ' ] , varargs = None , keywords = None , defaults = [ \ ' None \ ' , \ ' None \ ' , \ ' None \ ' , \ ' True \ ' , \ ' None \ ' , \ ' None \ ' ] , " <nl> } <nl> member_method { <nl> name : " add_weight " <nl> mmm a / tensorflow / tools / api / golden / tensorflow . keras . layers . - spatial - dropout3 - d . pbtxt <nl> ppp b / tensorflow / tools / api / golden / tensorflow . keras . layers . - spatial - dropout3 - d . pbtxt <nl> tf_class { <nl> } <nl> member_method { <nl> name : " add_variable " <nl> - argspec : " args = [ \ ' self \ ' , \ ' name \ ' , \ ' shape \ ' , \ ' dtype \ ' , \ ' initializer \ ' , \ ' regularizer \ ' , \ ' trainable \ ' , \ ' constraint \ ' ] , varargs = None , keywords = None , defaults = [ \ ' None \ ' , \ ' None \ ' , \ ' None \ ' , \ ' True \ ' , \ ' None \ ' ] , " <nl> + argspec : " args = [ \ ' self \ ' , \ ' name \ ' , \ ' shape \ ' , \ ' dtype \ ' , \ ' initializer \ ' , \ ' regularizer \ ' , \ ' trainable \ ' , \ ' constraint \ ' , \ ' partitioner \ ' ] , varargs = None , keywords = None , defaults = [ \ ' None \ ' , \ ' None \ ' , \ ' None \ ' , \ ' True \ ' , \ ' None \ ' , \ ' None \ ' ] , " <nl> } <nl> member_method { <nl> name : " add_weight " <nl> mmm a / tensorflow / tools / api / golden / tensorflow . keras . layers . - thresholded - re - l - u . pbtxt <nl> ppp b / tensorflow / tools / api / golden / tensorflow . keras . layers . - thresholded - re - l - u . pbtxt <nl> tf_class { <nl> } <nl> member_method { <nl> name : " add_variable " <nl> - argspec : " args = [ \ ' self \ ' , \ ' name \ ' , \ ' shape \ ' , \ ' dtype \ ' , \ ' initializer \ ' , \ ' regularizer \ ' , \ ' trainable \ ' , \ ' constraint \ ' ] , varargs = None , keywords = None , defaults = [ \ ' None \ ' , \ ' None \ ' , \ ' None \ ' , \ ' True \ ' , \ ' None \ ' ] , " <nl> + argspec : " args = [ \ ' self \ ' , \ ' name \ ' , \ ' shape \ ' , \ ' dtype \ ' , \ ' initializer \ ' , \ ' regularizer \ ' , \ ' trainable \ ' , \ ' constraint \ ' , \ ' partitioner \ ' ] , varargs = None , keywords = None , defaults = [ \ ' None \ ' , \ ' None \ ' , \ ' None \ ' , \ ' True \ ' , \ ' None \ ' , \ ' None \ ' ] , " <nl> } <nl> member_method { <nl> name : " add_weight " <nl> mmm a / tensorflow / tools / api / golden / tensorflow . keras . layers . - time - distributed . pbtxt <nl> ppp b / tensorflow / tools / api / golden / tensorflow . keras . layers . - time - distributed . 
pbtxt <nl> tf_class { <nl> } <nl> member_method { <nl> name : " add_variable " <nl> - argspec : " args = [ \ ' self \ ' , \ ' name \ ' , \ ' shape \ ' , \ ' dtype \ ' , \ ' initializer \ ' , \ ' regularizer \ ' , \ ' trainable \ ' , \ ' constraint \ ' ] , varargs = None , keywords = None , defaults = [ \ ' None \ ' , \ ' None \ ' , \ ' None \ ' , \ ' True \ ' , \ ' None \ ' ] , " <nl> + argspec : " args = [ \ ' self \ ' , \ ' name \ ' , \ ' shape \ ' , \ ' dtype \ ' , \ ' initializer \ ' , \ ' regularizer \ ' , \ ' trainable \ ' , \ ' constraint \ ' , \ ' partitioner \ ' ] , varargs = None , keywords = None , defaults = [ \ ' None \ ' , \ ' None \ ' , \ ' None \ ' , \ ' True \ ' , \ ' None \ ' , \ ' None \ ' ] , " <nl> } <nl> member_method { <nl> name : " add_weight " <nl> mmm a / tensorflow / tools / api / golden / tensorflow . keras . layers . - up - sampling1 - d . pbtxt <nl> ppp b / tensorflow / tools / api / golden / tensorflow . keras . layers . - up - sampling1 - d . pbtxt <nl> tf_class { <nl> } <nl> member_method { <nl> name : " add_variable " <nl> - argspec : " args = [ \ ' self \ ' , \ ' name \ ' , \ ' shape \ ' , \ ' dtype \ ' , \ ' initializer \ ' , \ ' regularizer \ ' , \ ' trainable \ ' , \ ' constraint \ ' ] , varargs = None , keywords = None , defaults = [ \ ' None \ ' , \ ' None \ ' , \ ' None \ ' , \ ' True \ ' , \ ' None \ ' ] , " <nl> + argspec : " args = [ \ ' self \ ' , \ ' name \ ' , \ ' shape \ ' , \ ' dtype \ ' , \ ' initializer \ ' , \ ' regularizer \ ' , \ ' trainable \ ' , \ ' constraint \ ' , \ ' partitioner \ ' ] , varargs = None , keywords = None , defaults = [ \ ' None \ ' , \ ' None \ ' , \ ' None \ ' , \ ' True \ ' , \ ' None \ ' , \ ' None \ ' ] , " <nl> } <nl> member_method { <nl> name : " add_weight " <nl> mmm a / tensorflow / tools / api / golden / tensorflow . keras . layers . - up - sampling2 - d . pbtxt <nl> ppp b / tensorflow / tools / api / golden / tensorflow . keras . layers . - up - sampling2 - d . pbtxt <nl> tf_class { <nl> } <nl> member_method { <nl> name : " add_variable " <nl> - argspec : " args = [ \ ' self \ ' , \ ' name \ ' , \ ' shape \ ' , \ ' dtype \ ' , \ ' initializer \ ' , \ ' regularizer \ ' , \ ' trainable \ ' , \ ' constraint \ ' ] , varargs = None , keywords = None , defaults = [ \ ' None \ ' , \ ' None \ ' , \ ' None \ ' , \ ' True \ ' , \ ' None \ ' ] , " <nl> + argspec : " args = [ \ ' self \ ' , \ ' name \ ' , \ ' shape \ ' , \ ' dtype \ ' , \ ' initializer \ ' , \ ' regularizer \ ' , \ ' trainable \ ' , \ ' constraint \ ' , \ ' partitioner \ ' ] , varargs = None , keywords = None , defaults = [ \ ' None \ ' , \ ' None \ ' , \ ' None \ ' , \ ' True \ ' , \ ' None \ ' , \ ' None \ ' ] , " <nl> } <nl> member_method { <nl> name : " add_weight " <nl> mmm a / tensorflow / tools / api / golden / tensorflow . keras . layers . - up - sampling3 - d . pbtxt <nl> ppp b / tensorflow / tools / api / golden / tensorflow . keras . layers . - up - sampling3 - d . 
pbtxt <nl> tf_class { <nl> } <nl> member_method { <nl> name : " add_variable " <nl> - argspec : " args = [ \ ' self \ ' , \ ' name \ ' , \ ' shape \ ' , \ ' dtype \ ' , \ ' initializer \ ' , \ ' regularizer \ ' , \ ' trainable \ ' , \ ' constraint \ ' ] , varargs = None , keywords = None , defaults = [ \ ' None \ ' , \ ' None \ ' , \ ' None \ ' , \ ' True \ ' , \ ' None \ ' ] , " <nl> + argspec : " args = [ \ ' self \ ' , \ ' name \ ' , \ ' shape \ ' , \ ' dtype \ ' , \ ' initializer \ ' , \ ' regularizer \ ' , \ ' trainable \ ' , \ ' constraint \ ' , \ ' partitioner \ ' ] , varargs = None , keywords = None , defaults = [ \ ' None \ ' , \ ' None \ ' , \ ' None \ ' , \ ' True \ ' , \ ' None \ ' , \ ' None \ ' ] , " <nl> } <nl> member_method { <nl> name : " add_weight " <nl> mmm a / tensorflow / tools / api / golden / tensorflow . keras . layers . - wrapper . pbtxt <nl> ppp b / tensorflow / tools / api / golden / tensorflow . keras . layers . - wrapper . pbtxt <nl> tf_class { <nl> } <nl> member_method { <nl> name : " add_variable " <nl> - argspec : " args = [ \ ' self \ ' , \ ' name \ ' , \ ' shape \ ' , \ ' dtype \ ' , \ ' initializer \ ' , \ ' regularizer \ ' , \ ' trainable \ ' , \ ' constraint \ ' ] , varargs = None , keywords = None , defaults = [ \ ' None \ ' , \ ' None \ ' , \ ' None \ ' , \ ' True \ ' , \ ' None \ ' ] , " <nl> + argspec : " args = [ \ ' self \ ' , \ ' name \ ' , \ ' shape \ ' , \ ' dtype \ ' , \ ' initializer \ ' , \ ' regularizer \ ' , \ ' trainable \ ' , \ ' constraint \ ' , \ ' partitioner \ ' ] , varargs = None , keywords = None , defaults = [ \ ' None \ ' , \ ' None \ ' , \ ' None \ ' , \ ' True \ ' , \ ' None \ ' , \ ' None \ ' ] , " <nl> } <nl> member_method { <nl> name : " add_weight " <nl> mmm a / tensorflow / tools / api / golden / tensorflow . keras . layers . - zero - padding1 - d . pbtxt <nl> ppp b / tensorflow / tools / api / golden / tensorflow . keras . layers . - zero - padding1 - d . pbtxt <nl> tf_class { <nl> } <nl> member_method { <nl> name : " add_variable " <nl> - argspec : " args = [ \ ' self \ ' , \ ' name \ ' , \ ' shape \ ' , \ ' dtype \ ' , \ ' initializer \ ' , \ ' regularizer \ ' , \ ' trainable \ ' , \ ' constraint \ ' ] , varargs = None , keywords = None , defaults = [ \ ' None \ ' , \ ' None \ ' , \ ' None \ ' , \ ' True \ ' , \ ' None \ ' ] , " <nl> + argspec : " args = [ \ ' self \ ' , \ ' name \ ' , \ ' shape \ ' , \ ' dtype \ ' , \ ' initializer \ ' , \ ' regularizer \ ' , \ ' trainable \ ' , \ ' constraint \ ' , \ ' partitioner \ ' ] , varargs = None , keywords = None , defaults = [ \ ' None \ ' , \ ' None \ ' , \ ' None \ ' , \ ' True \ ' , \ ' None \ ' , \ ' None \ ' ] , " <nl> } <nl> member_method { <nl> name : " add_weight " <nl> mmm a / tensorflow / tools / api / golden / tensorflow . keras . layers . - zero - padding2 - d . pbtxt <nl> ppp b / tensorflow / tools / api / golden / tensorflow . keras . layers . - zero - padding2 - d . 
pbtxt <nl> tf_class { <nl> } <nl> member_method { <nl> name : " add_variable " <nl> - argspec : " args = [ \ ' self \ ' , \ ' name \ ' , \ ' shape \ ' , \ ' dtype \ ' , \ ' initializer \ ' , \ ' regularizer \ ' , \ ' trainable \ ' , \ ' constraint \ ' ] , varargs = None , keywords = None , defaults = [ \ ' None \ ' , \ ' None \ ' , \ ' None \ ' , \ ' True \ ' , \ ' None \ ' ] , " <nl> + argspec : " args = [ \ ' self \ ' , \ ' name \ ' , \ ' shape \ ' , \ ' dtype \ ' , \ ' initializer \ ' , \ ' regularizer \ ' , \ ' trainable \ ' , \ ' constraint \ ' , \ ' partitioner \ ' ] , varargs = None , keywords = None , defaults = [ \ ' None \ ' , \ ' None \ ' , \ ' None \ ' , \ ' True \ ' , \ ' None \ ' , \ ' None \ ' ] , " <nl> } <nl> member_method { <nl> name : " add_weight " <nl> mmm a / tensorflow / tools / api / golden / tensorflow . keras . layers . - zero - padding3 - d . pbtxt <nl> ppp b / tensorflow / tools / api / golden / tensorflow . keras . layers . - zero - padding3 - d . pbtxt <nl> tf_class { <nl> } <nl> member_method { <nl> name : " add_variable " <nl> - argspec : " args = [ \ ' self \ ' , \ ' name \ ' , \ ' shape \ ' , \ ' dtype \ ' , \ ' initializer \ ' , \ ' regularizer \ ' , \ ' trainable \ ' , \ ' constraint \ ' ] , varargs = None , keywords = None , defaults = [ \ ' None \ ' , \ ' None \ ' , \ ' None \ ' , \ ' True \ ' , \ ' None \ ' ] , " <nl> + argspec : " args = [ \ ' self \ ' , \ ' name \ ' , \ ' shape \ ' , \ ' dtype \ ' , \ ' initializer \ ' , \ ' regularizer \ ' , \ ' trainable \ ' , \ ' constraint \ ' , \ ' partitioner \ ' ] , varargs = None , keywords = None , defaults = [ \ ' None \ ' , \ ' None \ ' , \ ' None \ ' , \ ' True \ ' , \ ' None \ ' , \ ' None \ ' ] , " <nl> } <nl> member_method { <nl> name : " add_weight " <nl> mmm a / tensorflow / tools / api / golden / tensorflow . keras . models . - model . pbtxt <nl> ppp b / tensorflow / tools / api / golden / tensorflow . keras . models . - model . pbtxt <nl> tf_class { <nl> } <nl> member_method { <nl> name : " add_variable " <nl> - argspec : " args = [ \ ' self \ ' , \ ' name \ ' , \ ' shape \ ' , \ ' dtype \ ' , \ ' initializer \ ' , \ ' regularizer \ ' , \ ' trainable \ ' , \ ' constraint \ ' ] , varargs = None , keywords = None , defaults = [ \ ' None \ ' , \ ' None \ ' , \ ' None \ ' , \ ' True \ ' , \ ' None \ ' ] , " <nl> + argspec : " args = [ \ ' self \ ' , \ ' name \ ' , \ ' shape \ ' , \ ' dtype \ ' , \ ' initializer \ ' , \ ' regularizer \ ' , \ ' trainable \ ' , \ ' constraint \ ' , \ ' partitioner \ ' ] , varargs = None , keywords = None , defaults = [ \ ' None \ ' , \ ' None \ ' , \ ' None \ ' , \ ' True \ ' , \ ' None \ ' , \ ' None \ ' ] , " <nl> } <nl> member_method { <nl> name : " add_weight " <nl> mmm a / tensorflow / tools / api / golden / tensorflow . keras . models . - sequential . pbtxt <nl> ppp b / tensorflow / tools / api / golden / tensorflow . keras . models . - sequential . 
pbtxt <nl> tf_class { <nl> } <nl> member_method { <nl> name : " add_variable " <nl> - argspec : " args = [ \ ' self \ ' , \ ' name \ ' , \ ' shape \ ' , \ ' dtype \ ' , \ ' initializer \ ' , \ ' regularizer \ ' , \ ' trainable \ ' , \ ' constraint \ ' ] , varargs = None , keywords = None , defaults = [ \ ' None \ ' , \ ' None \ ' , \ ' None \ ' , \ ' True \ ' , \ ' None \ ' ] , " <nl> + argspec : " args = [ \ ' self \ ' , \ ' name \ ' , \ ' shape \ ' , \ ' dtype \ ' , \ ' initializer \ ' , \ ' regularizer \ ' , \ ' trainable \ ' , \ ' constraint \ ' , \ ' partitioner \ ' ] , varargs = None , keywords = None , defaults = [ \ ' None \ ' , \ ' None \ ' , \ ' None \ ' , \ ' True \ ' , \ ' None \ ' , \ ' None \ ' ] , " <nl> } <nl> member_method { <nl> name : " add_weight " <nl> mmm a / tensorflow / tools / api / golden / tensorflow . layers . - average - pooling1 - d . pbtxt <nl> ppp b / tensorflow / tools / api / golden / tensorflow . layers . - average - pooling1 - d . pbtxt <nl> tf_class { <nl> } <nl> member_method { <nl> name : " add_variable " <nl> - argspec : " args = [ \ ' self \ ' , \ ' name \ ' , \ ' shape \ ' , \ ' dtype \ ' , \ ' initializer \ ' , \ ' regularizer \ ' , \ ' trainable \ ' , \ ' constraint \ ' ] , varargs = None , keywords = None , defaults = [ \ ' None \ ' , \ ' None \ ' , \ ' None \ ' , \ ' True \ ' , \ ' None \ ' ] , " <nl> + argspec : " args = [ \ ' self \ ' , \ ' name \ ' , \ ' shape \ ' , \ ' dtype \ ' , \ ' initializer \ ' , \ ' regularizer \ ' , \ ' trainable \ ' , \ ' constraint \ ' , \ ' partitioner \ ' ] , varargs = None , keywords = None , defaults = [ \ ' None \ ' , \ ' None \ ' , \ ' None \ ' , \ ' True \ ' , \ ' None \ ' , \ ' None \ ' ] , " <nl> } <nl> member_method { <nl> name : " apply " <nl> mmm a / tensorflow / tools / api / golden / tensorflow . layers . - average - pooling2 - d . pbtxt <nl> ppp b / tensorflow / tools / api / golden / tensorflow . layers . - average - pooling2 - d . pbtxt <nl> tf_class { <nl> } <nl> member_method { <nl> name : " add_variable " <nl> - argspec : " args = [ \ ' self \ ' , \ ' name \ ' , \ ' shape \ ' , \ ' dtype \ ' , \ ' initializer \ ' , \ ' regularizer \ ' , \ ' trainable \ ' , \ ' constraint \ ' ] , varargs = None , keywords = None , defaults = [ \ ' None \ ' , \ ' None \ ' , \ ' None \ ' , \ ' True \ ' , \ ' None \ ' ] , " <nl> + argspec : " args = [ \ ' self \ ' , \ ' name \ ' , \ ' shape \ ' , \ ' dtype \ ' , \ ' initializer \ ' , \ ' regularizer \ ' , \ ' trainable \ ' , \ ' constraint \ ' , \ ' partitioner \ ' ] , varargs = None , keywords = None , defaults = [ \ ' None \ ' , \ ' None \ ' , \ ' None \ ' , \ ' True \ ' , \ ' None \ ' , \ ' None \ ' ] , " <nl> } <nl> member_method { <nl> name : " apply " <nl> mmm a / tensorflow / tools / api / golden / tensorflow . layers . - average - pooling3 - d . pbtxt <nl> ppp b / tensorflow / tools / api / golden / tensorflow . layers . - average - pooling3 - d . 
pbtxt <nl> tf_class { <nl> } <nl> member_method { <nl> name : " add_variable " <nl> - argspec : " args = [ \ ' self \ ' , \ ' name \ ' , \ ' shape \ ' , \ ' dtype \ ' , \ ' initializer \ ' , \ ' regularizer \ ' , \ ' trainable \ ' , \ ' constraint \ ' ] , varargs = None , keywords = None , defaults = [ \ ' None \ ' , \ ' None \ ' , \ ' None \ ' , \ ' True \ ' , \ ' None \ ' ] , " <nl> + argspec : " args = [ \ ' self \ ' , \ ' name \ ' , \ ' shape \ ' , \ ' dtype \ ' , \ ' initializer \ ' , \ ' regularizer \ ' , \ ' trainable \ ' , \ ' constraint \ ' , \ ' partitioner \ ' ] , varargs = None , keywords = None , defaults = [ \ ' None \ ' , \ ' None \ ' , \ ' None \ ' , \ ' True \ ' , \ ' None \ ' , \ ' None \ ' ] , " <nl> } <nl> member_method { <nl> name : " apply " <nl> mmm a / tensorflow / tools / api / golden / tensorflow . layers . - batch - normalization . pbtxt <nl> ppp b / tensorflow / tools / api / golden / tensorflow . layers . - batch - normalization . pbtxt <nl> tf_class { <nl> } <nl> member_method { <nl> name : " add_variable " <nl> - argspec : " args = [ \ ' self \ ' , \ ' name \ ' , \ ' shape \ ' , \ ' dtype \ ' , \ ' initializer \ ' , \ ' regularizer \ ' , \ ' trainable \ ' , \ ' constraint \ ' ] , varargs = None , keywords = None , defaults = [ \ ' None \ ' , \ ' None \ ' , \ ' None \ ' , \ ' True \ ' , \ ' None \ ' ] , " <nl> + argspec : " args = [ \ ' self \ ' , \ ' name \ ' , \ ' shape \ ' , \ ' dtype \ ' , \ ' initializer \ ' , \ ' regularizer \ ' , \ ' trainable \ ' , \ ' constraint \ ' , \ ' partitioner \ ' ] , varargs = None , keywords = None , defaults = [ \ ' None \ ' , \ ' None \ ' , \ ' None \ ' , \ ' True \ ' , \ ' None \ ' , \ ' None \ ' ] , " <nl> } <nl> member_method { <nl> name : " apply " <nl> mmm a / tensorflow / tools / api / golden / tensorflow . layers . - conv1 - d . pbtxt <nl> ppp b / tensorflow / tools / api / golden / tensorflow . layers . - conv1 - d . pbtxt <nl> tf_class { <nl> } <nl> member_method { <nl> name : " add_variable " <nl> - argspec : " args = [ \ ' self \ ' , \ ' name \ ' , \ ' shape \ ' , \ ' dtype \ ' , \ ' initializer \ ' , \ ' regularizer \ ' , \ ' trainable \ ' , \ ' constraint \ ' ] , varargs = None , keywords = None , defaults = [ \ ' None \ ' , \ ' None \ ' , \ ' None \ ' , \ ' True \ ' , \ ' None \ ' ] , " <nl> + argspec : " args = [ \ ' self \ ' , \ ' name \ ' , \ ' shape \ ' , \ ' dtype \ ' , \ ' initializer \ ' , \ ' regularizer \ ' , \ ' trainable \ ' , \ ' constraint \ ' , \ ' partitioner \ ' ] , varargs = None , keywords = None , defaults = [ \ ' None \ ' , \ ' None \ ' , \ ' None \ ' , \ ' True \ ' , \ ' None \ ' , \ ' None \ ' ] , " <nl> } <nl> member_method { <nl> name : " apply " <nl> mmm a / tensorflow / tools / api / golden / tensorflow . layers . - conv2 - d - transpose . pbtxt <nl> ppp b / tensorflow / tools / api / golden / tensorflow . layers . - conv2 - d - transpose . 
pbtxt <nl> tf_class { <nl> } <nl> member_method { <nl> name : " add_variable " <nl> - argspec : " args = [ \ ' self \ ' , \ ' name \ ' , \ ' shape \ ' , \ ' dtype \ ' , \ ' initializer \ ' , \ ' regularizer \ ' , \ ' trainable \ ' , \ ' constraint \ ' ] , varargs = None , keywords = None , defaults = [ \ ' None \ ' , \ ' None \ ' , \ ' None \ ' , \ ' True \ ' , \ ' None \ ' ] , " <nl> + argspec : " args = [ \ ' self \ ' , \ ' name \ ' , \ ' shape \ ' , \ ' dtype \ ' , \ ' initializer \ ' , \ ' regularizer \ ' , \ ' trainable \ ' , \ ' constraint \ ' , \ ' partitioner \ ' ] , varargs = None , keywords = None , defaults = [ \ ' None \ ' , \ ' None \ ' , \ ' None \ ' , \ ' True \ ' , \ ' None \ ' , \ ' None \ ' ] , " <nl> } <nl> member_method { <nl> name : " apply " <nl> mmm a / tensorflow / tools / api / golden / tensorflow . layers . - conv2 - d . pbtxt <nl> ppp b / tensorflow / tools / api / golden / tensorflow . layers . - conv2 - d . pbtxt <nl> tf_class { <nl> } <nl> member_method { <nl> name : " add_variable " <nl> - argspec : " args = [ \ ' self \ ' , \ ' name \ ' , \ ' shape \ ' , \ ' dtype \ ' , \ ' initializer \ ' , \ ' regularizer \ ' , \ ' trainable \ ' , \ ' constraint \ ' ] , varargs = None , keywords = None , defaults = [ \ ' None \ ' , \ ' None \ ' , \ ' None \ ' , \ ' True \ ' , \ ' None \ ' ] , " <nl> + argspec : " args = [ \ ' self \ ' , \ ' name \ ' , \ ' shape \ ' , \ ' dtype \ ' , \ ' initializer \ ' , \ ' regularizer \ ' , \ ' trainable \ ' , \ ' constraint \ ' , \ ' partitioner \ ' ] , varargs = None , keywords = None , defaults = [ \ ' None \ ' , \ ' None \ ' , \ ' None \ ' , \ ' True \ ' , \ ' None \ ' , \ ' None \ ' ] , " <nl> } <nl> member_method { <nl> name : " apply " <nl> mmm a / tensorflow / tools / api / golden / tensorflow . layers . - conv3 - d - transpose . pbtxt <nl> ppp b / tensorflow / tools / api / golden / tensorflow . layers . - conv3 - d - transpose . pbtxt <nl> tf_class { <nl> } <nl> member_method { <nl> name : " add_variable " <nl> - argspec : " args = [ \ ' self \ ' , \ ' name \ ' , \ ' shape \ ' , \ ' dtype \ ' , \ ' initializer \ ' , \ ' regularizer \ ' , \ ' trainable \ ' , \ ' constraint \ ' ] , varargs = None , keywords = None , defaults = [ \ ' None \ ' , \ ' None \ ' , \ ' None \ ' , \ ' True \ ' , \ ' None \ ' ] , " <nl> + argspec : " args = [ \ ' self \ ' , \ ' name \ ' , \ ' shape \ ' , \ ' dtype \ ' , \ ' initializer \ ' , \ ' regularizer \ ' , \ ' trainable \ ' , \ ' constraint \ ' , \ ' partitioner \ ' ] , varargs = None , keywords = None , defaults = [ \ ' None \ ' , \ ' None \ ' , \ ' None \ ' , \ ' True \ ' , \ ' None \ ' , \ ' None \ ' ] , " <nl> } <nl> member_method { <nl> name : " apply " <nl> mmm a / tensorflow / tools / api / golden / tensorflow . layers . - conv3 - d . pbtxt <nl> ppp b / tensorflow / tools / api / golden / tensorflow . layers . - conv3 - d . 
pbtxt <nl> tf_class { <nl> } <nl> member_method { <nl> name : " add_variable " <nl> - argspec : " args = [ \ ' self \ ' , \ ' name \ ' , \ ' shape \ ' , \ ' dtype \ ' , \ ' initializer \ ' , \ ' regularizer \ ' , \ ' trainable \ ' , \ ' constraint \ ' ] , varargs = None , keywords = None , defaults = [ \ ' None \ ' , \ ' None \ ' , \ ' None \ ' , \ ' True \ ' , \ ' None \ ' ] , " <nl> + argspec : " args = [ \ ' self \ ' , \ ' name \ ' , \ ' shape \ ' , \ ' dtype \ ' , \ ' initializer \ ' , \ ' regularizer \ ' , \ ' trainable \ ' , \ ' constraint \ ' , \ ' partitioner \ ' ] , varargs = None , keywords = None , defaults = [ \ ' None \ ' , \ ' None \ ' , \ ' None \ ' , \ ' True \ ' , \ ' None \ ' , \ ' None \ ' ] , " <nl> } <nl> member_method { <nl> name : " apply " <nl> mmm a / tensorflow / tools / api / golden / tensorflow . layers . - dense . pbtxt <nl> ppp b / tensorflow / tools / api / golden / tensorflow . layers . - dense . pbtxt <nl> tf_class { <nl> } <nl> member_method { <nl> name : " add_variable " <nl> - argspec : " args = [ \ ' self \ ' , \ ' name \ ' , \ ' shape \ ' , \ ' dtype \ ' , \ ' initializer \ ' , \ ' regularizer \ ' , \ ' trainable \ ' , \ ' constraint \ ' ] , varargs = None , keywords = None , defaults = [ \ ' None \ ' , \ ' None \ ' , \ ' None \ ' , \ ' True \ ' , \ ' None \ ' ] , " <nl> + argspec : " args = [ \ ' self \ ' , \ ' name \ ' , \ ' shape \ ' , \ ' dtype \ ' , \ ' initializer \ ' , \ ' regularizer \ ' , \ ' trainable \ ' , \ ' constraint \ ' , \ ' partitioner \ ' ] , varargs = None , keywords = None , defaults = [ \ ' None \ ' , \ ' None \ ' , \ ' None \ ' , \ ' True \ ' , \ ' None \ ' , \ ' None \ ' ] , " <nl> } <nl> member_method { <nl> name : " apply " <nl> mmm a / tensorflow / tools / api / golden / tensorflow . layers . - dropout . pbtxt <nl> ppp b / tensorflow / tools / api / golden / tensorflow . layers . - dropout . pbtxt <nl> tf_class { <nl> } <nl> member_method { <nl> name : " add_variable " <nl> - argspec : " args = [ \ ' self \ ' , \ ' name \ ' , \ ' shape \ ' , \ ' dtype \ ' , \ ' initializer \ ' , \ ' regularizer \ ' , \ ' trainable \ ' , \ ' constraint \ ' ] , varargs = None , keywords = None , defaults = [ \ ' None \ ' , \ ' None \ ' , \ ' None \ ' , \ ' True \ ' , \ ' None \ ' ] , " <nl> + argspec : " args = [ \ ' self \ ' , \ ' name \ ' , \ ' shape \ ' , \ ' dtype \ ' , \ ' initializer \ ' , \ ' regularizer \ ' , \ ' trainable \ ' , \ ' constraint \ ' , \ ' partitioner \ ' ] , varargs = None , keywords = None , defaults = [ \ ' None \ ' , \ ' None \ ' , \ ' None \ ' , \ ' True \ ' , \ ' None \ ' , \ ' None \ ' ] , " <nl> } <nl> member_method { <nl> name : " apply " <nl> mmm a / tensorflow / tools / api / golden / tensorflow . layers . - flatten . pbtxt <nl> ppp b / tensorflow / tools / api / golden / tensorflow . layers . - flatten . 
pbtxt <nl> tf_class { <nl> } <nl> member_method { <nl> name : " add_variable " <nl> - argspec : " args = [ \ ' self \ ' , \ ' name \ ' , \ ' shape \ ' , \ ' dtype \ ' , \ ' initializer \ ' , \ ' regularizer \ ' , \ ' trainable \ ' , \ ' constraint \ ' ] , varargs = None , keywords = None , defaults = [ \ ' None \ ' , \ ' None \ ' , \ ' None \ ' , \ ' True \ ' , \ ' None \ ' ] , " <nl> + argspec : " args = [ \ ' self \ ' , \ ' name \ ' , \ ' shape \ ' , \ ' dtype \ ' , \ ' initializer \ ' , \ ' regularizer \ ' , \ ' trainable \ ' , \ ' constraint \ ' , \ ' partitioner \ ' ] , varargs = None , keywords = None , defaults = [ \ ' None \ ' , \ ' None \ ' , \ ' None \ ' , \ ' True \ ' , \ ' None \ ' , \ ' None \ ' ] , " <nl> } <nl> member_method { <nl> name : " apply " <nl> mmm a / tensorflow / tools / api / golden / tensorflow . layers . - layer . pbtxt <nl> ppp b / tensorflow / tools / api / golden / tensorflow . layers . - layer . pbtxt <nl> tf_class { <nl> } <nl> member_method { <nl> name : " add_variable " <nl> - argspec : " args = [ \ ' self \ ' , \ ' name \ ' , \ ' shape \ ' , \ ' dtype \ ' , \ ' initializer \ ' , \ ' regularizer \ ' , \ ' trainable \ ' , \ ' constraint \ ' ] , varargs = None , keywords = None , defaults = [ \ ' None \ ' , \ ' None \ ' , \ ' None \ ' , \ ' True \ ' , \ ' None \ ' ] , " <nl> + argspec : " args = [ \ ' self \ ' , \ ' name \ ' , \ ' shape \ ' , \ ' dtype \ ' , \ ' initializer \ ' , \ ' regularizer \ ' , \ ' trainable \ ' , \ ' constraint \ ' , \ ' partitioner \ ' ] , varargs = None , keywords = None , defaults = [ \ ' None \ ' , \ ' None \ ' , \ ' None \ ' , \ ' True \ ' , \ ' None \ ' , \ ' None \ ' ] , " <nl> } <nl> member_method { <nl> name : " apply " <nl> mmm a / tensorflow / tools / api / golden / tensorflow . layers . - max - pooling1 - d . pbtxt <nl> ppp b / tensorflow / tools / api / golden / tensorflow . layers . - max - pooling1 - d . pbtxt <nl> tf_class { <nl> } <nl> member_method { <nl> name : " add_variable " <nl> - argspec : " args = [ \ ' self \ ' , \ ' name \ ' , \ ' shape \ ' , \ ' dtype \ ' , \ ' initializer \ ' , \ ' regularizer \ ' , \ ' trainable \ ' , \ ' constraint \ ' ] , varargs = None , keywords = None , defaults = [ \ ' None \ ' , \ ' None \ ' , \ ' None \ ' , \ ' True \ ' , \ ' None \ ' ] , " <nl> + argspec : " args = [ \ ' self \ ' , \ ' name \ ' , \ ' shape \ ' , \ ' dtype \ ' , \ ' initializer \ ' , \ ' regularizer \ ' , \ ' trainable \ ' , \ ' constraint \ ' , \ ' partitioner \ ' ] , varargs = None , keywords = None , defaults = [ \ ' None \ ' , \ ' None \ ' , \ ' None \ ' , \ ' True \ ' , \ ' None \ ' , \ ' None \ ' ] , " <nl> } <nl> member_method { <nl> name : " apply " <nl> mmm a / tensorflow / tools / api / golden / tensorflow . layers . - max - pooling2 - d . pbtxt <nl> ppp b / tensorflow / tools / api / golden / tensorflow . layers . - max - pooling2 - d . 
pbtxt <nl> tf_class { <nl> } <nl> member_method { <nl> name : " add_variable " <nl> - argspec : " args = [ \ ' self \ ' , \ ' name \ ' , \ ' shape \ ' , \ ' dtype \ ' , \ ' initializer \ ' , \ ' regularizer \ ' , \ ' trainable \ ' , \ ' constraint \ ' ] , varargs = None , keywords = None , defaults = [ \ ' None \ ' , \ ' None \ ' , \ ' None \ ' , \ ' True \ ' , \ ' None \ ' ] , " <nl> + argspec : " args = [ \ ' self \ ' , \ ' name \ ' , \ ' shape \ ' , \ ' dtype \ ' , \ ' initializer \ ' , \ ' regularizer \ ' , \ ' trainable \ ' , \ ' constraint \ ' , \ ' partitioner \ ' ] , varargs = None , keywords = None , defaults = [ \ ' None \ ' , \ ' None \ ' , \ ' None \ ' , \ ' True \ ' , \ ' None \ ' , \ ' None \ ' ] , " <nl> } <nl> member_method { <nl> name : " apply " <nl> mmm a / tensorflow / tools / api / golden / tensorflow . layers . - max - pooling3 - d . pbtxt <nl> ppp b / tensorflow / tools / api / golden / tensorflow . layers . - max - pooling3 - d . pbtxt <nl> tf_class { <nl> } <nl> member_method { <nl> name : " add_variable " <nl> - argspec : " args = [ \ ' self \ ' , \ ' name \ ' , \ ' shape \ ' , \ ' dtype \ ' , \ ' initializer \ ' , \ ' regularizer \ ' , \ ' trainable \ ' , \ ' constraint \ ' ] , varargs = None , keywords = None , defaults = [ \ ' None \ ' , \ ' None \ ' , \ ' None \ ' , \ ' True \ ' , \ ' None \ ' ] , " <nl> + argspec : " args = [ \ ' self \ ' , \ ' name \ ' , \ ' shape \ ' , \ ' dtype \ ' , \ ' initializer \ ' , \ ' regularizer \ ' , \ ' trainable \ ' , \ ' constraint \ ' , \ ' partitioner \ ' ] , varargs = None , keywords = None , defaults = [ \ ' None \ ' , \ ' None \ ' , \ ' None \ ' , \ ' True \ ' , \ ' None \ ' , \ ' None \ ' ] , " <nl> } <nl> member_method { <nl> name : " apply " <nl> mmm a / tensorflow / tools / api / golden / tensorflow . layers . - separable - conv2 - d . pbtxt <nl> ppp b / tensorflow / tools / api / golden / tensorflow . layers . - separable - conv2 - d . pbtxt <nl> tf_class { <nl> } <nl> member_method { <nl> name : " add_variable " <nl> - argspec : " args = [ \ ' self \ ' , \ ' name \ ' , \ ' shape \ ' , \ ' dtype \ ' , \ ' initializer \ ' , \ ' regularizer \ ' , \ ' trainable \ ' , \ ' constraint \ ' ] , varargs = None , keywords = None , defaults = [ \ ' None \ ' , \ ' None \ ' , \ ' None \ ' , \ ' True \ ' , \ ' None \ ' ] , " <nl> + argspec : " args = [ \ ' self \ ' , \ ' name \ ' , \ ' shape \ ' , \ ' dtype \ ' , \ ' initializer \ ' , \ ' regularizer \ ' , \ ' trainable \ ' , \ ' constraint \ ' , \ ' partitioner \ ' ] , varargs = None , keywords = None , defaults = [ \ ' None \ ' , \ ' None \ ' , \ ' None \ ' , \ ' True \ ' , \ ' None \ ' , \ ' None \ ' ] , " <nl> } <nl> member_method { <nl> name : " apply " <nl> mmm a / tensorflow / tools / api / golden / tensorflow . nn . rnn_cell . - basic - l - s - t - m - cell . pbtxt <nl> ppp b / tensorflow / tools / api / golden / tensorflow . nn . rnn_cell . - basic - l - s - t - m - cell . 
pbtxt <nl> tf_class { <nl> } <nl> member_method { <nl> name : " add_variable " <nl> - argspec : " args = [ \ ' self \ ' , \ ' name \ ' , \ ' shape \ ' , \ ' dtype \ ' , \ ' initializer \ ' , \ ' regularizer \ ' , \ ' trainable \ ' , \ ' constraint \ ' ] , varargs = None , keywords = None , defaults = [ \ ' None \ ' , \ ' None \ ' , \ ' None \ ' , \ ' True \ ' , \ ' None \ ' ] , " <nl> + argspec : " args = [ \ ' self \ ' , \ ' name \ ' , \ ' shape \ ' , \ ' dtype \ ' , \ ' initializer \ ' , \ ' regularizer \ ' , \ ' trainable \ ' , \ ' constraint \ ' , \ ' partitioner \ ' ] , varargs = None , keywords = None , defaults = [ \ ' None \ ' , \ ' None \ ' , \ ' None \ ' , \ ' True \ ' , \ ' None \ ' , \ ' None \ ' ] , " <nl> } <nl> member_method { <nl> name : " apply " <nl> mmm a / tensorflow / tools / api / golden / tensorflow . nn . rnn_cell . - basic - r - n - n - cell . pbtxt <nl> ppp b / tensorflow / tools / api / golden / tensorflow . nn . rnn_cell . - basic - r - n - n - cell . pbtxt <nl> tf_class { <nl> } <nl> member_method { <nl> name : " add_variable " <nl> - argspec : " args = [ \ ' self \ ' , \ ' name \ ' , \ ' shape \ ' , \ ' dtype \ ' , \ ' initializer \ ' , \ ' regularizer \ ' , \ ' trainable \ ' , \ ' constraint \ ' ] , varargs = None , keywords = None , defaults = [ \ ' None \ ' , \ ' None \ ' , \ ' None \ ' , \ ' True \ ' , \ ' None \ ' ] , " <nl> + argspec : " args = [ \ ' self \ ' , \ ' name \ ' , \ ' shape \ ' , \ ' dtype \ ' , \ ' initializer \ ' , \ ' regularizer \ ' , \ ' trainable \ ' , \ ' constraint \ ' , \ ' partitioner \ ' ] , varargs = None , keywords = None , defaults = [ \ ' None \ ' , \ ' None \ ' , \ ' None \ ' , \ ' True \ ' , \ ' None \ ' , \ ' None \ ' ] , " <nl> } <nl> member_method { <nl> name : " apply " <nl> mmm a / tensorflow / tools / api / golden / tensorflow . nn . rnn_cell . - device - wrapper . pbtxt <nl> ppp b / tensorflow / tools / api / golden / tensorflow . nn . rnn_cell . - device - wrapper . pbtxt <nl> tf_class { <nl> } <nl> member_method { <nl> name : " add_variable " <nl> - argspec : " args = [ \ ' self \ ' , \ ' name \ ' , \ ' shape \ ' , \ ' dtype \ ' , \ ' initializer \ ' , \ ' regularizer \ ' , \ ' trainable \ ' , \ ' constraint \ ' ] , varargs = None , keywords = None , defaults = [ \ ' None \ ' , \ ' None \ ' , \ ' None \ ' , \ ' True \ ' , \ ' None \ ' ] , " <nl> + argspec : " args = [ \ ' self \ ' , \ ' name \ ' , \ ' shape \ ' , \ ' dtype \ ' , \ ' initializer \ ' , \ ' regularizer \ ' , \ ' trainable \ ' , \ ' constraint \ ' , \ ' partitioner \ ' ] , varargs = None , keywords = None , defaults = [ \ ' None \ ' , \ ' None \ ' , \ ' None \ ' , \ ' True \ ' , \ ' None \ ' , \ ' None \ ' ] , " <nl> } <nl> member_method { <nl> name : " apply " <nl> mmm a / tensorflow / tools / api / golden / tensorflow . nn . rnn_cell . - dropout - wrapper . pbtxt <nl> ppp b / tensorflow / tools / api / golden / tensorflow . nn . rnn_cell . - dropout - wrapper . 
pbtxt <nl> tf_class { <nl> } <nl> member_method { <nl> name : " add_variable " <nl> - argspec : " args = [ \ ' self \ ' , \ ' name \ ' , \ ' shape \ ' , \ ' dtype \ ' , \ ' initializer \ ' , \ ' regularizer \ ' , \ ' trainable \ ' , \ ' constraint \ ' ] , varargs = None , keywords = None , defaults = [ \ ' None \ ' , \ ' None \ ' , \ ' None \ ' , \ ' True \ ' , \ ' None \ ' ] , " <nl> + argspec : " args = [ \ ' self \ ' , \ ' name \ ' , \ ' shape \ ' , \ ' dtype \ ' , \ ' initializer \ ' , \ ' regularizer \ ' , \ ' trainable \ ' , \ ' constraint \ ' , \ ' partitioner \ ' ] , varargs = None , keywords = None , defaults = [ \ ' None \ ' , \ ' None \ ' , \ ' None \ ' , \ ' True \ ' , \ ' None \ ' , \ ' None \ ' ] , " <nl> } <nl> member_method { <nl> name : " apply " <nl> mmm a / tensorflow / tools / api / golden / tensorflow . nn . rnn_cell . - g - r - u - cell . pbtxt <nl> ppp b / tensorflow / tools / api / golden / tensorflow . nn . rnn_cell . - g - r - u - cell . pbtxt <nl> tf_class { <nl> } <nl> member_method { <nl> name : " add_variable " <nl> - argspec : " args = [ \ ' self \ ' , \ ' name \ ' , \ ' shape \ ' , \ ' dtype \ ' , \ ' initializer \ ' , \ ' regularizer \ ' , \ ' trainable \ ' , \ ' constraint \ ' ] , varargs = None , keywords = None , defaults = [ \ ' None \ ' , \ ' None \ ' , \ ' None \ ' , \ ' True \ ' , \ ' None \ ' ] , " <nl> + argspec : " args = [ \ ' self \ ' , \ ' name \ ' , \ ' shape \ ' , \ ' dtype \ ' , \ ' initializer \ ' , \ ' regularizer \ ' , \ ' trainable \ ' , \ ' constraint \ ' , \ ' partitioner \ ' ] , varargs = None , keywords = None , defaults = [ \ ' None \ ' , \ ' None \ ' , \ ' None \ ' , \ ' True \ ' , \ ' None \ ' , \ ' None \ ' ] , " <nl> } <nl> member_method { <nl> name : " apply " <nl> mmm a / tensorflow / tools / api / golden / tensorflow . nn . rnn_cell . - l - s - t - m - cell . pbtxt <nl> ppp b / tensorflow / tools / api / golden / tensorflow . nn . rnn_cell . - l - s - t - m - cell . pbtxt <nl> <nl> path : " tensorflow . nn . rnn_cell . LSTMCell " <nl> tf_class { <nl> is_instance : " < class \ ' tensorflow . python . ops . rnn_cell_impl . LSTMCell \ ' > " <nl> + is_instance : " < class \ ' tensorflow . python . ops . rnn_cell_impl . _LayerRNNCell \ ' > " <nl> is_instance : " < class \ ' tensorflow . python . ops . rnn_cell_impl . RNNCell \ ' > " <nl> is_instance : " < class \ ' tensorflow . python . layers . base . Layer \ ' > " <nl> is_instance : " < type \ ' object \ ' > " <nl> tf_class { <nl> } <nl> member_method { <nl> name : " __init__ " <nl> - argspec : " args = [ \ ' self \ ' , \ ' num_units \ ' , \ ' use_peepholes \ ' , \ ' cell_clip \ ' , \ ' initializer \ ' , \ ' num_proj \ ' , \ ' proj_clip \ ' , \ ' num_unit_shards \ ' , \ ' num_proj_shards \ ' , \ ' forget_bias \ ' , \ ' state_is_tuple \ ' , \ ' activation \ ' , \ ' reuse \ ' ] , varargs = None , keywords = None , defaults = [ \ ' False \ ' , \ ' None \ ' , \ ' None \ ' , \ ' None \ ' , \ ' None \ ' , \ ' None \ ' , \ ' None \ ' , \ ' 1 . 0 \ ' , \ ' True \ ' , \ ' None \ ' , \ ' None \ ' ] , " <nl> + argspec : " args = [ \ ' self \ ' , \ ' num_units \ ' , \ ' use_peepholes \ ' , \ ' cell_clip \ ' , \ ' initializer \ ' , \ ' num_proj \ ' , \ ' proj_clip \ ' , \ ' num_unit_shards \ ' , \ ' num_proj_shards \ ' , \ ' forget_bias \ ' , \ ' state_is_tuple \ ' , \ ' activation \ ' , \ ' reuse \ ' , \ ' name \ ' ] , varargs = None , keywords = None , defaults = [ \ ' False \ ' , \ ' None \ ' , \ ' None \ ' , \ ' None \ ' , \ ' None \ ' , \ ' None \ ' , \ ' None \ ' , \ ' 1 . 
0 \ ' , \ ' True \ ' , \ ' None \ ' , \ ' None \ ' , \ ' None \ ' ] , " <nl> } <nl> member_method { <nl> name : " add_loss " <nl> tf_class { <nl> } <nl> member_method { <nl> name : " add_variable " <nl> - argspec : " args = [ \ ' self \ ' , \ ' name \ ' , \ ' shape \ ' , \ ' dtype \ ' , \ ' initializer \ ' , \ ' regularizer \ ' , \ ' trainable \ ' , \ ' constraint \ ' ] , varargs = None , keywords = None , defaults = [ \ ' None \ ' , \ ' None \ ' , \ ' None \ ' , \ ' True \ ' , \ ' None \ ' ] , " <nl> + argspec : " args = [ \ ' self \ ' , \ ' name \ ' , \ ' shape \ ' , \ ' dtype \ ' , \ ' initializer \ ' , \ ' regularizer \ ' , \ ' trainable \ ' , \ ' constraint \ ' , \ ' partitioner \ ' ] , varargs = None , keywords = None , defaults = [ \ ' None \ ' , \ ' None \ ' , \ ' None \ ' , \ ' True \ ' , \ ' None \ ' , \ ' None \ ' ] , " <nl> } <nl> member_method { <nl> name : " apply " <nl> tf_class { <nl> } <nl> member_method { <nl> name : " build " <nl> - argspec : " args = [ \ ' self \ ' , \ ' _ \ ' ] , varargs = None , keywords = None , defaults = None " <nl> + argspec : " args = [ \ ' self \ ' , \ ' inputs_shape \ ' ] , varargs = None , keywords = None , defaults = None " <nl> } <nl> member_method { <nl> name : " call " <nl> mmm a / tensorflow / tools / api / golden / tensorflow . nn . rnn_cell . - multi - r - n - n - cell . pbtxt <nl> ppp b / tensorflow / tools / api / golden / tensorflow . nn . rnn_cell . - multi - r - n - n - cell . pbtxt <nl> tf_class { <nl> } <nl> member_method { <nl> name : " add_variable " <nl> - argspec : " args = [ \ ' self \ ' , \ ' name \ ' , \ ' shape \ ' , \ ' dtype \ ' , \ ' initializer \ ' , \ ' regularizer \ ' , \ ' trainable \ ' , \ ' constraint \ ' ] , varargs = None , keywords = None , defaults = [ \ ' None \ ' , \ ' None \ ' , \ ' None \ ' , \ ' True \ ' , \ ' None \ ' ] , " <nl> + argspec : " args = [ \ ' self \ ' , \ ' name \ ' , \ ' shape \ ' , \ ' dtype \ ' , \ ' initializer \ ' , \ ' regularizer \ ' , \ ' trainable \ ' , \ ' constraint \ ' , \ ' partitioner \ ' ] , varargs = None , keywords = None , defaults = [ \ ' None \ ' , \ ' None \ ' , \ ' None \ ' , \ ' True \ ' , \ ' None \ ' , \ ' None \ ' ] , " <nl> } <nl> member_method { <nl> name : " apply " <nl> mmm a / tensorflow / tools / api / golden / tensorflow . nn . rnn_cell . - r - n - n - cell . pbtxt <nl> ppp b / tensorflow / tools / api / golden / tensorflow . nn . rnn_cell . - r - n - n - cell . pbtxt <nl> tf_class { <nl> } <nl> member_method { <nl> name : " add_variable " <nl> - argspec : " args = [ \ ' self \ ' , \ ' name \ ' , \ ' shape \ ' , \ ' dtype \ ' , \ ' initializer \ ' , \ ' regularizer \ ' , \ ' trainable \ ' , \ ' constraint \ ' ] , varargs = None , keywords = None , defaults = [ \ ' None \ ' , \ ' None \ ' , \ ' None \ ' , \ ' True \ ' , \ ' None \ ' ] , " <nl> + argspec : " args = [ \ ' self \ ' , \ ' name \ ' , \ ' shape \ ' , \ ' dtype \ ' , \ ' initializer \ ' , \ ' regularizer \ ' , \ ' trainable \ ' , \ ' constraint \ ' , \ ' partitioner \ ' ] , varargs = None , keywords = None , defaults = [ \ ' None \ ' , \ ' None \ ' , \ ' None \ ' , \ ' True \ ' , \ ' None \ ' , \ ' None \ ' ] , " <nl> } <nl> member_method { <nl> name : " apply " <nl> mmm a / tensorflow / tools / api / golden / tensorflow . nn . rnn_cell . - residual - wrapper . pbtxt <nl> ppp b / tensorflow / tools / api / golden / tensorflow . nn . rnn_cell . - residual - wrapper . 
pbtxt <nl> tf_class { <nl> } <nl> member_method { <nl> name : " add_variable " <nl> - argspec : " args = [ \ ' self \ ' , \ ' name \ ' , \ ' shape \ ' , \ ' dtype \ ' , \ ' initializer \ ' , \ ' regularizer \ ' , \ ' trainable \ ' , \ ' constraint \ ' ] , varargs = None , keywords = None , defaults = [ \ ' None \ ' , \ ' None \ ' , \ ' None \ ' , \ ' True \ ' , \ ' None \ ' ] , " <nl> + argspec : " args = [ \ ' self \ ' , \ ' name \ ' , \ ' shape \ ' , \ ' dtype \ ' , \ ' initializer \ ' , \ ' regularizer \ ' , \ ' trainable \ ' , \ ' constraint \ ' , \ ' partitioner \ ' ] , varargs = None , keywords = None , defaults = [ \ ' None \ ' , \ ' None \ ' , \ ' None \ ' , \ ' True \ ' , \ ' None \ ' , \ ' None \ ' ] , " <nl> } <nl> member_method { <nl> name : " apply " <nl> mmm a / tensorflow / tools / gcs_test / python / gcs_smoke . py <nl> ppp b / tensorflow / tools / gcs_test / python / gcs_smoke . py <nl> <nl> <nl> FLAGS = flags . FLAGS <nl> <nl> + <nl> def create_examples ( num_examples , input_mean ) : <nl> " " " Create ExampleProto ' s containing data . " " " <nl> ids = np . arange ( num_examples ) . reshape ( [ num_examples , 1 ] ) <nl> def create_examples ( num_examples , input_mean ) : <nl> examples . append ( ex ) <nl> return examples <nl> <nl> + <nl> def create_dir_test ( ) : <nl> " " " Verifies file_io directory handling methods . " " " <nl> <nl> def create_dir_test ( ) : <nl> print ( " Deleted directory recursively % s in % s milliseconds " % ( <nl> dir_name , elapsed_ms ) ) <nl> <nl> + <nl> def create_object_test ( ) : <nl> " " " Verifies file_io ' s object manipulation methods . " " " <nl> starttime_ms = int ( round ( time . time ( ) * 1000 ) ) <nl> def create_object_test ( ) : <nl> print ( " Creating file % s . " % file_name ) <nl> file_io . write_string_to_file ( file_name , " test file creation . " ) <nl> elapsed_ms = int ( round ( time . time ( ) * 1000 ) ) - starttime_ms <nl> - print ( " Created % d files in % s milliseconds " % ( len ( files_to_create ) , elapsed_ms ) ) <nl> + print ( " Created % d files in % s milliseconds " % ( <nl> + len ( files_to_create ) , elapsed_ms ) ) <nl> <nl> # Listing files of pattern1 . <nl> list_files_pattern = " % s / test_file * . txt " % dir_name <nl> def create_object_test ( ) : <nl> file_io . delete_recursively ( dir_name ) <nl> <nl> <nl> - if __name__ = = " __main__ " : <nl> + def main ( argv ) : <nl> + del argv # Unused . <nl> + <nl> # Sanity check on the GCS bucket URL . <nl> if not FLAGS . gcs_bucket_url or not FLAGS . gcs_bucket_url . startswith ( " gs : / / " ) : <nl> print ( " ERROR : Invalid GCS bucket URL : \ " % s \ " " % FLAGS . gcs_bucket_url ) <nl> def create_object_test ( ) : <nl> # tf_record_iterator works . <nl> record_iter = tf . python_io . tf_record_iterator ( input_path ) <nl> read_count = 0 <nl> - for r in record_iter : <nl> + for _ in record_iter : <nl> read_count + = 1 <nl> print ( " Read % d records using tf_record_iterator " % read_count ) <nl> <nl> def create_object_test ( ) : <nl> <nl> # Verify that running the read op in a session works . <nl> print ( " \ n = = = Testing TFRecordReader . read op in a session . . . = = = " ) <nl> - with tf . Graph ( ) . as_default ( ) as g : <nl> + with tf . Graph ( ) . as_default ( ) : <nl> filename_queue = tf . train . string_input_producer ( [ input_path ] , num_epochs = 1 ) <nl> reader = tf . TFRecordReader ( ) <nl> _ , serialized_example = reader . 
read ( filename_queue ) <nl> def create_object_test ( ) : <nl> <nl> create_dir_test ( ) <nl> create_object_test ( ) <nl> + <nl> + <nl> + if __name__ = = " __main__ " : <nl> + tf . app . run ( main ) <nl> mmm a / tensorflow / workspace . bzl <nl> ppp b / tensorflow / workspace . bzl <nl> def tf_workspace ( path_prefix = " " , tf_repo_name = " " ) : <nl> deps = [ " @ com_google_guava " ] , <nl> ) <nl> <nl> + java_import_external ( <nl> + name = " javax_validation " , <nl> + jar_sha256 = " e459f313ebc6db2483f8ceaad39af07086361b474fa92e40f442e8de5d9895dc " , <nl> + jar_urls = [ <nl> + " http : / / repo1 . maven . org / maven2 / javax / validation / validation - api / 1 . 0 . 0 . GA / validation - api - 1 . 0 . 0 . GA . jar " , <nl> + ] , <nl> + licenses = [ " notice " ] , # Apache 2 . 0 <nl> + ) <nl> + <nl> native . new_http_archive ( <nl> name = " com_google_pprof " , <nl> urls = [ <nl> similarity index 100 % <nl> rename from tensorflow / opensource_only / arm_neon_2_x86_sse . BUILD <nl> rename to third_party / arm_neon_2_x86_sse . BUILD <nl>
Merge pull request from benoitsteiner / branch_173716375
tensorflow/tensorflow
4be569ca656b6f46efb73bd3aaa87c6ad42dd37d
2017-10-27T22:23:00Z
mmm a / test / IRGen / pic . swift <nl> ppp b / test / IRGen / pic . swift <nl> public func use_global ( ) - > Int { <nl> / / armv7k : ldr [ [ R_ADR ] ] , { { \ [ } } [ [ R_ADR ] ] { { \ ] } } <nl> <nl> / / arm64 - LABEL : { { _ ? } } $ S4main10use_globalSiyF : <nl> - / / arm64 : bl _swift_beginAccess <nl> / / arm64 : adrp [ [ REG1 : x [ 0 - 9 ] + ] ] , _ $ S4main6globalSivp @ PAGE <nl> - / / arm64 : add [ [ REG2 : x [ 0 - 9 ] + ] ] , [ [ REG1 ] ] , _ $ S4main6globalSivp @ PAGEOFF <nl> - / / arm64 : ldr { { x [ 0 - 9 ] + } } , { { \ [ } } [ [ REG2 ] ] { { \ ] } } <nl> + / / arm64 : add [ [ REG1 ] ] , [ [ REG1 ] ] , _ $ S4main6globalSivp @ PAGEOFF <nl> + / / This is a spill around beginAccess that is not strictly necessary . <nl> + / / arm64 : str [ [ REG1 ] ] , { { \ [ } } sp , # 16 { { \ ] } } <nl> + / / arm64 - NEXT : str <nl> + / / arm64 - NEXT : bl _swift_beginAccess <nl> + / / arm64 - NEXT : ldr [ [ REG2 : x [ 0 - 9 ] + ] ] , { { \ [ } } sp , # 16 { { \ ] } } <nl> + / / arm64 - NEXT : ldr { { x [ 0 - 9 ] + } } , { { \ [ } } [ [ REG2 ] ] { { \ ] } } <nl>
Merge pull request from gottesmm / master - next - rdar42837829
apple/swift
e425bd49793cabc31778b28aa04aef533e1b7832
2018-08-03T01:38:54Z
mmm a / doc / REST - interface . md <nl> ppp b / doc / REST - interface . md <nl> Only supports JSON as output format . <nl> * headers : ( numeric ) the current number of headers we have validated <nl> * bestblockhash : ( string ) the hash of the currently best block <nl> * difficulty : ( numeric ) the current difficulty <nl> + * mediantime : ( numeric ) the median time of the 11 blocks before the most recent block on the blockchain <nl> * verificationprogress : ( numeric ) estimate of verification progress [ 0 . . 1 ] <nl> * chainwork : ( string ) total amount of work in active chain , in hexadecimal <nl> * pruned : ( boolean ) if the blocks are subject to pruning <nl> * pruneheight : ( numeric ) heighest block available <nl> * softforks : ( array ) status of softforks in progress <nl> + * bip9_softforks : ( object ) status of BIP9 softforks in progress <nl> <nl> # # # # Query UTXO set <nl> ` GET / rest / getutxos / < checkmempool > / < txid > - < n > / < txid > - < n > / . . . / < txid > - < n > . < bin | hex | json > ` <nl> Example : <nl> ` ` ` <nl> $ curl localhost : 18332 / rest / getutxos / checkmempool / b2cdfd7b89def827ff8af7cd9bff7627ff72e5e8b0f71210f92ea7a4000c5d75 - 0 . json 2 > / dev / null | json_pp <nl> { <nl> - " chaintipHash " : " 00000000fb01a7f3745a717f8caebee056c484e6e0bfe4a9591c235bb70506fb " , <nl> " chainHeight " : 325347 , <nl> + " chaintipHash " : " 00000000fb01a7f3745a717f8caebee056c484e6e0bfe4a9591c235bb70506fb " , <nl> + " bitmap " : " 1 " , <nl> " utxos " : [ <nl> { <nl> + " txvers " : 1 <nl> + " height " : 2147483647 , <nl> + " value " : 8 . 8687 , <nl> " scriptPubKey " : { <nl> - " addresses " : [ <nl> - " mi7as51dvLJsizWnTMurtRmrP8hG2m1XvD " <nl> - ] , <nl> - " type " : " pubkeyhash " , <nl> + " asm " : " OP_DUP OP_HASH160 1c7cebb529b86a04c683dfa87be49de35bcf589e OP_EQUALVERIFY OP_CHECKSIG " , <nl> " hex " : " 76a9141c7cebb529b86a04c683dfa87be49de35bcf589e88ac " , <nl> " reqSigs " : 1 , <nl> - " asm " : " OP_DUP OP_HASH160 1c7cebb529b86a04c683dfa87be49de35bcf589e OP_EQUALVERIFY OP_CHECKSIG " <nl> - } , <nl> - " value " : 8 . 8687 , <nl> - " height " : 2147483647 , <nl> - " txvers " : 1 <nl> + " type " : " pubkeyhash " , <nl> + " addresses " : [ <nl> + " mi7as51dvLJsizWnTMurtRmrP8hG2m1XvD " <nl> + ] <nl> + } <nl> } <nl> - ] , <nl> - " bitmap " : " 1 " <nl> + ] <nl> } <nl> ` ` ` <nl> <nl> Only supports JSON as output format . <nl> * size : ( numeric ) the number of transactions in the TX mempool <nl> * bytes : ( numeric ) size of the TX mempool in bytes <nl> * usage : ( numeric ) total TX mempool memory usage <nl> + * maxmempool : ( numeric ) maximum memory usage for the mempool in bytes <nl> + * mempoolminfee : ( numeric ) minimum feerate ( BTC per KB ) for tx to be accepted <nl> <nl> ` GET / rest / mempool / contents . json ` <nl> <nl> mmm a / src / rpc / net . cpp <nl> ppp b / src / rpc / net . cpp <nl> UniValue getaddednodeinfo ( const JSONRPCRequest & request ) <nl> " , . . . \ n " <nl> " ] \ n " <nl> " \ nExamples : \ n " <nl> - + HelpExampleCli ( " getaddednodeinfo " , " true " ) <nl> - + HelpExampleCli ( " getaddednodeinfo " , " true \ " 192 . 168 . 0 . 201 \ " " ) <nl> - + HelpExampleRpc ( " getaddednodeinfo " , " true , \ " 192 . 168 . 0 . 201 \ " " ) <nl> + + HelpExampleCli ( " getaddednodeinfo " , " \ " 192 . 168 . 0 . 201 \ " " ) <nl> + + HelpExampleRpc ( " getaddednodeinfo " , " \ " 192 . 168 . 0 . 201 \ " " ) <nl> ) ; <nl> <nl> if ( ! g_connman ) <nl>
REST / RPC example update
bitcoin/bitcoin
b8bb4257fe7b2f077e1c8b4145ff13d1041787e8
2017-06-30T11:21:08Z
mmm a / modules / imgproc / src / color . cpp <nl> ppp b / modules / imgproc / src / color . cpp <nl> struct HSV2RGB_f <nl> typedef float channel_type ; <nl> <nl> HSV2RGB_f ( int _dstcn , int _blueIdx , float _hrange ) <nl> - : dstcn ( _dstcn ) , blueIdx ( _blueIdx ) , hscale ( 6 . f / _hrange ) { } <nl> + : dstcn ( _dstcn ) , blueIdx ( _blueIdx ) , hscale ( 6 . f / _hrange ) { <nl> + # if CV_SSE2 <nl> + haveSIMD = checkHardwareSupport ( CV_CPU_SSE2 ) ; <nl> + # endif <nl> + } <nl> + <nl> + # if CV_SSE2 <nl> + void process ( __m128 & v_h0 , __m128 & v_h1 , __m128 & v_s0 , <nl> + __m128 & v_s1 , __m128 & v_v0 , __m128 & v_v1 ) const <nl> + { <nl> + v_h0 = _mm_mul_ps ( v_h0 , _mm_set1_ps ( hscale ) ) ; <nl> + v_h1 = _mm_mul_ps ( v_h1 , _mm_set1_ps ( hscale ) ) ; <nl> + <nl> + __m128 v_pre_sector0 = _mm_cvtepi32_ps ( _mm_cvttps_epi32 ( v_h0 ) ) ; <nl> + __m128 v_pre_sector1 = _mm_cvtepi32_ps ( _mm_cvttps_epi32 ( v_h1 ) ) ; <nl> + <nl> + v_h0 = _mm_sub_ps ( v_h0 , v_pre_sector0 ) ; <nl> + v_h1 = _mm_sub_ps ( v_h1 , v_pre_sector1 ) ; <nl> + <nl> + __m128 v_tab00 = v_v0 ; <nl> + __m128 v_tab01 = v_v1 ; <nl> + __m128 v_tab10 = _mm_mul_ps ( v_v0 , _mm_sub_ps ( _mm_set1_ps ( 1 . 0f ) , v_s0 ) ) ; <nl> + __m128 v_tab11 = _mm_mul_ps ( v_v1 , _mm_sub_ps ( _mm_set1_ps ( 1 . 0f ) , v_s1 ) ) ; <nl> + __m128 v_tab20 = _mm_mul_ps ( v_v0 , _mm_sub_ps ( _mm_set1_ps ( 1 . 0f ) , _mm_mul_ps ( v_s0 , v_h0 ) ) ) ; <nl> + __m128 v_tab21 = _mm_mul_ps ( v_v1 , _mm_sub_ps ( _mm_set1_ps ( 1 . 0f ) , _mm_mul_ps ( v_s1 , v_h1 ) ) ) ; <nl> + __m128 v_tab30 = _mm_mul_ps ( v_v0 , _mm_sub_ps ( _mm_set1_ps ( 1 . 0f ) , _mm_mul_ps ( v_s0 , _mm_sub_ps ( _mm_set1_ps ( 1 . 0f ) , v_h0 ) ) ) ) ; <nl> + __m128 v_tab31 = _mm_mul_ps ( v_v1 , _mm_sub_ps ( _mm_set1_ps ( 1 . 0f ) , _mm_mul_ps ( v_s1 , _mm_sub_ps ( _mm_set1_ps ( 1 . 0f ) , v_h1 ) ) ) ) ; <nl> + <nl> + __m128 v_sector0 = _mm_div_ps ( v_pre_sector0 , _mm_set1_ps ( 6 . 0f ) ) ; <nl> + __m128 v_sector1 = _mm_div_ps ( v_pre_sector1 , _mm_set1_ps ( 6 . 0f ) ) ; <nl> + v_sector0 = _mm_cvtepi32_ps ( _mm_cvttps_epi32 ( v_sector0 ) ) ; <nl> + v_sector1 = _mm_cvtepi32_ps ( _mm_cvttps_epi32 ( v_sector1 ) ) ; <nl> + v_sector0 = _mm_mul_ps ( v_sector0 , _mm_set1_ps ( 6 . 0f ) ) ; <nl> + v_sector1 = _mm_mul_ps ( v_sector1 , _mm_set1_ps ( 6 . 0f ) ) ; <nl> + v_sector0 = _mm_sub_ps ( v_pre_sector0 , v_sector0 ) ; <nl> + v_sector1 = _mm_sub_ps ( v_pre_sector1 , v_sector1 ) ; <nl> + <nl> + v_h0 = _mm_and_ps ( v_tab10 , _mm_cmplt_ps ( v_sector0 , _mm_set1_ps ( 2 . 0f ) ) ) ; <nl> + v_h1 = _mm_and_ps ( v_tab11 , _mm_cmplt_ps ( v_sector1 , _mm_set1_ps ( 2 . 0f ) ) ) ; <nl> + v_h0 = _mm_or_ps ( v_h0 , _mm_and_ps ( v_tab30 , _mm_cmpeq_ps ( v_sector0 , _mm_set1_ps ( 2 . 0f ) ) ) ) ; <nl> + v_h1 = _mm_or_ps ( v_h1 , _mm_and_ps ( v_tab31 , _mm_cmpeq_ps ( v_sector1 , _mm_set1_ps ( 2 . 0f ) ) ) ) ; <nl> + v_h0 = _mm_or_ps ( v_h0 , _mm_and_ps ( v_tab00 , _mm_cmpeq_ps ( v_sector0 , _mm_set1_ps ( 3 . 0f ) ) ) ) ; <nl> + v_h1 = _mm_or_ps ( v_h1 , _mm_and_ps ( v_tab01 , _mm_cmpeq_ps ( v_sector1 , _mm_set1_ps ( 3 . 0f ) ) ) ) ; <nl> + v_h0 = _mm_or_ps ( v_h0 , _mm_and_ps ( v_tab00 , _mm_cmpeq_ps ( v_sector0 , _mm_set1_ps ( 4 . 0f ) ) ) ) ; <nl> + v_h1 = _mm_or_ps ( v_h1 , _mm_and_ps ( v_tab01 , _mm_cmpeq_ps ( v_sector1 , _mm_set1_ps ( 4 . 0f ) ) ) ) ; <nl> + v_h0 = _mm_or_ps ( v_h0 , _mm_and_ps ( v_tab20 , _mm_cmpgt_ps ( v_sector0 , _mm_set1_ps ( 4 . 0f ) ) ) ) ; <nl> + v_h1 = _mm_or_ps ( v_h1 , _mm_and_ps ( v_tab21 , _mm_cmpgt_ps ( v_sector1 , _mm_set1_ps ( 4 . 
0f ) ) ) ) ; <nl> + v_s0 = _mm_and_ps ( v_tab30 , _mm_cmplt_ps ( v_sector0 , _mm_set1_ps ( 1 . 0f ) ) ) ; <nl> + v_s1 = _mm_and_ps ( v_tab31 , _mm_cmplt_ps ( v_sector1 , _mm_set1_ps ( 1 . 0f ) ) ) ; <nl> + v_s0 = _mm_or_ps ( v_s0 , _mm_and_ps ( v_tab00 , _mm_cmpeq_ps ( v_sector0 , _mm_set1_ps ( 1 . 0f ) ) ) ) ; <nl> + v_s1 = _mm_or_ps ( v_s1 , _mm_and_ps ( v_tab01 , _mm_cmpeq_ps ( v_sector1 , _mm_set1_ps ( 1 . 0f ) ) ) ) ; <nl> + v_s0 = _mm_or_ps ( v_s0 , _mm_and_ps ( v_tab00 , _mm_cmpeq_ps ( v_sector0 , _mm_set1_ps ( 2 . 0f ) ) ) ) ; <nl> + v_s1 = _mm_or_ps ( v_s1 , _mm_and_ps ( v_tab01 , _mm_cmpeq_ps ( v_sector1 , _mm_set1_ps ( 2 . 0f ) ) ) ) ; <nl> + v_s0 = _mm_or_ps ( v_s0 , _mm_and_ps ( v_tab20 , _mm_cmpeq_ps ( v_sector0 , _mm_set1_ps ( 3 . 0f ) ) ) ) ; <nl> + v_s1 = _mm_or_ps ( v_s1 , _mm_and_ps ( v_tab21 , _mm_cmpeq_ps ( v_sector1 , _mm_set1_ps ( 3 . 0f ) ) ) ) ; <nl> + v_s0 = _mm_or_ps ( v_s0 , _mm_and_ps ( v_tab10 , _mm_cmpgt_ps ( v_sector0 , _mm_set1_ps ( 3 . 0f ) ) ) ) ; <nl> + v_s1 = _mm_or_ps ( v_s1 , _mm_and_ps ( v_tab11 , _mm_cmpgt_ps ( v_sector1 , _mm_set1_ps ( 3 . 0f ) ) ) ) ; <nl> + v_v0 = _mm_and_ps ( v_tab00 , _mm_cmplt_ps ( v_sector0 , _mm_set1_ps ( 1 . 0f ) ) ) ; <nl> + v_v1 = _mm_and_ps ( v_tab01 , _mm_cmplt_ps ( v_sector1 , _mm_set1_ps ( 1 . 0f ) ) ) ; <nl> + v_v0 = _mm_or_ps ( v_v0 , _mm_and_ps ( v_tab20 , _mm_cmpeq_ps ( v_sector0 , _mm_set1_ps ( 1 . 0f ) ) ) ) ; <nl> + v_v1 = _mm_or_ps ( v_v1 , _mm_and_ps ( v_tab21 , _mm_cmpeq_ps ( v_sector1 , _mm_set1_ps ( 1 . 0f ) ) ) ) ; <nl> + v_v0 = _mm_or_ps ( v_v0 , _mm_and_ps ( v_tab10 , _mm_cmpeq_ps ( v_sector0 , _mm_set1_ps ( 2 . 0f ) ) ) ) ; <nl> + v_v1 = _mm_or_ps ( v_v1 , _mm_and_ps ( v_tab11 , _mm_cmpeq_ps ( v_sector1 , _mm_set1_ps ( 2 . 0f ) ) ) ) ; <nl> + v_v0 = _mm_or_ps ( v_v0 , _mm_and_ps ( v_tab10 , _mm_cmpeq_ps ( v_sector0 , _mm_set1_ps ( 3 . 0f ) ) ) ) ; <nl> + v_v1 = _mm_or_ps ( v_v1 , _mm_and_ps ( v_tab11 , _mm_cmpeq_ps ( v_sector1 , _mm_set1_ps ( 3 . 0f ) ) ) ) ; <nl> + v_v0 = _mm_or_ps ( v_v0 , _mm_and_ps ( v_tab30 , _mm_cmpeq_ps ( v_sector0 , _mm_set1_ps ( 4 . 0f ) ) ) ) ; <nl> + v_v1 = _mm_or_ps ( v_v1 , _mm_and_ps ( v_tab31 , _mm_cmpeq_ps ( v_sector1 , _mm_set1_ps ( 4 . 0f ) ) ) ) ; <nl> + v_v0 = _mm_or_ps ( v_v0 , _mm_and_ps ( v_tab00 , _mm_cmpgt_ps ( v_sector0 , _mm_set1_ps ( 4 . 0f ) ) ) ) ; <nl> + v_v1 = _mm_or_ps ( v_v1 , _mm_and_ps ( v_tab01 , _mm_cmpgt_ps ( v_sector1 , _mm_set1_ps ( 4 . 
0f ) ) ) ) ; <nl> + } <nl> + # endif <nl> <nl> void operator ( ) ( const float * src , float * dst , int n ) const <nl> { <nl> - int i , bidx = blueIdx , dcn = dstcn ; <nl> + int i = 0 , bidx = blueIdx , dcn = dstcn ; <nl> float _hscale = hscale ; <nl> float alpha = ColorChannel < float > : : max ( ) ; <nl> n * = 3 ; <nl> <nl> - for ( i = 0 ; i < n ; i + = 3 , dst + = dcn ) <nl> + # if CV_SSE2 <nl> + if ( haveSIMD ) <nl> + { <nl> + for ( ; i < = n - 24 ; i + = 24 , dst + = dcn * 8 ) <nl> + { <nl> + __m128 v_h0 = _mm_loadu_ps ( src + i + 0 ) ; <nl> + __m128 v_h1 = _mm_loadu_ps ( src + i + 4 ) ; <nl> + __m128 v_s0 = _mm_loadu_ps ( src + i + 8 ) ; <nl> + __m128 v_s1 = _mm_loadu_ps ( src + i + 12 ) ; <nl> + __m128 v_v0 = _mm_loadu_ps ( src + i + 16 ) ; <nl> + __m128 v_v1 = _mm_loadu_ps ( src + i + 20 ) ; <nl> + <nl> + _mm_deinterleave_ps ( v_h0 , v_h1 , v_s0 , v_s1 , v_v0 , v_v1 ) ; <nl> + <nl> + process ( v_h0 , v_h1 , v_s0 , v_s1 , v_v0 , v_v1 ) ; <nl> + <nl> + if ( dcn = = 3 ) <nl> + { <nl> + if ( bidx ) <nl> + { <nl> + _mm_interleave_ps ( v_v0 , v_v1 , v_s0 , v_s1 , v_h0 , v_h1 ) ; <nl> + <nl> + _mm_storeu_ps ( dst + 0 , v_v0 ) ; <nl> + _mm_storeu_ps ( dst + 4 , v_v1 ) ; <nl> + _mm_storeu_ps ( dst + 8 , v_s0 ) ; <nl> + _mm_storeu_ps ( dst + 12 , v_s1 ) ; <nl> + _mm_storeu_ps ( dst + 16 , v_h0 ) ; <nl> + _mm_storeu_ps ( dst + 20 , v_h1 ) ; <nl> + } <nl> + else <nl> + { <nl> + _mm_interleave_ps ( v_h0 , v_h1 , v_s0 , v_s1 , v_v0 , v_v1 ) ; <nl> + <nl> + _mm_storeu_ps ( dst + 0 , v_h0 ) ; <nl> + _mm_storeu_ps ( dst + 4 , v_h1 ) ; <nl> + _mm_storeu_ps ( dst + 8 , v_s0 ) ; <nl> + _mm_storeu_ps ( dst + 12 , v_s1 ) ; <nl> + _mm_storeu_ps ( dst + 16 , v_v0 ) ; <nl> + _mm_storeu_ps ( dst + 20 , v_v1 ) ; <nl> + } <nl> + } <nl> + else <nl> + { <nl> + __m128 v_a0 = _mm_set1_ps ( alpha ) ; <nl> + __m128 v_a1 = _mm_set1_ps ( alpha ) ; <nl> + if ( bidx ) <nl> + { <nl> + _mm_interleave_ps ( v_v0 , v_v1 , v_s0 , v_s1 , v_h0 , v_h1 , v_a0 , v_a1 ) ; <nl> + <nl> + _mm_storeu_ps ( dst + 0 , v_v0 ) ; <nl> + _mm_storeu_ps ( dst + 4 , v_v1 ) ; <nl> + _mm_storeu_ps ( dst + 8 , v_s0 ) ; <nl> + _mm_storeu_ps ( dst + 12 , v_s1 ) ; <nl> + _mm_storeu_ps ( dst + 16 , v_h0 ) ; <nl> + _mm_storeu_ps ( dst + 20 , v_h1 ) ; <nl> + _mm_storeu_ps ( dst + 24 , v_a0 ) ; <nl> + _mm_storeu_ps ( dst + 28 , v_a1 ) ; <nl> + } <nl> + else <nl> + { <nl> + _mm_interleave_ps ( v_h0 , v_h1 , v_s0 , v_s1 , v_v0 , v_v1 , v_a0 , v_a1 ) ; <nl> + <nl> + _mm_storeu_ps ( dst + 0 , v_h0 ) ; <nl> + _mm_storeu_ps ( dst + 4 , v_h1 ) ; <nl> + _mm_storeu_ps ( dst + 8 , v_s0 ) ; <nl> + _mm_storeu_ps ( dst + 12 , v_s1 ) ; <nl> + _mm_storeu_ps ( dst + 16 , v_v0 ) ; <nl> + _mm_storeu_ps ( dst + 20 , v_v1 ) ; <nl> + _mm_storeu_ps ( dst + 24 , v_a0 ) ; <nl> + _mm_storeu_ps ( dst + 28 , v_a1 ) ; <nl> + } <nl> + } <nl> + } <nl> + } <nl> + # endif <nl> + for ( ; i < n ; i + = 3 , dst + = dcn ) <nl> { <nl> float h = src [ i ] , s = src [ i + 1 ] , v = src [ i + 2 ] ; <nl> float b , g , r ; <nl> struct HSV2RGB_f <nl> <nl> int dstcn , blueIdx ; <nl> float hscale ; <nl> + # if CV_SSE2 <nl> + bool haveSIMD ; <nl> + # endif <nl> } ; <nl> <nl> <nl>
Merge pull request from K - Shinotsuka : issue34
opencv/opencv
c1ee798213a576a1170ca71ec4fcbd8ad553b1bc
2016-11-22T13:57:13Z
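The OpenCV commit above vectorizes the HSV-to-RGB inner loop by computing every sector's candidate value up front and then blending them together with compare masks instead of branching per pixel. A minimal sketch of that compare-and-mask select idiom, using our own names and toy data rather than OpenCV's code:

```cpp
// Branchless per-lane select: result[i] = (sector[i] == k) ? val[i] : result[i].
// Illustrative only; select_eq and the data below are ours, not OpenCV's.
#include <emmintrin.h>  // SSE2 intrinsics
#include <cstdio>

// Merge lanes of `val` into `acc` wherever sector == k.
static inline __m128 select_eq(__m128 acc, __m128 sector, float k, __m128 val) {
    __m128 mask = _mm_cmpeq_ps(sector, _mm_set1_ps(k));  // all-ones where equal
    return _mm_or_ps(acc, _mm_and_ps(val, mask));        // acc |= val & mask
}

int main() {
    __m128 sector = _mm_setr_ps(0.f, 1.f, 2.f, 1.f);
    __m128 a      = _mm_set1_ps(10.f);
    __m128 b      = _mm_set1_ps(20.f);
    __m128 r      = _mm_setzero_ps();
    r = select_eq(r, sector, 1.f, a);  // lanes 1 and 3 receive 10
    r = select_eq(r, sector, 2.f, b);  // lane 2 receives 20
    float out[4];
    _mm_storeu_ps(out, r);
    std::printf("%g %g %g %g\n", out[0], out[1], out[2], out[3]);  // 0 10 20 10
    return 0;
}
```

Each `_mm_cmpeq_ps` produces an all-ones or all-zeros lane mask, so ANDing a candidate with the mask and ORing it into the accumulator acts as a per-lane switch statement, which is exactly how the chained `_mm_cmpeq_ps`/`_mm_and_ps`/`_mm_or_ps` sequences in the diff pick the right `v_tab` value for each pixel's sector.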
mmm a / modules / dreamview / frontend / src / renderer / tileground . js <nl> ppp b / modules / dreamview / frontend / src / renderer / tileground . js <nl> export default class TileGround { <nl> mpp : metadata . mpp , <nl> tile : metadata . tile , <nl> imageUrl : metadata . image_url , <nl> + availableMapTiles : new Set ( metadata . availableImages ) , <nl> } ; <nl> <nl> this . mapId = metadata . mapid ; <nl> export default class TileGround { <nl> } <nl> <nl> appendTiles ( row , col , key , coordinates , scene ) { <nl> + const imageName = ` $ { this . metadata . mpp } _ $ { row } _ $ { col } _ $ { this . metadata . tile } . png ` ; <nl> + if ( ! this . metadata . availableMapTiles . has ( imageName ) ) { <nl> + return ; <nl> + } <nl> + <nl> const mapUrl = this . metadata . imageUrl <nl> - ? ` $ { this . mapUrlPrefix } / $ { this . metadata . mpp } _ $ { row } _ $ { col } _ $ { this . metadata . tile } . png ` <nl> + ? ` $ { this . mapUrlPrefix } / $ { imageName } ` <nl> : ` $ { this . mapUrlPrefix } ? mapId = $ { this . mapId } & i = $ { row } & j = $ { col } ` ; <nl> <nl> const position = coordinates . applyOffset ( { <nl>
Dreamview : update offlineview fetching image map
ApolloAuto/apollo
4c20148173baa9979235ee8572f60ca43e53d60f
2019-05-24T22:35:12Z
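The Dreamview change above consults a set of available tile images before building a request URL, so tiles that were never rendered are skipped instead of producing failed fetches. The same guard transplanted to C++; the tile-name scheme is copied from the diff, while the set contents and loop bounds are invented for illustration:

```cpp
// Skip requesting a map tile unless its image name is known to exist.
#include <string>
#include <unordered_set>
#include <iostream>

std::string tile_name(int mpp, int row, int col, int tile) {
    return std::to_string(mpp) + "_" + std::to_string(row) + "_" +
           std::to_string(col) + "_" + std::to_string(tile) + ".png";
}

int main() {
    std::unordered_set<std::string> available = {"16_3_4_512.png"};
    for (int col = 3; col <= 5; ++col) {
        std::string name = tile_name(16, 3, col, 512);
        if (!available.count(name)) continue;  // avoid a guaranteed-404 fetch
        std::cout << "fetch /map/" << name << "\n";
    }
    return 0;
}
```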
mmm a / tensorflow / lite / profiling / profile_buffer . h <nl> ppp b / tensorflow / lite / profiling / profile_buffer . h <nl> class ProfileBuffer { <nl> / / Returns the profile event at the given index . If the index is invalid a <nl> / / nullptr is returned . The return event may get overwritten if more events <nl> / / are added to buffer . <nl> - const struct ProfileEvent * const At ( int index ) const { <nl> + const struct ProfileEvent * const At ( size_t index ) const { <nl> size_t size = Size ( ) ; <nl> if ( index > = size ) { <nl> return nullptr ; <nl>
Updated profile_buffer . h
tensorflow/tensorflow
6f01d30a9585663b35394749c644502da48ec6ef
2019-02-24T09:45:50Z
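The one-line TFLite change above switches the index parameter from int to size_t so the `index >= size` bounds check compares like-signed types. With an int parameter the check still behaved (the int is converted to unsigned by the usual arithmetic conversions), but it drew a sign-compare warning and hid the conversion inside the function; taking size_t at the boundary makes it explicit. A hypothetical accessor, not TFLite's actual class, showing the effect:

```cpp
// Bounds-checked lookup with an unsigned index: a negative caller value
// converts to a huge size_t at the call site and the single check rejects it.
#include <cstddef>
#include <cstdio>

const int* at(const int* data, size_t size, size_t index) {
    if (index >= size) return nullptr;  // also catches ex-negative ints
    return &data[index];
}

int main() {
    int data[3] = {1, 2, 3};
    int bad = -1;
    // -1 becomes SIZE_MAX when converted, so the bounds check fails cleanly:
    std::printf("%p\n", (const void*)at(data, 3, (size_t)bad));  // null
    std::printf("%d\n", *at(data, 3, 1));                        // 2
    return 0;
}
```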
mmm a / js / common / tests / shell - foxx . js <nl> ppp b / js / common / tests / shell - foxx . js <nl> function DocumentationAndConstraintsSpec ( ) { <nl> assertEqual ( routes [ 0 ] . docs . parameters [ 1 ] . name , " idParam " ) ; <nl> assertEqual ( routes [ 0 ] . docs . parameters [ 1 ] . description , " Id of the Foxx " ) ; <nl> assertEqual ( routes [ 0 ] . docs . parameters [ 1 ] . dataType , " int " ) ; <nl> - <nl> } , <nl> <nl> testDefineQueryParam : function ( ) { <nl> function DocumentationAndConstraintsSpec ( ) { <nl> assertEqual ( app . models [ jsonSchema . id ] , jsonSchema ) ; <nl> } , <nl> <nl> + testSetParamForBodyParam : function ( ) { <nl> + var req = { parameters : { } } , <nl> + res = { } , <nl> + paramName = stub ( ) , <nl> + description = stub ( ) , <nl> + requestBody = stub ( ) , <nl> + ModelPrototype = stub ( ) , <nl> + jsonSchemaId = stub ( ) , <nl> + called = false ; <nl> + <nl> + allow ( req ) <nl> + . toReceive ( " body " ) <nl> + . andReturn ( requestBody ) ; <nl> + <nl> + ModelPrototype = mockConstructor ( requestBody ) ; <nl> + ModelPrototype . toJSONSchema = function ( ) { return { id : jsonSchemaId } ; } ; <nl> + <nl> + app . get ( ' / foxx ' , function ( providedReq ) { <nl> + called = ( providedReq . parameters [ paramName ] instanceof ModelPrototype ) ; <nl> + } ) . bodyParam ( paramName , description , ModelPrototype ) ; <nl> + <nl> + routes [ 0 ] . action . callback ( req , res ) ; <nl> + <nl> + assertTrue ( called ) ; <nl> + ModelPrototype . assertIsSatisfied ( ) ; <nl> + } , <nl> + <nl> testDocumentationForErrorResponse : function ( ) { <nl> var CustomErrorClass = function ( ) { } ; <nl> <nl> mmm a / js / server / modules / org / arangodb / foxx / request_context . js <nl> ppp b / js / server / modules / org / arangodb / foxx / request_context . js <nl> var RequestContext , <nl> is = require ( " org / arangodb / is " ) , <nl> UnauthorizedError = require ( " org / arangodb / foxx / authentication " ) . UnauthorizedError , <nl> createErrorBubbleWrap , <nl> + createBodyParamBubbleWrap , <nl> addCheck ; <nl> <nl> + createBodyParamBubbleWrap = function ( handler , paramName , Proto ) { <nl> + return function ( req , res ) { <nl> + req . parameters [ paramName ] = new Proto ( req . body ( ) ) ; <nl> + handler ( req , res ) ; <nl> + } ; <nl> + } ; <nl> + <nl> createErrorBubbleWrap = function ( handler , errorClass , code , reason , errorHandler ) { <nl> ' use strict ' ; <nl> if ( is . notExisty ( errorHandler ) ) { <nl> extend ( RequestContext . prototype , { <nl> / / / Expect the body of the request to be a JSON with the attributes you annotated <nl> / / / in your model . It will appear alongside the provided description in your <nl> / / / Documentation . <nl> + / / / This will initialize a ` Model ` with the data and provide it to you via the <nl> + / / / params as ` paramName ` . <nl> / / / For information about how to annotate your models , see the Model section . <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> <nl> bodyParam : function ( paramName , description , Proto ) { <nl> ' use strict ' ; <nl> + var handler = this . route . action . callback ; <nl> + <nl> this . docs . addBodyParam ( paramName , description , Proto . toJSONSchema ( paramName ) ) ; <nl> + this . route . action . callback = createBodyParamBubbleWrap ( handler , paramName , Proto ) ; <nl> + <nl> return this ; <nl> } , <nl> <nl>
Foxx : Provide the Model
arangodb/arangodb
efc5ee16dd9f132ea4fa0c32abe011177a5c01d9
2013-09-18T13:32:41Z
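The Foxx commit above wraps the stored route callback so the request body is turned into a Model and placed in `req.parameters[paramName]` before the user's handler runs. The decorator shape carried over to C++ with stand-in types (ArangoDB's real code is JavaScript; `Request`, `Handler`, and `with_body_param` here are ours):

```cpp
// Wrap a handler so a derived value is injected into the request first.
#include <functional>
#include <map>
#include <string>
#include <iostream>

struct Request {
    std::string body;
    std::map<std::string, std::string> parameters;
};
using Handler = std::function<void(Request&)>;

// Return a new handler that stores the parsed body under `name`, then
// delegates to the original handler, mirroring createBodyParamBubbleWrap.
Handler with_body_param(Handler inner, std::string name) {
    return [inner, name](Request& req) {
        req.parameters[name] = req.body;  // real code would construct a model
        inner(req);
    };
}

int main() {
    Handler h = [](Request& req) { std::cout << req.parameters["foxx"] << "\n"; };
    h = with_body_param(h, "foxx");
    Request req{"{\"name\":\"knight\"}", {}};
    h(req);  // prints the injected body
    return 0;
}
```

The payoff is the same as in the diff: the user handler receives a ready-made parameter and never touches the raw body itself.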
mmm a / lib / SILPasses / DefiniteInitialization . cpp <nl> ppp b / lib / SILPasses / DefiniteInitialization . cpp <nl> bool ElementPromotion : : promoteLoad ( SILInstruction * Inst ) { <nl> / / If there are no values available at this load point , then we fail to <nl> / / promote this load and there is nothing to do . <nl> bool AnyAvailable = false ; <nl> - for ( unsigned i = 0 , e = AvailableValues . size ( ) ; i ! = e ; + + i ) <nl> + for ( unsigned i = FirstElt , e = i + NumLoadSubElements ; i ! = e ; + + i ) <nl> if ( AvailableValues [ i ] . first . isValid ( ) ) { <nl> AnyAvailable = true ; <nl> break ; <nl>
Only check elements that we care about . This is only a cleanup , no
apple/swift
b8e7ca29964bee5886ef85da47d8f49c373044c4
2013-11-21T07:28:54Z
mmm a / test / cpp / end2end / thread_stress_test . cc <nl> ppp b / test / cpp / end2end / thread_stress_test . cc <nl> <nl> # include < grpc / support / time . h > <nl> # include < gtest / gtest . h > <nl> <nl> + # include " src / core / surface / api_trace . h " <nl> # include " src / proto / grpc / testing / duplicate / echo_duplicate . grpc . pb . h " <nl> # include " src / proto / grpc / testing / echo . grpc . pb . h " <nl> # include " test / core / util / port . h " <nl> using grpc : : testing : : EchoRequest ; <nl> using grpc : : testing : : EchoResponse ; <nl> using std : : chrono : : system_clock ; <nl> <nl> + const int kNumThreads = 100 ; / / Number of threads <nl> + const int kNumRpcs = 1000 ; / / Number of RPCs per thread <nl> + <nl> namespace grpc { <nl> namespace testing { <nl> <nl> class TestServiceImpl : public : : grpc : : testing : : EchoTestService : : Service { <nl> MaybeEchoDeadline ( context , request , response ) ; <nl> if ( request - > has_param ( ) & & request - > param ( ) . client_cancel_after_us ( ) ) { <nl> { <nl> - std : : unique_lock < std : : mutex > lock ( mu_ ) ; <nl> + unique_lock < mutex > lock ( mu_ ) ; <nl> signal_client_ = true ; <nl> } <nl> while ( ! context - > IsCancelled ( ) ) { <nl> class TestServiceImpl : public : : grpc : : testing : : EchoTestService : : Service { <nl> } <nl> <nl> bool signal_client ( ) { <nl> - std : : unique_lock < std : : mutex > lock ( mu_ ) ; <nl> + unique_lock < mutex > lock ( mu_ ) ; <nl> return signal_client_ ; <nl> } <nl> <nl> private : <nl> bool signal_client_ ; <nl> - std : : mutex mu_ ; <nl> + mutex mu_ ; <nl> } ; <nl> <nl> class TestServiceImplDupPkg <nl> class TestServiceImplDupPkg <nl> } <nl> } ; <nl> <nl> - class End2endTest : public : : testing : : Test { <nl> - protected : <nl> - End2endTest ( ) : kMaxMessageSize_ ( 8192 ) { } <nl> - <nl> - void SetUp ( ) GRPC_OVERRIDE { <nl> + class CommonStressTest { <nl> + public : <nl> + CommonStressTest ( ) : kMaxMessageSize_ ( 8192 ) { } <nl> + void SetUp ( ) { <nl> int port = grpc_pick_unused_port_or_die ( ) ; <nl> server_address_ < < " localhost : " < < port ; <nl> / / Setup server <nl> class End2endTest : public : : testing : : Test { <nl> builder . RegisterService ( & dup_pkg_service_ ) ; <nl> server_ = builder . BuildAndStart ( ) ; <nl> } <nl> - <nl> - void TearDown ( ) GRPC_OVERRIDE { server_ - > Shutdown ( ) ; } <nl> - <nl> + void TearDown ( ) { server_ - > Shutdown ( ) ; } <nl> void ResetStub ( ) { <nl> std : : shared_ptr < Channel > channel = <nl> CreateChannel ( server_address_ . str ( ) , InsecureChannelCredentials ( ) ) ; <nl> stub_ = grpc : : testing : : EchoTestService : : NewStub ( channel ) ; <nl> } <nl> + grpc : : testing : : EchoTestService : : Stub * GetStub ( ) { return stub_ . get ( ) ; } <nl> <nl> + private : <nl> std : : unique_ptr < grpc : : testing : : EchoTestService : : Stub > stub_ ; <nl> std : : unique_ptr < Server > server_ ; <nl> std : : ostringstream server_address_ ; <nl> class End2endTest : public : : testing : : Test { <nl> TestServiceImplDupPkg dup_pkg_service_ ; <nl> } ; <nl> <nl> + class End2endTest : public : : testing : : Test { <nl> + protected : <nl> + End2endTest ( ) { } <nl> + void SetUp ( ) GRPC_OVERRIDE { common_ . SetUp ( ) ; } <nl> + void TearDown ( ) GRPC_OVERRIDE { common_ . TearDown ( ) ; } <nl> + void ResetStub ( ) { common_ . 
ResetStub ( ) ; } <nl> + <nl> + CommonStressTest common_ ; <nl> + } ; <nl> + <nl> static void SendRpc ( grpc : : testing : : EchoTestService : : Stub * stub , int num_rpcs ) { <nl> EchoRequest request ; <nl> EchoResponse response ; <nl> static void SendRpc ( grpc : : testing : : EchoTestService : : Stub * stub , int num_rpcs ) { <nl> } <nl> <nl> TEST_F ( End2endTest , ThreadStress ) { <nl> - ResetStub ( ) ; <nl> + common_ . ResetStub ( ) ; <nl> std : : vector < std : : thread * > threads ; <nl> - for ( int i = 0 ; i < 100 ; + + i ) { <nl> - threads . push_back ( new std : : thread ( SendRpc , stub_ . get ( ) , 1000 ) ) ; <nl> + for ( int i = 0 ; i < kNumThreads ; + + i ) { <nl> + threads . push_back ( new std : : thread ( SendRpc , common_ . GetStub ( ) , kNumRpcs ) ) ; <nl> } <nl> - for ( int i = 0 ; i < 100 ; + + i ) { <nl> + for ( int i = 0 ; i < kNumThreads ; + + i ) { <nl> threads [ i ] - > join ( ) ; <nl> delete threads [ i ] ; <nl> } <nl> } <nl> <nl> + class AsyncClientEnd2endTest : public : : testing : : Test { <nl> + protected : <nl> + AsyncClientEnd2endTest ( ) : rpcs_outstanding_ ( 0 ) { } <nl> + <nl> + void SetUp ( ) GRPC_OVERRIDE { common_ . SetUp ( ) ; } <nl> + void TearDown ( ) GRPC_OVERRIDE { <nl> + void * ignored_tag ; <nl> + bool ignored_ok ; <nl> + while ( cq_ . Next ( & ignored_tag , & ignored_ok ) ) <nl> + ; <nl> + common_ . TearDown ( ) ; <nl> + } <nl> + <nl> + void Wait ( ) { <nl> + unique_lock < mutex > l ( mu_ ) ; <nl> + while ( rpcs_outstanding_ ! = 0 ) { <nl> + cv_ . wait ( l ) ; <nl> + } <nl> + <nl> + cq_ . Shutdown ( ) ; <nl> + } <nl> + <nl> + struct AsyncClientCall { <nl> + EchoResponse response ; <nl> + ClientContext context ; <nl> + Status status ; <nl> + std : : unique_ptr < ClientAsyncResponseReader < EchoResponse > > response_reader ; <nl> + } ; <nl> + <nl> + void AsyncSendRpc ( int num_rpcs ) { <nl> + for ( int i = 0 ; i < num_rpcs ; + + i ) { <nl> + AsyncClientCall * call = new AsyncClientCall ; <nl> + EchoRequest request ; <nl> + request . set_message ( " Hello " ) ; <nl> + call - > response_reader = <nl> + common_ . GetStub ( ) - > AsyncEcho ( & call - > context , request , & cq_ ) ; <nl> + call - > response_reader - > Finish ( & call - > response , & call - > status , <nl> + ( void * ) call ) ; <nl> + <nl> + unique_lock < mutex > l ( mu_ ) ; <nl> + rpcs_outstanding_ + + ; <nl> + } <nl> + } <nl> + <nl> + void AsyncCompleteRpc ( ) { <nl> + while ( true ) { <nl> + void * got_tag ; <nl> + bool ok = false ; <nl> + if ( ! cq_ . Next ( & got_tag , & ok ) ) break ; <nl> + AsyncClientCall * call = static_cast < AsyncClientCall * > ( got_tag ) ; <nl> + GPR_ASSERT ( ok ) ; <nl> + delete call ; <nl> + <nl> + bool notify ; <nl> + { <nl> + unique_lock < mutex > l ( mu_ ) ; <nl> + rpcs_outstanding_ - - ; <nl> + notify = ( rpcs_outstanding_ = = 0 ) ; <nl> + } <nl> + if ( notify ) { <nl> + cv_ . notify_all ( ) ; <nl> + } <nl> + } <nl> + } <nl> + <nl> + CommonStressTest common_ ; <nl> + CompletionQueue cq_ ; <nl> + mutex mu_ ; <nl> + condition_variable cv_ ; <nl> + int rpcs_outstanding_ ; <nl> + } ; <nl> + <nl> + TEST_F ( AsyncClientEnd2endTest , ThreadStress ) { <nl> + common_ . ResetStub ( ) ; <nl> + std : : vector < std : : thread * > send_threads , completion_threads ; <nl> + for ( int i = 0 ; i < kNumThreads / 2 ; + + i ) { <nl> + completion_threads . push_back ( new std : : thread ( <nl> + & AsyncClientEnd2endTest_ThreadStress_Test : : AsyncCompleteRpc , this ) ) ; <nl> + } <nl> + for ( int i = 0 ; i < kNumThreads / 2 ; + + i ) { <nl> + send_threads . 
push_back ( <nl> + new std : : thread ( & AsyncClientEnd2endTest_ThreadStress_Test : : AsyncSendRpc , <nl> + this , kNumRpcs ) ) ; <nl> + } <nl> + for ( int i = 0 ; i < kNumThreads / 2 ; + + i ) { <nl> + send_threads [ i ] - > join ( ) ; <nl> + delete send_threads [ i ] ; <nl> + } <nl> + <nl> + Wait ( ) ; <nl> + for ( int i = 0 ; i < kNumThreads / 2 ; + + i ) { <nl> + completion_threads [ i ] - > join ( ) ; <nl> + delete completion_threads [ i ] ; <nl> + } <nl> + } <nl> + <nl> } / / namespace testing <nl> } / / namespace grpc <nl> <nl>
Merge pull request from vjpai / async_thread_stress_test
grpc/grpc
3ad28d0f1ce2274dad4e28827b5bc5456f9e1fc6
2016-02-04T19:04:22Z
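The grpc commit above extends the thread stress test with an asynchronous client: sender threads post RPCs onto a shared CompletionQueue, completion threads drain it, and a mutex-guarded rpcs_outstanding counter plus a condition variable let Wait() block until every tag has come back before the queue is shut down. Below is a minimal, runnable sketch of just that synchronization scheme; the Tag type and the std::queue stand in for gRPC's completion queue and tags, so those names are illustrative, not real gRPC API.

#include <condition_variable>
#include <cstdio>
#include <mutex>
#include <queue>
#include <thread>
#include <vector>

struct Tag { int id; };                 // stands in for the void* tag gRPC returns

std::mutex mu;
std::condition_variable cv;
std::queue<Tag> completions;            // stands in for grpc::CompletionQueue
int rpcs_outstanding = 0;
bool shutdown_requested = false;

void SendRpcs(int num_rpcs) {
  for (int i = 0; i < num_rpcs; ++i) {
    std::lock_guard<std::mutex> l(mu);
    ++rpcs_outstanding;                 // mirrors AsyncSendRpc in the diff
    completions.push(Tag{i});           // in the real test, Finish() posts the tag
    cv.notify_all();
  }
}

void CompleteRpcs() {
  while (true) {
    std::unique_lock<std::mutex> l(mu);
    cv.wait(l, [] { return !completions.empty() || shutdown_requested; });
    if (completions.empty()) return;    // queue drained after shutdown
    completions.pop();
    if (--rpcs_outstanding == 0) cv.notify_all();  // wake the waiter
  }
}

int main() {
  std::vector<std::thread> senders, completers;
  for (int i = 0; i < 2; ++i) completers.emplace_back(CompleteRpcs);
  for (int i = 0; i < 2; ++i) senders.emplace_back(SendRpcs, 1000);
  for (auto& t : senders) t.join();     // all tags are now posted
  {
    std::unique_lock<std::mutex> l(mu);
    cv.wait(l, [] { return rpcs_outstanding == 0; });
    shutdown_requested = true;          // mirrors cq_.Shutdown() in TearDown()
    cv.notify_all();
  }
  for (auto& t : completers) t.join();
  std::printf("all RPCs completed\n");
  return 0;
}

Joining the sender threads before waiting mirrors the test body: once no thread can post new tags, rpcs_outstanding == 0 is a stable condition under which shutdown can safely be requested.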
mmm a / ChangeLog <nl> ppp b / ChangeLog <nl> <nl> + 2009 - 07 - 04 Tatsuhiro Tsujikawa < t - tujikawa @ users . sourceforge . net > <nl> + <nl> + Removed calls to Request : : resetUrl ( ) in AbstractCommand . cc . <nl> + Removed hidden option PREF_RESET_URI . <nl> + * src / AbstractCommand . cc <nl> + * src / OptionHandlerFactory . cc <nl> + * src / prefs . cc <nl> + * src / prefs . h <nl> + <nl> 2009 - 07 - 03 Tatsuhiro Tsujikawa < t - tujikawa @ users . sourceforge . net > <nl> <nl> Removed unused variable / function . <nl> mmm a / src / AbstractCommand . cc <nl> ppp b / src / AbstractCommand . cc <nl> bool AbstractCommand : : execute ( ) { <nl> _requestGroup - > setLastUriResult ( req - > getUrl ( ) , err . getCode ( ) ) ; <nl> } <nl> onAbort ( ) ; <nl> - / / TODO Do we need this ? <nl> - / / req - > resetUrl ( ) ; <nl> tryReserved ( ) ; <nl> return true ; <nl> } catch ( DlRetryEx & err ) { <nl> bool AbstractCommand : : execute ( ) { <nl> bool isAbort = maxTries ! = 0 & & req - > getTryCount ( ) > = maxTries ; <nl> if ( isAbort ) { <nl> onAbort ( ) ; <nl> - req - > resetUrl ( ) ; <nl> - } else { <nl> - if ( getOption ( ) - > getAsBool ( PREF_RESET_URI ) ) { <nl> - req - > resetUrl ( ) ; <nl> - } <nl> - } <nl> - if ( isAbort ) { <nl> logger - > info ( MSG_MAX_TRY , cuid , req - > getTryCount ( ) ) ; <nl> logger - > error ( MSG_DOWNLOAD_ABORTED , err , cuid , req - > getUrl ( ) . c_str ( ) ) ; <nl> _fileEntry - > addURIResult ( req - > getUrl ( ) , err . getCode ( ) ) ; <nl> mmm a / src / OptionHandlerFactory . cc <nl> ppp b / src / OptionHandlerFactory . cc <nl> OptionHandlers OptionHandlerFactory : : createOptionHandlers ( ) <nl> op - > addTag ( TAG_HTTP ) ; <nl> handlers . push_back ( op ) ; <nl> } <nl> - { <nl> - SharedHandle < OptionHandler > op ( new BooleanOptionHandler <nl> - ( PREF_RESET_URI , <nl> - NO_DESCRIPTION , <nl> - V_FALSE , <nl> - OptionHandler : : OPT_ARG ) ) ; <nl> - op - > hide ( ) ; <nl> - handlers . push_back ( op ) ; <nl> - } <nl> { <nl> SharedHandle < OptionHandler > op ( new NumberOptionHandler <nl> ( PREF_RETRY_WAIT , <nl> mmm a / src / prefs . cc <nl> ppp b / src / prefs . cc <nl> const std : : string PREF_XML_RPC_LISTEN_PORT ( " xml - rpc - listen - port " ) ; <nl> / / value : true | false <nl> const std : : string PREF_ENABLE_XML_RPC ( " enable - xml - rpc " ) ; <nl> / / value : true | false <nl> - const std : : string PREF_RESET_URI ( " reset - uri " ) ; <nl> - / / value : true | false <nl> const std : : string PREF_DRY_RUN ( " dry - run " ) ; <nl> / / value : true | false <nl> const std : : string PREF_REUSE_URI ( " reuse - uri " ) ; <nl> mmm a / src / prefs . h <nl> ppp b / src / prefs . h <nl> extern const std : : string PREF_XML_RPC_LISTEN_PORT ; <nl> / / value : true | false <nl> extern const std : : string PREF_ENABLE_XML_RPC ; <nl> / / value : true | false <nl> - extern const std : : string PREF_RESET_URI ; <nl> - / / value : true | false <nl> extern const std : : string PREF_DRY_RUN ; <nl> / / value : true | false <nl> extern const std : : string PREF_REUSE_URI ; <nl>
2009 - 07 - 04 Tatsuhiro Tsujikawa < t - tujikawa @ users . sourceforge . net >
aria2/aria2
e45a0e7e5fa64069b69775d9ed5b11c0729d074e
2009-07-03T15:38:29Z
mmm a / ports / tinyxml2 / CONTROL <nl> ppp b / ports / tinyxml2 / CONTROL <nl> <nl> Source : tinyxml2 <nl> - Version : 3 . 0 . 0 <nl> + Version : 5 . 0 . 0 <nl> Description : A simple , small , efficient , C + + XML parser <nl> \ No newline at end of file <nl> mmm a / ports / tinyxml2 / portfile . cmake <nl> ppp b / ports / tinyxml2 / portfile . cmake <nl> <nl> - # Common Ambient Variables : <nl> - # CURRENT_BUILDTREES_DIR = $ { VCPKG_ROOT_DIR } \ buildtrees \ $ { PORT } <nl> - # CURRENT_PACKAGES_DIR = $ { VCPKG_ROOT_DIR } \ packages \ $ { PORT } _ $ { TARGET_TRIPLET } <nl> - # CURRENT_PORT DIR = $ { VCPKG_ROOT_DIR } \ ports \ $ { PORT } <nl> - # PORT = current port name ( zlib , etc ) <nl> - # TARGET_TRIPLET = current triplet ( x86 - windows , x64 - windows - static , etc ) <nl> - # VCPKG_CRT_LINKAGE = C runtime linkage type ( static , dynamic ) <nl> - # VCPKG_LIBRARY_LINKAGE = target library linkage type ( static , dynamic ) <nl> - # VCPKG_ROOT_DIR = < C : \ path \ to \ current \ vcpkg > <nl> - # VCPKG_TARGET_ARCHITECTURE = target architecture ( x64 , x86 , arm ) <nl> - # <nl> - <nl> include ( vcpkg_common_functions ) <nl> <nl> vcpkg_from_github ( <nl> vcpkg_from_github ( <nl> <nl> vcpkg_configure_cmake ( <nl> SOURCE_PATH $ { SOURCE_PATH } <nl> - # OPTIONS - DUSE_THIS_IN_ALL_BUILDS = 1 <nl> - # OPTIONS_RELEASE - DOPTIMIZE = 1 <nl> - # OPTIONS_DEBUG - DDEBUGGABLE = 1 <nl> ) <nl> <nl> vcpkg_install_cmake ( ) <nl> <nl> vcpkg_fixup_cmake_targets ( CONFIG_PATH " lib / cmake / tinyxml2 " ) <nl> <nl> - # changes target search path <nl> - file ( READ $ { CURRENT_PACKAGES_DIR } / share / tinyxml2 / tinyxml2Targets . cmake TINYXML2_TARGETS ) <nl> - string ( REPLACE " get_filename_component ( _IMPORT_PREFIX \ " \ $ { CMAKE_CURRENT_LIST_FILE } \ " PATH ) \ nget_filename_component ( _IMPORT_PREFIX \ " \ $ { _IMPORT_PREFIX } \ " PATH ) " <nl> - " get_filename_component ( _IMPORT_PREFIX \ " \ $ { CMAKE_CURRENT_LIST_FILE } \ " PATH ) " TINYXML2_TARGETS $ { TINYXML2_TARGETS } ) <nl> - file ( WRITE $ { CURRENT_PACKAGES_DIR } / share / tinyxml2 / tinyxml2Targets . cmake " $ { TINYXML2_TARGETS } " ) <nl> - <nl> vcpkg_copy_pdbs ( ) <nl> <nl> file ( REMOVE_RECURSE $ { CURRENT_PACKAGES_DIR } / debug / include ) <nl>
update control file and cleanup portfile
microsoft/vcpkg
d387490dc8e349741433ccff0d735cb31d088b78
2017-06-20T22:41:50Z
mmm a / docs / docs / compile . xml <nl> ppp b / docs / docs / compile . xml <nl> it by typing : <nl> sudo apt - get install libx11 - dev <nl> < / code_box > <nl> <nl> - < h2 > Compiling on Windows using gcc < / h2 > <nl> + < h2 > Compiling on Windows Using GCC < / h2 > <nl> < p > <nl> The commands for gcc on windows are the same as above but you may also have to link <nl> ( via the - l option ) to the following libraries : gdi32 , comctl32 , user32 , ws2_32 , or imm32 . <nl> sudo apt - get install libx11 - dev <nl> windows development than gcc . <nl> < / p > <nl> <nl> - < h2 > Compiling on Windows using Visual Studio < / h2 > <nl> + < h2 > Compiling on Windows Using Visual Studio < / h2 > <nl> < p > <nl> All you need to do is create an empty console project . Then add dlib / all / source . cpp to it and add the <nl> folder containing the dlib folder to the # include search path . Then you can compile any example program <nl> mmm a / docs / docs / release_notes . xml <nl> ppp b / docs / docs / release_notes . xml <nl> <nl> < current > <nl> New Stuff : <nl> - Image Processing : <nl> - - Added the option to make the features generated by the poly_image <nl> - rotationally invariant . <nl> + - Added the option to make the features generated by poly_image rotationally <nl> + invariant . <nl> - Added a set of routines for warping , scaling , and resizing images . <nl> See the new " Scaling and Rotating " section of the image processing <nl> documentation for details . <nl> New Stuff : <nl> - Added the get_option ( ) routines which slightly simplify option parsing <nl> from the command line and config files . <nl> - Added the 128bit version of Murmur hash . <nl> - - Added the rls_filter and kalman_filter objects . These are tools for <nl> + - Added the kalman_filter and rls_filter objects . These are tools for <nl> performing Kalman filtering and recursive least squares filtering . <nl> - Added the circular_buffer object . <nl> <nl>
updated docs
davisking/dlib
28215b40ded18d7feb18b35c20fb00a5ffd81210
2012-03-23T00:15:31Z
mmm a / cocos2dx / include / CCNode . h <nl> ppp b / cocos2dx / include / CCNode . h <nl> Order in transformations with grid enabled <nl> - Each node has a camera . By default it points to the center of the CCNode . <nl> * / <nl> <nl> - class CCX_DLL CCNode : public NSObject , public SelectorProtocol <nl> + class CCX_DLL CCNode : public SelectorProtocol , public NSObject <nl> { <nl> <nl> / / variable property <nl>
issue
cocos2d/cocos2d-x
32c2e83b3f6a035594f188bd07beb6e12b660e1a
2010-08-28T02:56:18Z
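The cocos2d-x change above is a single-line swap of CCNode's base-class order. In C++ the order in which bases are listed fixes their construction and destruction order as well as the subobject layout, so such a reorder is observable even though no function body changed; the terse commit message does not say which effect motivated it. A small stand-alone illustration, with stand-in types in place of the real NSObject and SelectorProtocol:

#include <cstdio>

// Stand-ins for the cocos2d-x types; only the base-class order is the point here.
struct SelectorProtocol { SelectorProtocol() { std::puts("SelectorProtocol constructed"); } };
struct NSObject         { NSObject()         { std::puts("NSObject constructed"); } };

// Mirrors the patched declaration: bases are constructed in declaration order,
// so SelectorProtocol is now initialized before NSObject (and destroyed after it).
struct CCNode : public SelectorProtocol, public NSObject {};

int main() {
  CCNode node;  // prints "SelectorProtocol constructed" then "NSObject constructed"
  return 0;
}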
mmm a / tensorflow / python / keras / benchmark / BUILD <nl> ppp b / tensorflow / python / keras / benchmark / BUILD <nl> cuda_py_test ( <nl> ] , <nl> ) <nl> <nl> + cuda_py_test ( <nl> + name = " applications_saved_model_test " , <nl> + size = " medium " , <nl> + srcs = [ " applications_saved_model_test . py " ] , <nl> + shard_count = 8 , <nl> + deps = [ <nl> + " / / tensorflow / python : client_testlib " , <nl> + " / / tensorflow / python / keras / applications " , <nl> + " @ absl_py / / absl / testing : parameterized " , <nl> + ] , <nl> + ) <nl> + <nl> + cuda_py_test ( <nl> + name = " model_components_benchmarks_test " , <nl> + srcs = [ " model_components_benchmarks_test . py " ] , <nl> + python_version = " PY3 " , <nl> + deps = [ <nl> + " / / tensorflow / python : random_ops " , <nl> + " / / tensorflow / python : training_lib " , <nl> + " / / tensorflow / python / data / ops : dataset_ops " , <nl> + " / / tensorflow / python / eager : backprop " , <nl> + " / / tensorflow / python / eager : context " , <nl> + " / / tensorflow / python / eager : profiler " , <nl> + " / / tensorflow / python / eager : test " , <nl> + ] , <nl> + ) <nl> + <nl> py_test ( <nl> name = " keras_examples_benchmark_test " , <nl> size = " medium " , <nl> srcs = [ " keras_examples_benchmark_test . py " ] , <nl> python_version = " PY3 " , <nl> deps = [ <nl> + " : benchmark_util " , <nl> " / / tensorflow / python / keras " , <nl> " / / third_party / py / numpy " , <nl> - " : benchmark_util " , <nl> ] , <nl> ) <nl> <nl> py_library ( <nl> - name = " benchmark_util " , <nl> - srcs = [ " benchmark_util . py " ] , <nl> - deps = [ " : distribution_util " ] <nl> + name = " benchmark_util " , <nl> + srcs = [ " benchmark_util . py " ] , <nl> + deps = [ " : distribution_util " ] , <nl> ) <nl> <nl> py_library ( <nl> - name = " distribution_util " , <nl> - srcs = [ " distribution_util . py " ] , <nl> - deps = [ <nl> + name = " distribution_util " , <nl> + srcs = [ " distribution_util . py " ] , <nl> + deps = [ <nl> " / / tensorflow / python / eager : remote " , <nl> - ] , <nl> - ) <nl> \ No newline at end of file <nl> + ] , <nl> + ) <nl> mmm a / tensorflow / python / keras / benchmark / benchmark_util . py <nl> ppp b / tensorflow / python / keras / benchmark / benchmark_util . py <nl> def on_batch_end ( self , e , logs ) : <nl> <nl> <nl> def measure_performance ( model_fn , <nl> - x = None , <nl> - y = None , <nl> - epoch = 2 , <nl> - batch_size = 32 , <nl> - run_iters = 4 , <nl> - optimizer = None , <nl> - loss = None , <nl> - metrics = None , <nl> - verbose = 0 , <nl> - num_gpus = 0 , <nl> - distribution_strategy = ' off ' ) : <nl> + x = None , <nl> + y = None , <nl> + epoch = 2 , <nl> + batch_size = 32 , <nl> + run_iters = 4 , <nl> + optimizer = None , <nl> + loss = None , <nl> + metrics = None , <nl> + verbose = 0 , <nl> + num_gpus = 0 , <nl> + distribution_strategy = ' off ' ) : <nl> " " " Run models and measure the performance . <nl> <nl> Arguments : <nl>
Fix the error checks from github and formatting code
tensorflow/tensorflow
762015a7da37bd4023388b540bf504fee6f94cb1
2020-06-27T01:44:25Z
mmm a / lib / Syntax / Status . md <nl> ppp b / lib / Syntax / Status . md <nl> <nl> - # Swift Syntax Library Implementation Status <nl> - <nl> - # # Represented Grammar Productions <nl> - <nl> - Include the following in each entry : <nl> - <nl> - - Grammar production ( s ) <nl> - - See [ Summary of the Swift Grammar ] ( https : / / developer . apple . com / library / content / documentation / Swift / Conceptual / Swift_Programming_Language / zzSummaryOfTheGrammar . html ) <nl> - - C + + types representing the productions <nl> - - Testing status <nl> - - With APIs <nl> - - Make APIs <nl> - - Builder APIs ( if applicable ) <nl> - - SR links <nl> - - Related lib / AST changes <nl> - <nl> - # # # Declarations <nl> - <nl> - - declaration - modifiers <nl> - - ` DeclModifierListSyntax ` <nl> - <nl> - - declaration - modifier <nl> - - ` DeclModifierSyntax ` <nl> - <nl> - - struct - declaration <nl> - - ` StructDeclSyntax ` <nl> - <nl> - - typealias - assignment <nl> - typealias - declaration <nl> - - ` TypeAliasDeclSyntax ` <nl> - <nl> - - class - body <nl> - class - members <nl> - <nl> - - ` DeclMembersSyntax ` <nl> - - ` ClassDeclSyntax ` used for the ` { ` ` } ` braces . <nl> - <nl> - - extension - body <nl> - - ` DeclMembersSyntax ` <nl> - <nl> - - protocol - body <nl> - - ` DeclMembersSyntax ` <nl> - <nl> - - struct - body <nl> - struct - members <nl> - - ` DeclMembersSyntax ` <nl> - - ` StructDeclSyntax ` used for the ` { ` ` } ` braces . <nl> - <nl> - - function - declaration <nl> - - ` FunctionDeclSyntax ` <nl> - <nl> - - function - body <nl> - - ` CodeBlockSyntax ` <nl> - <nl> - - function - result <nl> - - ` TypeSyntax ` <nl> - <nl> - - function - signature <nl> - - ` FunctionSignatureSyntax ` <nl> - <nl> - - parameter - clause <nl> - - ` FunctionParameterClauseSyntax ` <nl> - <nl> - - parameter - list <nl> - - ` FunctionParameterListSyntax ` <nl> - <nl> - - parameter <nl> - - ` FunctionParameterSyntax ` <nl> - <nl> - # # # Statements <nl> - <nl> - - statement <nl> - - ` StmtSyntax ` ( Abstract base class ) <nl> - <nl> - - statements <nl> - - ` StmtListSyntax ` <nl> - <nl> - - code - block <nl> - - ` CodeBlockSyntax ` <nl> - <nl> - - fallthrough - statement <nl> - - ` FallthroughStmtSyntax ` <nl> - <nl> - - break - statement <nl> - - ` BreakStmtSyntax ` <nl> - <nl> - - continue - statement <nl> - - ` ContinueStmtSyntax ` <nl> - <nl> - - return - statement <nl> - - ` ReturnStmtSyntax ` <nl> - <nl> - # # # Expressions <nl> - <nl> - - binary - literal <nl> - - decimal - literal <nl> - - hexadecimal - literal <nl> - - integer - literal <nl> - - octal - literal <nl> - - ` IntegerLiteralExprSyntax ` <nl> - <nl> - - function - call - argument <nl> - - ` FunctionCallArgumentSyntax ` <nl> - <nl> - - function - call - argument - list <nl> - - ` FunctionCallArgumentListSyntax ` <nl> - <nl> - - function - call - expression <nl> - - function - call - argument - clause <nl> - - ` FunctionCallExprSyntax ` <nl> - <nl> - # # # Types <nl> - <nl> - - type <nl> - - type - annotation <nl> - - ` TypeSyntax ` ( Abstract base class ) <nl> - <nl> - - protocol - identifier <nl> - - type - identifier <nl> - - ` TypeIdentifierSyntax ` <nl> - <nl> - - optional - type <nl> - - ` OptionalTypeSyntax ` <nl> - <nl> - - implicitly - unwrapped - optional - type <nl> - - ` ImplicitlyUnwrappedOptionalTypeSyntax ` <nl> - <nl> - - array - type <nl> - - ` ArrayTypeSyntax ` <nl> - <nl> - - dictionary - type <nl> - - ` DictionaryTypeSyntax ` <nl> - <nl> - - function - type <nl> - - function - type - argument <nl> - - function - 
type - argument - clause <nl> - - ` FunctionTypeSyntax ` <nl> - <nl> - - function - type - argument - list <nl> - - ` TypeArgumentListSyntax ` <nl> - <nl> - - metatype - type <nl> - - ` MetatypeTypeSyntax ` <nl> - <nl> - - tuple - type <nl> - - ` TupleTypeSyntax ` <nl> - <nl> - - tuple - type - element <nl> - - ` TupleTypeElementSyntax ` <nl> - <nl> - - tuple - type - element - list <nl> - - ` TupleTypeElementListSyntax ` <nl> - <nl> - # # # Type Attributes <nl> - <nl> - - attribute <nl> - - attribute - argument - clause <nl> - - ` TypeAttributeSyntax ` <nl> - <nl> - - attributes <nl> - - ` TypeAttributesSyntax ` <nl> - <nl> - - balanced - token <nl> - - ` BalancedTokenSyntax ` <nl> - <nl> - - balanced - tokens <nl> - - ` BalancedTokensSyntax ` <nl> - <nl> - # # # Generics <nl> - <nl> - - generic - argument - clause <nl> - - ` GenericArgumentClauseSyntax ` <nl> - <nl> - - generic - argument - list <nl> - - ` GenericArgumentListSyntax ` <nl> - <nl> - - generic - parameter - clause <nl> - - ` GenericParameterClauseSyntax ` <nl> - <nl> - - generic - parameter <nl> - - ` GenericParameterSyntax ` <nl> - <nl> - - generic - parameter - list <nl> - - ` GenericParameterListSyntax ` <nl> - <nl> - - conformance - requirement <nl> - - ` ConformanceRequirementSyntax ` <nl> - <nl> - - same - type - requirement <nl> - - ` SameTypeRequirementSyntax ` <nl> - <nl> - - generic - where - clause <nl> - - ` GenericWhereClauseSyntax ` <nl> - <nl> - - requirement - list <nl> - - ` GenericRequirementListSyntax ` <nl> - <nl> - # # # Identifiers and Terminal Tokens <nl> - <nl> - - access - level - modifier <nl> - - argument - label <nl> - - attribute - name <nl> - - boolean - literal <nl> - - class - name <nl> - - closure - parameter - name <nl> - - element - name <nl> - - enum - case - name <nl> - - enum - name <nl> - - external - parameter - name <nl> - - function - name <nl> - - identifier - pattern <nl> - - import - kind <nl> - - import - path - identifier <nl> - - label - name <nl> - - local - parameter - name <nl> - - mutation - modifier <nl> - - platform - name <nl> - - precedence - group - name <nl> - - protocol - name <nl> - - sign <nl> - - struct - name <nl> - - type - name <nl> - - typealias - name <nl> - - variable - name <nl> - - identifier <nl> - - ` TokenSyntax ` <nl> - <nl> - # # Unrepresented Grammar Productions <nl> - <nl> - These are categorized somewhat by difficulty and priority . 
<nl> - <nl> - # # # Easy <nl> - <nl> - - array - literal <nl> - - array - literal - items <nl> - - as - pattern <nl> - - case - condition <nl> - - case - label <nl> - - dynamic - type - expression <nl> - - floating - point - literal <nl> - - forced - value - expression <nl> - - identifier - list <nl> - - implicit - member - expression <nl> - - import - path <nl> - - in - out - expression <nl> - - interpolated - text <nl> - - interpolated - text - item <nl> - - is - pattern <nl> - - key - path - expression <nl> - - line - control - statement <nl> - - optional - chaining - expression <nl> - - optional - pattern <nl> - - parenthesized - expression <nl> - - platform - condition <nl> - - platform - version <nl> - - postfix - operator - declaration <nl> - - precedence - group - assignment <nl> - - precedence - group - associativity <nl> - - precedence - group - names <nl> - - statement - label <nl> - - static - string - literal <nl> - - swift - version <nl> - - throw - statement <nl> - - value - binding - pattern <nl> - - where - clause <nl> - - dictionary - literal <nl> - - dictionary - literal - items <nl> - - dictionary - literal - item <nl> - - capture - list <nl> - - capture - list - items <nl> - - capture - list - item <nl> - - defer - statement <nl> - <nl> - # # # Medium <nl> - <nl> - - else - directive - clause <nl> - - elseif - directive - clauses <nl> - - elseif - directive - clause <nl> - - precedence - group - declaration <nl> - - precedence - group - relation <nl> - - expression - list <nl> - - availability - condition <nl> - - availability - arguments <nl> - - availability - argument <nl> - - switch - cases <nl> - - switch - case <nl> - - constant - declaration <nl> - - catch - clauses <nl> - - catch - clause <nl> - - variable - declaration <nl> - - do - statement <nl> - - for - in - statement <nl> - - guard - statement <nl> - - case - item - list <nl> - - import - declaration <nl> - - if - directive - clause <nl> - - if - statement <nl> - - else - clause <nl> - - protocol - associated - type - declaration <nl> - - repeat - while - statement <nl> - - while - statement <nl> - - tuple - expression <nl> - - tuple - element - list <nl> - - tuple - element <nl> - - tuple - pattern <nl> - - tuple - pattern - element - list <nl> - - tuple - pattern - element <nl> - - switch - statement <nl> - - explicit - member - expression <nl> - - optional - binding - condition <nl> - - operator - declaration <nl> - - selector - expression <nl> - - protocol - composition - type <nl> - - conditional - operator <nl> - - deinitializer - declaration <nl> - - didSet - clause <nl> - - willSet - clause <nl> - - pattern - initializer - list <nl> - - pattern - initializer <nl> - - prefix - expression <nl> - - prefix - operator - declaration <nl> - - infix - operator - declaration <nl> - - infix - operator - group <nl> - - binary - expression <nl> - <nl> - # # # Hard <nl> - <nl> - - protocol - declaration <nl> - - closure - expression <nl> - - closure - signature <nl> - - closure - parameter - clause <nl> - - closure - parameter - list <nl> - - closure - parameter <nl> - - extension - declaration <nl> - - enum - declaration <nl> - - class - declaration <nl> - - getter - setter - block <nl> - - getter - setter - keyword - block <nl> - - getter - keyword - clause <nl> - - getter - clause <nl> - - setter - keyword - clause <nl> - - setter - clause <nl> - - setter - name <nl> - - subscript - declaration <nl> - - enum - case - pattern <nl> - - initializer - declaration <nl> - - initializer - head <nl> - - 
interpolated - string - literal <nl> - - conditional - compilation - block <nl> - <nl> - # # Trivial and Intermediate Grammar Productions <nl> - <nl> - - binary - expressions <nl> - - binary - operator <nl> - - compilation - condition <nl> - - capture - specifier <nl> - - precedence - group - attributes <nl> - - precedence - group - attribute <nl> - - prefix - operator <nl> - - type - casting - operator <nl> - - willSet - didSet - block <nl> - - architecture <nl> - - string - literal <nl> - - argument - names <nl> - - array - literal - item <nl> - - type - casting - pattern <nl> - - assignment - operator <nl> - - expression - pattern <nl> - - binary - digit <nl> - - binary - literal - character <nl> - - binary - literal - characters <nl> - - branch - statement <nl> - - class - member <nl> - - class - requirement <nl> - - condition <nl> - - condition - list <nl> - - compiler - control - statement <nl> - - control - transfer - statement <nl> - - decimal - digit <nl> - - decimal - digits <nl> - - decimal - exponent <nl> - - decimal - fraction <nl> - - decimal - literal - character <nl> - - decimal - literal - characters <nl> - - default - argument - clause <nl> - - default - label <nl> - - dot - operator - character <nl> - - dot - operator - characters <nl> - - dot - operator - head <nl> - - else - directive <nl> - - elseif - directive <nl> - - endif - directive <nl> - - escaped - character <nl> - - expression <nl> - - extension - member <nl> - - file - name <nl> - - floating - point - e <nl> - - floating - point - p <nl> - - function - head <nl> - - hexadecimal - digit <nl> - - hexadecimal - exponent <nl> - - hexadecimal - fraction <nl> - - identifier - character <nl> - - identifier - characters <nl> - - identifier - head <nl> - - if - directive <nl> - - where - expression <nl> - - implicit - parameter - name <nl> - - initializer <nl> - - initializer - body <nl> - - initializer - expression <nl> - - labeled - statement ( TODO : Put in loop - , if - , switch - , do - statement layout ) <nl> - - line - number <nl> - - literal <nl> - - literal - expression <nl> - - loop - statement <nl> - - nil - literal <nl> - - numeric - literal <nl> - - octal - digit <nl> - - octal - literal - character <nl> - - octal - literal - characters <nl> - - operating - system <nl> - - operator <nl> - - operator - character <nl> - - operator - characters <nl> - - operator - head <nl> - - pattern <nl> - - postfix - expression <nl> - - postfix - operator <nl> - - postfix - self - expression <nl> - - protocol - composition - continuation <nl> - - protocol - initializer - declaration <nl> - - protocol - member <nl> - - protocol - member - declaration <nl> - - protocol - members <nl> - - extension - members <nl> - - protocol - method - declaration <nl> - - protocol - property - declaration <nl> - - protocol - subscript - declaration <nl> - - quoted - text <nl> - - quoted - text - item <nl> - - raw - value - assignment <nl> - - raw - value - literal <nl> - - raw - value - style - enum <nl> - - raw - value - style - enum - case <nl> - - raw - value - style - enum - case - clause <nl> - - raw - value - style - enum - case - list <nl> - - playground - literal <nl> - - raw - value - style - enum - member <nl> - - raw - value - style - enum - members <nl> - - requirement <nl> - - self - expression <nl> - - self - initializer - expression <nl> - - self - method - expression <nl> - - self - subscript - expression <nl> - - struct - member <nl> - - subscript - expression <nl> - - subscript - head <nl> - - subscript - result <nl> - 
- superclass - expression <nl> - - superclass - initializer - expression <nl> - - superclass - method - expression <nl> - - superclass - subscript - expression <nl> - - top - level - declaration <nl> - - try - operator <nl> - - type - inheritance - clause <nl> - - type - inheritance - list <nl> - - unicode - scalar - digits <nl> - - union - style - enum <nl> - - union - style - enum - case <nl> - - union - style - enum - case - clause <nl> - - union - style - enum - case - list <nl> - - union - style - enum - member <nl> - - union - style - enum - members <nl> - - variable - declaration - head <nl> - - wildcard - expression <nl> - - wildcard - pattern <nl> - - primary - expression <nl> - - generic - argument <nl> + # libSyntax nodes status <nl> + <nl> + # # Expression <nl> + <nl> + # # # Specialized : <nl> + * NilLiteralExpr <nl> + * IntegerLiteralExpr <nl> + * FloatLiteralExpr <nl> + * BooleanLiteralExpr <nl> + * StringLiteralExpr <nl> + * DiscardAssignmentExpr <nl> + * DeclRefExpr <nl> + * IfExpr <nl> + * AssignExpr <nl> + * TypeExpr <nl> + * UnresolvedMemberExpr <nl> + * SequenceExpr <nl> + * TupleElementExpr <nl> + * TupleExpr <nl> + * ArrayExpr <nl> + * DictionaryExpr <nl> + * PrefixUnaryExpr <nl> + * TryExpr <nl> + * ForceTryExpr <nl> + * OptionalTryExpr <nl> + * ClosureExpr <nl> + <nl> + # # # In - progress ( UnknownExpr ) : <nl> + * InterpolatedStringLiteralExpr <nl> + * ObjectLiteralExpr <nl> + * MagicIdentifierLiteralExpr <nl> + * CallExpr <nl> + * UnresolvedDotExpr <nl> + * InOutExpr <nl> + * KeyPathExpr <nl> + * KeyPathDotExpr <nl> + * EditorPlaceholderExpr <nl> + <nl> + # # # Not - specialized ( UnknownExpr ) : <nl> + * SuperRefExpr <nl> + * UnresolvedSpecializeExpr <nl> + * DotSelfExpr <nl> + * SubscriptExpr <nl> + * KeyPathApplicationExpr <nl> + * CaptureListExpr <nl> + * AutoClosureExpr <nl> + * DynamicTypeExpr <nl> + * BindOptionalExpr <nl> + * OptionalEvaluationExpr <nl> + * ForceValueExpr <nl> + * PostfixUnaryExpr <nl> + * ForcedCheckedCastExpr <nl> + * ConditionalCheckedCastExpr <nl> + * IsExpr <nl> + * CoerceExpr <nl> + * ArrowExpr <nl> + * UnresolvedPatternExpr <nl> + * ObjCSelectorExpr <nl> + <nl> + # # Declaration <nl> + <nl> + # # # Specialized : <nl> + * TopLevelCodeDecl <nl> + * StructDecl <nl> + * FuncDecl <nl> + * ProtocolDecl <nl> + * ImportDecl <nl> + <nl> + # # # In - progress ( UnknownDecl ) : <nl> + * TypeAliasDecl <nl> + * PatternBindingDecl <nl> + * VarDecl <nl> + * IfConfigDecl <nl> + * ClassDecl ( SR - 6571 ) <nl> + * ExtensionDecl ( SR - 6572 ) <nl> + <nl> + # # # Not - specialized ( UnknownDecl ) : <nl> + * EnumCaseDecl <nl> + * PrecedenceGroupDecl <nl> + * InfixOperatorDecl <nl> + * PrefixOperatorDecl <nl> + * PostfixOperatorDecl <nl> + * AssociatedTypeDecl <nl> + * EnumDecl <nl> + * SubscriptDecl <nl> + * ConstructorDecl <nl> + * DestructorDecl <nl> + * EnumElementDecl <nl> + <nl> + # # Statement <nl> + # # # Specialized : <nl> + * BraceStmt <nl> + * ReturnStmt <nl> + <nl> + # # # Not - specialized ( UnknownStmt ) : <nl> + * DeferStmt <nl> + * IfStmt <nl> + * GuardStmt <nl> + * WhileStmt <nl> + * DoStmt <nl> + * DoCatchStmt <nl> + * RepeatWhileStmt <nl> + * ForEachStmt <nl> + * SwitchStmt <nl> + * CaseStmt <nl> + * CatchStmt <nl> + * BreakStmt <nl> + * ContinueStmt <nl> + * FallthroughStmt <nl> + * FailStmt <nl> + * ThrowStmt <nl> + <nl> + # # Pattern <nl> + # # # Not - specialized : <nl> + * ParenPattern <nl> + * TuplePattern <nl> + * NamedPattern <nl> + * AnyPattern <nl> + * TypedPattern <nl> + * VarPattern <nl> + <nl> + <nl> + # # TypeRepr <nl> 
+ * To - be filled <nl>
[ Doc ] libSyntax : add a document describing the status of libSyntax node specialization . NFC
apple/swift
50eb2e3531880fa499d18a55df51c5705085b197
2017-12-14T21:51:27Z
mmm a / Telegram / SourceFiles / data / data_document . cpp <nl> ppp b / Telegram / SourceFiles / data / data_document . cpp <nl> bool DocumentData : : useStreamingLoader ( ) const { <nl> <nl> bool DocumentData : : canBeStreamed ( ) const { <nl> / / For now video messages are not streamed . <nl> - return hasRemoteLocation ( ) & & supportsStreaming ( ) & & ! isVideoMessage ( ) ; <nl> + return hasRemoteLocation ( ) & & supportsStreaming ( ) ; <nl> } <nl> <nl> bool DocumentData : : canBePlayed ( ) const { <nl> mmm a / Telegram / SourceFiles / history / view / media / history_view_gif . cpp <nl> ppp b / Telegram / SourceFiles / history / view / media / history_view_gif . cpp <nl> void Gif : : draw ( Painter & p , const QRect & r , TextSelection selection , crl : : time ms <nl> / / auto loaded = _data - > loaded ( ) ; <nl> auto displayLoading = ( item - > id < 0 ) | | _data - > displayLoading ( ) ; <nl> auto selected = ( selection = = FullSelection ) ; <nl> - const auto startPlayAsync = autoplayEnabled ( ) <nl> + const auto canBePlayed = _data - > canBePlayed ( ) ; <nl> + const auto activeRoundPlaying = activeRoundStreamed ( ) ; <nl> + const auto autoplay = autoplayEnabled ( ) & & canBePlayed ; <nl> + const auto streamingMode = _streamed | | activeRoundPlaying | | autoplay ; <nl> + const auto startPlayAsync = autoplay <nl> & & ! _streamed <nl> - & & _data - > canBePlayed ( ) <nl> - & & ! activeRoundStreamed ( ) ; <nl> + & & ! activeRoundPlaying ; <nl> if ( startPlayAsync ) { <nl> _parent - > delegate ( ) - > elementAnimationAutoplayAsync ( _parent ) ; <nl> + } else if ( _streamed & & ! _streamed - > active ( ) & & ! _streamed - > failed ( ) ) { <nl> + startStreamedPlayer ( ) ; <nl> } <nl> <nl> auto paintx = 0 , painty = 0 , paintw = width ( ) , painth = height ( ) ; <nl> void Gif : : draw ( Painter & p , const QRect & r , TextSelection selection , crl : : time ms <nl> auto displayMute = false ; <nl> const auto streamed = activeCurrentStreamed ( ) ; <nl> <nl> - if ( ( ! streamed | | item - > id < 0 ) & & displayLoading ) { <nl> + if ( ( ! streamed | | item - > isSending ( ) ) & & displayLoading ) { <nl> ensureAnimation ( ) ; <nl> if ( ! _animation - > radial . animating ( ) ) { <nl> _animation - > radial . start ( dataProgress ( ) ) ; <nl> void Gif : : draw ( Painter & p , const QRect & r , TextSelection selection , crl : : time ms <nl> } <nl> <nl> if ( radial <nl> - | | ( ! startPlayAsync <nl> - & & ( ! _streamed <nl> - | | _streamed - > waitingShown ( ) <nl> - | | _streamed - > player ( ) . failed ( ) ) <nl> + | | ( ! streamingMode <nl> & & ( ( ! _data - > loaded ( ) & & ! _data - > loading ( ) ) <nl> | | ! autoplayEnabled ( ) ) ) ) { <nl> - auto radialOpacity = ( radial & & _data - > loaded ( ) & & item - > id > 0 ) <nl> - ? _animation - > radial . opacity ( ) <nl> - : streamed <nl> + const auto radialOpacity = streamed <nl> ? streamed - > waitingOpacity ( ) <nl> + : ( radial & & _data - > loaded ( ) & & ! item - > isSending ( ) ) <nl> + ? _animation - > radial . opacity ( ) <nl> : 1 . ; <nl> auto inner = QRect ( rthumb . x ( ) + ( rthumb . width ( ) - st : : msgFileSize ) / 2 , rthumb . y ( ) + ( rthumb . height ( ) - st : : msgFileSize ) / 2 , st : : msgFileSize , st : : msgFileSize ) ; <nl> p . setPen ( Qt : : NoPen ) ; <nl> void Gif : : draw ( Painter & p , const QRect & r , TextSelection selection , crl : : time ms <nl> } <nl> <nl> p . setOpacity ( radialOpacity ) ; <nl> - auto icon = [ & ] ( ) - > const style : : icon * { <nl> - if ( _data - > loaded ( ) & & ! 
radial ) { <nl> + const auto icon = [ & ] ( ) - > const style : : icon * { <nl> + if ( streamingMode ) { <nl> + return nullptr ; <nl> + } else if ( ( _data - > loaded ( ) | | canBePlayed ) & & ! radial ) { <nl> return & ( selected ? st : : historyFileThumbPlaySelected : st : : historyFileThumbPlay ) ; <nl> } else if ( radial | | _data - > loading ( ) ) { <nl> if ( item - > id > 0 | | _data - > uploading ( ) ) { <nl> void Gif : : draw ( Painter & p , const QRect & r , TextSelection selection , crl : : time ms <nl> if ( icon ) { <nl> icon - > paintInCenter ( p , inner ) ; <nl> } <nl> + p . setOpacity ( 1 ) ; <nl> if ( radial ) { <nl> - p . setOpacity ( 1 ) ; <nl> QRect rinner ( inner . marginsRemoved ( QMargins ( st : : msgFileRadialLine , st : : msgFileRadialLine , st : : msgFileRadialLine , st : : msgFileRadialLine ) ) ) ; <nl> const auto fg = selected <nl> ? st : : historyFileThumbRadialFgSelected <nl> void Gif : : draw ( Painter & p , const QRect & r , TextSelection selection , crl : : time ms <nl> } <nl> } <nl> <nl> - if ( ! isRound & & ( ! streamed | | item - > id < 0 ) ) { <nl> + if ( ! isRound & & ( ! streamingMode | | item - > isSending ( ) ) ) { <nl> auto statusX = paintx + st : : msgDateImgDelta + st : : msgDateImgPadding . x ( ) ; <nl> auto statusY = painty + st : : msgDateImgDelta + st : : msgDateImgPadding . y ( ) ; <nl> auto statusW = st : : normalFont - > width ( _statusText ) + 2 * st : : msgDateImgPadding . x ( ) ; <nl> TextState Gif : : textState ( QPoint point , StateRequest request ) const { <nl> if ( _data - > uploading ( ) ) { <nl> result . link = _cancell ; <nl> } else { <nl> - result . link = _data - > loaded ( ) <nl> + result . link = ( _data - > loaded ( ) | | _data - > canBePlayed ( ) ) <nl> ? _openl : <nl> _data - > loading ( ) <nl> ? _cancell <nl> void Gif : : playAnimation ( bool autoplay ) { <nl> if ( ! autoplayEnabled ( ) ) { <nl> history ( ) - > owner ( ) . checkPlayingVideoFiles ( ) ; <nl> } <nl> - if ( ! createStreamedPlayer ( ) ) { <nl> - return ; <nl> - } <nl> - auto options = : : Media : : Streaming : : PlaybackOptions ( ) ; <nl> - options . audioId = AudioMsgId ( _data , _realParent - > fullId ( ) ) ; <nl> - options . waitForMarkAsShown = true ; <nl> - / / if ( ! _streamed - > withSound ) { <nl> - options . mode = : : Media : : Streaming : : Mode : : Video ; <nl> - options . loop = true ; <nl> - / / } <nl> - _streamed - > play ( options ) ; <nl> + createStreamedPlayer ( ) ; <nl> } <nl> } <nl> <nl> - bool Gif : : createStreamedPlayer ( ) { <nl> + void Gif : : createStreamedPlayer ( ) { <nl> auto shared = _data - > owner ( ) . documentStreamer ( <nl> _data , <nl> _realParent - > fullId ( ) ) ; <nl> if ( ! shared ) { <nl> - return false ; <nl> + return ; <nl> } <nl> setStreamed ( std : : make_unique < : : Media : : Streaming : : Instance > ( <nl> std : : move ( shared ) , <nl> bool Gif : : createStreamedPlayer ( ) { <nl> if ( _streamed - > ready ( ) ) { <nl> streamingReady ( base : : duplicate ( _streamed - > info ( ) ) ) ; <nl> } <nl> - return true ; <nl> + startStreamedPlayer ( ) ; <nl> + } <nl> + <nl> + void Gif : : startStreamedPlayer ( ) const { <nl> + Expects ( _streamed ! = nullptr ) ; <nl> + <nl> + auto options = : : Media : : Streaming : : PlaybackOptions ( ) ; <nl> + options . audioId = AudioMsgId ( _data , _realParent - > fullId ( ) ) ; <nl> + options . waitForMarkAsShown = true ; <nl> + / / if ( ! _streamed - > withSound ) { <nl> + options . mode = : : Media : : Streaming : : Mode : : Video ; <nl> + options . 
loop = true ; <nl> + / / } <nl> + _streamed - > play ( options ) ; <nl> } <nl> <nl> void Gif : : setStreamed ( std : : unique_ptr < : : Media : : Streaming : : Instance > value ) { <nl> mmm a / Telegram / SourceFiles / history / view / media / history_view_gif . h <nl> ppp b / Telegram / SourceFiles / history / view / media / history_view_gif . h <nl> class Gif final : public File { <nl> : : Media : : Streaming : : Instance * activeCurrentStreamed ( ) const ; <nl> : : Media : : View : : PlaybackProgress * videoPlayback ( ) const ; <nl> <nl> - bool createStreamedPlayer ( ) ; <nl> + void createStreamedPlayer ( ) ; <nl> + void startStreamedPlayer ( ) const ; <nl> void setStreamed ( std : : unique_ptr < : : Media : : Streaming : : Instance > value ) ; <nl> void handleStreamingUpdate ( : : Media : : Streaming : : Update & & update ) ; <nl> void handleStreamingError ( : : Media : : Streaming : : Error & & error ) ; <nl> mmm a / Telegram / SourceFiles / media / streaming / media_streaming_instance . cpp <nl> ppp b / Telegram / SourceFiles / media / streaming / media_streaming_instance . cpp <nl> bool Instance : : ready ( ) const { <nl> return _shared - > player ( ) . ready ( ) ; <nl> } <nl> <nl> + std : : optional < Error > Instance : : failed ( ) const { <nl> + Expects ( _shared ! = nullptr ) ; <nl> + <nl> + return _shared - > player ( ) . failed ( ) ; <nl> + } <nl> + <nl> bool Instance : : paused ( ) const { <nl> Expects ( _shared ! = nullptr ) ; <nl> <nl> mmm a / Telegram / SourceFiles / media / streaming / media_streaming_instance . h <nl> ppp b / Telegram / SourceFiles / media / streaming / media_streaming_instance . h <nl> class Instance { <nl> <nl> [ [ nodiscard ] ] bool active ( ) const ; <nl> [ [ nodiscard ] ] bool ready ( ) const ; <nl> + [ [ nodiscard ] ] std : : optional < Error > failed ( ) const ; <nl> <nl> [ [ nodiscard ] ] bool paused ( ) const ; <nl> <nl> class Instance { <nl> [ [ nodiscard ] ] QImage frame ( const FrameRequest & request ) const ; <nl> bool markFrameShown ( ) ; <nl> <nl> - rpl : : lifetime & lifetime ( ) ; <nl> + [ [ nodiscard ] ] rpl : : lifetime & lifetime ( ) ; <nl> <nl> private : <nl> const std : : shared_ptr < Document > _shared ; <nl> mmm a / Telegram / SourceFiles / media / streaming / media_streaming_video_track . cpp <nl> ppp b / Telegram / SourceFiles / media / streaming / media_streaming_video_track . cpp <nl> QImage VideoTrack : : frame ( <nl> const auto j = none <nl> ? frame - > prepared . emplace ( instance , useRequest ) . first <nl> : i ; <nl> + if ( changed & & ! none ) { <nl> + i - > second . request = useRequest ; <nl> + } <nl> if ( frame - > prepared . size ( ) > 1 ) { <nl> for ( auto & [ alreadyInstance , prepared ] : frame - > prepared ) { <nl> if ( alreadyInstance ! = instance <nl>
Fix round video messages streaming .
telegramdesktop/tdesktop
38199276f1bad060764ecbb3de0ef6abba8d7b52
2019-12-12T13:26:53Z
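The tdesktop patch above splits creation of the streaming player (createStreamedPlayer) from starting playback (startStreamedPlayer) and adds an Instance::failed() accessor, so that Gif::draw() can lazily (re)start an instance that exists but is neither active nor failed, without retrying one that has already errored. The following is a simplified, self-contained sketch of that flow; the class and method names follow the diff, but the bodies are stand-ins invented for illustration, not the real Media::Streaming implementation.

#include <cstdio>
#include <memory>
#include <optional>
#include <string>

struct Error { std::string reason; };

// Stand-in for Media::Streaming::Instance, reduced to the accessors in the diff.
class Instance {
 public:
  bool active() const { return active_; }
  std::optional<Error> failed() const { return error_; }
  void play() { active_ = true; std::puts("playing"); }
 private:
  bool active_ = false;
  std::optional<Error> error_;
};

class Gif {
 public:
  // Mirrors the patched Gif::draw(): create lazily, restart an instance that
  // exists but is neither active nor failed, never retry a failed one.
  void draw() {
    if (!streamed_) {
      createStreamedPlayer();
    } else if (!streamed_->active() && !streamed_->failed()) {
      startStreamedPlayer();
    }
  }
 private:
  void createStreamedPlayer() {
    streamed_ = std::make_unique<Instance>();
    startStreamedPlayer();  // creation now always ends with a start, as in the patch
  }
  void startStreamedPlayer() { streamed_->play(); }
  std::unique_ptr<Instance> streamed_;
};

int main() {
  Gif gif;
  gif.draw();  // first paint: creates the player and starts it
  gif.draw();  // already active: nothing to do
  return 0;
}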
mmm a / dbms / include / DB / DataStreams / FormatFactory . h <nl> ppp b / dbms / include / DB / DataStreams / FormatFactory . h <nl> class FormatFactory <nl> { <nl> public : <nl> BlockInputStreamPtr getInput ( const String & name , ReadBuffer & buf , <nl> - Block & sample , size_t max_block_size ) const ; <nl> + const Block & sample , size_t max_block_size ) const ; <nl> <nl> BlockOutputStreamPtr getOutput ( const String & name , WriteBuffer & buf , <nl> - Block & sample ) const ; <nl> + const Block & sample ) const ; <nl> } ; <nl> <nl> } <nl>
mmm a / dbms / include / DB / DataStreams / RemoteBlockInputStream . h <nl> ppp b / dbms / include / DB / DataStreams / RemoteBlockInputStream . h <nl> class RemoteBlockInputStream : public IProfilingBlockInputStream <nl> if ( ! is_cancelled . compare_exchange_strong ( old_val , true , std : : memory_order_seq_cst , std : : memory_order_relaxed ) ) <nl> return ; <nl> <nl> + { <nl> + std : : lock_guard < std : : mutex > lock ( external_tables_mutex ) ; <nl> + <nl> + / / / Stop sending the external data . <nl> + for ( auto & vec : external_tables_data ) <nl> + for ( auto & elem : vec ) <nl> + if ( IProfilingBlockInputStream * stream = dynamic_cast < IProfilingBlockInputStream * > ( elem . first . get ( ) ) ) <nl> + stream - > cancel ( ) ; <nl> + } <nl> + <nl> if ( ! isQueryPending ( ) | | hasThrownException ( ) ) <nl> return ; <nl>
class RemoteBlockInputStream : public IProfilingBlockInputStream <nl> { <nl> size_t count = parallel_replicas - > size ( ) ; <nl> <nl> - std : : vector < ExternalTablesData > instances ; <nl> - instances . reserve ( count ) ; <nl> - <nl> - for ( size_t i = 0 ; i < count ; + + i ) <nl> { <nl> - ExternalTablesData res ; <nl> - for ( const auto & table : external_tables ) <nl> + std : : lock_guard < std : : mutex > lock ( external_tables_mutex ) ; <nl> + <nl> + external_tables_data . reserve ( count ) ; <nl> + <nl> + for ( size_t i = 0 ; i < count ; + + i ) <nl> { <nl> - StoragePtr cur = table . second ; <nl> - QueryProcessingStage : : Enum stage = QueryProcessingStage : : Complete ; <nl> - DB : : BlockInputStreams input = cur - > read ( cur - > getColumnNamesList ( ) , ASTPtr ( ) , context , settings , <nl> - stage , DEFAULT_BLOCK_SIZE , 1 ) ; <nl> - if ( input . size ( ) = = 0 ) <nl> - res . push_back ( std : : make_pair ( new OneBlockInputStream ( cur - > getSampleBlock ( ) ) , table . first ) ) ; <nl> - else <nl> - res . push_back ( std : : make_pair ( input [ 0 ] , table . first ) ) ; <nl> + ExternalTablesData res ; <nl> + for ( const auto & table : external_tables ) <nl> + { <nl> + StoragePtr cur = table . second ; <nl> + QueryProcessingStage : : Enum stage = QueryProcessingStage : : Complete ; <nl> + DB : : BlockInputStreams input = cur - > read ( cur - > getColumnNamesList ( ) , ASTPtr ( ) , context , settings , <nl> + stage , DEFAULT_BLOCK_SIZE , 1 ) ; <nl> + if ( input . size ( ) = = 0 ) <nl> + res . push_back ( std : : make_pair ( new OneBlockInputStream ( cur - > getSampleBlock ( ) ) , table . first ) ) ; <nl> + else <nl> + res . push_back ( std : : make_pair ( input [ 0 ] , table . first ) ) ; <nl> + } <nl> + external_tables_data . push_back ( std : : move ( res ) ) ; <nl> + } <nl> } <nl> <nl> - instances . push_back ( std : : move ( res ) ) ; <nl> } <nl> <nl> - parallel_replicas - > sendExternalTablesData ( instances ) ; <nl> + parallel_replicas - > sendExternalTablesData ( external_tables_data ) ; <nl> } <nl> <nl> Block readImpl ( ) override <nl>
class RemoteBlockInputStream : public IProfilingBlockInputStream <nl> QueryProcessingStage : : Enum stage ; <nl> Context context ; <nl> <nl> + / / / Streams for reading from temporary tables , used to later send the data to remote servers for GLOBAL subqueries . <nl> + std : : vector < ExternalTablesData > external_tables_data ; <nl> + std : : mutex external_tables_mutex ; <nl> + <nl> / / / Connections to the replicas have been established , but the query has not been sent yet . <nl> std : : atomic < bool > established { false } ; <nl>
mmm a / dbms / include / DB / Functions / FunctionsArray . h <nl> ppp b / dbms / include / DB / Functions / FunctionsArray . h <nl> namespace DB <nl> * For example : arrayEnumerateUniq ( [ 10 , 20 , 10 , 30 ] ) = [ 1 , 1 , 2 , 1 ] <nl> * arrayEnumerateUniq ( arr1 , arr2 . . . ) <nl> * - for tuples of elements at the corresponding positions in several arrays . <nl> + * <nl> + * emptyArrayToSingle ( arr ) - replace empty arrays with single - element arrays holding the " default " value . <nl> * / <nl> <nl> <nl> class FunctionRange : public IFunction <nl> } ; <nl> <nl> <nl>
+ class FunctionEmptyArrayToSingle : public IFunction <nl> + { <nl> + public : <nl> + static constexpr auto name = " emptyArrayToSingle " ; <nl> + static IFunction * create ( const Context & context ) { return new FunctionEmptyArrayToSingle ; } <nl> + <nl> + / / / Get the function name . <nl> + String getName ( ) const <nl> + { <nl> + return name ; <nl> + } <nl> + <nl> + / / / Get the result type from the argument types . If the function is not applicable to the given arguments , throw an exception . <nl> + DataTypePtr getReturnType ( const DataTypes & arguments ) const <nl> + { <nl> + if ( arguments . size ( ) ! = 1 ) <nl> + throw Exception ( " Number of arguments for function " + getName ( ) + " doesn ' t match : passed " <nl> + + toString ( arguments . size ( ) ) + " , should be 1 . " , <nl> + ErrorCodes : : NUMBER_OF_ARGUMENTS_DOESNT_MATCH ) ; <nl> + <nl> + const DataTypeArray * array_type = typeid_cast < const DataTypeArray * > ( arguments [ 0 ] . get ( ) ) ; <nl> + if ( ! array_type ) <nl> + throw Exception ( " Argument for function " + getName ( ) + " must be array . " , <nl> + ErrorCodes : : ILLEGAL_TYPE_OF_ARGUMENT ) ; <nl> + <nl> + return arguments [ 0 ] - > clone ( ) ; <nl> + } <nl> + <nl> + / / / Execute the function over a block . <nl> + void execute ( Block & block , const ColumnNumbers & arguments , size_t result ) <nl> + { <nl> + if ( executeConst ( block , arguments , result ) ) <nl> + return ; <nl> + <nl> + const ColumnArray * array = typeid_cast < const ColumnArray * > ( block . getByPosition ( arguments [ 0 ] ) . column . get ( ) ) ; <nl> + if ( ! array ) <nl> + throw Exception ( " Illegal column " + block . getByPosition ( arguments [ 0 ] ) . column - > getName ( ) + " of first argument of function " + getName ( ) , <nl> + ErrorCodes : : ILLEGAL_COLUMN ) ; <nl> + <nl> + ColumnPtr res_ptr = array - > cloneEmpty ( ) ; <nl> + block . getByPosition ( result ) . column = res_ptr ; <nl> + ColumnArray & res = static_cast < ColumnArray & > ( * res_ptr ) ; <nl> + <nl> + const IColumn & src_data = array - > getData ( ) ; <nl> + const ColumnArray : : Offsets_t & src_offsets = array - > getOffsets ( ) ; <nl> + IColumn & res_data = res . 
getData ( ) ; <nl> + ColumnArray : : Offsets_t & res_offsets = res . getOffsets ( ) ; <nl> + <nl> + if ( ! ( executeNumber < UInt8 > ( src_data , src_offsets , res_data , res_offsets ) <nl> + | | executeNumber < UInt16 > ( src_data , src_offsets , res_data , res_offsets ) <nl> + | | executeNumber < UInt32 > ( src_data , src_offsets , res_data , res_offsets ) <nl> + | | executeNumber < UInt64 > ( src_data , src_offsets , res_data , res_offsets ) <nl> + | | executeNumber < Int8 > ( src_data , src_offsets , res_data , res_offsets ) <nl> + | | executeNumber < Int16 > ( src_data , src_offsets , res_data , res_offsets ) <nl> + | | executeNumber < Int32 > ( src_data , src_offsets , res_data , res_offsets ) <nl> + | | executeNumber < Int64 > ( src_data , src_offsets , res_data , res_offsets ) <nl> + | | executeNumber < Float32 > ( src_data , src_offsets , res_data , res_offsets ) <nl> + | | executeNumber < Float64 > ( src_data , src_offsets , res_data , res_offsets ) <nl> + | | executeString ( src_data , src_offsets , res_data , res_offsets ) <nl> + | | executeFixedString ( src_data , src_offsets , res_data , res_offsets ) ) ) <nl> + throw Exception ( " Illegal column " + block . getByPosition ( arguments [ 0 ] ) . column - > getName ( ) <nl> + + " of first argument of function " + getName ( ) , <nl> + ErrorCodes : : ILLEGAL_COLUMN ) ; <nl> + } <nl> + <nl> + private : <nl> + bool executeConst ( Block & block , const ColumnNumbers & arguments , size_t result ) <nl> + { <nl> + if ( const ColumnConstArray * const_array = typeid_cast < const ColumnConstArray * > ( block . getByPosition ( arguments [ 0 ] ) . column . get ( ) ) ) <nl> + { <nl> + if ( const_array - > getData ( ) . empty ( ) ) <nl> + { <nl> + auto nested_type = typeid_cast < const DataTypeArray & > ( * block . getByPosition ( arguments [ 0 ] ) . type ) . getNestedType ( ) ; <nl> + <nl> + block . getByPosition ( result ) . column = new ColumnConstArray ( <nl> + block . rowsInFirstColumn ( ) , <nl> + { nested_type - > getDefault ( ) } , <nl> + nested_type - > clone ( ) ) ; <nl> + } <nl> + else <nl> + block . getByPosition ( result ) . column = block . getByPosition ( arguments [ 0 ] ) . column ; <nl> + <nl> + return true ; <nl> + } <nl> + else <nl> + return false ; <nl> + } <nl> + <nl> + template < typename T > <nl> + bool executeNumber ( <nl> + const IColumn & src_data , const ColumnArray : : Offsets_t & src_offsets , <nl> + IColumn & res_data_col , ColumnArray : : Offsets_t & res_offsets ) <nl> + { <nl> + if ( const ColumnVector < T > * src_data_concrete = typeid_cast < const ColumnVector < T > * > ( & src_data ) ) <nl> + { <nl> + const PODArray < T > & src_data = src_data_concrete - > getData ( ) ; <nl> + PODArray < T > & res_data = typeid_cast < ColumnVector < T > & > ( res_data_col ) . getData ( ) ; <nl> + size_t size = src_offsets . size ( ) ; <nl> + res_offsets . resize ( size ) ; <nl> + res_data . reserve ( src_data . size ( ) ) ; <nl> + <nl> + ColumnArray : : Offset_t src_prev_offset = 0 ; <nl> + ColumnArray : : Offset_t res_prev_offset = 0 ; <nl> + <nl> + for ( size_t i = 0 ; i < size ; + + i ) <nl> + { <nl> + if ( src_offsets [ i ] ! = src_prev_offset ) <nl> + { <nl> + size_t size_to_write = src_offsets [ i ] - src_prev_offset ; <nl> + size_t prev_res_data_size = res_data . size ( ) ; <nl> + res_data . 
resize ( prev_res_data_size + size_to_write ) ; <nl> + memcpy ( & res_data [ prev_res_data_size ] , & src_data [ src_prev_offset ] , size_to_write * sizeof ( T ) ) ; <nl> + res_prev_offset + = size_to_write ; <nl> + res_offsets [ i ] = res_prev_offset ; <nl> + } <nl> + else <nl> + { <nl> + res_data . push_back ( T ( ) ) ; <nl> + + + res_prev_offset ; <nl> + res_offsets [ i ] = res_prev_offset ; <nl> + } <nl> + <nl> + src_prev_offset = src_offsets [ i ] ; <nl> + } <nl> + <nl> + return true ; <nl> + } <nl> + else <nl> + return false ; <nl> + } <nl> + <nl> + bool executeFixedString ( <nl> + const IColumn & src_data , const ColumnArray : : Offsets_t & src_offsets , <nl> + IColumn & res_data_col , ColumnArray : : Offsets_t & res_offsets ) <nl> + { <nl> + if ( const ColumnFixedString * src_data_concrete = typeid_cast < const ColumnFixedString * > ( & src_data ) ) <nl> + { <nl> + const size_t n = src_data_concrete - > getN ( ) ; <nl> + const ColumnFixedString : : Chars_t & src_data = src_data_concrete - > getChars ( ) ; <nl> + ColumnFixedString : : Chars_t & res_data = typeid_cast < ColumnFixedString & > ( res_data_col ) . getChars ( ) ; <nl> + size_t size = src_offsets . size ( ) ; <nl> + res_offsets . resize ( size ) ; <nl> + res_data . reserve ( src_data . size ( ) ) ; <nl> + <nl> + ColumnArray : : Offset_t src_prev_offset = 0 ; <nl> + ColumnArray : : Offset_t res_prev_offset = 0 ; <nl> + <nl> + for ( size_t i = 0 ; i < size ; + + i ) <nl> + { <nl> + if ( src_offsets [ i ] ! = src_prev_offset ) <nl> + { <nl> + size_t size_to_write = src_offsets [ i ] - src_prev_offset ; <nl> + size_t prev_res_data_size = res_data . size ( ) ; <nl> + res_data . resize ( prev_res_data_size + size_to_write * n ) ; <nl> + memcpy ( & res_data [ prev_res_data_size ] , & src_data [ src_prev_offset ] , size_to_write * n ) ; <nl> + res_prev_offset + = size_to_write ; <nl> + res_offsets [ i ] = res_prev_offset ; <nl> + } <nl> + else <nl> + { <nl> + size_t prev_res_data_size = res_data . size ( ) ; <nl> + res_data . resize ( prev_res_data_size + n ) ; <nl> + memset ( & res_data [ prev_res_data_size ] , 0 , n ) ; <nl> + + + res_prev_offset ; <nl> + res_offsets [ i ] = res_prev_offset ; <nl> + } <nl> + <nl> + src_prev_offset = src_offsets [ i ] ; <nl> + } <nl> + <nl> + return true ; <nl> + } <nl> + else <nl> + return false ; <nl> + } <nl> + <nl> + bool executeString ( <nl> + const IColumn & src_data , const ColumnArray : : Offsets_t & src_array_offsets , <nl> + IColumn & res_data_col , ColumnArray : : Offsets_t & res_array_offsets ) <nl> + { <nl> + if ( const ColumnString * src_data_concrete = typeid_cast < const ColumnString * > ( & src_data ) ) <nl> + { <nl> + const ColumnString : : Offsets_t & src_string_offsets = src_data_concrete - > getOffsets ( ) ; <nl> + ColumnString : : Offsets_t & res_string_offsets = typeid_cast < ColumnString & > ( res_data_col ) . getOffsets ( ) ; <nl> + <nl> + const ColumnString : : Chars_t & src_data = src_data_concrete - > getChars ( ) ; <nl> + ColumnString : : Chars_t & res_data = typeid_cast < ColumnString & > ( res_data_col ) . getChars ( ) ; <nl> + <nl> + size_t size = src_array_offsets . size ( ) ; <nl> + res_array_offsets . resize ( size ) ; <nl> + res_string_offsets . reserve ( src_string_offsets . size ( ) ) ; <nl> + res_data . reserve ( src_data . 
size ( ) ) ; <nl> + <nl> + ColumnArray : : Offset_t src_array_prev_offset = 0 ; <nl> + ColumnArray : : Offset_t res_array_prev_offset = 0 ; <nl> + <nl> + ColumnString : : Offset_t src_string_prev_offset = 0 ; <nl> + ColumnString : : Offset_t res_string_prev_offset = 0 ; <nl> + <nl> + for ( size_t i = 0 ; i < size ; + + i ) <nl> + { <nl> + if ( src_array_offsets [ i ] ! = src_array_prev_offset ) <nl> + { <nl> + size_t array_size = src_array_offsets [ i ] - src_array_prev_offset ; <nl> + <nl> + size_t bytes_to_copy = 0 ; <nl> + size_t from_string_prev_offset_local = src_string_prev_offset ; <nl> + for ( size_t j = 0 ; j < array_size ; + + j ) <nl> + { <nl> + size_t string_size = src_string_offsets [ src_array_prev_offset + j ] - from_string_prev_offset_local ; <nl> + <nl> + res_string_prev_offset + = string_size ; <nl> + res_string_offsets . push_back ( res_string_prev_offset ) ; <nl> + <nl> + from_string_prev_offset_local + = string_size ; <nl> + bytes_to_copy + = string_size ; <nl> + } <nl> + <nl> + size_t res_data_old_size = res_data . size ( ) ; <nl> + res_data . resize ( res_data_old_size + bytes_to_copy ) ; <nl> + memcpy ( & res_data [ res_data_old_size ] , & src_data [ src_string_prev_offset ] , bytes_to_copy ) ; <nl> + <nl> + res_array_prev_offset + = array_size ; <nl> + res_array_offsets [ i ] = res_array_prev_offset ; <nl> + } <nl> + else <nl> + { <nl> + res_data . push_back ( 0 ) ; / / / Пустая строка , включая ноль на конце . <nl> + <nl> + + + res_string_prev_offset ; <nl> + res_string_offsets . push_back ( res_string_prev_offset ) ; <nl> + <nl> + + + res_array_prev_offset ; <nl> + res_array_offsets [ i ] = res_array_prev_offset ; <nl> + } <nl> + <nl> + src_array_prev_offset = src_array_offsets [ i ] ; <nl> + <nl> + if ( src_array_prev_offset ) <nl> + src_string_prev_offset = src_string_offsets [ src_array_prev_offset - 1 ] ; <nl> + } <nl> + <nl> + return true ; <nl> + } <nl> + else <nl> + return false ; <nl> + } <nl> + } ; <nl> + <nl> + <nl> struct NameHas { static constexpr auto name = " has " ; } ; <nl> struct NameIndexOf { static constexpr auto name = " indexOf " ; } ; <nl> struct NameCountEqual { static constexpr auto name = " countEqual " ; } ; <nl> <nl> - typedef FunctionArrayIndex < IndexToOne , NameHas > FunctionHas ; <nl> + typedef FunctionArrayIndex < IndexToOne , NameHas > FunctionHas ; <nl> typedef FunctionArrayIndex < IndexIdentity , NameIndexOf > FunctionIndexOf ; <nl> - typedef FunctionArrayIndex < IndexCount , NameCountEqual > FunctionCountEqual ; <nl> + typedef FunctionArrayIndex < IndexCount , NameCountEqual > FunctionCountEqual ; <nl> <nl> using FunctionEmptyArrayUInt8 = FunctionEmptyArray < DataTypeUInt8 > ; <nl> using FunctionEmptyArrayUInt16 = FunctionEmptyArray < DataTypeUInt16 > ; <nl> mmm a / dbms / include / DB / Functions / FunctionsURL . h <nl> ppp b / dbms / include / DB / Functions / FunctionsURL . h <nl> struct ExtractURLParameterImpl <nl> { <nl> size_t cur_offset = offsets [ i ] ; <nl> <nl> - const char * pos = nullptr ; <nl> + const char * str = reinterpret_cast < const char * > ( & data [ prev_offset ] ) ; <nl> <nl> - do <nl> + const char * pos = nullptr ; <nl> + const char * begin = strchr ( str , ' ? ' ) ; <nl> + if ( begin ! = nullptr ) <nl> { <nl> - const char * str = reinterpret_cast < const char * > ( & data [ prev_offset ] ) ; <nl> - <nl> - const char * begin = strchr ( str , ' ? 
' ) ; <nl> - if ( begin = = nullptr ) <nl> - break ; <nl> - <nl> - pos = strstr ( begin + 1 , param_str ) ; <nl> - if ( pos = = nullptr ) <nl> - break ; <nl> - if ( pos ! = begin + 1 & & * ( pos - 1 ) ! = ' ; ' & & * ( pos - 1 ) ! = ' & ' ) <nl> + pos = begin + 1 ; <nl> + while ( true ) <nl> { <nl> - pos = nullptr ; <nl> - break ; <nl> + pos = strstr ( pos , param_str ) ; <nl> + <nl> + if ( pos = = nullptr ) <nl> + break ; <nl> + <nl> + if ( pos [ - 1 ] ! = ' ? ' & & pos [ - 1 ] ! = ' & ' ) <nl> + { <nl> + pos + = param_len ; <nl> + continue ; <nl> + } <nl> + else <nl> + { <nl> + pos + = param_len ; <nl> + break ; <nl> + } <nl> } <nl> - <nl> - pos + = param_len ; <nl> - } while ( false ) ; <nl> + } <nl> <nl> if ( pos ! = nullptr ) <nl> { <nl> - const char * end = strpbrk ( pos , " & ; # " ) ; <nl> + const char * end = strpbrk ( pos , " & # " ) ; <nl> if ( end = = nullptr ) <nl> end = pos + strlen ( pos ) ; <nl> <nl> mmm a / dbms / include / DB / Interpreters / ExpressionActions . h <nl> ppp b / dbms / include / DB / Interpreters / ExpressionActions . h <nl> struct ExpressionAction <nl> <nl> / / / Для ARRAY_JOIN <nl> NameSet array_joined_columns ; <nl> + bool array_join_is_left ; <nl> <nl> / / / Для JOIN <nl> const Join * join = nullptr ; <nl> struct ExpressionAction <nl> return a ; <nl> } <nl> <nl> - static ExpressionAction arrayJoin ( const NameSet & array_joined_columns ) <nl> + static ExpressionAction arrayJoin ( const NameSet & array_joined_columns , bool array_join_is_left ) <nl> { <nl> if ( array_joined_columns . empty ( ) ) <nl> throw Exception ( " No arrays to join " , ErrorCodes : : LOGICAL_ERROR ) ; <nl> ExpressionAction a ; <nl> a . type = ARRAY_JOIN ; <nl> a . array_joined_columns = array_joined_columns ; <nl> + a . array_join_is_left = array_join_is_left ; <nl> return a ; <nl> } <nl> <nl> mmm a / dbms / include / DB / Interpreters / ExpressionAnalyzer . h <nl> ppp b / dbms / include / DB / Interpreters / ExpressionAnalyzer . h <nl> class ExpressionAnalyzer : private boost : : noncopyable <nl> / / / Превратить перечисление значений или подзапрос в ASTSet . node - функция in или notIn . <nl> void makeSet ( ASTFunction * node , const Block & sample_block ) ; <nl> <nl> + / / / Замена скалярных подзапросов на значения - константы . <nl> + void executeScalarSubqueries ( ) ; <nl> + void executeScalarSubqueriesImpl ( ASTPtr & ast ) ; <nl> + <nl> / / / Находит глобальные подзапросы в секциях GLOBAL IN / JOIN . Заполняет external_tables . <nl> void initGlobalSubqueriesAndExternalTables ( ) ; <nl> void initGlobalSubqueries ( ASTPtr & ast ) ; <nl> mmm a / dbms / include / DB / Interpreters / InterpreterCreateQuery . h <nl> ppp b / dbms / include / DB / Interpreters / InterpreterCreateQuery . h <nl> class InterpreterCreateQuery : public IInterpreter <nl> * / <nl> BlockIO execute ( ) override <nl> { <nl> - executeImpl ( false ) ; <nl> - return { } ; <nl> + return executeImpl ( false ) ; <nl> } <nl> <nl> / * * assume_metadata_exists - не проверять наличие файла с метаданными и не создавать его <nl> class InterpreterCreateQuery : public IInterpreter <nl> const ColumnDefaults & column_defaults ) ; <nl> <nl> private : <nl> - void executeImpl ( bool assume_metadata_exists ) ; <nl> + BlockIO executeImpl ( bool assume_metadata_exists ) ; <nl> <nl> / / / AST в список столбцов с типами . Столбцы типа Nested развернуты в список настоящих столбцов . <nl> using ColumnsAndDefaults = std : : pair < NamesAndTypesList , ColumnDefaults > ; <nl> mmm a / dbms / include / DB / Parsers / ASTSelectQuery . 
h <nl>
ppp b / dbms / include / DB / Parsers / ASTSelectQuery . h <nl>
class ASTSelectQuery : public ASTQueryWithOutput <nl>
ASTPtr select_expression_list ; <nl>
ASTPtr database ; <nl>
ASTPtr table ; / / / Identifier , table function or subquery ( recursively an ASTSelectQuery ) <nl>
+ bool array_join_is_left = false ; / / / LEFT ARRAY JOIN <nl>
ASTPtr array_join_expression_list ; / / / ARRAY JOIN <nl>
ASTPtr join ; / / / Ordinary ( non - ARRAY ) JOIN . <nl>
bool final = false ; <nl>
mmm a / dbms / include / DB / Parsers / ASTSubquery . h <nl>
ppp b / dbms / include / DB / Parsers / ASTSubquery . h <nl>
<nl>
<nl>
# include < DB / DataTypes / IDataType . h > <nl>
<nl>
- # include < DB / Parsers / IAST . h > <nl>
+ # include < DB / Parsers / ASTWithAlias . h > <nl>
<nl>
<nl>
namespace DB <nl>
namespace DB <nl>
<nl>
/ * * SELECT subquery <nl>
* / <nl>
- class ASTSubquery : public IAST <nl>
+ class ASTSubquery : public ASTWithAlias <nl>
{ <nl>
public : <nl>
ASTSubquery ( ) = default ; <nl>
- ASTSubquery ( const StringRange range_ ) : IAST ( range_ ) { } <nl>
- <nl>
+ ASTSubquery ( const StringRange range_ ) : ASTWithAlias ( range_ ) { } <nl>
+ <nl>
/ * * Get the text that identifies this element . * / <nl>
String getID ( ) const override { return " Subquery " ; } <nl>
<nl>
mmm a / dbms / include / DB / Storages / MergeTree / MergeTreeData . h <nl>
ppp b / dbms / include / DB / Storages / MergeTree / MergeTreeData . h <nl>
class MergeTreeData : public ITableDeclaration <nl>
return it = = std : : end ( column_sizes ) ? 0 : it - > second ; <nl>
} <nl>
<nl>
+ using ColumnSizes = std : : unordered_map < std : : string , size_t > ; <nl>
+ ColumnSizes getColumnSizes ( ) const <nl>
+ { <nl>
+ Poco : : ScopedLock < Poco : : FastMutex > lock { data_parts_mutex } ; <nl>
+ return column_sizes ; <nl>
+ } <nl>
+ <nl>
/ / / For ATTACH / DETACH / DROP PARTITION . <nl>
static String getMonthName ( const Field & partition ) ; <nl>
static DayNum_t getMonthDayNum ( const Field & partition ) ; <nl>
class MergeTreeData : public ITableDeclaration <nl>
<nl>
NamesAndTypesListPtr columns ; <nl>
/ / / Current sizes of the columns in compressed form <nl>
- std : : unordered_map < std : : string , size_t > column_sizes ; <nl>
+ ColumnSizes column_sizes ; <nl>
<nl>
BrokenPartCallback broken_part_callback ; <nl>
<nl>
mmm a / dbms / include / DB / Storages / MergeTree / MergeTreeReader . h <nl>
ppp b / dbms / include / DB / Storages / MergeTree / MergeTreeReader . h <nl>
class MergeTreeReader <nl>
+ + right ; <nl>
} <nl>
<nl>
- / / / If there are no marks to the right , just use DEFAULT_BUFFER_SIZE <nl>
+ / / / If there are no marks to the right , just use max_read_buffer_size <nl>
if ( right > = ( * marks ) . size ( ) | | ( right + 1 = = ( * marks ) . size ( ) & & <nl>
( * marks ) [ right ] . offset_in_compressed_file = = ( * marks ) [ all_mark_ranges [ i ] . end ] . offset_in_compressed_file ) ) <nl>
{ <nl>
mmm a / dbms / src / AggregateFunctions / AggregateFunctionFactory . cpp <nl>
ppp b / dbms / src / AggregateFunctions / AggregateFunctionFactory . cpp <nl>
AggregateFunctionPtr AggregateFunctionFactory : : get ( const String & name , const Da <nl>
AggregateFunctionPtr nested = get ( String ( name . data ( ) , name . size ( ) - strlen ( " State " ) ) , argument_types , recursion_level + 1 ) ; <nl>
return new AggregateFunctionState ( nested ) ; <nl>
} <nl>
- else if ( recursion_level = = 0 & & name . size ( ) > strlen ( " Merge " ) & & ! ( strcmp ( name . data ( ) + name . size ( ) - strlen ( " Merge " ) , " Merge " ) ) ) <nl>
+ else if ( recursion_level < = 1 & & name . size ( ) > strlen ( " Merge " ) & & ! ( strcmp ( name . data ( ) + name . size ( ) - strlen ( " Merge " ) , " Merge " ) ) ) <nl>
{ <nl>
/ / / For aggregate functions of the form aggMerge , where agg is the name of another aggregate function . <nl>
if ( argument_types . size ( ) ! = 1 ) <nl>
AggregateFunctionPtr AggregateFunctionFactory : : get ( const String & name , const Da <nl>
<nl>
return new AggregateFunctionMerge ( nested ) ; <nl>
} <nl>
- else if ( recursion_level < = 1 & & name . size ( ) > = 3 & & name [ name . size ( ) - 2 ] = = ' I ' & & name [ name . size ( ) - 1 ] = = ' f ' ) <nl>
+ else if ( recursion_level < = 2 & & name . size ( ) > = 3 & & name [ name . size ( ) - 2 ] = = ' I ' & & name [ name . size ( ) - 1 ] = = ' f ' ) <nl>
{ <nl>
if ( argument_types . empty ( ) ) <nl>
throw Exception { <nl>
AggregateFunctionPtr AggregateFunctionFactory : : get ( const String & name , const Da <nl>
AggregateFunctionPtr nested = get ( String ( name . data ( ) , name . size ( ) - 2 ) , nested_dt , recursion_level + 1 ) ; <nl>
return new AggregateFunctionIf ( nested ) ; <nl>
} <nl>
- else if ( recursion_level < = 2 & & name . size ( ) > strlen ( " Array " ) & & ! ( strcmp ( name . data ( ) + name . size ( ) - strlen ( " Array " ) , " Array " ) ) ) <nl>
+ else if ( recursion_level < = 3 & & name . size ( ) > strlen ( " Array " ) & & ! ( strcmp ( name . data ( ) + name . size ( ) - strlen ( " Array " ) , " Array " ) ) ) <nl>
{ <nl>
/ / / For aggregate functions of the form aggArray , where agg is the name of another aggregate function . <nl>
size_t num_agruments = argument_types . size ( ) ; <nl>
AggregateFunctionPtr AggregateFunctionFactory : : get ( const String & name , const Da <nl>
else <nl>
throw Exception ( " Illegal type " + argument_types [ i ] - > getName ( ) + " of argument # " + toString ( i + 1 ) + " for aggregate function " + name + " . Must be array . " , ErrorCodes : : ILLEGAL_TYPE_OF_ARGUMENT ) ; <nl>
} <nl>
- AggregateFunctionPtr nested = get ( String ( name . data ( ) , name . size ( ) - strlen ( " Array " ) ) , nested_arguments , recursion_level + 2 ) ; / / / + 2 , so that no other modifier can come before Array <nl>
+ AggregateFunctionPtr nested = get ( String ( name . data ( ) , name . size ( ) - strlen ( " Array " ) ) , nested_arguments , recursion_level + 3 ) ; / / / + 3 , so that no other modifier can come before Array <nl>
return new AggregateFunctionArray ( nested ) ; <nl>
} <nl>
else <nl>
bool AggregateFunctionFactory : : isAggregateFunctionName ( const String & name , int <nl>
if ( recursion_level < = 0 & & name . size ( ) > strlen ( " State " ) & & ! ( strcmp ( name . data ( ) + name . size ( ) - strlen ( " State " ) , " State " ) ) ) <nl>
return isAggregateFunctionName ( String ( name . data ( ) , name . size ( ) - strlen ( " State " ) ) , recursion_level + 1 ) ; <nl>
/ / / For aggregate functions of the form aggMerge , where agg is the name of another aggregate function . <nl>
- if ( recursion_level < = 0 & & name . size ( ) > strlen ( " Merge " ) & & ! ( strcmp ( name . data ( ) + name . size ( ) - strlen ( " Merge " ) , " Merge " ) ) ) <nl>
+ if ( recursion_level < = 1 & & name . size ( ) > strlen ( " Merge " ) & & ! ( strcmp ( name . data ( ) + name . size ( ) - strlen ( " Merge " ) , " Merge " ) ) ) <nl>
return isAggregateFunctionName ( String ( name . data ( ) , name .
size ( ) - strlen ( " Merge " ) ) , recursion_level + 1 ) ; <nl>
/ / / For aggregate functions of the form aggIf , where agg is the name of another aggregate function . <nl>
- if ( recursion_level < = 1 & & name . size ( ) > = 3 & & name [ name . size ( ) - 2 ] = = ' I ' & & name [ name . size ( ) - 1 ] = = ' f ' ) <nl>
+ if ( recursion_level < = 2 & & name . size ( ) > = 3 & & name [ name . size ( ) - 2 ] = = ' I ' & & name [ name . size ( ) - 1 ] = = ' f ' ) <nl>
return isAggregateFunctionName ( String ( name . data ( ) , name . size ( ) - 2 ) , recursion_level + 1 ) ; <nl>
/ / / For aggregate functions of the form aggArray , where agg is the name of another aggregate function . <nl>
- if ( recursion_level < = 2 & & name . size ( ) > strlen ( " Array " ) & & ! ( strcmp ( name . data ( ) + name . size ( ) - strlen ( " Array " ) , " Array " ) ) ) <nl>
- return isAggregateFunctionName ( String ( name . data ( ) , name . size ( ) - strlen ( " Array " ) ) , recursion_level + 2 ) ; / / / + 2 , so that no other modifier can come before Array <nl>
+ if ( recursion_level < = 3 & & name . size ( ) > strlen ( " Array " ) & & ! ( strcmp ( name . data ( ) + name . size ( ) - strlen ( " Array " ) , " Array " ) ) ) <nl>
+ return isAggregateFunctionName ( String ( name . data ( ) , name . size ( ) - strlen ( " Array " ) ) , recursion_level + 3 ) ; / / / + 3 , so that no other modifier can come before Array <nl>
<nl>
return false ; <nl>
} <nl>
mmm a / dbms / src / Client / Client . cpp <nl>
ppp b / dbms / src / Client / Client . cpp <nl>
class Client : public Poco : : Util : : Application <nl>
} <nl>
<nl>
<nl>
+ / * * A check for the case when a multiline query is pasted into the terminal from the clipboard . <nl>
+ * Allows not to start executing one line of the query until the whole query has been pasted . <nl>
+ * / <nl>
+ static bool hasDataInSTDIN ( ) <nl>
+ { <nl>
+ timeval timeout = { 0 , 0 } ; <nl>
+ fd_set fds ; <nl>
+ FD_ZERO ( & fds ) ; <nl>
+ FD_SET ( STDIN_FILENO , & fds ) ; <nl>
+ return select ( 1 , & fds , 0 , 0 , & timeout ) = = 1 ; <nl>
+ } <nl>
+ <nl>
+ <nl>
void loop ( ) <nl>
{ <nl>
String query ; <nl>
class Client : public Poco : : Util : : Application <nl>
<nl>
query + = line ; <nl>
<nl>
- if ( ! ends_with_backslash & & ( ends_with_semicolon | | has_vertical_output_suffix | | ! config ( ) . has ( " multiline " ) ) ) <nl>
+ if ( ! ends_with_backslash & & ( ends_with_semicolon | | has_vertical_output_suffix | | ( ! config ( ) . has ( " multiline " ) & & ! hasDataInSTDIN ( ) ) ) ) <nl>
{ <nl>
if ( query ! = prev_query ) <nl>
{ <nl>
class Client : public Poco : : Util : : Application <nl>
copyData ( in , out ) ; <nl>
} <nl>
<nl>
+ process ( line ) ; <nl>
+ } <nl>
+ <nl>
+ <nl>
+ bool process ( const String & line ) <nl>
+ { <nl>
if ( config ( ) . has ( " multiquery " ) ) <nl>
{ <nl>
/ / / Several queries separated by ' ; ' . <nl>
class Client : public Poco : : Util : : Application <nl>
while ( isWhitespace ( * begin ) | | * begin = = ' ; ' ) <nl>
+ + begin ; <nl>
<nl>
- process ( query , ast ) ; <nl>
+ if ( ! processSingleQuery ( query , ast ) ) <nl>
+ return false ; <nl>
} <nl>
+ <nl>
+ return true ; <nl>
} <nl>
else <nl>
{ <nl>
- process ( line ) ; <nl>
+ return processSingleQuery ( line ) ; <nl>
} <nl>
} <nl>
<nl>
<nl>
- bool process ( const String & line , ASTPtr parsed_query_ = nullptr ) <nl>
+ bool processSingleQuery ( const String & line , ASTPtr parsed_query_ = nullptr ) <nl>
{ <nl>
if ( exit_strings . end ( ) ! = exit_strings . find ( line ) ) <nl>
return false ; <nl>
class Client : public Poco : : Util : : Application <nl>
} <nl>
<nl>
<nl>
- void onData ( Block & block ) <nl>
+ void initBlockOutputStream ( const Block & block ) <nl>
{ <nl>
- if ( written_progress_chars ) <nl>
- clearProgress ( ) ; <nl>
- <nl>
- if ( ! block ) <nl>
- return ; <nl>
- <nl>
- processed_rows + = block . rows ( ) ; <nl>
if ( ! block_std_out ) <nl>
{ <nl>
String current_format = format ; <nl>
class Client : public Poco : : Util : : Application <nl>
block_std_out = context . getFormatFactory ( ) . getOutput ( current_format , std_out , block ) ; <nl>
block_std_out - > writePrefix ( ) ; <nl>
} <nl>
+ } <nl>
+ <nl>
+ <nl>
+ void onData ( Block & block ) <nl>
+ { <nl>
+ if ( written_progress_chars ) <nl>
+ clearProgress ( ) ; <nl>
+ <nl>
+ if ( ! block ) <nl>
+ return ; <nl>
+ <nl>
+ processed_rows + = block . rows ( ) ; <nl>
+ initBlockOutputStream ( block ) ; <nl>
<nl>
- / / / The hedaer block with zero rows was used to initialize block_std_out , <nl>
+ / / / The header block with zero rows was used to initialize block_std_out , <nl>
/ / / there is no need to output it <nl>
if ( block . rows ( ) ! = 0 ) <nl>
{ <nl>
class Client : public Poco : : Util : : Application <nl>
<nl>
void onTotals ( Block & block ) <nl>
{ <nl>
+ initBlockOutputStream ( block ) ; <nl>
block_std_out - > setTotals ( block ) ; <nl>
} <nl>
<nl>
void onExtremes ( Block & block ) <nl>
{ <nl>
+ initBlockOutputStream ( block ) ; <nl>
block_std_out - > setExtremes ( block ) ; <nl>
} <nl>
<nl>
mmm a / dbms / src / Core / Block . cpp <nl>
ppp b / dbms / src / Core / Block . cpp <nl>
std : : string Block : : dumpStructure ( ) const <nl>
{ <nl>
if ( it ! = data . begin ( ) ) <nl>
res < < " , " ; <nl>
- res < < it - > name < < ' ' < < it - > type - > getName ( ) < < ' ' < < it - > column - > getName ( ) < < ' ' < < it - > column - > size ( ) ; <nl>
+ <nl>
+ res < < it - > name < < ' ' < < it - > type - > getName ( ) ; <nl>
+ <nl>
+ if ( it - > column ) <nl>
+ res < < ' ' < < it - > column - > getName ( ) < < ' ' < < it - > column - > size ( ) ; <nl>
+ else <nl>
+ res < < " nullptr " ; <nl>
} <nl>
return res . str ( ) ; <nl>
} <nl>
mmm a / dbms / src / DataStreams / FormatFactory . cpp <nl>
ppp b / dbms / src / DataStreams / FormatFactory . cpp <nl>
namespace DB <nl>
{ <nl>
<nl>
BlockInputStreamPtr FormatFactory : : getInput ( const String & name , ReadBuffer & buf , <nl>
- Block & sample , size_t max_block_size ) const <nl>
+ const Block & sample , size_t max_block_size ) const <nl>
{ <nl>
if ( name = = " Native " ) <nl>
return new NativeBlockInputStream ( buf ) ; <nl>
BlockInputStreamPtr FormatFactory : : getInput ( const String & name , ReadBuffer & bu <nl>
<nl>
<nl>
BlockOutputStreamPtr FormatFactory : : getOutput ( const String & name , WriteBuffer & buf , <nl>
- Block & sample ) const <nl>
+ const Block & sample ) const <nl>
{ <nl>
if ( name = = " Native " ) <nl>
return new NativeBlockOutputStream ( buf ) ; <nl>
mmm a / dbms / src / Functions / FunctionsArray . cpp <nl>
ppp b / dbms / src / Functions / FunctionsArray . cpp <nl>
void registerFunctionsArray ( FunctionFactory & factory ) <nl>
factory . registerFunction < FunctionEmptyArrayDate > ( ) ; <nl>
factory . registerFunction < FunctionEmptyArrayDateTime > ( ) ; <nl>
factory . registerFunction < FunctionEmptyArrayString > ( ) ; <nl>
+ factory . registerFunction < FunctionEmptyArrayToSingle > ( ) ; <nl>
factory .
registerFunction < FunctionRange > ( ) ; <nl>
} <nl>
<nl>
mmm a / dbms / src / Interpreters / Context . cpp <nl>
ppp b / dbms / src / Interpreters / Context . cpp <nl>
<nl>
# include < Poco / SharedPtr . h > <nl>
# include < Poco / Mutex . h > <nl>
# include < Poco / File . h > <nl>
+ # include < Poco / UUIDGenerator . h > <nl>
<nl>
# include < Yandex / logger_useful . h > <nl>
<nl>
struct ContextShared <nl>
/ / / Created when Distributed tables are created , because we need to wait until the Settings have been set <nl>
Poco : : SharedPtr < Clusters > clusters ; <nl>
<nl>
+ Poco : : UUIDGenerator uuid_generator ; <nl>
+ <nl>
bool shutdown_called = false ; <nl>
<nl>
<nl>
void Context : : setCurrentDatabase ( const String & name ) <nl>
<nl>
void Context : : setCurrentQueryId ( const String & query_id ) <nl>
{ <nl>
+ String query_id_to_set = query_id ; <nl>
+ if ( query_id_to_set . empty ( ) ) / / / If the user did not pass their own query_id , generate it ourselves . <nl>
+ query_id_to_set = shared - > uuid_generator . createRandom ( ) . toString ( ) ; <nl>
+ <nl>
Poco : : ScopedLock < Poco : : Mutex > lock ( shared - > mutex ) ; <nl>
- current_query_id = query_id ; <nl>
+ current_query_id = query_id_to_set ; <nl>
} <nl>
<nl>
<nl>
mmm a / dbms / src / Interpreters / ExpressionActions . cpp <nl>
ppp b / dbms / src / Interpreters / ExpressionActions . cpp <nl>
<nl>
# include < DB / DataTypes / DataTypeNested . h > <nl>
# include < DB / DataTypes / DataTypeArray . h > <nl>
# include < DB / Functions / IFunction . h > <nl>
+ # include < DB / Functions / FunctionsArray . h > <nl>
# include < set > <nl>
<nl>
<nl>
void ExpressionAction : : execute ( Block & block ) const <nl>
if ( ! any_array ) <nl>
throw Exception ( " ARRAY JOIN of not array : " + * array_joined_columns . begin ( ) , ErrorCodes : : TYPE_MISMATCH ) ; <nl>
<nl>
+ / / / In the case of LEFT ARRAY JOIN , create columns in which empty arrays are replaced by arrays with one element - the default value . <nl>
+ std : : map < String , ColumnPtr > non_empty_array_columns ; <nl>
+ if ( array_join_is_left ) <nl>
+ { <nl>
+ for ( const auto & name : array_joined_columns ) <nl>
+ { <nl>
+ auto src_col = block . getByName ( name ) ; <nl>
+ <nl>
+ Block tmp_block { src_col , { { } , src_col . type , { } } } ; <nl>
+ <nl>
+ FunctionEmptyArrayToSingle ( ) . execute ( tmp_block , { 0 } , 1 ) ; <nl>
+ non_empty_array_columns [ name ] = tmp_block . getByPosition ( 1 ) . column ; <nl>
+ } <nl>
+ <nl>
+ any_array_ptr = non_empty_array_columns . begin ( ) - > second ; <nl>
+ any_array = typeid_cast < const ColumnArray * > ( & * any_array_ptr ) ; <nl>
+ } <nl>
+ <nl>
size_t columns = block . columns ( ) ; <nl>
for ( size_t i = 0 ; i < columns ; + + i ) <nl>
{ <nl>
void ExpressionAction : : execute ( Block & block ) const <nl>
if ( ! typeid_cast < const DataTypeArray * > ( & * current . type ) ) <nl>
throw Exception ( " ARRAY JOIN of not array : " + current . name , ErrorCodes : : TYPE_MISMATCH ) ; <nl>
<nl>
- ColumnPtr array_ptr = current . column ; <nl>
+ ColumnPtr array_ptr = array_join_is_left ? non_empty_array_columns [ current . name ] : current . column ; <nl>
+ <nl>
if ( array_ptr - > isConst ( ) ) <nl>
array_ptr = dynamic_cast < const IColumnConst & > ( * array_ptr ) . convertToFullColumn ( ) ; <nl>
<nl>
std : : string ExpressionAction : : toString ( ) const <nl>
break ; <nl>
<nl>
case ARRAY_JOIN : <nl>
- ss < < " ARRAY JOIN " ; <nl>
+ ss < < ( array_join_is_left ? " LEFT " : " " ) < < " ARRAY JOIN " ; <nl>
for ( NameSet : : const_iterator it = array_joined_columns . begin ( ) ; it ! = array_joined_columns . end ( ) ; + + it ) <nl>
{ <nl>
if ( it ! = array_joined_columns . begin ( ) ) <nl>
std : : string ExpressionActions : : getID ( ) const <nl>
ss < < actions [ i ] . result_name ; <nl>
if ( actions [ i ] . type = = ExpressionAction : : ARRAY_JOIN ) <nl>
{ <nl>
- ss < < " { " ; <nl>
+ ss < < ( actions [ i ] . array_join_is_left ? " LEFT ARRAY JOIN " : " ARRAY JOIN " ) < < " { " ; <nl>
for ( NameSet : : const_iterator it = actions [ i ] . array_joined_columns . begin ( ) ; <nl>
it ! = actions [ i ] . array_joined_columns . end ( ) ; + + it ) <nl>
{ <nl>
mmm a / dbms / src / Interpreters / ExpressionAnalyzer . cpp <nl>
ppp b / dbms / src / Interpreters / ExpressionAnalyzer . cpp <nl>
const std : : unordered_set < String > possibly_injective_function_names <nl>
" dictGetDateTime " <nl>
} ; <nl>
<nl>
+ static bool functionIsInOperator ( const String & name ) <nl>
+ { <nl>
+ return name = = " in " | | name = = " notIn " ; <nl>
+ } <nl>
+ <nl>
+ static bool functionIsInOrGlobalInOperator ( const String & name ) <nl>
+ { <nl>
+ return name = = " in " | | name = = " notIn " | | name = = " globalIn " | | name = = " globalNotIn " ; <nl>
+ } <nl>
+ <nl>
+ <nl>
+ <nl>
void ExpressionAnalyzer : : init ( ) <nl>
{ <nl>
select_query = typeid_cast < ASTSelectQuery * > ( & * ast ) ; <nl>
void ExpressionAnalyzer : : init ( ) <nl>
LogicalExpressionsOptimizer logical_expressions_optimizer ( select_query , settings ) ; <nl>
logical_expressions_optimizer . optimizeDisjunctiveEqualityChains ( ) ; <nl>
<nl>
+ / / / Adds to the set of known aliases those that are declared in the table structure ( ALIAS columns ) . <nl>
addStorageAliases ( ) ; <nl>
<nl>
/ / / Creates the aliases dictionary : alias - > ASTPtr <nl>
void ExpressionAnalyzer : : init ( ) <nl>
/ / / Common subexpression elimination . Rewrite rules . <nl>
normalizeTree ( ) ; <nl>
<nl>
+ / / / Execution of scalar subqueries - replacing them with constant values . <nl>
+ executeScalarSubqueries ( ) ; <nl>
+ <nl>
/ / / GROUP BY injective function elimination . <nl>
optimizeGroupBy ( ) ; <nl>
<nl>
void ExpressionAnalyzer : : normalizeTreeImpl ( <nl>
} <nl>
<nl>
/ / / IN t may be specified , where t is a table , which is equivalent to IN ( SELECT * FROM t ) . <nl>
- if ( func_node - > name = = " in " | | func_node - > name = = " notIn " | | func_node - > name = = " globalIn " | | func_node - > name = = " globalNotIn " ) <nl>
+ if ( functionIsInOrGlobalInOperator ( func_node - > name ) ) <nl>
if ( ASTIdentifier * right = typeid_cast < ASTIdentifier * > ( & * func_node - > arguments - > children . at ( 1 ) ) ) <nl>
right - > kind = ASTIdentifier : : Table ; <nl>
<nl>
void ExpressionAnalyzer : : normalizeTreeImpl ( <nl>
finished_asts [ initial_ast ] = ast ; <nl>
} <nl>
<nl>
+ <nl>
+ void ExpressionAnalyzer : : executeScalarSubqueries ( ) <nl>
+ { <nl>
+ if ( ! select_query ) <nl>
+ executeScalarSubqueriesImpl ( ast ) ; <nl>
+ else <nl>
+ { <nl>
+ for ( auto & child : ast - > children ) <nl>
+ { <nl>
+ / / / Do not descend into FROM and JOIN . <nl>
+ if ( child . get ( ) ! = select_query - > table . get ( ) & & child . get ( ) ! = select_query - > join . get ( ) ) <nl>
+ executeScalarSubqueriesImpl ( child ) ; <nl>
+ } <nl>
+ } <nl>
+ } <nl>
+ <nl>
+ <nl>
+ static ASTPtr addTypeConversion ( ASTLiteral * ast_ , const String & type_name ) <nl>
+ { <nl>
+ if ( 0 = = type_name .
compare ( 0 , strlen ( " Array " ) , " Array " ) ) <nl>
+ return ast_ ; / / / We do not support type conversion for arrays yet . <nl>
+ <nl>
+ auto ast = std : : unique_ptr < ASTLiteral > ( ast_ ) ; <nl>
+ ASTFunction * func = new ASTFunction ( ast - > range ) ; <nl>
+ ASTPtr res = func ; <nl>
+ func - > alias = ast - > alias ; <nl>
+ ast - > alias . clear ( ) ; <nl>
+ func - > kind = ASTFunction : : FUNCTION ; <nl>
+ func - > name = " to " + type_name ; <nl>
+ ASTExpressionList * exp_list = new ASTExpressionList ( ast - > range ) ; <nl>
+ func - > arguments = exp_list ; <nl>
+ func - > children . push_back ( func - > arguments ) ; <nl>
+ exp_list - > children . push_back ( ast . release ( ) ) ; <nl>
+ return res ; <nl>
+ } <nl>
+ <nl>
+ <nl>
+ void ExpressionAnalyzer : : executeScalarSubqueriesImpl ( ASTPtr & ast ) <nl>
+ { <nl>
+ / * * Replace subqueries that return exactly one row <nl>
+ * ( " scalar " subqueries ) with the corresponding constants . <nl>
+ * <nl>
+ * If a subquery returns more than one column , it is replaced by a tuple of constants . <nl>
+ * <nl>
+ * Peculiarities : <nl>
+ * <nl>
+ * The replacement happens during query analysis , not during the main execution stage . <nl>
+ * This means that the progress indicator will not work while these queries are executed , <nl>
+ * and such queries also cannot be cancelled . <nl>
+ * <nl>
+ * On the other hand , the result of the queries can be used for an index in a table . <nl>
+ * <nl>
+ * Scalar subqueries are executed on the server that initiates the query . <nl>
+ * The query is sent to remote servers with the constants already substituted . <nl>
+ * / <nl>
+ <nl>
+ if ( ASTSubquery * subquery = typeid_cast < ASTSubquery * > ( ast . get ( ) ) ) <nl>
+ { <nl>
+ Context subquery_context = context ; <nl>
+ Settings subquery_settings = context . getSettings ( ) ; <nl>
+ subquery_settings . limits . max_result_rows = 1 ; <nl>
+ subquery_settings . extremes = 0 ; <nl>
+ subquery_context . setSettings ( subquery_settings ) ; <nl>
+ <nl>
+ ASTPtr query = subquery - > children . at ( 0 ) ; <nl>
+ BlockIO res = InterpreterSelectQuery ( query , subquery_context , QueryProcessingStage : : Complete , subquery_depth + 1 ) . execute ( ) ; <nl>
+ <nl>
+ Block block ; <nl>
+ try <nl>
+ { <nl>
+ block = res . in - > read ( ) ; <nl>
+ <nl>
+ if ( ! block ) <nl>
+ throw Exception ( " Scalar subquery returned empty result " , ErrorCodes : : INCORRECT_RESULT_OF_SCALAR_SUBQUERY ) ; <nl>
+ <nl>
+ if ( block . rows ( ) ! = 1 | | res . in - > read ( ) ) <nl>
+ throw Exception ( " Scalar subquery returned more than one row " , ErrorCodes : : INCORRECT_RESULT_OF_SCALAR_SUBQUERY ) ; <nl>
+ } <nl>
+ catch ( const Exception & e ) <nl>
+ { <nl>
+ if ( e . code ( ) = = ErrorCodes : : TOO_MUCH_ROWS ) <nl>
+ throw Exception ( " Scalar subquery returned more than one row " , ErrorCodes : : INCORRECT_RESULT_OF_SCALAR_SUBQUERY ) ; <nl>
+ else <nl>
+ throw ; <nl>
+ } <nl>
+ <nl>
+ size_t columns = block . columns ( ) ; <nl>
+ if ( columns = = 1 ) <nl>
+ { <nl>
+ ASTLiteral * lit = new ASTLiteral ( ast - > range , ( * block . getByPosition ( 0 ) . column ) [ 0 ] ) ; <nl>
+ lit - > alias = subquery - > alias ; <nl>
+ ast = addTypeConversion ( lit , block . getByPosition ( 0 ) . type - > getName ( ) ) ; <nl>
+ } <nl>
+ else <nl>
+ { <nl>
+ ASTFunction * tuple = new ASTFunction ( ast - > range ) ; <nl>
+ tuple - > alias = subquery - > alias ; <nl>
+ ast = tuple ; <nl>
+ tuple - > kind = ASTFunction : : FUNCTION ; <nl>
+ tuple - > name = " tuple " ; <nl>
+ ASTExpressionList * exp_list = new ASTExpressionList ( ast - > range ) ; <nl>
+ tuple - > arguments = exp_list ; <nl>
+ tuple - > children . push_back ( tuple - > arguments ) ; <nl>
+ <nl>
+ exp_list - > children . resize ( columns ) ; <nl>
+ for ( size_t i = 0 ; i < columns ; + + i ) <nl>
+ { <nl>
+ exp_list - > children [ i ] = addTypeConversion ( <nl>
+ new ASTLiteral ( ast - > range , ( * block . getByPosition ( i ) . column ) [ 0 ] ) , <nl>
+ block . getByPosition ( i ) . type - > getName ( ) ) ; <nl>
+ } <nl>
+ } <nl>
+ } <nl>
+ else <nl>
+ { <nl>
+ / * * Do not descend into subqueries in the arguments of IN . <nl>
+ * But if the argument is not a subquery , there may be subqueries deeper inside it , and we must descend into them . <nl>
+ * / <nl>
+ ASTFunction * func = typeid_cast < ASTFunction * > ( ast . get ( ) ) ; <nl>
+ if ( func & & func - > kind = = ASTFunction : : FUNCTION <nl>
+ & & functionIsInOrGlobalInOperator ( func - > name ) ) <nl>
+ { <nl>
+ for ( auto & child : ast - > children ) <nl>
+ { <nl>
+ if ( child . get ( ) ! = func - > arguments ) <nl>
+ executeScalarSubqueriesImpl ( child ) ; <nl>
+ else <nl>
+ for ( size_t i = 0 , size = func - > arguments - > children . size ( ) ; i < size ; + + i ) <nl>
+ if ( i ! = 1 | | ! typeid_cast < ASTSubquery * > ( func - > arguments - > children [ i ] . get ( ) ) ) <nl>
+ executeScalarSubqueriesImpl ( func - > arguments - > children [ i ] ) ; <nl>
+ } <nl>
+ } <nl>
+ else <nl>
+ for ( auto & child : ast - > children ) <nl>
+ executeScalarSubqueriesImpl ( child ) ; <nl>
+ } <nl>
+ } <nl>
+ <nl>
+ <nl>
void ExpressionAnalyzer : : optimizeGroupBy ( ) <nl>
{ <nl>
if ( ! ( select_query & & select_query - > group_expression_list ) ) <nl>
void ExpressionAnalyzer : : makeSetsForIndexImpl ( ASTPtr & node , const Block & sampl <nl>
makeSetsForIndexImpl ( child , sample_block ) ; <nl>
<nl>
ASTFunction * func = typeid_cast < ASTFunction * > ( node . get ( ) ) ; <nl>
- if ( func & & func - > kind = = ASTFunction : : FUNCTION & & ( func - > name = = " in " | | func - > name = = " notIn " ) ) <nl>
+ if ( func & & func - > kind = = ASTFunction : : FUNCTION & & functionIsInOperator ( func - > name ) ) <nl>
{ <nl>
IAST & args = * func - > arguments ; <nl>
ASTPtr & arg = args . children . at ( 1 ) ; <nl>
static SharedPtr < InterpreterSelectQuery > interpretSubquery ( <nl>
* Because the result of this subquery is not yet the result of the whole query . <nl>
* Instead , the following limits apply : <nl>
* max_rows_in_set , max_bytes_in_set , set_overflow_mode , <nl>
- * max_rows_in_join , max_bytes_in_join , join_overflow_mode . <nl>
+ * max_rows_in_join , max_bytes_in_join , join_overflow_mode , <nl>
+ * which are checked separately ( in the Set and Join objects ) . <nl>
* / <nl>
Context subquery_context = context ; <nl>
Settings subquery_settings = context . getSettings ( ) ; <nl>
void ExpressionAnalyzer : : getActionsImpl ( ASTPtr ast , bool no_subqueries , bool onl <nl>
actions_stack . addAction ( ExpressionAction : : copyColumn ( arg - > getColumnName ( ) , result_name ) ) ; <nl>
NameSet joined_columns ; <nl>
joined_columns . insert ( result_name ) ; <nl>
- actions_stack . addAction ( ExpressionAction : : arrayJoin ( joined_columns ) ) ; <nl>
+ actions_stack .
addAction ( ExpressionAction : : arrayJoin ( joined_columns , false ) ) ; <nl>
} <nl>
<nl>
return ; <nl>
void ExpressionAnalyzer : : getActionsImpl ( ASTPtr ast , bool no_subqueries , bool onl <nl>
<nl>
if ( node - > kind = = ASTFunction : : FUNCTION ) <nl>
{ <nl>
- if ( node - > name = = " in " | | node - > name = = " notIn " | | node - > name = = " globalIn " | | node - > name = = " globalNotIn " ) <nl>
+ if ( functionIsInOrGlobalInOperator ( node - > name ) ) <nl>
{ <nl>
if ( ! no_subqueries ) <nl>
{ <nl>
void ExpressionAnalyzer : : addMultipleArrayJoinAction ( ExpressionActionsPtr & actio <nl>
result_columns . insert ( result_source . first ) ; <nl>
} <nl>
<nl>
- actions - > add ( ExpressionAction : : arrayJoin ( result_columns ) ) ; <nl>
+ actions - > add ( ExpressionAction : : arrayJoin ( result_columns , select_query - > array_join_is_left ) ) ; <nl>
} <nl>
<nl>
bool ExpressionAnalyzer : : appendArrayJoin ( ExpressionActionsChain & chain , bool only_types ) <nl>
mmm a / dbms / src / Interpreters / InterpreterCreateQuery . cpp <nl>
ppp b / dbms / src / Interpreters / InterpreterCreateQuery . cpp <nl>
<nl>
# include < DB / IO / WriteHelpers . h > <nl>
<nl>
# include < DB / DataStreams / MaterializingBlockInputStream . h > <nl>
- # include < DB / DataStreams / copyData . h > <nl>
+ # include < DB / DataStreams / NullAndDoCopyBlockInputStream . h > <nl>
<nl>
# include < DB / Parsers / ASTCreateQuery . h > <nl>
# include < DB / Parsers / ASTNameTypePair . h > <nl>
InterpreterCreateQuery : : InterpreterCreateQuery ( ASTPtr query_ptr_ , Context & cont <nl>
} <nl>
<nl>
<nl>
- void InterpreterCreateQuery : : executeImpl ( bool assume_metadata_exists ) <nl>
+ BlockIO InterpreterCreateQuery : : executeImpl ( bool assume_metadata_exists ) <nl>
{ <nl>
String path = context . getPath ( ) ; <nl>
String current_database = context . getCurrentDatabase ( ) ; <nl>
void InterpreterCreateQuery : : executeImpl ( bool assume_metadata_exists ) <nl>
if ( ! create . if_not_exists | | ! context . isDatabaseExist ( database_name ) ) <nl>
context . addDatabase ( database_name ) ; <nl>
<nl>
- return ; <nl>
+ return { } ; <nl>
} <nl>
<nl>
SharedPtr < InterpreterSelectQuery > interpreter_select ; <nl>
void InterpreterCreateQuery : : executeImpl ( bool assume_metadata_exists ) <nl>
if ( context . isTableExist ( database_name , table_name ) ) <nl>
{ <nl>
if ( create . if_not_exists ) <nl>
- return ; <nl>
+ return { } ; <nl>
else <nl>
throw Exception ( " Table " + database_name + " . " + table_name + " already exists . " , ErrorCodes : : TABLE_ALREADY_EXISTS ) ; <nl>
} <nl>
void InterpreterCreateQuery : : executeImpl ( bool assume_metadata_exists ) <nl>
/ / / If the query is CREATE SELECT , insert the data into the table <nl>
if ( create . select & & storage_name ! = " View " & & ( storage_name ! = " MaterializedView " | | create . is_populate ) ) <nl>
{ <nl>
- BlockInputStreamPtr from = new MaterializingBlockInputStream ( interpreter_select - > execute ( ) . in ) ; <nl>
- copyData ( * from , * res - > write ( query_ptr ) ) ; <nl>
+ BlockIO io ; <nl>
+ io . in_sample = select_sample ; <nl>
+ io . in = new NullAndDoCopyBlockInputStream ( <nl>
+ new MaterializingBlockInputStream ( interpreter_select - > execute ( ) . in ) , <nl>
+ res - > write ( query_ptr ) ) ; <nl>
+ <nl>
+ return io ; <nl>
} <nl>
+ <nl>
+ return { } ; <nl>
} <nl>
<nl>
InterpreterCreateQuery : : ColumnsAndDefaults InterpreterCreateQuery : : parseColumns ( ASTPtr expression_list ) <nl>
mmm a / dbms / src / Interpreters / InterpreterInsertQuery . cpp <nl>
ppp b / dbms / src / Interpreters / InterpreterInsertQuery . cpp <nl>
BlockIO InterpreterInsertQuery : : execute ( ) <nl>
InterpreterSelectQuery interpreter_select { query . select , context } ; <nl>
BlockInputStreamPtr in { interpreter_select . execute ( ) . in } ; <nl>
res . in = new NullAndDoCopyBlockInputStream { in , out } ; <nl>
+ res . in_sample = interpreter_select . getSampleBlock ( ) ; <nl>
} <nl>
<nl>
return res ; <nl>
mmm a / dbms / src / Interpreters / InterpreterSelectQuery . cpp <nl>
ppp b / dbms / src / Interpreters / InterpreterSelectQuery . cpp <nl>
BlockIO InterpreterSelectQuery : : execute ( ) <nl>
/ / / Limits on the result , the quota on the result , and also the callback for progress . <nl>
if ( IProfilingBlockInputStream * stream = dynamic_cast < IProfilingBlockInputStream * > ( & * streams [ 0 ] ) ) <nl>
{ <nl>
- stream - > setProgressCallback ( context . getProgressCallback ( ) ) ; <nl>
- stream - > setProcessListElement ( context . getProcessListElement ( ) ) ; <nl>
- <nl>
/ / / The limits apply only to the final result . <nl>
if ( to_stage = = QueryProcessingStage : : Complete ) <nl>
{ <nl>
mmm a / dbms / src / Interpreters / executeQuery . cpp <nl>
ppp b / dbms / src / Interpreters / executeQuery . cpp <nl>
static std : : tuple < ASTPtr , BlockIO > executeQueryImpl ( <nl>
/ / / Keep the process list element until the end of query processing . <nl>
res . process_list_entry = process_list_entry ; <nl>
<nl>
+ if ( res . in ) <nl>
+ { <nl>
+ if ( IProfilingBlockInputStream * stream = dynamic_cast < IProfilingBlockInputStream * > ( res . in . get ( ) ) ) <nl>
+ { <nl>
+ stream - > setProgressCallback ( context . getProgressCallback ( ) ) ; <nl>
+ stream - > setProcessListElement ( context . getProcessListElement ( ) ) ; <nl>
+ } <nl>
+ } <nl>
+ <nl>
quota . addQuery ( current_time ) ; <nl>
<nl>
/ / / Everything related to the query log . <nl>
mmm a / dbms / src / Parsers / ExpressionElementParsers . cpp <nl>
ppp b / dbms / src / Parsers / ExpressionElementParsers . cpp <nl>
bool ParserWithOptionalAlias : : parseImpl ( Pos & pos , Pos end , ASTPtr & node , Pos & <nl>
{ <nl>
String alias_name = typeid_cast < ASTIdentifier & > ( * alias_node ) . name ; <nl>
<nl>
- if ( ASTFunction * func = typeid_cast < ASTFunction * > ( & * node ) ) <nl>
- func - > alias = alias_name ; <nl>
- else if ( ASTIdentifier * ident = typeid_cast < ASTIdentifier * > ( & * node ) ) <nl>
- ident - > alias = alias_name ; <nl>
- else if ( ASTLiteral * lit = typeid_cast < ASTLiteral * > ( & * node ) ) <nl>
- lit - > alias = alias_name ; <nl>
+ if ( ASTWithAlias * ast_with_alias = dynamic_cast < ASTWithAlias * > ( node . get ( ) ) ) <nl>
+ ast_with_alias - > alias = alias_name ; <nl>
else <nl>
{ <nl>
expected = " alias cannot be here " ; <nl>
mmm a / dbms / src / Parsers / ParserJoin . cpp <nl>
ppp b / dbms / src / Parsers / ParserJoin . cpp <nl>
bool ParserJoin : : parseImpl ( Pos & pos , Pos end , ASTPtr & node , Pos & max_parsed_p <nl>
ParserString s_using ( " USING " , true , true ) ; <nl>
<nl>
ParserNotEmptyExpressionList exp_list ; <nl>
- ParserSubquery subquery ; <nl>
+ ParserWithOptionalAlias subquery ( ParserPtr ( new ParserSubquery ) ) ; <nl>
ParserIdentifier identifier ; <nl>
<nl>
ws . ignore ( pos , end ) ; <nl>
bool ParserJoin : : parseImpl ( Pos & pos , Pos end , ASTPtr & node , Pos & max_parsed_p <nl>
<nl>
ws . ignore ( pos , end ) ; <nl>
<nl>
- / / / An alias may be specified . At the moment , it means nothing and is not used . <nl>
- ParserAlias ( ) .
ignore ( pos , end ) ; <nl> - ws . ignore ( pos , end ) ; <nl> - <nl> if ( join - > kind ! = ASTJoin : : Cross ) <nl> { <nl> if ( ! s_using . ignore ( pos , end , max_parsed_pos , expected ) ) <nl> mmm a / dbms / src / Parsers / ParserSelectQuery . cpp <nl> ppp b / dbms / src / Parsers / ParserSelectQuery . cpp <nl> bool ParserSelectQuery : : parseImpl ( Pos & pos , Pos end , ASTPtr & node , Pos & max_p <nl> ParserString s_select ( " SELECT " , true , true ) ; <nl> ParserString s_distinct ( " DISTINCT " , true , true ) ; <nl> ParserString s_from ( " FROM " , true , true ) ; <nl> + ParserString s_left ( " LEFT " , true , true ) ; <nl> ParserString s_array ( " ARRAY " , true , true ) ; <nl> ParserString s_join ( " JOIN " , true , true ) ; <nl> ParserString s_using ( " USING " , true , true ) ; <nl> bool ParserSelectQuery : : parseImpl ( Pos & pos , Pos end , ASTPtr & node , Pos & max_p <nl> if ( ! parse_final_and_sample ( ) ) <nl> return false ; <nl> <nl> - / / / ARRAY JOIN expr list <nl> - if ( s_array . ignore ( pos , end , max_parsed_pos , expected ) ) <nl> + / / / [ LEFT ] ARRAY JOIN expr list <nl> + Pos saved_pos = pos ; <nl> + bool has_array_join = false ; <nl> + if ( s_left . ignore ( pos , end , max_parsed_pos , expected ) & & ws . ignore ( pos , end ) & & s_array . ignore ( pos , end , max_parsed_pos , expected ) ) <nl> + { <nl> + select_query - > array_join_is_left = true ; <nl> + has_array_join = true ; <nl> + } <nl> + else <nl> + { <nl> + pos = saved_pos ; <nl> + if ( s_array . ignore ( pos , end , max_parsed_pos , expected ) ) <nl> + has_array_join = true ; <nl> + } <nl> + <nl> + if ( has_array_join ) <nl> { <nl> ws . ignore ( pos , end ) ; <nl> <nl> bool ParserSelectQuery : : parseImpl ( Pos & pos , Pos end , ASTPtr & node , Pos & max_p <nl> ws . ignore ( pos , end ) ; <nl> } <nl> <nl> - / / / [ GLOBAL ] ANY | ALL INNER | LEFT JOIN ( subquery ) USING tuple <nl> + / / / [ GLOBAL ] [ ANY | ALL ] INNER | LEFT | RIGHT | FULL | CROSS [ OUTER ] JOIN ( subquery ) | table_name USING tuple <nl> join . parse ( pos , end , select_query - > join , max_parsed_pos , expected ) ; <nl> <nl> if ( ! parse_final_and_sample ( ) ) <nl> mmm a / dbms / src / Parsers / formatAST . cpp <nl> ppp b / dbms / src / Parsers / formatAST . cpp <nl> String backQuoteIfNeed ( const String & x ) <nl> } <nl> <nl> <nl> - String hightlight ( const String & keyword , const String & color_sequence , const bool hilite ) <nl> + static String hightlight ( const String & keyword , const String & color_sequence , const bool hilite ) <nl> { <nl> return hilite ? color_sequence + keyword + hilite_none : keyword ; <nl> } <nl> <nl> <nl> + static void writeAlias ( const String & name , std : : ostream & s , bool hilite , bool one_line ) <nl> + { <nl> + s < < ( hilite ? hilite_keyword : " " ) < < " AS " < < ( hilite ? hilite_alias : " " ) ; <nl> + <nl> + WriteBufferFromOStream wb ( s , 32 ) ; <nl> + writeProbablyBackQuotedString ( name , wb ) ; <nl> + wb . next ( ) ; <nl> + <nl> + s < < ( hilite ? hilite_none : " " ) ; <nl> + } <nl> + <nl> + <nl> void formatAST ( const ASTExpressionList & ast , std : : ostream & s , size_t indent , bool hilite , bool one_line , bool need_parens ) <nl> { <nl> for ( ASTs : : const_iterator it = ast . children . begin ( ) ; it ! = ast . children . end ( ) ; + + it ) <nl> void formatAST ( const ASTSelectQuery & ast , std : : ostream & s , size_t indent , bo <nl> <nl> if ( ast . array_join_expression_list ) <nl> { <nl> - s < < ( hilite ? 
hilite_keyword : " " ) < < nl_or_ws < < indent_str < < " ARRAY JOIN " < < ( hilite ? hilite_none : " " ) ; <nl> + s < < ( hilite ? hilite_keyword : " " ) < < nl_or_ws < < indent_str <nl> + < < ( ast . array_join_is_left ? " LEFT " : " " ) < < " ARRAY JOIN " < < ( hilite ? hilite_none : " " ) ; <nl> + <nl> one_line <nl> ? formatAST ( * ast . array_join_expression_list , s , indent , hilite , one_line ) <nl> : formatExpressionListMultiline ( typeid_cast < const ASTExpressionList & > ( * ast . array_join_expression_list ) , s , indent , hilite ) ; <nl> void formatAST ( const ASTSelectQuery & ast , std : : ostream & s , size_t indent , bo <nl> <nl> void formatAST ( const ASTSubquery & ast , std : : ostream & s , size_t indent , bool hilite , bool one_line , bool need_parens ) <nl> { <nl> + / / / Если есть алиас , то требуются скобки вокруг всего выражения , включая алиас . Потому что запись вида 0 AS x + 0 синтаксически некорректна . <nl> + if ( need_parens & & ! ast . alias . empty ( ) ) <nl> + s < < ' ( ' ; <nl> + <nl> std : : string indent_str = one_line ? " " : std : : string ( 4 * indent , ' ' ) ; <nl> std : : string nl_or_nothing = one_line ? " " : " \ n " ; <nl> <nl> s < < nl_or_nothing < < indent_str < < " ( " < < nl_or_nothing ; <nl> formatAST ( * ast . children [ 0 ] , s , indent + 1 , hilite , one_line ) ; <nl> s < < nl_or_nothing < < indent_str < < " ) " ; <nl> + <nl> + if ( ! ast . alias . empty ( ) ) <nl> + { <nl> + writeAlias ( ast . alias , s , hilite , one_line ) ; <nl> + if ( need_parens ) <nl> + s < < ' ) ' ; <nl> + } <nl> } <nl> <nl> void formatAST ( const ASTCreateQuery & ast , std : : ostream & s , size_t indent , bool hilite , bool one_line , bool need_parens ) <nl> void formatAST ( const ASTInsertQuery & ast , std : : ostream & s , size_t indent , bo <nl> } <nl> } <nl> <nl> - static void writeAlias ( const String & name , std : : ostream & s , bool hilite , bool one_line ) <nl> - { <nl> - s < < ( hilite ? hilite_keyword : " " ) < < " AS " < < ( hilite ? hilite_alias : " " ) ; <nl> - <nl> - WriteBufferFromOStream wb ( s , 32 ) ; <nl> - writeProbablyBackQuotedString ( name , wb ) ; <nl> - wb . next ( ) ; <nl> - <nl> - s < < ( hilite ? hilite_none : " " ) ; <nl> - } <nl> - <nl> void formatAST ( const ASTFunction & ast , std : : ostream & s , size_t indent , bool hilite , bool one_line , bool need_parens ) <nl> { <nl> / / / Если есть алиас , то требуются скобки вокруг всего выражения , включая алиас . Потому что запись вида 0 AS x + 0 синтаксически некорректна . <nl> mmm a / dbms / src / Storages / StorageSystemColumns . cpp <nl> ppp b / dbms / src / Storages / StorageSystemColumns . cpp <nl> <nl> # include < DB / Storages / StorageSystemColumns . h > <nl> + # include < DB / Storages / MergeTree / MergeTreeData . h > <nl> + # include < DB / Storages / StorageMergeTree . h > <nl> + # include < DB / Storages / StorageReplicatedMergeTree . h > <nl> # include < DB / Columns / ColumnString . h > <nl> # include < DB / DataTypes / DataTypeString . h > <nl> + # include < DB / DataTypes / DataTypesNumberFixed . h > <nl> # include < DB / DataStreams / OneBlockInputStream . h > <nl> # include < DB / Common / VirtualColumnUtils . 
h > <nl> <nl> StorageSystemColumns : : StorageSystemColumns ( const std : : string & name_ ) <nl> { " name " , new DataTypeString } , <nl> { " type " , new DataTypeString } , <nl> { " default_type " , new DataTypeString } , <nl> - { " default_expression " , new DataTypeString } <nl> + { " default_expression " , new DataTypeString } , <nl> + { " bytes " , new DataTypeUInt64 } , <nl> } <nl> { <nl> } <nl> BlockInputStreams StorageSystemColumns : : read ( <nl> ColumnPtr type_column = new ColumnString ; <nl> ColumnPtr default_type_column = new ColumnString ; <nl> ColumnPtr default_expression_column = new ColumnString ; <nl> + ColumnPtr bytes_column = new ColumnUInt64 ; <nl> <nl> size_t rows = filtered_database_column - > size ( ) ; <nl> for ( size_t i = 0 ; i < rows ; + + i ) <nl> BlockInputStreams StorageSystemColumns : : read ( <nl> <nl> NamesAndTypesList columns ; <nl> ColumnDefaults column_defaults ; <nl> + std : : unordered_map < String , size_t > column_sizes ; <nl> <nl> { <nl> StoragePtr storage = storages . at ( std : : make_pair ( database_name , table_name ) ) ; <nl> BlockInputStreams StorageSystemColumns : : read ( <nl> columns = storage - > getColumnsList ( ) ; <nl> columns . insert ( std : : end ( columns ) , std : : begin ( storage - > alias_columns ) , std : : end ( storage - > alias_columns ) ) ; <nl> column_defaults = storage - > column_defaults ; <nl> + <nl> + / * * Данные о размерах столбцов для таблиц семейства MergeTree . <nl> + * NOTE : В дальнейшем можно сделать интерфейс , позволяющий получить размеры столбцов у IStorage . <nl> + * / <nl> + if ( auto storage_concrete = dynamic_cast < StorageMergeTree * > ( storage . get ( ) ) ) <nl> + { <nl> + column_sizes = storage_concrete - > getData ( ) . getColumnSizes ( ) ; <nl> + } <nl> + else if ( auto storage_concrete = dynamic_cast < StorageReplicatedMergeTree * > ( storage . get ( ) ) ) <nl> + { <nl> + column_sizes = storage_concrete - > getData ( ) . getColumnSizes ( ) ; <nl> + <nl> + auto unreplicated_data = storage_concrete - > getUnreplicatedData ( ) ; <nl> + if ( unreplicated_data ) <nl> + { <nl> + auto unreplicated_column_sizes = unreplicated_data - > getColumnSizes ( ) ; <nl> + for ( const auto & name_size : unreplicated_column_sizes ) <nl> + column_sizes [ name_size . first ] + = name_size . second ; <nl> + } <nl> + } <nl> } <nl> <nl> for ( const auto & column : columns ) <nl> BlockInputStreams StorageSystemColumns : : read ( <nl> name_column - > insert ( column . name ) ; <nl> type_column - > insert ( column . type - > getName ( ) ) ; <nl> <nl> - const auto it = column_defaults . find ( column . name ) ; <nl> - if ( it = = std : : end ( column_defaults ) ) <nl> { <nl> - default_type_column - > insertDefault ( ) ; <nl> - default_expression_column - > insertDefault ( ) ; <nl> + const auto it = column_defaults . find ( column . name ) ; <nl> + if ( it = = std : : end ( column_defaults ) ) <nl> + { <nl> + default_type_column - > insertDefault ( ) ; <nl> + default_expression_column - > insertDefault ( ) ; <nl> + } <nl> + else <nl> + { <nl> + default_type_column - > insert ( toString ( it - > second . type ) ) ; <nl> + default_expression_column - > insert ( queryToString ( it - > second . expression ) ) ; <nl> + } <nl> } <nl> - else <nl> + <nl> { <nl> - default_type_column - > insert ( toString ( it - > second . type ) ) ; <nl> - default_expression_column - > insert ( queryToString ( it - > second . expression ) ) ; <nl> + const auto it = column_sizes . find ( column . 
name ) ; <nl> + if ( it = = std : : end ( column_sizes ) ) <nl> + bytes_column - > insertDefault ( ) ; <nl> + else <nl> + bytes_column - > insert ( it - > second ) ; <nl> } <nl> } <nl> } <nl> BlockInputStreams StorageSystemColumns : : read ( <nl> block . insert ( ColumnWithTypeAndName ( type_column , new DataTypeString , " type " ) ) ; <nl> block . insert ( ColumnWithTypeAndName ( default_type_column , new DataTypeString , " default_type " ) ) ; <nl> block . insert ( ColumnWithTypeAndName ( default_expression_column , new DataTypeString , " default_expression " ) ) ; <nl> + block . insert ( ColumnWithTypeAndName ( bytes_column , new DataTypeUInt64 , " bytes " ) ) ; <nl> <nl> return BlockInputStreams { 1 , new OneBlockInputStream ( block ) } ; <nl> } <nl> new file mode 100644 <nl> index 00000000000 . . 58c9bdf9d01 <nl> mmm / dev / null <nl> ppp b / dbms / tests / queries / 0_stateless / 00204_extract_url_parameter . reference <nl> @ @ - 0 , 0 + 1 @ @ <nl> + 111 <nl> new file mode 100644 <nl> index 00000000000 . . d6ca5b31333 <nl> mmm / dev / null <nl> ppp b / dbms / tests / queries / 0_stateless / 00204_extract_url_parameter . sql <nl> @ @ - 0 , 0 + 1 @ @ <nl> + SELECT extractURLParameter ( ' http : / / test . com / ? testq = aaa & q = 111 ' , ' q ' ) ; <nl> new file mode 100644 <nl> index 00000000000 . . 7b3ebbc7519 <nl> mmm / dev / null <nl> ppp b / dbms / tests / queries / 0_stateless / 00205_scalar_subqueries . reference <nl> <nl> + 1 1 <nl> + 1 <nl> + 1 1 <nl> + ( ' 2015 - 01 - 02 ' , ' Hello ' ) <nl> + ( ' 2015 - 01 - 02 ' , ' Hello ' ) ( ' 2015 - 01 - 02 ' , ' Hello ' ) 1 1 <nl> new file mode 100644 <nl> index 00000000000 . . f924ff291ea <nl> mmm / dev / null <nl> ppp b / dbms / tests / queries / 0_stateless / 00205_scalar_subqueries . sql <nl> <nl> + SELECT ( SELECT ( SELECT ( SELECT ( SELECT ( SELECT count ( ) FROM ( SELECT * FROM system . numbers LIMIT 10 ) ) ) ) ) ) = ( SELECT 10 ) , ( ( SELECT 1 , ' Hello ' , [ 1 , 2 ] ) . 3 ) [ 1 ] ; <nl> + SELECT toUInt64 ( ( SELECT 9 ) ) IN ( SELECT number FROM system . numbers LIMIT 10 ) ; <nl> + SELECT ( SELECT toDate ( ' 2015 - 01 - 02 ' ) ) = toDate ( ' 2015 - 01 - 02 ' ) , ' Hello ' = ( SELECT ' Hello ' ) ; <nl> + SELECT ( SELECT toDate ( ' 2015 - 01 - 02 ' ) , ' Hello ' ) ; <nl> + SELECT ( SELECT toDate ( ' 2015 - 01 - 02 ' ) , ' Hello ' ) AS x , x , identity ( ( SELECT 1 ) ) , identity ( ( SELECT 1 ) AS y ) ; <nl> new file mode 100644 <nl> index 00000000000 . . e4e7e38fa36 <nl> mmm / dev / null <nl> ppp b / dbms / tests / queries / 0_stateless / 00206_empty_array_to_single . 
reference <nl> <nl> + [ 1 , 2 ] <nl> + [ 0 ] <nl> + [ 4 , 5 , 6 ] <nl> + [ ' ' ] [ ' 0000 - 00 - 00 ' ] [ ' 0000 - 00 - 00 00 : 00 : 00 ' ] <nl> + [ 0 ] [ ' ' ] [ ' 0000 - 00 - 00 00 : 00 : 00 ' ] [ ' 0000 - 00 - 00 ' ] <nl> + [ 0 ] [ ' 0 ' ] [ ' 2015 - 01 - 01 00 : 00 : 00 ' ] [ ' 2015 - 01 - 01 ' ] <nl> + [ 0 , 1 ] [ ' ' ] [ ' 2015 - 01 - 01 00 : 00 : 00 ' , ' 2015 - 01 - 01 00 : 00 : 01 ' ] [ ' 2015 - 01 - 01 ' , ' 2015 - 01 - 02 ' ] <nl> + [ 0 ] [ ' 0 ' ] [ ' 2015 - 01 - 01 00 : 00 : 00 ' , ' 2015 - 01 - 01 00 : 00 : 01 ' , ' 2015 - 01 - 01 00 : 00 : 02 ' ] [ ' 2015 - 01 - 01 ' , ' 2015 - 01 - 02 ' , ' 2015 - 01 - 03 ' ] <nl> + [ 0 ] [ ' ' ] [ ' 2015 - 01 - 01 00 : 00 : 00 ' , ' 2015 - 01 - 01 00 : 00 : 01 ' , ' 2015 - 01 - 01 00 : 00 : 02 ' , ' 2015 - 01 - 01 00 : 00 : 03 ' ] [ ' 0000 - 00 - 00 ' ] <nl> + [ 0 , 1 ] [ ' 0 ' ] [ ' 0000 - 00 - 00 00 : 00 : 00 ' ] [ ' 2015 - 01 - 01 ' ] <nl> + [ 0 ] [ ' ' ] [ ' 2015 - 01 - 01 00 : 00 : 00 ' ] [ ' 2015 - 01 - 01 ' , ' 2015 - 01 - 02 ' ] <nl> + [ 0 ] [ ' 0 ' ] [ ' 2015 - 01 - 01 00 : 00 : 00 ' , ' 2015 - 01 - 01 00 : 00 : 01 ' ] [ ' 2015 - 01 - 01 ' , ' 2015 - 01 - 02 ' , ' 2015 - 01 - 03 ' ] <nl> + [ 0 , 1 ] [ ' ' ] [ ' 2015 - 01 - 01 00 : 00 : 00 ' , ' 2015 - 01 - 01 00 : 00 : 01 ' , ' 2015 - 01 - 01 00 : 00 : 02 ' ] [ ' 0000 - 00 - 00 ' ] <nl> + [ 0 ] [ ' 0 ' ] [ ' 2015 - 01 - 01 00 : 00 : 00 ' , ' 2015 - 01 - 01 00 : 00 : 01 ' , ' 2015 - 01 - 01 00 : 00 : 02 ' , ' 2015 - 01 - 01 00 : 00 : 03 ' ] [ ' 2015 - 01 - 01 ' ] <nl> new file mode 100644 <nl> index 00000000000 . . 0ad2975fa7f <nl> mmm / dev / null <nl> ppp b / dbms / tests / queries / 0_stateless / 00206_empty_array_to_single . sql <nl> <nl> + SELECT emptyArrayToSingle ( arrayFilter ( x - > x ! = 99 , arrayJoin ( [ [ 1 , 2 ] , [ 99 ] , [ 4 , 5 , 6 ] ] ) ) ) ; <nl> + SELECT emptyArrayToSingle ( emptyArrayString ( ) ) , emptyArrayToSingle ( emptyArrayDate ( ) ) , emptyArrayToSingle ( emptyArrayDateTime ( ) ) ; <nl> + <nl> + SELECT <nl> + emptyArrayToSingle ( range ( number % 3 ) ) , <nl> + emptyArrayToSingle ( arrayMap ( x - > toString ( x ) , range ( number % 2 ) ) ) , <nl> + emptyArrayToSingle ( arrayMap ( x - > toDateTime ( ' 2015 - 01 - 01 00 : 00 : 00 ' ) + x , range ( number % 5 ) ) ) , <nl> + emptyArrayToSingle ( arrayMap ( x - > toDate ( ' 2015 - 01 - 01 ' ) + x , range ( number % 4 ) ) ) FROM system . numbers LIMIT 10 ; <nl> new file mode 100644 <nl> index 00000000000 . . 10ec6a7a16f <nl> mmm / dev / null <nl> ppp b / dbms / tests / queries / 0_stateless / 00207_left_array_join . reference <nl> <nl> + 0 <nl> + 1 <nl> + 2 <nl> + 2 <nl> + 3 <nl> + 4 <nl> + 5 <nl> + 5 <nl> + 6 <nl> + 7 <nl> + 0 [ ] 0 <nl> + 1 [ 0 ] 0 <nl> + 2 [ 0 , 1 ] 0 <nl> + 2 [ 0 , 1 ] 1 <nl> + 3 [ ] 0 <nl> + 4 [ 0 ] 0 <nl> + 5 [ 0 , 1 ] 0 <nl> + 5 [ 0 , 1 ] 1 <nl> + 6 [ ] 0 <nl> + 7 [ 0 ] 0 <nl> + 8 [ 0 , 1 ] 0 <nl> + 8 [ 0 , 1 ] 1 <nl> + 9 [ ] 0 <nl> new file mode 100644 <nl> index 00000000000 . . 8186054c250 <nl> mmm / dev / null <nl> ppp b / dbms / tests / queries / 0_stateless / 00207_left_array_join . sql <nl> <nl> + SELECT number FROM system . numbers LEFT ARRAY JOIN range ( number % 3 ) AS arr LIMIT 10 ; <nl> + SELECT number , arr , x FROM ( SELECT number , range ( number % 3 ) AS arr FROM system . numbers LIMIT 10 ) LEFT ARRAY JOIN arr AS x ; <nl> new file mode 100644 <nl> index 00000000000 . . d21a7aa01e4 <nl> mmm / dev / null <nl> ppp b / dbms / tests / queries / 0_stateless / 00208_agg_state_merge . 
reference <nl> <nl> + 0 15 15 <nl> + 1 14 14 <nl> + 2 14 14 <nl> + 3 15 15 <nl> + 4 9 9 <nl> + 5 9 9 <nl> + 6 9 9 <nl> new file mode 100644 <nl> index 00000000000 . . 3f30f66dd44 <nl> mmm / dev / null <nl> ppp b / dbms / tests / queries / 0_stateless / 00208_agg_state_merge . sql <nl> @ @ - 0 , 0 + 1 @ @ <nl> + SELECT k % 7 AS k2 , finalizeAggregation ( uniqMergeState ( state ) ) , uniqMerge ( state ) FROM ( SELECT k , uniqState ( x ) AS state FROM ( SELECT number % 11 AS k , intDiv ( number , 7 ) AS x FROM system . numbers LIMIT 100 ) GROUP BY k ) GROUP BY k2 ORDER BY k2 ; <nl> new file mode 100644 <nl> index 00000000000 . . e86726625a1 <nl> mmm / dev / null <nl> ppp b / dbms / tests / queries / 0_stateless / 00209_insert_select_extremes . reference <nl> <nl> + <nl> + 1 <nl> + 1 <nl> + <nl> + 0 <nl> + <nl> + 0 <nl> + <nl> + 1 <nl> + 1 <nl> + 4 1 1 <nl> new file mode 100644 <nl> index 00000000000 . . 0d632992b67 <nl> mmm / dev / null <nl> ppp b / dbms / tests / queries / 0_stateless / 00209_insert_select_extremes . sql <nl> <nl> + DROP TABLE IF EXISTS test . test ; <nl> + CREATE TABLE test . test ( x UInt8 ) ENGINE = Log ; <nl> + <nl> + INSERT INTO test . test SELECT 1 AS x ; <nl> + INSERT INTO test . test SELECT 1 AS x SETTINGS extremes = 1 ; <nl> + INSERT INTO test . test SELECT 1 AS x GROUP BY 1 WITH TOTALS ; <nl> + INSERT INTO test . test SELECT 1 AS x GROUP BY 1 WITH TOTALS SETTINGS extremes = 1 ; <nl> + <nl> + SELECT count ( ) , min ( x ) , max ( x ) FROM test . test ; <nl> + <nl> + DROP TABLE test . test ; <nl> new file mode 100644 <nl> index 00000000000 . . 016f3290af0 <nl> mmm / dev / null <nl> ppp b / dbms / tests / queries / 0_stateless / 00210_insert_select_extremes_http . reference <nl> <nl> + <nl> + 1 <nl> + 1 <nl> new file mode 100755 <nl> index 00000000000 . . e9b82bccfa5 <nl> mmm / dev / null <nl> ppp b / dbms / tests / queries / 0_stateless / 00210_insert_select_extremes_http . sh <nl> <nl> + # ! / bin / bash <nl> + <nl> + curl - sS http : / / localhost : 8123 / ? extremes = 1 - d @ - < < < " DROP TABLE IF EXISTS test . test " <nl> + curl - sS http : / / localhost : 8123 / ? extremes = 1 - d @ - < < < " CREATE TABLE test . test ( x UInt8 ) ENGINE = Log " <nl> + curl - sS http : / / localhost : 8123 / ? extremes = 1 - d @ - < < < " INSERT INTO test . test SELECT 1 AS x " <nl> + curl - sS http : / / localhost : 8123 / ? extremes = 1 - d @ - < < < " DROP TABLE test . test " <nl>
Merge
ClickHouse/ClickHouse
d4b880dbaf5f0386ce9017d1d93e921ba57d1884
2015-07-29T10:00:21Z
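
For reference, the (values, offsets) rewrite that the FunctionEmptyArrayToSingle hunk in the diff above implements can be sketched outside ClickHouse. The following self-contained C++ sketch uses illustrative names only (it is not ClickHouse code): every empty array in a ColumnArray-style pair of value and offset vectors is replaced by a one-element array holding a default value, which is exactly what LEFT ARRAY JOIN relies on to keep rows whose arrays are empty.

#include <cstdint>
#include <iostream>
#include <vector>

// offsets[i] is the cumulative end position of array i, as in ColumnArray;
// array i occupies values[offsets[i - 1] .. offsets[i]).
static void emptyArrayToSingle(std::vector<int64_t> & values,
                               std::vector<uint64_t> & offsets,
                               int64_t default_value = 0)
{
    std::vector<int64_t> res_values;
    std::vector<uint64_t> res_offsets;
    uint64_t prev = 0;
    uint64_t res_prev = 0;

    for (size_t i = 0; i < offsets.size(); ++i)
    {
        uint64_t size = offsets[i] - prev;
        if (size == 0)
        {
            res_values.push_back(default_value);  // materialize [default_value]
            res_prev += 1;
        }
        else
        {
            res_values.insert(res_values.end(),
                values.begin() + prev, values.begin() + offsets[i]);
            res_prev += size;
        }
        res_offsets.push_back(res_prev);
        prev = offsets[i];
    }

    values.swap(res_values);
    offsets.swap(res_offsets);
}

int main()
{
    // Three rows: [1, 2], [], [4, 5, 6].
    std::vector<int64_t> values{1, 2, 4, 5, 6};
    std::vector<uint64_t> offsets{2, 2, 5};
    emptyArrayToSingle(values, offsets);
    for (int64_t v : values)
        std::cout << v << ' ';  // prints: 1 2 0 4 5 6
    std::cout << '\n';
}

The empty second row becomes [0], matching the first rows of the 00206_empty_array_to_single reference file in the record above.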
mmm a / lib / Migrator / APIDiffMigratorPass . cpp <nl> ppp b / lib / Migrator / APIDiffMigratorPass . cpp <nl> struct APIDiffMigratorPass : public ASTMigratorPass , public SourceEntityWalker { <nl> } <nl> } <nl> <nl> + / / If a property has changed from nonnull to nullable , we should add ! to the <nl> + / / reference of the property . <nl> + bool handlePropertyTypeChange ( Expr * E ) { <nl> + if ( auto MRE = dyn_cast < MemberRefExpr > ( E ) ) { <nl> + if ( auto * VD = MRE - > getReferencedDecl ( ) . getDecl ( ) ) { <nl> + for ( auto * I : getRelatedDiffItems ( VD ) ) { <nl> + if ( auto * Item = dyn_cast < CommonDiffItem > ( I ) ) { <nl> + if ( Item - > DiffKind = = NodeAnnotation : : WrapOptional & & <nl> + Item - > NodeKind = = SDKNodeKind : : DeclVar ) { <nl> + Editor . insertAfterToken ( E - > getEndLoc ( ) , " ! " ) ; <nl> + return true ; <nl> + } <nl> + } <nl> + } <nl> + } <nl> + } <nl> + return false ; <nl> + } <nl> + <nl> bool walkToExprPre ( Expr * E ) override { <nl> if ( E - > getSourceRange ( ) . isInvalid ( ) ) <nl> return false ; <nl> struct APIDiffMigratorPass : public ASTMigratorPass , public SourceEntityWalker { <nl> return false ; <nl> if ( handleAttributeReference ( E ) ) <nl> return false ; <nl> + if ( handlePropertyTypeChange ( E ) ) <nl> + return false ; <nl> if ( auto * CE = dyn_cast < CallExpr > ( E ) ) { <nl> auto Fn = CE - > getFn ( ) ; <nl> auto Args = CE - > getArg ( ) ; <nl> mmm a / test / Migrator / Inputs / CallExpr . json <nl> ppp b / test / Migrator / Inputs / CallExpr . json <nl> <nl> " RightUsr " : " " , <nl> " RightComment " : " " , <nl> " ModuleName " : " Cities " <nl> + } , <nl> + { <nl> + " DiffItemKind " : " CommonDiffItem " , <nl> + " NodeKind " : " Var " , <nl> + " NodeAnnotation " : " WrapOptional " , <nl> + " ChildIndex " : " 0 " , <nl> + " LeftUsr " : " s : 6CitiesAAC4nameSSvp " , <nl> + " LeftComment " : " " , <nl> + " RightUsr " : " " , <nl> + " RightComment " : " " , <nl> + " ModuleName " : " Cities " <nl> } <nl> ] <nl> mmm a / test / Migrator / Inputs / Cities . swift <nl> ppp b / test / Migrator / Inputs / Cities . swift <nl> <nl> open class Cities { <nl> var x : Int <nl> + public var name : String = " " <nl> public init ( x : Int ) { self . x = x } <nl> public init ! ( y : Int ) { self . x = y } <nl> open func mooloolaba ( x : Cities , y : Cities ? ) { } <nl> mmm a / test / Migrator / call_expr_result . swift <nl> ppp b / test / Migrator / call_expr_result . swift <nl> func foo ( ) { <nl> let c1 = Cities ( x : 3 ) <nl> _ = Cities . init ( x : 3 ) <nl> _ = c1 . noosa ( ) <nl> - } <nl> \ No newline at end of file <nl> + _ = c1 . name <nl> + bar ( c1 . name ) <nl> + } <nl> + <nl> + func bar ( _ n : String ) { } <nl> mmm a / test / Migrator / call_expr_result . swift . expected <nl> ppp b / test / Migrator / call_expr_result . swift . expected <nl> func foo ( ) { <nl> let c1 = Cities ( x : 3 ) ! <nl> _ = Cities . init ( x : 3 ) ! <nl> _ = c1 . noosa ( ) ! <nl> - } <nl> \ No newline at end of file <nl> + _ = c1 . name ! <nl> + bar ( c1 . name ! ) <nl> + } <nl> + <nl> + func bar ( _ n : String ) { } <nl>
migrator : add ! to property access whose definition changes from nonnull to nullable . rdar : / / 48090648
apple/swift
2be1ca0c8afbc7ccaf1309efbef3d352d7e04f78
2019-02-14T23:28:52Z
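
The JSON diff item added in the record above is what drives the new check in handlePropertyTypeChange. A much-simplified, hypothetical model of that decision is sketched below in C++; the enum and struct names mirror the Swift compiler's, but everything here is a stand-in, not the real migrator types.

#include <iostream>
#include <vector>

enum class NodeAnnotation { WrapOptional, UnwrapOptional, Rename };
enum class SDKNodeKind { DeclVar, DeclFunction };

struct DiffItem
{
    NodeAnnotation annotation;
    SDKNodeKind kind;
};

// True when some diff item for the referenced declaration says a variable's
// type was wrapped in Optional, i.e. it changed from nonnull to nullable,
// so the migrator should append '!' after the property reference.
static bool needsForceUnwrap(const std::vector<DiffItem> & itemsForDecl)
{
    for (const DiffItem & item : itemsForDecl)
        if (item.annotation == NodeAnnotation::WrapOptional
            && item.kind == SDKNodeKind::DeclVar)
            return true;
    return false;
}

int main()
{
    // Corresponds to the "Var" + "WrapOptional" entry added to CallExpr.json:
    // a reference such as c1.name would be rewritten to c1.name! .
    std::vector<DiffItem> items{{NodeAnnotation::WrapOptional, SDKNodeKind::DeclVar}};
    std::cout << (needsForceUnwrap(items) ? "append '!'" : "leave as-is") << '\n';
}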
mmm a / xbmc / cores / VideoPlayer / VideoRenderers / LinuxRendererGLES . cpp <nl> ppp b / xbmc / cores / VideoPlayer / VideoRenderers / LinuxRendererGLES . cpp <nl> static PFNEGLCLIENTWAITSYNCKHRPROC eglClientWaitSyncKHR ; <nl> <nl> using namespace Shaders ; <nl> <nl> - CLinuxRendererGLES : : YUVBUFFER : : YUVBUFFER ( ) <nl> + CLinuxRendererGLES : : CPictureBuffer : : CPictureBuffer ( ) <nl> { <nl> memset ( & fields , 0 , sizeof ( fields ) ) ; <nl> memset ( & image , 0 , sizeof ( image ) ) ; <nl> CLinuxRendererGLES : : YUVBUFFER : : YUVBUFFER ( ) <nl> loaded = false ; <nl> } <nl> <nl> - CLinuxRendererGLES : : YUVBUFFER : : ~ YUVBUFFER ( ) <nl> - { <nl> - } <nl> + CLinuxRendererGLES : : CPictureBuffer : : ~ CPictureBuffer ( ) = default ; <nl> <nl> CLinuxRendererGLES : : CLinuxRendererGLES ( ) <nl> { <nl> bool CLinuxRendererGLES : : Configure ( const VideoPicture & picture , float fps , unsig <nl> m_renderOrientation = orientation ; <nl> <nl> m_iFlags = GetFlagsChromaPosition ( picture . chroma_position ) | <nl> - GetFlagsColorMatrix ( picture . color_space , picture . iWidth , picture . iHeight ) | <nl> - GetFlagsColorPrimaries ( picture . color_primaries ) | <nl> GetFlagsStereoMode ( picture . stereoMode ) ; <nl> <nl> + m_srcPrimaries = GetSrcPrimaries ( static_cast < AVColorPrimaries > ( picture . color_primaries ) , <nl> + picture . iWidth , picture . iHeight ) ; <nl> + <nl> / / Calculate the input frame aspect ratio . <nl> CalculateFrameAspectRatio ( picture . iDisplayWidth , picture . iDisplayHeight ) ; <nl> SetViewMode ( m_videoSettings . m_ViewMode ) ; <nl> int CLinuxRendererGLES : : NextYV12Texture ( ) <nl> <nl> void CLinuxRendererGLES : : AddVideoPicture ( const VideoPicture & picture , int index , double currentClock ) <nl> { <nl> - YUVBUFFER & buf = m_buffers [ index ] ; <nl> + CPictureBuffer & buf = m_buffers [ index ] ; <nl> buf . videoBuffer = picture . videoBuffer ; <nl> buf . videoBuffer - > Acquire ( ) ; <nl> buf . loaded = false ; <nl> + buf . m_srcPrimaries = static_cast < AVColorPrimaries > ( picture . color_primaries ) ; <nl> + buf . m_srcColSpace = static_cast < AVColorSpace > ( picture . color_space ) ; <nl> + buf . m_srcFullRange = picture . color_range = = 1 ; <nl> + buf . m_srcBits = picture . colorBits ; <nl> + <nl> + buf . hasDisplayMetadata = picture . hasDisplayMetadata ; <nl> + buf . displayMetadata = picture . displayMetadata ; <nl> + buf . lightMetadata = picture . lightMetadata ; <nl> + if ( picture . hasLightMetadata & & picture . lightMetadata . MaxCLL ) <nl> + buf . hasLightMetadata = picture . hasLightMetadata ; <nl> } <nl> <nl> void CLinuxRendererGLES : : ReleaseBuffer ( int idx ) <nl> { <nl> - YUVBUFFER & buf = m_buffers [ idx ] ; <nl> + CPictureBuffer & buf = m_buffers [ idx ] ; <nl> if ( buf . videoBuffer ) <nl> { <nl> buf . videoBuffer - > Release ( ) ; <nl> void CLinuxRendererGLES : : ReleaseBuffer ( int idx ) <nl> <nl> void CLinuxRendererGLES : : CalculateTextureSourceRects ( int source , int num_planes ) <nl> { <nl> - YUVBUFFER & buf = m_buffers [ source ] ; <nl> + CPictureBuffer & buf = m_buffers [ source ] ; <nl> YuvImage * im = & buf . image ; <nl> <nl> / / calculate the source rectangle <nl> void CLinuxRendererGLES : : RenderUpdate ( int index , int index2 , bool clear , unsigne <nl> return ; <nl> } <nl> <nl> - YUVBUFFER & buf = m_buffers [ index ] ; <nl> + CPictureBuffer & buf = m_buffers [ index ] ; <nl> <nl> if ( ! buf . fields [ FIELD_FULL ] [ 0 ] . 
id ) <nl> return ; <nl> bool CLinuxRendererGLES : : RenderCapture ( CRenderCapture * capture ) <nl> / / * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * <nl> bool CLinuxRendererGLES : : UploadYV12Texture ( int source ) <nl> { <nl> - YUVBUFFER & buf = m_buffers [ source ] ; <nl> + CPictureBuffer & buf = m_buffers [ source ] ; <nl> YuvImage * im = & buf . image ; <nl> <nl> VerifyGLState ( ) ; <nl> bool CLinuxRendererGLES : : CreateYV12Texture ( int index ) <nl> / / * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * <nl> bool CLinuxRendererGLES : : UploadNV12Texture ( int source ) <nl> { <nl> - YUVBUFFER & buf = m_buffers [ source ] ; <nl> + CPictureBuffer & buf = m_buffers [ source ] ; <nl> YuvImage * im = & buf . image ; <nl> <nl> bool deinterlacing ; <nl> bool CLinuxRendererGLES : : UploadNV12Texture ( int source ) <nl> bool CLinuxRendererGLES : : CreateNV12Texture ( int index ) <nl> { <nl> / / since we also want the field textures , pitch must be texture aligned <nl> - YUVBUFFER & buf = m_buffers [ index ] ; <nl> + CPictureBuffer & buf = m_buffers [ index ] ; <nl> YuvImage & im = buf . image ; <nl> <nl> / / Delete any old texture <nl> bool CLinuxRendererGLES : : CreateNV12Texture ( int index ) <nl> } <nl> void CLinuxRendererGLES : : DeleteNV12Texture ( int index ) <nl> { <nl> - YUVBUFFER & buf = m_buffers [ index ] ; <nl> + CPictureBuffer & buf = m_buffers [ index ] ; <nl> YuvImage & im = buf . image ; <nl> <nl> if ( buf . fields [ FIELD_FULL ] [ 0 ] . id = = 0 ) <nl> void CLinuxRendererGLES : : SetTextureFilter ( GLenum method ) <nl> { <nl> for ( int i = 0 ; i < m_NumYV12Buffers ; i + + ) <nl> { <nl> - YUVBUFFER & buf = m_buffers [ i ] ; <nl> + CPictureBuffer & buf = m_buffers [ i ] ; <nl> <nl> for ( int f = FIELD_FULL ; f < = FIELD_BOT ; f + + ) <nl> { <nl> bool CLinuxRendererGLES : : IsGuiLayer ( ) <nl> { <nl> return true ; <nl> } <nl> + <nl> + AVColorPrimaries CLinuxRendererGLES : : GetSrcPrimaries ( AVColorPrimaries srcPrimaries , unsigned int width , unsigned int height ) <nl> + { <nl> + AVColorPrimaries ret = srcPrimaries ; <nl> + if ( ret = = AVCOL_PRI_UNSPECIFIED ) <nl> + { <nl> + if ( width > 1024 | | height > = 600 ) <nl> + ret = AVCOL_PRI_BT709 ; <nl> + else <nl> + ret = AVCOL_PRI_BT470BG ; <nl> + } <nl> + return ret ; <nl> + } <nl> mmm a / xbmc / cores / VideoPlayer / VideoRenderers / LinuxRendererGLES . h <nl> ppp b / xbmc / cores / VideoPlayer / VideoRenderers / LinuxRendererGLES . 
h <nl> class CLinuxRendererGLES : public CBaseRenderer <nl> virtual void ReleaseShaders ( ) ; <nl> void SetTextureFilter ( GLenum method ) ; <nl> void UpdateVideoFilter ( ) ; <nl> + AVColorPrimaries GetSrcPrimaries ( AVColorPrimaries srcPrimaries , unsigned int width , unsigned int height ) ; <nl> <nl> / / textures <nl> virtual bool UploadTexture ( int index ) ; <nl> class CLinuxRendererGLES : public CBaseRenderer <nl> unsigned pixpertex_y ; <nl> } ; <nl> <nl> - struct YUVBUFFER <nl> + struct CPictureBuffer <nl> { <nl> - YUVBUFFER ( ) ; <nl> - ~ YUVBUFFER ( ) ; <nl> + CPictureBuffer ( ) ; <nl> + ~ CPictureBuffer ( ) ; <nl> <nl> YUVPLANE fields [ MAX_FIELDS ] [ YuvImage : : MAX_PLANES ] ; <nl> YuvImage image ; <nl> <nl> CVideoBuffer * videoBuffer ; <nl> bool loaded ; <nl> + <nl> + AVColorPrimaries m_srcPrimaries ; <nl> + AVColorSpace m_srcColSpace ; <nl> + int m_srcBits = 8 ; <nl> + int m_srcTextureBits = 8 ; <nl> + bool m_srcFullRange ; <nl> + <nl> + bool hasDisplayMetadata = false ; <nl> + AVMasteringDisplayMetadata displayMetadata ; <nl> + bool hasLightMetadata = false ; <nl> + AVContentLightMetadata lightMetadata ; <nl> } ; <nl> <nl> / / YV12 decoder textures <nl> / / field index 0 is full image , 1 is odd scanlines , 2 is even scanlines <nl> - YUVBUFFER m_buffers [ NUM_BUFFERS ] ; <nl> + CPictureBuffer m_buffers [ NUM_BUFFERS ] ; <nl> <nl> void LoadPlane ( YUVPLANE & plane , int type , <nl> unsigned width , unsigned height , <nl> class CLinuxRendererGLES : public CBaseRenderer <nl> ESCALINGMETHOD m_scalingMethod ; <nl> ESCALINGMETHOD m_scalingMethodGui ; <nl> bool m_fullRange ; <nl> + AVColorPrimaries m_srcPrimaries ; <nl> <nl> / / clear colour for " black " bars <nl> float m_clearColour ; <nl>
LinuxRendererGLES : update YUVBUFFER to CPictureBuffer
xbmc/xbmc
bd1ee3d44433f764293ee1fc3957c45b3e1f0b9f
2018-06-13T06:24:05Z
mmm a / arangod / RestHandler / RestAdminLogHandler . cpp <nl> ppp b / arangod / RestHandler / RestAdminLogHandler . cpp <nl> bool RestAdminLogHandler : : isDirect ( ) const { return true ; } <nl> / / / Restricts the result to at most * size * log entries . <nl> / / / <nl> / / / @ RESTQUERYPARAM { offset , number , optional } <nl> - / / / Starts to return log entries skipping the first * offset * log entries . <nl> - / / / * offset * <nl> + / / / Starts to return log entries skipping the first * offset * log entries . * offset * <nl> / / / and * size * can be used for pagination . <nl> / / / <nl> / / / @ RESTQUERYPARAM { search , string , optional } <nl> bool RestAdminLogHandler : : isDirect ( ) const { return true ; } <nl> / / / imposes a chronological order . The default value is * asc * . <nl> / / / <nl> / / / @ RESTDESCRIPTION <nl> - / / / Returns fatal , error , warning or info log messages from the server ' s global <nl> - / / / log . <nl> + / / / Returns fatal , error , warning or info log messages from the server ' s global log . <nl> / / / The result is a JSON object with the following attributes : <nl> / / / <nl> / / / - * lid * : a list of log entry identifiers . Each log message is uniquely <nl> bool RestAdminLogHandler : : isDirect ( ) const { return true ; } <nl> / / / <nl> / / / - * level * : a list of the log - levels for all log entries . <nl> / / / <nl> - / / / - * timestamp * : a list of the timestamps as seconds since 1970 - 01 - 01 for all <nl> - / / / log <nl> + / / / - * timestamp * : a list of the timestamps as seconds since 1970 - 01 - 01 for all log <nl> / / / entries . <nl> / / / <nl> / / / - * text * a list of the texts of all log entries <nl> mmm a / arangod / RestHandler / RestBatchHandler . cpp <nl> ppp b / arangod / RestHandler / RestBatchHandler . cpp <nl> RestBatchHandler : : ~ RestBatchHandler ( ) { } <nl> / / / @ startDocuBlock JSF_batch_processing <nl> / / / @ brief executes a batch request <nl> / / / <nl> - / / / @ RESTHEADER { POST / _api / batch , executes a batch request } / / / TODOSWAGGER : <nl> - / / / contentype <nl> + / / / @ RESTHEADER { POST / _api / batch , executes a batch request } / / / TODOSWAGGER : contentype <nl> / / / <nl> / / / @ RESTALLBODYPARAM { body , string , required } <nl> / / / The multipart batch request , consisting of the envelope and the individual <nl> mmm a / arangod / RestHandler / RestCursorHandler . cpp <nl> ppp b / arangod / RestHandler / RestCursorHandler . cpp <nl> triagens : : basics : : Json RestCursorHandler : : buildExtra ( <nl> / / / contains the query string to be executed <nl> / / / <nl> / / / @ RESTBODYPARAM { count , boolean , optional , } <nl> - / / / indicates whether the number of documents in the result set should be <nl> - / / / returned in <nl> + / / / indicates whether the number of documents in the result set should be returned in <nl> / / / the " count " attribute of the result . <nl> / / / Calculating the " count " attribute might in the future have a performance <nl> / / / impact for some queries so this option is turned off by default , and " count " <nl> triagens : : basics : : Json RestCursorHandler : : buildExtra ( <nl> / / / @ RESTBODYPARAM { batchSize , integer , optional , int64 } <nl> / / / maximum number of result documents to be transferred from <nl> / / / the server to the client in one roundtrip . If this attribute is <nl> - / / / not set , a server - controlled default value will be used . 
A * batchSize * value <nl> - / / / of <nl> + / / / not set , a server - controlled default value will be used . A * batchSize * value of <nl> / / / * 0 * is disallowed . <nl> / / / <nl> / / / @ RESTBODYPARAM { ttl , integer , optional , int64 } <nl> triagens : : basics : : Json RestCursorHandler : : buildExtra ( <nl> / / / <nl> / / / @ RESTBODYPARAM { cache , boolean , optional , } <nl> / / / flag to determine whether the AQL query cache <nl> - / / / shall be used . If set to * false * , then any query cache lookup will be <nl> - / / / skipped <nl> - / / / for the query . If set to * true * , it will lead to the query cache being <nl> - / / / checked <nl> + / / / shall be used . If set to * false * , then any query cache lookup will be skipped <nl> + / / / for the query . If set to * true * , it will lead to the query cache being checked <nl> / / / for the query if the query cache mode is either * on * or * demand * . <nl> / / / <nl> / / / @ RESTBODYPARAM { bindVars , array , optional , object } <nl> triagens : : basics : : Json RestCursorHandler : : buildExtra ( <nl> / / / <nl> / / / @ RESTSTRUCT { fullCount , JSF_post_api_cursor_opts , boolean , optional , } <nl> / / / if set to * true * and the query contains a * LIMIT * clause , then the <nl> - / / / result will contain an extra attribute * extra * with a sub - attribute <nl> - / / / * fullCount * . <nl> - / / / This sub - attribute will contain the number of documents in the result before <nl> - / / / the <nl> - / / / last LIMIT in the query was applied . It can be used to count the number of <nl> - / / / documents that <nl> + / / / result will contain an extra attribute * extra * with a sub - attribute * fullCount * . <nl> + / / / This sub - attribute will contain the number of documents in the result before the <nl> + / / / last LIMIT in the query was applied . It can be used to count the number of documents that <nl> / / / match certain filter criteria , but only return a subset of them , in one go . <nl> - / / / It is thus similar to MySQL ' s * SQL_CALC_FOUND_ROWS * hint . Note that setting <nl> - / / / the option <nl> - / / / will disable a few LIMIT optimizations and may lead to more documents being <nl> - / / / processed , <nl> - / / / and thus make queries run longer . Note that the * fullCount * sub - attribute <nl> - / / / will only <nl> - / / / be present in the result if the query has a LIMIT clause and the LIMIT <nl> - / / / clause is <nl> + / / / It is thus similar to MySQL ' s * SQL_CALC_FOUND_ROWS * hint . Note that setting the option <nl> + / / / will disable a few LIMIT optimizations and may lead to more documents being processed , <nl> + / / / and thus make queries run longer . Note that the * fullCount * sub - attribute will only <nl> + / / / be present in the result if the query has a LIMIT clause and the LIMIT clause is <nl> / / / actually used in the query . <nl> / / / <nl> / / / @ RESTSTRUCT { maxPlans , JSF_post_api_cursor_opts , integer , optional , int64 } <nl> - / / / limits the maximum number of plans that are created by the AQL query <nl> - / / / optimizer . <nl> + / / / limits the maximum number of plans that are created by the AQL query optimizer . <nl> / / / <nl> / / / @ RESTSTRUCT { optimizer . rules , JSF_post_api_cursor_opts , array , optional , string } <nl> / / / a list of to - be - included or to - be - excluded optimizer rules <nl> / / / can be put into this attribute , telling the optimizer to include or exclude <nl> - / / / specific rules . 
To disable a rule , prefix its name with a ` - ` , to enable a <nl> - / / / rule , prefix it <nl> - / / / with a ` + ` . There is also a pseudo - rule ` all ` , which will match all <nl> - / / / optimizer rules . <nl> + / / / specific rules . To disable a rule , prefix its name with a ` - ` , to enable a rule , prefix it <nl> + / / / with a ` + ` . There is also a pseudo - rule ` all ` , which will match all optimizer rules . <nl> / / / <nl> / / / @ RESTSTRUCT { profile , JSF_post_api_cursor_opts , boolean , optional , } <nl> / / / if set to * true * , then the additional query profiling information <nl> - / / / will be returned in the * extra . stats * return attribute if the query result <nl> - / / / is not <nl> + / / / will be returned in the * extra . stats * return attribute if the query result is not <nl> / / / served from the query cache . <nl> / / / <nl> / / / @ RESTDESCRIPTION <nl> triagens : : basics : : Json RestCursorHandler : : buildExtra ( <nl> / / / <nl> / / / @ RESTREPLYBODY { extra , object , optional , } <nl> / / / an optional JSON object with extra information about the query result <nl> - / / / contained in its * stats * sub - attribute . For data - modification queries , the <nl> - / / / * extra . stats * sub - attribute will contain the number of modified documents <nl> - / / / and <nl> + / / / contained in its * stats * sub - attribute . For data - modification queries , the <nl> + / / / * extra . stats * sub - attribute will contain the number of modified documents and <nl> / / / the number of documents that could not be modified <nl> / / / due to an error ( if * ignoreErrors * query option is specified ) <nl> / / / <nl> / / / @ RESTREPLYBODY { cached , boolean , required , } <nl> - / / / a boolean flag indicating whether the query result was served <nl> + / / / a boolean flag indicating whether the query result was served <nl> / / / from the query cache or not . If the query result is served from the query <nl> - / / / cache , the * extra * return attribute will not contain any * stats * <nl> - / / / sub - attribute <nl> + / / / cache , the * extra * return attribute will not contain any * stats * sub - attribute <nl> / / / and no * profile * sub - attribute . <nl> / / / <nl> / / / @ RESTRETURNCODE { 400 } <nl> - / / / is returned if the JSON representation is malformed or the query <nl> - / / / specification is <nl> + / / / is returned if the JSON representation is malformed or the query specification is <nl> / / / missing from the request . <nl> / / / <nl> / / / If the JSON representation is malformed or the query specification is <nl> triagens : : basics : : Json RestCursorHandler : : buildExtra ( <nl> / / / @ RESTREPLYBODY { errorMessage , string , required , string } <nl> / / / a descriptive error message <nl> / / / <nl> - / / / If the query specification is complete , the server will process the query . <nl> - / / / If an <nl> - / / / error occurs during query processing , the server will respond with * HTTP <nl> - / / / 400 * . <nl> + / / / If the query specification is complete , the server will process the query . If an <nl> + / / / error occurs during query processing , the server will respond with * HTTP 400 * . <nl> / / / Again , the body of the response will contain details about the error . <nl> / / / <nl> / / / A [ list of query errors can be found here ] ( . . / ErrorCodes / README . md ) . <nl> triagens : : basics : : Json RestCursorHandler : : buildExtra ( <nl> / / / accessed in the query . 
<nl> / / / <nl> / / / @ RESTRETURNCODE { 405 } <nl> - / / / The server will respond with * HTTP 405 * if an unsupported HTTP method is <nl> - / / / used . <nl> + / / / The server will respond with * HTTP 405 * if an unsupported HTTP method is used . <nl> / / / <nl> / / / @ EXAMPLES <nl> / / / <nl> triagens : : basics : : Json RestCursorHandler : : buildExtra ( <nl> / / / @ EXAMPLE_ARANGOSH_RUN { RestCursorOptimizerRules } <nl> / / / var url = " / _api / cursor " ; <nl> / / / var body = { <nl> - / / / query : " FOR i IN 1 . . 10 LET a = 1 LET b = 2 FILTER a + b = = 3 " + <nl> - / / / " RETURN i " , <nl> + / / / query : " FOR i IN 1 . . 10 LET a = 1 LET b = 2 FILTER a + b = = 3 RETURN i " , <nl> / / / count : true , <nl> / / / options : { <nl> / / / maxPlans : 1 , <nl> triagens : : basics : : Json RestCursorHandler : : buildExtra ( <nl> / / / logJsonResponse ( response ) ; <nl> / / / @ END_EXAMPLE_ARANGOSH_RUN <nl> / / / <nl> - / / / Bad query - Execute a data - modification query that attempts to remove a <nl> - / / / non - existing <nl> + / / / Bad query - Execute a data - modification query that attempts to remove a non - existing <nl> / / / document <nl> / / / <nl> / / / @ EXAMPLE_ARANGOSH_RUN { RestCursorDeleteQueryFail } <nl> void RestCursorHandler : : createCursor ( ) { <nl> / / / @ startDocuBlock JSF_post_api_cursor_identifier <nl> / / / @ brief return the next results from an existing cursor <nl> / / / <nl> - / / / @ RESTHEADER { PUT / _api / cursor / { cursor - identifier } , Read next batch from <nl> - / / / cursor } <nl> + / / / @ RESTHEADER { PUT / _api / cursor / { cursor - identifier } , Read next batch from cursor } <nl> / / / <nl> / / / @ RESTURLPARAMETERS <nl> / / / <nl> void RestCursorHandler : : createCursor ( ) { <nl> / / / The server will respond with * HTTP 200 * in case of success . <nl> / / / <nl> / / / @ RESTRETURNCODE { 400 } <nl> - / / / If the cursor identifier is omitted , the server will respond with * HTTP <nl> - / / / 404 * . <nl> + / / / If the cursor identifier is omitted , the server will respond with * HTTP 404 * . <nl> / / / <nl> / / / @ RESTRETURNCODE { 404 } <nl> - / / / If no cursor with the specified identifier can be found , the server will <nl> - / / / respond <nl> + / / / If no cursor with the specified identifier can be found , the server will respond <nl> / / / with * HTTP 404 * . <nl> / / / <nl> / / / @ EXAMPLES <nl> mmm a / arangod / RestHandler / RestDocumentHandler . cpp <nl> ppp b / arangod / RestHandler / RestDocumentHandler . cpp <nl> HttpHandler : : status_t RestDocumentHandler : : execute ( ) { <nl> / / / created if it does not yet exist . Other values will be ignored so the <nl> / / / collection must be present for the operation to succeed . <nl> / / / <nl> - / / / * * Note * * : this flag is not supported in a cluster . Using it will result in <nl> - / / / an <nl> + / / / * * Note * * : this flag is not supported in a cluster . Using it will result in an <nl> / / / error . <nl> / / / <nl> / / / @ RESTQUERYPARAM { waitForSync , boolean , optional } <nl> HttpHandler : : status_t RestDocumentHandler : : execute ( ) { <nl> / / / Optionally , the query parameter * waitForSync * can be used to force <nl> / / / synchronization of the document creation operation to disk even in case that <nl> / / / the * waitForSync * flag had been disabled for the entire collection . 
Thus , <nl> - / / / the * waitForSync * query parameter can be used to force synchronization of <nl> - / / / just <nl> + / / / the * waitForSync * query parameter can be used to force synchronization of just <nl> / / / this specific operations . To use this , set the * waitForSync * parameter to <nl> / / / * true * . If the * waitForSync * parameter is not specified or set to * false * , <nl> / / / then the collection ' s default * waitForSync * behavior is applied . The <nl> bool RestDocumentHandler : : checkDocument ( ) { <nl> / / / using the * if - match * HTTP header . <nl> / / / <nl> / / / @ RESTDESCRIPTION <nl> - / / / Completely updates ( i . e . replaces ) the document identified by <nl> - / / / * document - handle * . <nl> + / / / Completely updates ( i . e . replaces ) the document identified by * document - handle * . <nl> / / / If the document exists and can be updated , then a * HTTP 201 * is returned <nl> / / / and the " ETag " header field contains the new revision of the document . <nl> / / / <nl> bool RestDocumentHandler : : checkDocument ( ) { <nl> / / / <nl> / / / The body of the response contains a JSON object with the information about <nl> / / / the handle and the revision . The attribute * _id * contains the known <nl> - / / / * document - handle * of the updated document , * _key * contains the key which <nl> - / / / uniquely identifies a document in a given collection , and the attribute <nl> - / / / * _rev * <nl> + / / / * document - handle * of the updated document , * _key * contains the key which <nl> + / / / uniquely identifies a document in a given collection , and the attribute * _rev * <nl> / / / contains the new document revision . <nl> / / / <nl> / / / If the document does not exist , then a * HTTP 404 * is returned and the <nl> bool RestDocumentHandler : : checkDocument ( ) { <nl> / / / <nl> / / / There are two ways for specifying the targeted document revision id for <nl> / / / conditional replacements ( i . e . replacements that will only be executed if <nl> - / / / the revision id found in the database matches the document revision id <nl> - / / / specified <nl> + / / / the revision id found in the database matches the document revision id specified <nl> / / / in the request ) : <nl> / / / - specifying the target revision in the * rev * URL query parameter <nl> / / / - specifying the target revision in the * if - match * HTTP header <nl> bool RestDocumentHandler : : checkDocument ( ) { <nl> / / / Specifying a target revision is optional , however , if done , only one of the <nl> / / / described mechanisms must be used ( either the * rev * query parameter or the <nl> / / / * if - match * HTTP header ) . <nl> - / / / Regardless which mechanism is used , the parameter needs to contain the <nl> - / / / target <nl> + / / / Regardless which mechanism is used , the parameter needs to contain the target <nl> / / / document revision id as returned in the * _rev * attribute of a document or <nl> / / / by an HTTP * etag * header . <nl> / / / <nl> - / / / For example , to conditionally replace a document based on a specific <nl> - / / / revision <nl> + / / / For example , to conditionally replace a document based on a specific revision <nl> / / / id , you can use the following request : <nl> / / / <nl> / / / <nl> / / / ` PUT / _api / document / document - handle ? rev = etag ` <nl> / / / <nl> / / / <nl> - / / / If a target revision id is provided in the request ( e . g . 
via the * etag * <nl> - / / / value <nl> + / / / If a target revision id is provided in the request ( e . g . via the * etag * value <nl> / / / in the * rev * URL query parameter above ) , ArangoDB will check that <nl> / / / the revision id of the document found in the database is equal to the target <nl> - / / / revision id provided in the request . If there is a mismatch between the <nl> - / / / revision <nl> + / / / revision id provided in the request . If there is a mismatch between the revision <nl> / / / id , then by default a * HTTP 412 * conflict is returned and no replacement is <nl> / / / performed . <nl> / / / <nl> / / / <nl> - / / / The conditional update behavior can be overridden with the * policy * URL <nl> - / / / query parameter : <nl> + / / / The conditional update behavior can be overridden with the * policy * URL query parameter : <nl> / / / <nl> / / / <nl> / / / ` PUT / _api / document / document - handle ? policy = policy ` <nl> bool RestDocumentHandler : : checkDocument ( ) { <nl> / / / revision id specified in the request . <nl> / / / <nl> / / / If * policy * is set to * last * , then the replacement will succeed , even if the <nl> - / / / revision id found in the database does not match the target revision id <nl> - / / / specified <nl> + / / / revision id found in the database does not match the target revision id specified <nl> / / / in the request . You can use the * last * * policy * to force replacements . <nl> / / / <nl> / / / @ RESTRETURNCODES <nl> bool RestDocumentHandler : : checkDocument ( ) { <nl> / / / var url = " / _api / document / " + document . _id ; <nl> / / / var headers = { " If - Match " : " \ " " + document2 . _rev + " \ " " } ; <nl> / / / <nl> - / / / var response = logCurlRequest ( ' PUT ' , url , ' { " other " : " content " } ' , <nl> - / / / headers ) ; <nl> + / / / var response = logCurlRequest ( ' PUT ' , url , ' { " other " : " content " } ' , headers ) ; <nl> / / / <nl> / / / assert ( response . code = = = 412 ) ; <nl> / / / <nl> bool RestDocumentHandler : : replaceDocument ( ) { return modifyDocument ( false ) ; } <nl> / / / <nl> / / / The body of the response contains a JSON object with the information about <nl> / / / the handle and the revision . The attribute * _id * contains the known <nl> - / / / * document - handle * of the updated document , * _key * contains the key which <nl> - / / / uniquely identifies a document in a given collection , and the attribute <nl> - / / / * _rev * <nl> + / / / * document - handle * of the updated document , * _key * contains the key which <nl> + / / / uniquely identifies a document in a given collection , and the attribute * _rev * <nl> / / / contains the new document revision . <nl> / / / <nl> / / / If the document does not exist , then a * HTTP 404 * is returned and the <nl> bool RestDocumentHandler : : replaceDocument ( ) { return modifyDocument ( false ) ; } <nl> / / / assert ( response . code = = = 202 ) ; <nl> / / / <nl> / / / logJsonResponse ( response ) ; <nl> - / / / var response2 = logCurlRequest ( " PATCH " , url , { " numbers " : { " one " : 1 , <nl> - / / / " two " : 2 , " three " : 3 , " empty " : null } } ) ; <nl> + / / / var response2 = logCurlRequest ( " PATCH " , url , { " numbers " : { " one " : 1 , " two " : 2 , " three " : 3 , " empty " : null } } ) ; <nl> / / / assert ( response2 . code = = = 202 ) ; <nl> / / / logJsonResponse ( response2 ) ; <nl> / / / var response3 = logCurlRequest ( " GET " , url ) ; <nl> / / / assert ( response3 . 
code = = = 200 ) ; <nl> / / / logJsonResponse ( response3 ) ; <nl> - / / / var response4 = logCurlRequest ( " PATCH " , url + " ? keepNull = false " , { <nl> - / / / " hello " : null , " numbers " : { " four " : 4 } } ) ; <nl> + / / / var response4 = logCurlRequest ( " PATCH " , url + " ? keepNull = false " , { " hello " : null , " numbers " : { " four " : 4 } } ) ; <nl> / / / assert ( response4 . code = = = 202 ) ; <nl> / / / logJsonResponse ( response4 ) ; <nl> / / / var response5 = logCurlRequest ( " GET " , url ) ; <nl> bool RestDocumentHandler : : replaceDocument ( ) { return modifyDocument ( false ) ; } <nl> / / / db . _drop ( cn ) ; <nl> / / / db . _create ( cn ) ; <nl> / / / <nl> - / / / var document = <nl> - / / / db . products . save ( { " inhabitants " : { " china " : 1366980000 , " india " : 1263590000 , " usa " : 319220000 } } ) ; <nl> + / / / var document = db . products . save ( { " inhabitants " : { " china " : 1366980000 , " india " : 1263590000 , " usa " : 319220000 } } ) ; <nl> / / / var url = " / _api / document / " + document . _id ; <nl> / / / <nl> / / / var response = logCurlRequest ( " GET " , url ) ; <nl> / / / assert ( response . code = = = 200 ) ; <nl> / / / logJsonResponse ( response ) ; <nl> / / / <nl> - / / / var response = logCurlRequest ( " PATCH " , url + " ? mergeObjects = true " , { <nl> - / / / " inhabitants " : { " indonesia " : 252164800 , " brazil " : 203553000 } } ) ; <nl> + / / / var response = logCurlRequest ( " PATCH " , url + " ? mergeObjects = true " , { " inhabitants " : { " indonesia " : 252164800 , " brazil " : 203553000 } } ) ; <nl> / / / assert ( response . code = = = 202 ) ; <nl> / / / <nl> / / / var response2 = logCurlRequest ( " GET " , url ) ; <nl> / / / assert ( response2 . code = = = 200 ) ; <nl> / / / logJsonResponse ( response2 ) ; <nl> / / / <nl> - / / / var response3 = logCurlRequest ( " PATCH " , url + " ? mergeObjects = false " , { <nl> - / / / " inhabitants " : { " pakistan " : 188346000 } } ) ; <nl> + / / / var response3 = logCurlRequest ( " PATCH " , url + " ? mergeObjects = false " , { " inhabitants " : { " pakistan " : 188346000 } } ) ; <nl> / / / assert ( response3 . code = = = 202 ) ; <nl> / / / logJsonResponse ( response3 ) ; <nl> / / / <nl> bool RestDocumentHandler : : modifyDocumentCoordinator ( <nl> / / / The body of the response contains a JSON object with the information about <nl> / / / the handle and the revision . The attribute * _id * contains the known <nl> / / / * document - handle * of the removed document , * _key * contains the key which <nl> - / / / uniquely identifies a document in a given collection , and the attribute <nl> - / / / * _rev * <nl> + / / / uniquely identifies a document in a given collection , and the attribute * _rev * <nl> / / / contains the new document revision . <nl> / / / <nl> / / / If the * waitForSync * parameter is not specified or set to <nl> mmm a / arangod / RestHandler / RestEdgeHandler . cpp <nl> ppp b / arangod / RestHandler / RestEdgeHandler . cpp <nl> RestEdgeHandler : : RestEdgeHandler ( HttpRequest * request ) <nl> / / / created if it does not yet exist . Other values will be ignored so the <nl> / / / collection must be present for the operation to succeed . <nl> / / / <nl> - / / / * * Note * * : This flag is not supported in a cluster . Using it will result in <nl> - / / / an <nl> + / / / * * Note * * : This flag is not supported in a cluster . Using it will result in an <nl> / / / error . 
<nl> / / / <nl> / / / @ RESTQUERYPARAM { waitForSync , boolean , optional } <nl> bool RestEdgeHandler : : createDocumentCoordinator ( string const & collname , <nl> / / / @ RESTQUERYPARAMETERS <nl> / / / <nl> / / / @ RESTQUERYPARAM { rev , string , optional } <nl> - / / / You can conditionally fetch an edge document based on a target revision id <nl> - / / / by <nl> + / / / You can conditionally fetch an edge document based on a target revision id by <nl> / / / using the * rev * query parameter . <nl> / / / <nl> / / / @ RESTHEADERPARAMETERS <nl> bool RestEdgeHandler : : createDocumentCoordinator ( string const & collname , <nl> / / / identical to the specified etag , then an * HTTP 304 * is returned . <nl> / / / <nl> / / / @ RESTHEADERPARAM { If - Match , string , optional } <nl> - / / / You can conditionally fetch an edge document based on a target revision id <nl> - / / / by <nl> + / / / You can conditionally fetch an edge document based on a target revision id by <nl> / / / using the * if - match * HTTP header . <nl> / / / <nl> / / / @ RESTDESCRIPTION <nl> / / / Like * GET * , but only returns the header fields and not the body . You <nl> - / / / can use this call to get the current revision of an edge document or check <nl> - / / / if <nl> + / / / can use this call to get the current revision of an edge document or check if <nl> / / / it was deleted . <nl> / / / <nl> / / / @ RESTRETURNCODES <nl> bool RestEdgeHandler : : createDocumentCoordinator ( string const & collname , <nl> / / / Wait until edge document has been synced to disk . <nl> / / / <nl> / / / @ RESTQUERYPARAM { rev , string , optional } <nl> - / / / You can conditionally replace an edge document based on a target revision id <nl> - / / / by <nl> + / / / You can conditionally replace an edge document based on a target revision id by <nl> / / / using the * rev * query parameter . <nl> / / / <nl> / / / @ RESTQUERYPARAM { policy , string , optional } <nl> bool RestEdgeHandler : : createDocumentCoordinator ( string const & collname , <nl> / / / @ RESTHEADERPARAMETERS <nl> / / / <nl> / / / @ RESTHEADERPARAM { If - Match , string , optional } <nl> - / / / You can conditionally replace an edge document based on a target revision id <nl> - / / / by <nl> + / / / You can conditionally replace an edge document based on a target revision id by <nl> / / / using the * if - match * HTTP header . <nl> / / / <nl> / / / @ RESTDESCRIPTION <nl> - / / / Completely updates ( i . e . replaces ) the edge document identified by <nl> - / / / * document - handle * . <nl> - / / / If the edge document exists and can be updated , then a * HTTP 201 * is <nl> - / / / returned <nl> + / / / Completely updates ( i . e . replaces ) the edge document identified by * document - handle * . <nl> + / / / If the edge document exists and can be updated , then a * HTTP 201 * is returned <nl> / / / and the " ETag " header field contains the new revision of the edge document . <nl> / / / <nl> / / / If the new edge document passed in the body of the request contains the <nl> / / / * document - handle * in the attribute * _id * and the revision in * _rev * , <nl> / / / these attributes will be ignored . Only the URI and the " ETag " header are <nl> - / / / relevant in order to avoid confusion when using proxies . <nl> + / / / relevant in order to avoid confusion when using proxies . <nl> / / / * * Note * * : The attributes <nl> / / / * _from * and * _to * of an edge are immutable and cannot be updated either . 
<nl> / / / <nl> / / / Optionally , the query parameter * waitForSync * can be used to force <nl> - / / / synchronization of the edge document replacement operation to disk even in <nl> - / / / case <nl> + / / / synchronization of the edge document replacement operation to disk even in case <nl> / / / that the * waitForSync * flag had been disabled for the entire collection . <nl> / / / Thus , the * waitForSync * query parameter can be used to force synchronization <nl> / / / of just specific operations . To use this , set the * waitForSync * parameter <nl> bool RestEdgeHandler : : createDocumentCoordinator ( string const & collname , <nl> / / / <nl> / / / The body of the response contains a JSON object with the information about <nl> / / / the handle and the revision . The attribute * _id * contains the known <nl> - / / / * document - handle * of the updated edge document , * _key * contains the key <nl> - / / / which <nl> - / / / uniquely identifies a document in a given collection , and the attribute <nl> - / / / * _rev * <nl> + / / / * document - handle * of the updated edge document , * _key * contains the key which <nl> + / / / uniquely identifies a document in a given collection , and the attribute * _rev * <nl> / / / contains the new document revision . <nl> / / / <nl> / / / If the edge document does not exist , then a * HTTP 404 * is returned and the <nl> bool RestEdgeHandler : : createDocumentCoordinator ( string const & collname , <nl> / / / Specifying a target revision is optional , however , if done , only one of the <nl> / / / described mechanisms must be used ( either the * rev * query parameter or the <nl> / / / * if - match * HTTP header ) . <nl> - / / / Regardless which mechanism is used , the parameter needs to contain the <nl> - / / / target <nl> + / / / Regardless which mechanism is used , the parameter needs to contain the target <nl> / / / revision id as returned in the * _rev * attribute of an edge document or <nl> / / / by an HTTP * etag * header . <nl> / / / <nl> - / / / For example , to conditionally replace an edge document based on a specific <nl> - / / / revision <nl> + / / / For example , to conditionally replace an edge document based on a specific revision <nl> / / / id , you can use the following request : <nl> / / / <nl> / / / - PUT / _api / document / * document - handle * ? rev = * etag * <nl> / / / <nl> - / / / If a target revision id is provided in the request ( e . g . via the * etag * <nl> - / / / value <nl> + / / / If a target revision id is provided in the request ( e . g . via the * etag * value <nl> / / / in the * rev * URL query parameter above ) , ArangoDB will check that <nl> - / / / the revision id of the edge document found in the database is equal to the <nl> - / / / target <nl> - / / / revision id provided in the request . If there is a mismatch between the <nl> - / / / revision <nl> + / / / the revision id of the edge document found in the database is equal to the target <nl> + / / / revision id provided in the request . If there is a mismatch between the revision <nl> / / / id , then by default a * HTTP 412 * conflict is returned and no replacement is <nl> / / / performed . <nl> / / / <nl> - / / / The conditional update behavior can be overridden with the * policy * URL <nl> - / / / query parameter : <nl> + / / / The conditional update behavior can be overridden with the * policy * URL query parameter : <nl> / / / <nl> / / / - PUT / _api / document / * document - handle * ? 
policy = * policy * <nl> / / / <nl> bool RestEdgeHandler : : createDocumentCoordinator ( string const & collname , <nl> / / / revision id specified in the request . <nl> / / / <nl> / / / If * policy * is set to * last * , then the replacement will succeed , even if the <nl> - / / / revision id found in the database does not match the target revision id <nl> - / / / specified <nl> + / / / revision id found in the database does not match the target revision id specified <nl> / / / in the request . You can use the * last * * policy * to force replacements . <nl> / / / <nl> / / / @ RESTRETURNCODES <nl> / / / <nl> / / / @ RESTRETURNCODE { 201 } <nl> - / / / is returned if the edge document was replaced successfully and * waitForSync * <nl> - / / / was <nl> + / / / is returned if the edge document was replaced successfully and * waitForSync * was <nl> / / / * true * . <nl> / / / <nl> / / / @ RESTRETURNCODE { 202 } <nl> - / / / is returned if the edge document was replaced successfully and * waitForSync * <nl> - / / / was <nl> + / / / is returned if the edge document was replaced successfully and * waitForSync * was <nl> / / / * false * . <nl> / / / <nl> / / / @ RESTRETURNCODE { 400 } <nl> - / / / is returned if the body does not contain a valid JSON representation of an <nl> - / / / edge <nl> - / / / document or if applied to a non - edge collection . The response body contains <nl> - / / / an <nl> + / / / is returned if the body does not contain a valid JSON representation of an edge <nl> + / / / document or if applied to a non - edge collection . The response body contains an <nl> / / / error document in this case . <nl> / / / <nl> / / / @ RESTRETURNCODE { 404 } <nl> bool RestEdgeHandler : : createDocumentCoordinator ( string const & collname , <nl> / / / If the intention is to delete existing attributes with the patch command , <nl> / / / the URL query parameter * keepNull * can be used with a value of * false * . <nl> / / / This will modify the behavior of the patch command to remove any attributes <nl> - / / / from the existing edge document that are contained in the patch document <nl> - / / / with an <nl> + / / / from the existing edge document that are contained in the patch document with an <nl> / / / attribute value of * null * . <nl> / / / <nl> / / / @ RESTQUERYPARAM { mergeObjects , boolean , optional } <nl> bool RestEdgeHandler : : createDocumentCoordinator ( string const & collname , <nl> / / / Wait until edge document has been synced to disk . <nl> / / / <nl> / / / @ RESTQUERYPARAM { rev , string , optional } <nl> - / / / You can conditionally patch an edge document based on a target revision id <nl> - / / / by <nl> + / / / You can conditionally patch an edge document based on a target revision id by <nl> / / / using the * rev * query parameter . <nl> / / / <nl> / / / @ RESTQUERYPARAM { policy , string , optional } <nl> bool RestEdgeHandler : : createDocumentCoordinator ( string const & collname , <nl> / / / @ RESTHEADERPARAMETERS <nl> / / / <nl> / / / @ RESTHEADERPARAM { If - Match , string , optional } <nl> - / / / You can conditionally patch an edge document based on a target revision id <nl> - / / / by <nl> + / / / You can conditionally patch an edge document based on a target revision id by <nl> / / / using the * if - match * HTTP header . <nl> / / / <nl> / / / @ RESTDESCRIPTION <nl> / / / Partially updates the edge document identified by * document - handle * . 
<nl> / / / The body of the request must contain a JSON document with the attributes <nl> / / / to patch ( the patch document ) . All attributes from the patch document will <nl> - / / / be added to the existing edge document if they do not yet exist , and <nl> - / / / overwritten <nl> + / / / be added to the existing edge document if they do not yet exist , and overwritten <nl> / / / in the existing edge document if they do exist there . <nl> / / / <nl> / / / Setting an attribute value to * null * in the patch document will cause a <nl> / / / value of * null * be saved for the attribute by default . <nl> / / / <nl> - / / / * * Note * * : Internal attributes such as * _key * , * _from * and * _to * are <nl> - / / / immutable <nl> + / / / * * Note * * : Internal attributes such as * _key * , * _from * and * _to * are immutable <nl> / / / once set and cannot be updated . <nl> / / / <nl> / / / Optionally , the query parameter * waitForSync * can be used to force <nl> bool RestEdgeHandler : : createDocumentCoordinator ( string const & collname , <nl> / / / <nl> / / / The body of the response contains a JSON object with the information about <nl> / / / the handle and the revision . The attribute * _id * contains the known <nl> - / / / * document - handle * of the updated edge document , * _key * contains the key <nl> - / / / which <nl> - / / / uniquely identifies a document in a given collection , and the attribute <nl> - / / / * _rev * <nl> + / / / * document - handle * of the updated edge document , * _key * contains the key which <nl> + / / / uniquely identifies a document in a given collection , and the attribute * _rev * <nl> / / / contains the new document revision . <nl> / / / <nl> / / / If the edge document does not exist , then a * HTTP 404 * is returned and the <nl> / / / body of the response contains an error document . <nl> / / / <nl> - / / / You can conditionally update an edge document based on a target revision id <nl> - / / / by <nl> + / / / You can conditionally update an edge document based on a target revision id by <nl> / / / using either the * rev * query parameter or the * if - match * HTTP header . <nl> / / / To control the update behavior in case there is a revision mismatch , you <nl> / / / can use the * policy * parameter . This is the same as when replacing <nl> bool RestEdgeHandler : : createDocumentCoordinator ( string const & collname , <nl> / / / <nl> / / / @ RESTRETURNCODE { 400 } <nl> / / / is returned if the body does not contain a valid JSON representation or when <nl> - / / / applied on an non - edge collection . The response body contains an error <nl> - / / / document <nl> + / / / applied on an non - edge collection . The response body contains an error document <nl> / / / in this case . <nl> / / / <nl> / / / @ RESTRETURNCODE { 404 } <nl> bool RestEdgeHandler : : createDocumentCoordinator ( string const & collname , <nl> / / / @ RESTQUERYPARAMETERS <nl> / / / <nl> / / / @ RESTQUERYPARAM { rev , string , optional } <nl> - / / / You can conditionally delete an edge document based on a target revision id <nl> - / / / by <nl> + / / / You can conditionally delete an edge document based on a target revision id by <nl> / / / using the * rev * query parameter . 
<nl> / / / <nl> / / / @ RESTQUERYPARAM { policy , string , optional } <nl> bool RestEdgeHandler : : createDocumentCoordinator ( string const & collname , <nl> / / / @ RESTHEADERPARAMETERS <nl> / / / <nl> / / / @ RESTHEADERPARAM { If - Match , string , optional } <nl> - / / / You can conditionally delete an edge document based on a target revision id <nl> - / / / by <nl> + / / / You can conditionally delete an edge document based on a target revision id by <nl> / / / using the * if - match * HTTP header . <nl> / / / <nl> / / / @ RESTDESCRIPTION <nl> / / / The body of the response contains a JSON object with the information about <nl> / / / the handle and the revision . The attribute * _id * contains the known <nl> - / / / * document - handle * of the deleted edge document , * _key * contains the key <nl> - / / / which <nl> - / / / uniquely identifies a document in a given collection , and the attribute <nl> - / / / * _rev * <nl> + / / / * document - handle * of the deleted edge document , * _key * contains the key which <nl> + / / / uniquely identifies a document in a given collection , and the attribute * _rev * <nl> / / / contains the new document revision . <nl> / / / <nl> / / / If the * waitForSync * parameter is not specified or set to <nl> bool RestEdgeHandler : : createDocumentCoordinator ( string const & collname , <nl> / / / @ RESTRETURNCODES <nl> / / / <nl> / / / @ RESTRETURNCODE { 200 } <nl> - / / / is returned if the edge document was deleted successfully and * waitForSync * <nl> - / / / was <nl> + / / / is returned if the edge document was deleted successfully and * waitForSync * was <nl> / / / * true * . <nl> / / / <nl> / / / @ RESTRETURNCODE { 202 } <nl> - / / / is returned if the edge document was deleted successfully and * waitForSync * <nl> - / / / was <nl> + / / / is returned if the edge document was deleted successfully and * waitForSync * was <nl> / / / * false * . <nl> / / / <nl> / / / @ RESTRETURNCODE { 404 } <nl> mmm a / arangod / RestHandler / RestExportHandler . cpp <nl> ppp b / arangod / RestHandler / RestExportHandler . cpp <nl> VPackBuilder RestExportHandler : : buildOptions ( VPackSlice const & slice ) { <nl> / / / <nl> / / / @ RESTBODYPARAM { limit , integer , required , int64 } <nl> / / / an optional limit value , determining the maximum number of documents to <nl> - / / / be included in the cursor . Omitting the * limit * attribute or setting it to 0 <nl> - / / / will <nl> - / / / lead to no limit being used . If a limit is used , it is undefined which <nl> - / / / documents <nl> - / / / from the collection will be included in the export and which will be <nl> - / / / excluded . <nl> + / / / be included in the cursor . Omitting the * limit * attribute or setting it to 0 will <nl> + / / / lead to no limit being used . If a limit is used , it is undefined which documents <nl> + / / / from the collection will be included in the export and which will be excluded . <nl> / / / This is because there is no natural order of documents in a collection . <nl> / / / <nl> / / / @ RESTBODYPARAM { ttl , integer , required , int64 } <nl> VPackBuilder RestExportHandler : : buildOptions ( VPackSlice const & slice ) { <nl> / / / by clients . If not set , a server - defined value will be used . 
<nl> / / / <nl> / / / @ RESTBODYPARAM { restrict , object , optional , JSF_post_api_export_restrictions } <nl> - / / / an object containing an array of attribute names that will be <nl> + / / / an object containing an array of attribute names that will be <nl> / / / included or excluded when returning result documents . <nl> / / / <nl> - / / / Not specifying * restrict * will by default return all attributes of each <nl> - / / / document . <nl> + / / / Not specifying * restrict * will by default return all attributes of each document . <nl> / / / <nl> / / / @ RESTSTRUCT { type , JSF_post_api_export_restrictions , string , required , string } <nl> - / / / has to be be set to either * include * or * exclude * depending on which you <nl> - / / / want to use <nl> + / / / has to be be set to either * include * or * exclude * depending on which you want to use <nl> / / / <nl> / / / @ RESTSTRUCT { fields , JSF_post_api_export_restrictions , array , required , string } <nl> - / / / Contains an array of attribute names to * include * or * exclude * . Matching of <nl> - / / / attribute names <nl> + / / / Contains an array of attribute names to * include * or * exclude * . Matching of attribute names <nl> / / / for * inclusion * or * exclusion * will be done on the top level only . <nl> / / / Specifying names of nested attributes is not supported at the moment . <nl> / / / <nl> VPackBuilder RestExportHandler : : buildOptions ( VPackSlice const & slice ) { <nl> / / / The name of the collection to export . <nl> / / / <nl> / / / @ RESTDESCRIPTION <nl> - / / / A call to this method creates a cursor containing all documents in the <nl> + / / / A call to this method creates a cursor containing all documents in the <nl> / / / specified collection . In contrast to other data - producing APIs , the internal <nl> / / / data structures produced by the export API are more lightweight , so it is <nl> / / / the preferred way to retrieve all documents from a collection . <nl> - / / / <nl> - / / / Documents are returned in a similar manner as in the ` / _api / cursor ` REST <nl> - / / / API . <nl> + / / / <nl> + / / / Documents are returned in a similar manner as in the ` / _api / cursor ` REST API . <nl> / / / If all documents of the collection fit into the first batch , then no cursor <nl> / / / will be created , and the result object ' s * hasMore * attribute will be set to <nl> - / / / * false * . If not all documents fit into the first batch , then the result <nl> + / / / * false * . If not all documents fit into the first batch , then the result <nl> / / / object ' s * hasMore * attribute will be set to * true * , and the * id * attribute <nl> / / / of the result will contain a cursor id . <nl> / / / <nl> / / / The order in which the documents are returned is not specified . <nl> / / / <nl> - / / / By default , only those documents from the collection will be returned that <nl> - / / / are <nl> - / / / stored in the collection ' s datafiles . Documents that are present in the <nl> - / / / write - ahead <nl> + / / / By default , only those documents from the collection will be returned that are <nl> + / / / stored in the collection ' s datafiles . Documents that are present in the write - ahead <nl> / / / log ( WAL ) at the time the export is run will not be exported . <nl> - / / / <nl> + / / / <nl> / / / To export these documents as well , the caller can issue a WAL flush request <nl> - / / / before calling the export API or set the * flush * attribute . 
Setting the <nl> - / / / * flush * <nl> - / / / option will trigger a WAL flush before the export so documents get copied <nl> - / / / from <nl> + / / / before calling the export API or set the * flush * attribute . Setting the * flush * <nl> + / / / option will trigger a WAL flush before the export so documents get copied from <nl> / / / the WAL to the collection datafiles . <nl> - / / / <nl> + / / / <nl> / / / If the result set can be created by the server , the server will respond with <nl> / / / * HTTP 201 * . The body of the response will contain a JSON object with the <nl> / / / result set . <nl> VPackBuilder RestExportHandler : : buildOptions ( VPackSlice const & slice ) { <nl> / / / <nl> / / / - * code * : the HTTP status code <nl> / / / <nl> - / / / - * result * : an array of result documents ( might be empty if the collection <nl> - / / / was empty ) <nl> + / / / - * result * : an array of result documents ( might be empty if the collection was empty ) <nl> / / / <nl> / / / - * hasMore * : a boolean indicator whether there are more results <nl> / / / available for the cursor on the server <nl> VPackBuilder RestExportHandler : : buildOptions ( VPackSlice const & slice ) { <nl> / / / The body of the response will contain a JSON object with additional error <nl> / / / details . The object has the following attributes : <nl> / / / <nl> - / / / - * error * : boolean flag to indicate that an error occurred ( * true * in this <nl> - / / / case ) <nl> + / / / - * error * : boolean flag to indicate that an error occurred ( * true * in this case ) <nl> / / / <nl> / / / - * code * : the HTTP status code <nl> / / / <nl> VPackBuilder RestExportHandler : : buildOptions ( VPackSlice const & slice ) { <nl> / / / <nl> / / / - * errorMessage * : a descriptive error message <nl> / / / <nl> - / / / Clients should always delete an export cursor result as early as possible <nl> - / / / because a <nl> + / / / Clients should always delete an export cursor result as early as possible because a <nl> / / / lingering export cursor will prevent the underlying collection from being <nl> - / / / compacted or unloaded . By default , unused cursors will be deleted <nl> - / / / automatically <nl> - / / / after a server - defined idle time , and clients can adjust this idle time by <nl> - / / / setting <nl> + / / / compacted or unloaded . By default , unused cursors will be deleted automatically <nl> + / / / after a server - defined idle time , and clients can adjust this idle time by setting <nl> / / / the * ttl * value . <nl> / / / <nl> / / / Note : this API is currently not supported on cluster coordinators . <nl> VPackBuilder RestExportHandler : : buildOptions ( VPackSlice const & slice ) { <nl> / / / is returned if the result set can be created by the server . <nl> / / / <nl> / / / @ RESTRETURNCODE { 400 } <nl> - / / / is returned if the JSON representation is malformed or the query <nl> - / / / specification is <nl> + / / / is returned if the JSON representation is malformed or the query specification is <nl> / / / missing from the request . <nl> / / / <nl> / / / @ RESTRETURNCODE { 404 } <nl> VPackBuilder RestExportHandler : : buildOptions ( VPackSlice const & slice ) { <nl> / / / accessed in the query . <nl> / / / <nl> / / / @ RESTRETURNCODE { 405 } <nl> - / / / The server will respond with * HTTP 405 * if an unsupported HTTP method is <nl> - / / / used . <nl> + / / / The server will respond with * HTTP 405 * if an unsupported HTTP method is used . 
<nl> / / / <nl> / / / @ RESTRETURNCODE { 501 } <nl> / / / The server will respond with * HTTP 501 * if this API is called on a cluster <nl> mmm a / arangod / RestHandler / RestImportHandler . cpp <nl> ppp b / arangod / RestHandler / RestImportHandler . cpp <nl> int RestImportHandler : : handleSingleDocument ( RestImportTransaction & trx , <nl> / / / <nl> / / / @ RESTQUERYPARAM { createCollectionType , string , optional } <nl> / / / If this parameter has a value of ` document ` or ` edge ` , it will determine <nl> - / / / the type of collection that is going to be created when the <nl> - / / / ` createCollection ` <nl> + / / / the type of collection that is going to be created when the ` createCollection ` <nl> / / / option is set to ` true ` . The default value is ` document ` . <nl> / / / <nl> / / / @ RESTQUERYPARAM { overwrite , boolean , optional } <nl> int RestImportHandler : : handleSingleDocument ( RestImportTransaction & trx , <nl> / / / - * error * : this will not import the current document because of the unique <nl> / / / key constraint violation . This is the default setting . <nl> / / / <nl> - / / / - * update * : this will update an existing document in the database with the <nl> + / / / - * update * : this will update an existing document in the database with the <nl> / / / data specified in the request . Attributes of the existing document that <nl> / / / are not present in the request will be preseved . <nl> / / / <nl> / / / - * replace * : this will replace an existing document in the database with the <nl> - / / / data specified in the request . <nl> + / / / data specified in the request . <nl> / / / <nl> / / / - * ignore * : this will not update an existing document and simply ignore the <nl> / / / error caused by a unique key constraint violation . <nl> / / / <nl> / / / Note that that * update * , * replace * and * ignore * will only work when the <nl> / / / import document in the request contains the * _key * attribute . * update * and <nl> - / / / * replace * may also fail because of secondary unique key constraint <nl> - / / / violations . <nl> + / / / * replace * may also fail because of secondary unique key constraint violations . <nl> / / / <nl> / / / @ RESTQUERYPARAM { complete , boolean , optional } <nl> / / / If set to ` true ` or ` yes ` , it will make the whole import fail if any error <nl> int RestImportHandler : : handleSingleDocument ( RestImportTransaction & trx , <nl> / / / { name : { detailed : " detailed name " , short : " short name " } } <nl> / / / ] ; <nl> / / / <nl> - / / / var response = logCurlRequestRaw ( ' POST ' , " / _api / import ? collection = " + cn <nl> - / / / + " & type = list " , body ) ; <nl> + / / / var response = logCurlRequestRaw ( ' POST ' , " / _api / import ? collection = " + cn + " & type = list " , body ) ; <nl> / / / <nl> / / / assert ( response . code = = = 201 ) ; <nl> / / / var r = JSON . parse ( response . body ) ; <nl> int RestImportHandler : : handleSingleDocument ( RestImportTransaction & trx , <nl> / / / { name : { detailed : " detailed name " , short : " short name " } } <nl> / / / ] ; <nl> / / / <nl> - / / / var response = logCurlRequestRaw ( ' POST ' , " / _api / import ? collection = " + cn <nl> - / / / + " & type = auto " , body ) ; <nl> + / / / var response = logCurlRequestRaw ( ' POST ' , " / _api / import ? collection = " + cn + " & type = auto " , body ) ; <nl> / / / <nl> / / / assert ( response . code = = = 201 ) ; <nl> / / / var r = JSON . parse ( response . 
body ) ; <nl> int RestImportHandler : : handleSingleDocument ( RestImportTransaction & trx , <nl> / / / { id : " 55932 " , count : 4334 } , <nl> / / / ] ; <nl> / / / <nl> - / / / var response = logCurlRequestRaw ( ' POST ' , " / _api / import ? collection = " + cn <nl> - / / / + " & createCollection = true & type = list " , body ) ; <nl> + / / / var response = logCurlRequestRaw ( ' POST ' , " / _api / import ? collection = " + cn + " & createCollection = true & type = list " , body ) ; <nl> / / / <nl> / / / assert ( response . code = = = 201 ) ; <nl> / / / var r = JSON . parse ( response . body ) ; <nl> int RestImportHandler : : handleSingleDocument ( RestImportTransaction & trx , <nl> / / / ' { " _from " : " products / 332 " , " _to " : " products / abc " , ' + <nl> / / / ' " name " : " other name " } ' ; <nl> / / / <nl> - / / / var response = logCurlRequestRaw ( ' POST ' , " / _api / import ? collection = " + cn <nl> - / / / + " & type = documents " , body ) ; <nl> + / / / var response = logCurlRequestRaw ( ' POST ' , " / _api / import ? collection = " + cn + " & type = documents " , body ) ; <nl> / / / <nl> / / / assert ( response . code = = = 201 ) ; <nl> / / / var r = JSON . parse ( response . body ) ; <nl> int RestImportHandler : : handleSingleDocument ( RestImportTransaction & trx , <nl> / / / <nl> / / / var body = [ { name : " some name " } ] ; <nl> / / / <nl> - / / / var response = logCurlRequestRaw ( ' POST ' , " / _api / import ? collection = " + cn <nl> - / / / + " & type = list & details = true " , body ) ; <nl> + / / / var response = logCurlRequestRaw ( ' POST ' , " / _api / import ? collection = " + cn + " & type = list & details = true " , body ) ; <nl> / / / <nl> / / / assert ( response . code = = = 201 ) ; <nl> / / / var r = JSON . parse ( response . body ) ; <nl> int RestImportHandler : : handleSingleDocument ( RestImportTransaction & trx , <nl> / / / var body = ' { " _key " : " abc " , " value1 " : 25 , " value2 " : " test " } \ n ' + <nl> / / / ' { " _key " : " abc " , " value1 " : " bar " , " value2 " : " baz " } ' ; <nl> / / / <nl> - / / / var response = logCurlRequestRaw ( ' POST ' , " / _api / import ? collection = " + cn <nl> - / / / + " & type = documents & complete = true " , body ) ; <nl> + / / / var response = logCurlRequestRaw ( ' POST ' , " / _api / import ? collection = " + cn + " & type = documents & complete = true " , body ) ; <nl> / / / <nl> / / / assert ( response . code = = = 409 ) ; <nl> / / / <nl> int RestImportHandler : : handleSingleDocument ( RestImportTransaction & trx , <nl> / / / <nl> / / / var body = ' { " name " : " test " } ' ; <nl> / / / <nl> - / / / var response = logCurlRequestRaw ( ' POST ' , " / _api / import ? collection = " + cn <nl> - / / / + " & type = documents " , body ) ; <nl> + / / / var response = logCurlRequestRaw ( ' POST ' , " / _api / import ? collection = " + cn + " & type = documents " , body ) ; <nl> / / / <nl> / / / assert ( response . code = = = 404 ) ; <nl> / / / <nl> int RestImportHandler : : handleSingleDocument ( RestImportTransaction & trx , <nl> / / / <nl> / / / var body = ' { } ' ; <nl> / / / <nl> - / / / var response = logCurlRequestRaw ( ' POST ' , " / _api / import ? collection = " + cn <nl> - / / / + " & type = list " , body ) ; <nl> + / / / var response = logCurlRequestRaw ( ' POST ' , " / _api / import ? collection = " + cn + " & type = list " , body ) ; <nl> / / / <nl> / / / assert ( response . 
code = = = 400 ) ; <nl> / / / <nl> bool RestImportHandler : : createFromJson ( string const & type ) { <nl> / / / <nl> / / / @ RESTQUERYPARAM { createCollectionType , string , optional } <nl> / / / If this parameter has a value of ` document ` or ` edge ` , it will determine <nl> - / / / the type of collection that is going to be created when the <nl> - / / / ` createCollection ` <nl> + / / / the type of collection that is going to be created when the ` createCollection ` <nl> / / / option is set to ` true ` . The default value is ` document ` . <nl> / / / <nl> / / / @ RESTQUERYPARAM { overwrite , boolean , optional } <nl> bool RestImportHandler : : createFromJson ( string const & type ) { <nl> / / / <nl> / / / @ EXAMPLES <nl> / / / <nl> - / / / Importing two documents , with attributes ` _key ` , ` value1 ` and ` value2 ` each . <nl> - / / / One <nl> + / / / Importing two documents , with attributes ` _key ` , ` value1 ` and ` value2 ` each . One <nl> / / / line in the import data is empty <nl> / / / <nl> / / / @ EXAMPLE_ARANGOSH_RUN { RestImportCsvExample } <nl> bool RestImportHandler : : createFromJson ( string const & type ) { <nl> / / / ' [ " abc " , 25 , " test " ] \ n \ n ' + <nl> / / / ' [ " foo " , " bar " , " baz " ] ' ; <nl> / / / <nl> - / / / var response = logCurlRequestRaw ( ' POST ' , " / _api / import ? collection = " + <nl> - / / / cn , body ) ; <nl> + / / / var response = logCurlRequestRaw ( ' POST ' , " / _api / import ? collection = " + cn , body ) ; <nl> / / / <nl> / / / assert ( response . code = = = 201 ) ; <nl> / / / var r = JSON . parse ( response . body ) <nl> bool RestImportHandler : : createFromJson ( string const & type ) { <nl> / / / ' [ " foo " , " bar " ] \ n ' + <nl> / / / ' [ 534 . 55 , true ] ' ; <nl> / / / <nl> - / / / var response = logCurlRequestRaw ( ' POST ' , " / _api / import ? collection = " + cn <nl> - / / / + " & createCollection = true " , body ) ; <nl> + / / / var response = logCurlRequestRaw ( ' POST ' , " / _api / import ? collection = " + cn + " & createCollection = true " , body ) ; <nl> / / / <nl> / / / assert ( response . code = = = 201 ) ; <nl> / / / var r = JSON . parse ( response . body ) <nl> bool RestImportHandler : : createFromJson ( string const & type ) { <nl> / / / ' [ " products / 123 " , " products / 234 " , " some name " ] \ n ' + <nl> / / / ' [ " products / 332 " , " products / abc " , " other name " ] ' ; <nl> / / / <nl> - / / / var response = logCurlRequestRaw ( ' POST ' , " / _api / import ? collection = " + <nl> - / / / cn , body ) ; <nl> + / / / var response = logCurlRequestRaw ( ' POST ' , " / _api / import ? collection = " + cn , body ) ; <nl> / / / <nl> / / / assert ( response . code = = = 201 ) ; <nl> / / / var r = JSON . parse ( response . body ) <nl> bool RestImportHandler : : createFromJson ( string const & type ) { <nl> / / / <nl> / / / var body = ' [ " name " ] \ n [ " some name " ] \ n [ " other name " ] ' ; <nl> / / / <nl> - / / / var response = logCurlRequestRaw ( ' POST ' , " / _api / import ? collection = " + cn <nl> - / / / + " & details = true " , body ) ; <nl> + / / / var response = logCurlRequestRaw ( ' POST ' , " / _api / import ? collection = " + cn + " & details = true " , body ) ; <nl> / / / <nl> / / / assert ( response . code = = = 201 ) ; <nl> / / / var r = JSON . parse ( response . 
body ) <nl> bool RestImportHandler : : createFromJson ( string const & type ) { <nl> / / / ' [ " abc " , 25 , " test " ] \ n ' + <nl> / / / ' [ " abc " , " bar " , " baz " ] ' ; <nl> / / / <nl> - / / / var response = logCurlRequestRaw ( ' POST ' , " / _api / import ? collection = " + cn <nl> - / / / + " & details = true " , body ) ; <nl> + / / / var response = logCurlRequestRaw ( ' POST ' , " / _api / import ? collection = " + cn + " & details = true " , body ) ; <nl> / / / <nl> / / / assert ( response . code = = = 201 ) ; <nl> / / / var r = JSON . parse ( response . body ) <nl> bool RestImportHandler : : createFromJson ( string const & type ) { <nl> / / / ' [ " abc " , 25 , " test " ] \ n ' + <nl> / / / ' [ " abc " , " bar " , " baz " ] ' ; <nl> / / / <nl> - / / / var response = logCurlRequest ( ' POST ' , " / _api / import ? collection = " + cn + <nl> - / / / " & complete = true " , body ) ; <nl> + / / / var response = logCurlRequest ( ' POST ' , " / _api / import ? collection = " + cn + " & complete = true " , body ) ; <nl> / / / <nl> / / / assert ( response . code = = = 409 ) ; <nl> / / / <nl> bool RestImportHandler : : createFromJson ( string const & type ) { <nl> / / / ' [ " abc " , 25 , " test " ] \ n ' + <nl> / / / ' [ " foo " , " bar " , " baz " ] ' ; <nl> / / / <nl> - / / / var response = logCurlRequest ( ' POST ' , " / _api / import ? collection = " + cn , <nl> - / / / body ) ; <nl> + / / / var response = logCurlRequest ( ' POST ' , " / _api / import ? collection = " + cn , body ) ; <nl> / / / <nl> / / / assert ( response . code = = = 404 ) ; <nl> / / / <nl> bool RestImportHandler : : createFromJson ( string const & type ) { <nl> / / / <nl> / / / var body = ' { " _key " : " foo " , " value1 " : " bar " } ' ; <nl> / / / <nl> - / / / var response = logCurlRequest ( ' POST ' , " / _api / import ? collection = " + cn , <nl> - / / / body ) ; <nl> + / / / var response = logCurlRequest ( ' POST ' , " / _api / import ? collection = " + cn , body ) ; <nl> / / / <nl> / / / assert ( response . code = = = 400 ) ; <nl> / / / <nl> mmm a / arangod / RestHandler / RestJobHandler . cpp <nl> ppp b / arangod / RestHandler / RestJobHandler . cpp <nl> HttpHandler : : status_t RestJobHandler : : execute ( ) { <nl> / / / @ EXAMPLE_ARANGOSH_RUN { JSF_job_fetch_result_04 } <nl> / / / var url = " / _api / collection " ; <nl> / / / var headers = { ' x - arango - async ' : ' store ' } ; <nl> - / / / var response = logCurlRequest ( ' PUT ' , url , { " name " : <nl> - / / / " this name is invalid " } , <nl> - / / / headers ) ; <nl> + / / / var response = logCurlRequest ( ' PUT ' , url , { " name " : " this name is invalid " } , headers ) ; <nl> / / / <nl> / / / assert ( response . code = = = 202 ) ; <nl> / / / logRawResponse ( response ) ; <nl> void RestJobHandler : : putJob ( ) { <nl> / / / var url = " / _api / cursor " ; <nl> / / / var headers = { ' x - arango - async ' : ' store ' } ; <nl> / / / var postData = { " query " : <nl> - / / / " FOR i IN 1 . . 10 FOR j IN 1 . . 10 LET x = sleep ( 1 . 0 ) " + <nl> - / / / " FILTER i = = 5 & & j = = 5 RETURN 42 " } <nl> + / / / " FOR i IN 1 . . 10 FOR j IN 1 . . 10 LET x = sleep ( 1 . 0 ) FILTER i = = 5 & & j = = 5 RETURN 42 " } <nl> / / / <nl> / / / var response = logCurlRequest ( ' POST ' , url , postData , headers ) ; <nl> / / / assert ( response . code = = = 202 ) ; <nl> void RestJobHandler : : getJobByType ( std : : string const & type ) { <nl> / / / <nl> / / / @ RESTURLPARAM { type , string , required } <nl> / / / The type of jobs to delete . 
type can be : <nl> - / / / * * all * : Deletes all jobs results . Currently executing or queued async <nl> + / / / * * all * : Deletes all jobs results . Currently executing or queued async <nl> / / / jobs will not be stopped by this call . <nl> - / / / * * expired * : Deletes expired results . To determine the expiration status of <nl> - / / / a <nl> - / / / result , pass the stamp query parameter . stamp needs to be a UNIX <nl> - / / / timestamp , <nl> + / / / * * expired * : Deletes expired results . To determine the expiration status of a <nl> + / / / result , pass the stamp query parameter . stamp needs to be a UNIX timestamp , <nl> / / / and all async job results created at a lower timestamp will be deleted . <nl> / / / * * an actual job - id * : In this case , the call will remove the result of the <nl> / / / specified async job . If the job is currently executing or queued , it will <nl> mmm a / arangod / RestHandler / RestQueryCacheHandler . cpp <nl> ppp b / arangod / RestHandler / RestQueryCacheHandler . cpp <nl> HttpHandler : : status_t RestQueryCacheHandler : : execute ( ) { <nl> / / / @ startDocuBlock DeleteApiQueryCache <nl> / / / @ brief clears the AQL query cache <nl> / / / <nl> - / / / @ RESTHEADER { DELETE / _api / query - cache , Clears any results in the AQL query <nl> - / / / cache } <nl> + / / / @ RESTHEADER { DELETE / _api / query - cache , Clears any results in the AQL query cache } <nl> / / / <nl> / / / @ RESTDESCRIPTION <nl> / / / clears the query cache <nl> bool RestQueryCacheHandler : : clearCache ( ) { <nl> / / / @ startDocuBlock GetApiQueryCacheProperties <nl> / / / @ brief returns the global configuration for the AQL query cache <nl> / / / <nl> - / / / @ RESTHEADER { GET / _api / query - cache / properties , Returns the global properties <nl> - / / / for the AQL query cache } <nl> + / / / @ RESTHEADER { GET / _api / query - cache / properties , Returns the global properties for the AQL query cache } <nl> / / / <nl> / / / @ RESTDESCRIPTION <nl> / / / Returns the global AQL query cache configuration . The configuration is a <nl> / / / JSON object with the following properties : <nl> - / / / <nl> - / / / - * mode * : the mode the AQL query cache operates in . The mode is one of the <nl> - / / / following <nl> + / / / <nl> + / / / - * mode * : the mode the AQL query cache operates in . The mode is one of the following <nl> / / / values : * off * , * on * or * demand * . <nl> / / / <nl> - / / / - * maxResults * : the maximum number of query results that will be stored per <nl> - / / / database - specific <nl> + / / / - * maxResults * : the maximum number of query results that will be stored per database - specific <nl> / / / cache . <nl> / / / <nl> / / / @ RESTRETURNCODES <nl> bool RestQueryCacheHandler : : readProperties ( ) { <nl> / / / @ startDocuBlock PutApiQueryCacheProperties <nl> / / / @ brief changes the configuration for the AQL query cache <nl> / / / <nl> - / / / @ RESTHEADER { PUT / _api / query - cache / properties , Globally adjusts the AQL query <nl> - / / / result cache properties } <nl> + / / / @ RESTHEADER { PUT / _api / query - cache / properties , Globally adjusts the AQL query result cache properties } <nl> / / / <nl> / / / @ RESTDESCRIPTION <nl> / / / After the properties have been changed , the current set of properties will <nl> bool RestQueryCacheHandler : : readProperties ( ) { <nl> / / / Note : changing the properties may invalidate all results in the cache . <nl> / / / The global properties for AQL query cache . 
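A minimal arangosh-style sketch in the spirit of the examples elsewhere in this patch (illustrative only, not part of the diff): it reads the cache configuration via the GET endpoint described above and then adjusts it through the PUT endpoint documented below. The values " demand " and 128 are assumptions chosen purely to show the shape of the request body.
var response = logCurlRequest ( ' GET ' , " / _api / query - cache / properties " ) ;
var properties = JSON . parse ( response . body ) ;   / / an object with the documented attributes , e . g . { " mode " : " off " , " maxResults " : . . . }
response = logCurlRequest ( ' PUT ' , " / _api / query - cache / properties " , { " mode " : " demand " , " maxResults " : 128 } ) ;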
<nl> / / / The properties need to be passed in the attribute * properties * in the body <nl> - / / / of the HTTP request . * properties * needs to be a JSON object with the <nl> - / / / following <nl> + / / / of the HTTP request . * properties * needs to be a JSON object with the following <nl> / / / properties : <nl> / / / <nl> / / / @ RESTBODYPARAM { mode , string , required , string } <nl> - / / / the mode the AQL query cache should operate in . Possible values are * off * , <nl> - / / / * on * or * demand * . <nl> + / / / the mode the AQL query cache should operate in . Possible values are * off * , * on * or * demand * . <nl> / / / <nl> / / / @ RESTBODYPARAM { maxResults , integer , required , int64 } <nl> - / / / the maximum number of query results that will be stored per <nl> - / / / database - specific cache . <nl> + / / / the maximum number of query results that will be stored per database - specific cache . <nl> / / / <nl> / / / <nl> / / / @ RESTRETURNCODES <nl> mmm a / arangod / RestHandler / RestQueryHandler . cpp <nl> ppp b / arangod / RestHandler / RestQueryHandler . cpp <nl> HttpHandler : : status_t RestQueryHandler : : execute ( ) { <nl> / / / @ startDocuBlock GetApiQueryProperties <nl> / / / @ brief returns the configuration for the AQL query tracking <nl> / / / <nl> - / / / @ RESTHEADER { GET / _api / query / properties , Returns the properties for the AQL <nl> - / / / query tracking } <nl> + / / / @ RESTHEADER { GET / _api / query / properties , Returns the properties for the AQL query tracking } <nl> / / / <nl> / / / @ RESTDESCRIPTION <nl> / / / Returns the current query tracking configuration . The configuration is a <nl> HttpHandler : : status_t RestQueryHandler : : execute ( ) { <nl> / / / * false * , neither queries nor slow queries will be tracked . <nl> / / / <nl> / / / - * trackSlowQueries * : if set to * true * , then slow queries will be tracked <nl> - / / / in the list of slow queries if their runtime exceeds the value set in <nl> - / / / * slowQueryThreshold * . In order for slow queries to be tracked , the <nl> - / / / * enabled * <nl> + / / / in the list of slow queries if their runtime exceeds the value set in <nl> + / / / * slowQueryThreshold * . In order for slow queries to be tracked , the * enabled * <nl> / / / property must also be set to * true * . <nl> / / / <nl> / / / - * maxSlowQueries * : the maximum number of slow queries to keep in the list <nl> HttpHandler : : status_t RestQueryHandler : : execute ( ) { <nl> / / / The value for * slowQueryThreshold * is specified in seconds . <nl> / / / <nl> / / / - * maxQueryStringLength * : the maximum query string length to keep in the <nl> - / / / list of queries . Query strings can have arbitrary lengths , and this <nl> - / / / property <nl> + / / / list of queries . Query strings can have arbitrary lengths , and this property <nl> / / / can be used to save memory in case very long query strings are used . The <nl> / / / value is specified in bytes . 
<nl> / / / <nl> bool RestQueryHandler : : readQueryProperties ( ) { <nl> / / / @ startDocuBlock GetApiQueryCurrent <nl> / / / @ brief returns a list of currently running AQL queries <nl> / / / <nl> - / / / @ RESTHEADER { GET / _api / query / current , Returns the currently running AQL <nl> - / / / queries } <nl> + / / / @ RESTHEADER { GET / _api / query / current , Returns the currently running AQL queries } <nl> / / / <nl> / / / @ RESTDESCRIPTION <nl> - / / / Returns an array containing the AQL queries currently running in the <nl> - / / / selected <nl> + / / / Returns an array containing the AQL queries currently running in the selected <nl> / / / database . Each query is a JSON object with the following attributes : <nl> / / / <nl> / / / - * id * : the query ' s id <nl> bool RestQueryHandler : : deleteQuerySlow ( ) { <nl> / / / @ RESTRETURNCODES <nl> / / / <nl> / / / @ RESTRETURNCODE { 200 } <nl> - / / / The server will respond with * HTTP 200 * when the query was still running <nl> - / / / when <nl> + / / / The server will respond with * HTTP 200 * when the query was still running when <nl> / / / the kill request was executed and the query ' s kill flag was set . <nl> / / / <nl> / / / @ RESTRETURNCODE { 400 } <nl> bool RestQueryHandler : : deleteQuery ( ) { <nl> / / / @ startDocuBlock PutApiQueryProperties <nl> / / / @ brief changes the configuration for the AQL query tracking <nl> / / / <nl> - / / / @ RESTHEADER { PUT / _api / query / properties , Changes the properties for the AQL <nl> - / / / query tracking } <nl> + / / / @ RESTHEADER { PUT / _api / query / properties , Changes the properties for the AQL query tracking } <nl> / / / <nl> / / / @ RESTBODYPARAM { enabled , boolean , required , } <nl> / / / If set to * true * , then queries will be tracked . If set to <nl> bool RestQueryHandler : : replaceProperties ( ) { <nl> / / / <nl> / / / @ EXAMPLE_ARANGOSH_RUN { RestQueryValid } <nl> / / / var url = " / _api / query " ; <nl> - / / / var body = ' { " query " : ' + <nl> - / / / ' " FOR p IN products FILTER p . name = = @ name LIMIT 2 RETURN p . n " } ' ; <nl> + / / / var body = ' { " query " : " FOR p IN products FILTER p . name = = @ name LIMIT 2 RETURN p . n " } ' ; <nl> / / / <nl> / / / var response = logCurlRequest ( ' POST ' , url , body ) ; <nl> / / / <nl> bool RestQueryHandler : : replaceProperties ( ) { <nl> / / / <nl> / / / @ EXAMPLE_ARANGOSH_RUN { RestQueryInvalid } <nl> / / / var url = " / _api / query " ; <nl> - / / / var body = ' { " query " : ' + <nl> - / / / ' " FOR p IN products FILTER p . name = @ name LIMIT 2 RETURN p . n " } ' ; <nl> + / / / var body = ' { " query " : " FOR p IN products FILTER p . name = @ name LIMIT 2 RETURN p . n " } ' ; <nl> / / / <nl> / / / var response = logCurlRequest ( ' POST ' , url , body ) ; <nl> / / / <nl> mmm a / arangod / RestHandler / RestReplicationHandler . cpp <nl> ppp b / arangod / RestHandler / RestReplicationHandler . cpp <nl> uint64_t RestReplicationHandler : : determineChunkSize ( ) const { <nl> / / / @ startDocuBlock JSF_get_api_replication_logger_return_state <nl> / / / @ brief returns the state of the replication logger <nl> / / / <nl> - / / / @ RESTHEADER { GET / _api / replication / logger - state , Return replication logger <nl> - / / / state } <nl> + / / / @ RESTHEADER { GET / _api / replication / logger - state , Return replication logger state } <nl> / / / <nl> / / / @ RESTDESCRIPTION <nl> / / / Returns the current state of the server ' s replication logger . 
The state will <nl> uint64_t RestReplicationHandler : : determineChunkSize ( ) const { <nl> / / / - * lastLogTick * : the tick value of the latest tick the logger has logged . <nl> / / / This value can be used for incremental fetching of log data . <nl> / / / <nl> - / / / - * totalEvents * : total number of events logged since the server was <nl> - / / / started . <nl> - / / / The value is not reset between multiple stops and re - starts of the <nl> - / / / logger . <nl> + / / / - * totalEvents * : total number of events logged since the server was started . <nl> + / / / The value is not reset between multiple stops and re - starts of the logger . <nl> / / / <nl> / / / - * time * : the current date and time on the logger server <nl> / / / <nl> uint64_t RestReplicationHandler : : determineChunkSize ( ) const { <nl> / / / <nl> / / / - * serverId * : the logger server ' s id <nl> / / / <nl> - / / / - * clients * : returns the last fetch status by replication clients connected <nl> - / / / to <nl> - / / / the logger . Each client is returned as a JSON object with the following <nl> - / / / attributes : <nl> + / / / - * clients * : returns the last fetch status by replication clients connected to <nl> + / / / the logger . Each client is returned as a JSON object with the following attributes : <nl> / / / <nl> / / / - * serverId * : server id of client <nl> / / / <nl> - / / / - * lastServedTick * : last tick value served to this client via the <nl> - / / / * logger - follow * API <nl> + / / / - * lastServedTick * : last tick value served to this client via the * logger - follow * API <nl> / / / <nl> - / / / - * time * : date and time when this client last called the * logger - follow * <nl> - / / / API <nl> + / / / - * time * : date and time when this client last called the * logger - follow * API <nl> / / / <nl> / / / @ RESTRETURNCODES <nl> / / / <nl> void RestReplicationHandler : : handleCommandLoggerState ( ) { <nl> / / / @ startDocuBlock JSF_get_api_replication_logger_tick_ranges <nl> / / / @ brief returns the tick value ranges available in the logfiles <nl> / / / <nl> - / / / @ RESTHEADER { GET / _api / replication / logger - tick - ranges , Return the tick ranges <nl> - / / / available in the WAL logfiles } <nl> + / / / @ RESTHEADER { GET / _api / replication / logger - tick - ranges , Return the tick ranges available in the WAL logfiles } <nl> / / / <nl> / / / @ RESTDESCRIPTION <nl> / / / Returns the currently available ranges of tick values for all currently <nl> void RestReplicationHandler : : handleCommandLoggerTickRanges ( ) { <nl> / / / @ startDocuBlock JSF_get_api_replication_logger_first_tick <nl> / / / @ brief Return the first available tick value from the server <nl> / / / <nl> - / / / @ RESTHEADER { GET / _api / replication / logger - first - tick , Returns the first <nl> - / / / available tick value } <nl> + / / / @ RESTHEADER { GET / _api / replication / logger - first - tick , Returns the first available tick value } <nl> / / / <nl> / / / @ RESTDESCRIPTION <nl> / / / Returns the first available tick value that can be served from the server ' s <nl> void RestReplicationHandler : : handleCommandLoggerFirstTick ( ) { <nl> / / / @ startDocuBlock JSF_delete_batch_replication <nl> / / / @ brief handle a dump batch command <nl> / / / <nl> - / / / @ RESTHEADER { DELETE / _api / replication / batch / { id } , Deletes an existing dump <nl> - / / / batch } <nl> + / / / @ RESTHEADER { DELETE / _api / replication / batch / { id } , Deletes an existing dump batch } <nl> / / / <nl> / / / * 
* Note * * : These calls are uninteresting to users . <nl> / / / <nl> void RestReplicationHandler : : handleTrampolineCoordinator ( ) { <nl> / / / state as the logger server . <nl> / / / <nl> / / / Clients can call this method repeatedly to incrementally fetch all changes <nl> - / / / from the logger server . In this case , they should provide the * from * value <nl> - / / / so <nl> + / / / from the logger server . In this case , they should provide the * from * value so <nl> / / / they will only get returned the log events since their last fetch . <nl> / / / <nl> - / / / When the * from * query parameter is not used , the logger server will return <nl> - / / / log <nl> + / / / When the * from * query parameter is not used , the logger server will return log <nl> / / / entries starting at the beginning of its replication log . When the * from * <nl> / / / parameter is used , the logger server will only return log entries which have <nl> - / / / higher tick values than the specified * from * value ( note : the log entry with <nl> - / / / a <nl> + / / / higher tick values than the specified * from * value ( note : the log entry with a <nl> / / / tick value equal to * from * will be excluded ) . Use the * from * value when <nl> / / / incrementally fetching log data . <nl> / / / <nl> - / / / The * to * query parameter can be used to optionally restrict the upper bound <nl> - / / / of <nl> - / / / the result to a certain tick value . If used , the result will contain only <nl> - / / / log events <nl> - / / / with tick values up to ( including ) * to * . In incremental fetching , there is <nl> - / / / no <nl> + / / / The * to * query parameter can be used to optionally restrict the upper bound of <nl> + / / / the result to a certain tick value . If used , the result will contain only log events <nl> + / / / with tick values up to ( including ) * to * . In incremental fetching , there is no <nl> / / / need to use the * to * parameter . It only makes sense in special situations , <nl> / / / when only parts of the change log are required . <nl> / / / <nl> - / / / The * chunkSize * query parameter can be used to control the size of the <nl> - / / / result . <nl> + / / / The * chunkSize * query parameter can be used to control the size of the result . <nl> / / / It must be specified in bytes . The * chunkSize * value will only be honored <nl> / / / approximately . Otherwise a too low * chunkSize * value could cause the server <nl> / / / to not be able to put just one log entry into the result and return it . <nl> - / / / Therefore , the * chunkSize * value will only be consulted after a log entry <nl> - / / / has <nl> + / / / Therefore , the * chunkSize * value will only be consulted after a log entry has <nl> / / / been written into the result . If the result size is then bigger than <nl> / / / * chunkSize * , the server will respond with as many log entries as there are <nl> - / / / in the response already . If the result size is still smaller than <nl> - / / / * chunkSize * , <nl> + / / / in the response already . If the result size is still smaller than * chunkSize * , <nl> / / / the server will try to return more data if there ' s more data left to return . <nl> / / / <nl> - / / / If * chunkSize * is not specified , some server - side default value will be <nl> - / / / used . <nl> + / / / If * chunkSize * is not specified , some server - side default value will be used . <nl> / / / <nl> / / / The * Content - Type * of the result is * application / x - arango - dump * . 
This is an <nl> / / / easy - to - process format , with all log events going onto separate lines in the <nl> void RestReplicationHandler : : handleTrampolineCoordinator ( ) { <nl> / / / <nl> / / / - * data * : the original document data <nl> / / / <nl> - / / / A more detailed description of the individual replication event types and <nl> - / / / their <nl> + / / / A more detailed description of the individual replication event types and their <nl> / / / data structures can be found in @ ref RefManualReplicationEventTypes . <nl> / / / <nl> / / / The response will also contain the following HTTP headers : <nl> / / / <nl> - / / / - * x - arango - replication - active * : whether or not the logger is active . <nl> - / / / Clients <nl> + / / / - * x - arango - replication - active * : whether or not the logger is active . Clients <nl> / / / can use this flag as an indication for their polling frequency . If the <nl> - / / / logger is not active and there are no more replication events available , <nl> - / / / it <nl> + / / / logger is not active and there are no more replication events available , it <nl> / / / might be sensible for a client to abort , or to go to sleep for a long time <nl> / / / and try again later to check whether the logger has been activated . <nl> / / / <nl> / / / - * x - arango - replication - lastincluded * : the tick value of the last included <nl> / / / value in the result . In incremental log fetching , this value can be used <nl> - / / / as the * from * value for the following request . * * Note * * that if the result <nl> - / / / is <nl> - / / / empty , the value will be * 0 * . This value should not be used as * from * <nl> - / / / value <nl> + / / / as the * from * value for the following request . * * Note * * that if the result is <nl> + / / / empty , the value will be * 0 * . This value should not be used as * from * value <nl> / / / by clients in the next request ( otherwise the server would return the log <nl> / / / events from the start of the log again ) . <nl> / / / <nl> / / / - * x - arango - replication - lasttick * : the last tick value the logger server has <nl> / / / logged ( not necessarily included in the result ) . By comparing the the last <nl> - / / / tick and last included tick values , clients have an approximate indication <nl> - / / / of <nl> + / / / tick and last included tick values , clients have an approximate indication of <nl> / / / how many events there are still left to fetch . <nl> / / / <nl> / / / - * x - arango - replication - checkmore * : whether or not there already exists more <nl> - / / / log data which the client could fetch immediately . If there is more log <nl> - / / / data <nl> - / / / available , the client could call * logger - follow * again with an adjusted <nl> - / / / * from * <nl> + / / / log data which the client could fetch immediately . If there is more log data <nl> + / / / available , the client could call * logger - follow * again with an adjusted * from * <nl> / / / value to fetch remaining log entries until there are no more . <nl> / / / <nl> / / / If there isn ' t any more log data to fetch , the client might decide to go <nl> void RestReplicationHandler : : handleTrampolineCoordinator ( ) { <nl> / / / <nl> / / / @ RESTRETURNCODE { 200 } <nl> / / / is returned if the request was executed successfully , and there are log <nl> - / / / events available for the requested range . The response body will not be <nl> - / / / empty <nl> + / / / events available for the requested range . 
The response body will not be empty <nl> / / / in this case . <nl> / / / <nl> / / / @ RESTRETURNCODE { 204 } <nl> void RestReplicationHandler : : handleTrampolineCoordinator ( ) { <nl> / / / <nl> / / / db . _create ( " products " ) ; <nl> / / / db . products . save ( { " _key " : " p1 " , " name " : " flux compensator " } ) ; <nl> - / / / db . products . save ( { " _key " : " p2 " , " name " : " hybrid hovercraft " , " hp " : <nl> - / / / 5100 } ) ; <nl> + / / / db . products . save ( { " _key " : " p2 " , " name " : " hybrid hovercraft " , " hp " : 5100 } ) ; <nl> / / / db . products . remove ( " p1 " ) ; <nl> / / / db . products . update ( " p2 " , { " name " : " broken hovercraft " } ) ; <nl> / / / db . products . drop ( ) ; <nl> void RestReplicationHandler : : handleTrampolineCoordinator ( ) { <nl> / / / <nl> / / / db . _create ( " products " ) ; <nl> / / / db . products . save ( { " _key " : " p1 " , " name " : " flux compensator " } ) ; <nl> - / / / db . products . save ( { " _key " : " p2 " , " name " : " hybrid hovercraft " , " hp " : <nl> - / / / 5100 } ) ; <nl> + / / / db . products . save ( { " _key " : " p2 " , " name " : " hybrid hovercraft " , " hp " : 5100 } ) ; <nl> / / / db . products . remove ( " p1 " ) ; <nl> / / / db . products . update ( " p2 " , { " name " : " broken hovercraft " } ) ; <nl> / / / db . products . drop ( ) ; <nl> / / / <nl> / / / require ( " internal " ) . wait ( 1 ) ; <nl> - / / / var url = " / _api / replication / logger - follow ? from = " + lastTick + <nl> - / / / " & chunkSize = 400 " ; <nl> + / / / var url = " / _api / replication / logger - follow ? from = " + lastTick + " & chunkSize = 400 " ; <nl> / / / var response = logCurlRequest ( ' GET ' , url ) ; <nl> / / / <nl> / / / assert ( response . code = = = 200 ) ; <nl> void RestReplicationHandler : : handleCommandDetermineOpenTransactions ( ) { <nl> / / / @ startDocuBlock JSF_put_api_replication_inventory <nl> / / / @ brief Returns an overview of collections and their indexes <nl> / / / <nl> - / / / @ RESTHEADER { GET / _api / replication / inventory , Return inventory of collections <nl> - / / / and indexes } <nl> + / / / @ RESTHEADER { GET / _api / replication / inventory , Return inventory of collections and indexes } <nl> / / / <nl> / / / @ RESTQUERYPARAMETERS <nl> / / / <nl> void RestReplicationHandler : : handleCommandDetermineOpenTransactions ( ) { <nl> / / / <nl> / / / @ RESTDESCRIPTION <nl> / / / Returns the array of collections and indexes available on the server . This <nl> - / / / array can be used by replication clients to initiate an initial sync with <nl> - / / / the <nl> + / / / array can be used by replication clients to initiate an initial sync with the <nl> / / / server . <nl> / / / <nl> - / / / The response will contain a JSON object with the * collection * and * state * <nl> - / / / and <nl> + / / / The response will contain a JSON object with the * collection * and * state * and <nl> / / / * tick * attributes . <nl> / / / <nl> / / / * collections * is a array of collections with the following sub - attributes : <nl> / / / <nl> / / / - * parameters * : the collection properties <nl> / / / <nl> - / / / - * indexes * : a array of the indexes of a the collection . Primary indexes and <nl> - / / / edges indexes <nl> + / / / - * indexes * : a array of the indexes of a the collection . Primary indexes and edges indexes <nl> / / / are not included in this array . <nl> / / / <nl> - / / / The * state * attribute contains the current state of the replication logger . 
<nl> - / / / It <nl> + / / / The * state * attribute contains the current state of the replication logger . It <nl> / / / contains the following sub - attributes : <nl> / / / <nl> - / / / - * running * : whether or not the replication logger is currently active . <nl> - / / / Note : <nl> + / / / - * running * : whether or not the replication logger is currently active . Note : <nl> / / / since ArangoDB 2 . 2 , the value will always be * true * <nl> / / / <nl> - / / / - * lastLogTick * : the value of the last tick the replication logger has <nl> - / / / written <nl> + / / / - * lastLogTick * : the value of the last tick the replication logger has written <nl> / / / <nl> / / / - * time * : the current time on the server <nl> / / / <nl> - / / / Replication clients should note the * lastLogTick * value returned . They can <nl> - / / / then <nl> - / / / fetch collections ' data using the dump method up to the value of <nl> - / / / lastLogTick , and <nl> + / / / Replication clients should note the * lastLogTick * value returned . They can then <nl> + / / / fetch collections ' data using the dump method up to the value of lastLogTick , and <nl> / / / query the continuous replication log for log events after this tick value . <nl> / / / <nl> / / / To create a full copy of the collections on the server , a replication client <nl> / / / can execute these steps : <nl> / / / <nl> - / / / - call the * / inventory * API method . This returns the * lastLogTick * value and <nl> - / / / the <nl> + / / / - call the * / inventory * API method . This returns the * lastLogTick * value and the <nl> / / / array of collections and indexes from the server . <nl> / / / <nl> - / / / - for each collection returned by * / inventory * , create the collection <nl> - / / / locally and <nl> - / / / call * / dump * to stream the collection data to the client , up to the value <nl> - / / / of <nl> + / / / - for each collection returned by * / inventory * , create the collection locally and <nl> + / / / call * / dump * to stream the collection data to the client , up to the value of <nl> / / / * lastLogTick * . <nl> - / / / After that , the client can create the indexes on the collections as they <nl> - / / / were <nl> + / / / After that , the client can create the indexes on the collections as they were <nl> / / / reported by * / inventory * . <nl> / / / <nl> - / / / If the clients wants to continuously stream replication log events from the <nl> - / / / logger <nl> + / / / If the clients wants to continuously stream replication log events from the logger <nl> / / / server , the following additional steps need to be carried out : <nl> / / / <nl> - / / / - the client should call * / logger - follow * initially to fetch the first batch <nl> - / / / of <nl> - / / / replication events that were logged after the client ' s call to <nl> - / / / * / inventory * . <nl> - / / / <nl> - / / / The call to * / logger - follow * should use a * from * parameter with the value <nl> - / / / of the <nl> - / / / * lastLogTick * as reported by * / inventory * . The call to * / logger - follow * <nl> - / / / will return the <nl> - / / / * x - arango - replication - lastincluded * which will contain the last tick value <nl> - / / / included <nl> + / / / - the client should call * / logger - follow * initially to fetch the first batch of <nl> + / / / replication events that were logged after the client ' s call to * / inventory * . 
<nl> + / / / <nl> + / / / The call to * / logger - follow * should use a * from * parameter with the value of the <nl> + / / / * lastLogTick * as reported by * / inventory * . The call to * / logger - follow * will return the <nl> + / / / * x - arango - replication - lastincluded * which will contain the last tick value included <nl> / / / in the response . <nl> / / / <nl> - / / / - the client can then continuously call * / logger - follow * to incrementally <nl> - / / / fetch new <nl> + / / / - the client can then continuously call * / logger - follow * to incrementally fetch new <nl> / / / replication events that occurred after the last transfer . <nl> / / / <nl> - / / / Calls should use a * from * parameter with the value of the <nl> - / / / * x - arango - replication - lastincluded * <nl> - / / / header of the previous response . If there are no more replication events , <nl> - / / / the <nl> - / / / response will be empty and clients can go to sleep for a while and try <nl> - / / / again <nl> + / / / Calls should use a * from * parameter with the value of the * x - arango - replication - lastincluded * <nl> + / / / header of the previous response . If there are no more replication events , the <nl> + / / / response will be empty and clients can go to sleep for a while and try again <nl> / / / later . <nl> / / / <nl> / / / * * Note * * : on a coordinator , this request must have the query parameter <nl> void RestReplicationHandler : : handleCommandInventory ( ) { <nl> / / / @ startDocuBlock JSF_get_api_replication_cluster_inventory <nl> / / / @ brief returs an overview of collections and indexes in a cluster <nl> / / / <nl> - / / / @ RESTHEADER { GET / _api / replication / clusterInventory , Return cluster inventory <nl> - / / / of collections and indexes } <nl> + / / / @ RESTHEADER { GET / _api / replication / clusterInventory , Return cluster inventory of collections and indexes } <nl> / / / <nl> / / / @ RESTQUERYPARAMETERS <nl> / / / <nl> void RestReplicationHandler : : handleCommandRemoveKeys ( ) { <nl> / / / Produce an error when dumped edges refer to now - unknown collections . <nl> / / / <nl> / / / @ RESTQUERYPARAM { ticks , boolean , optional } <nl> - / / / Whether or not to include tick values in the dump . The default value is <nl> - / / / * true * . <nl> + / / / Whether or not to include tick values in the dump . The default value is * true * . <nl> / / / <nl> / / / @ RESTQUERYPARAM { flush , boolean , optional } <nl> / / / Whether or not to flush the WAL before dumping . The default value is * true * . <nl> void RestReplicationHandler : : handleCommandRemoveKeys ( ) { <nl> / / / @ RESTDESCRIPTION <nl> / / / Returns the data from the collection for the requested range . <nl> / / / <nl> - / / / When the * from * query parameter is not used , collection events are returned <nl> - / / / from <nl> - / / / the beginning . When the * from * parameter is used , the result will only <nl> - / / / contain <nl> - / / / collection entries which have higher tick values than the specified * from * <nl> - / / / value <nl> + / / / When the * from * query parameter is not used , collection events are returned from <nl> + / / / the beginning . When the * from * parameter is used , the result will only contain <nl> + / / / collection entries which have higher tick values than the specified * from * value <nl> / / / ( note : the log entry with a tick value equal to * from * will be excluded ) . 
<nl> / / / <nl> - / / / The * to * query parameter can be used to optionally restrict the upper bound <nl> - / / / of <nl> + / / / The * to * query parameter can be used to optionally restrict the upper bound of <nl> / / / the result to a certain tick value . If used , the result will only contain <nl> / / / collection entries with tick values up to ( including ) * to * . <nl> / / / <nl> - / / / The * chunkSize * query parameter can be used to control the size of the <nl> - / / / result . <nl> + / / / The * chunkSize * query parameter can be used to control the size of the result . <nl> / / / It must be specified in bytes . The * chunkSize * value will only be honored <nl> / / / approximately . Otherwise a too low * chunkSize * value could cause the server <nl> / / / to not be able to put just one entry into the result and return it . <nl> / / / Therefore , the * chunkSize * value will only be consulted after an entry has <nl> / / / been written into the result . If the result size is then bigger than <nl> / / / * chunkSize * , the server will respond with as many entries as there are <nl> - / / / in the response already . If the result size is still smaller than <nl> - / / / * chunkSize * , <nl> + / / / in the response already . If the result size is still smaller than * chunkSize * , <nl> / / / the server will try to return more data if there ' s more data left to return . <nl> / / / <nl> - / / / If * chunkSize * is not specified , some server - side default value will be <nl> - / / / used . <nl> + / / / If * chunkSize * is not specified , some server - side default value will be used . <nl> / / / <nl> / / / The * Content - Type * of the result is * application / x - arango - dump * . This is an <nl> / / / easy - to - process format , with all entries going onto separate lines in the <nl> void RestReplicationHandler : : handleCommandRemoveKeys ( ) { <nl> / / / <nl> / / / - * tick * : the operation ' s tick attribute <nl> / / / <nl> - / / / - * key * : the key of the document / edge or the key used in the deletion <nl> - / / / operation <nl> + / / / - * key * : the key of the document / edge or the key used in the deletion operation <nl> / / / <nl> / / / - * rev * : the revision id of the document / edge or the deletion operation <nl> / / / <nl> void RestReplicationHandler : : handleCommandRemoveKeys ( ) { <nl> / / / <nl> / / / - 2302 : document / edge deletion <nl> / / / <nl> - / / / * * Note * * : there will be no distinction between inserts and updates when <nl> - / / / calling this method . <nl> + / / / * * Note * * : there will be no distinction between inserts and updates when calling this method . <nl> / / / <nl> / / / @ RESTRETURNCODES <nl> / / / <nl> / / / @ RESTRETURNCODE { 200 } <nl> - / / / is returned if the request was executed successfully and data was returned . <nl> - / / / The header <nl> - / / / ` x - arango - replication - lastincluded ` is set to the tick of the last document <nl> - / / / returned . <nl> + / / / is returned if the request was executed successfully and data was returned . The header <nl> + / / / ` x - arango - replication - lastincluded ` is set to the tick of the last document returned . <nl> / / / <nl> / / / @ RESTRETURNCODE { 204 } <nl> - / / / is returned if the request was executed successfully , but there was no <nl> - / / / content available . <nl> + / / / is returned if the request was executed successfully , but there was no content available . <nl> / / / The header ` x - arango - replication - lastincluded ` is ` 0 ` in this case . 
<nl> / / / <nl> / / / @ RESTRETURNCODE { 400 } <nl> void RestReplicationHandler : : handleCommandDump ( ) { <nl> / / / @ startDocuBlock JSF_put_api_replication_makeSlave <nl> / / / @ brief Changes role to slave <nl> / / / <nl> - / / / @ RESTHEADER { PUT / _api / replication / make - slave , Turn the server into a slave <nl> - / / / of another } <nl> + / / / @ RESTHEADER { PUT / _api / replication / make - slave , Turn the server into a slave of another } <nl> / / / <nl> / / / @ RESTBODYPARAM { endpoint , string , required , string } <nl> / / / the master endpoint to connect to ( e . g . " tcp : / / 192 . 168 . 173 . 13 : 8529 " ) . <nl> void RestReplicationHandler : : handleCommandDump ( ) { <nl> / / / <nl> / / / @ RESTBODYPARAM { autoResyncRetries , integer , optional , int64 } <nl> / / / number of resynchronization retries that will be performed in a row when <nl> - / / / automatic resynchronization is enabled and kicks in . Setting this to * 0 * <nl> - / / / will <nl> + / / / automatic resynchronization is enabled and kicks in . Setting this to * 0 * will <nl> / / / effectively disable * autoResync * . Setting it to some other value will limit <nl> - / / / the number of retries that are performed . This helps preventing endless <nl> - / / / retries <nl> + / / / the number of retries that are performed . This helps preventing endless retries <nl> / / / in case resynchronizations always fail . <nl> / / / <nl> / / / @ RESTBODYPARAM { initialSyncMaxWaitTime , integer , optional , int64 } <nl> / / / the maximum wait time ( in seconds ) that the initial synchronization will <nl> / / / wait for a response from the master when fetching initial collection data . <nl> - / / / This wait time can be used to control after what time the initial <nl> - / / / synchronization <nl> + / / / This wait time can be used to control after what time the initial synchronization <nl> / / / will give up waiting for a response and fail . This value is relevant even <nl> / / / for continuous replication when * autoResync * is set to * true * because this <nl> / / / may re - start the initial synchronization when the master cannot provide <nl> void RestReplicationHandler : : handleCommandDump ( ) { <nl> / / / @ RESTBODYPARAM { idleMaxWaitTime , integer , optional , int64 } <nl> / / / the maximum wait time ( in seconds ) that the applier will intentionally idle <nl> / / / before fetching more log data from the master in case the master has <nl> - / / / already sent all its log data and there have been previous log fetch <nl> - / / / attempts <nl> + / / / already sent all its log data and there have been previous log fetch attempts <nl> / / / that resulted in no more log data . This wait time can be used to control the <nl> / / / maximum frequency with which the replication applier sends HTTP log fetch <nl> / / / requests to the master in case there is no write activity on the master for <nl> void RestReplicationHandler : : handleCommandDump ( ) { <nl> / / / at start of its continuous replication if the start tick from the dump phase <nl> / / / is still present on the master . If not , then there would be data loss . If <nl> / / / * requireFromPresent * is * true * , the replication applier will abort with an <nl> - / / / appropriate error message . If set to * false * , then the replication applier <nl> - / / / will <nl> + / / / appropriate error message . If set to * false * , then the replication applier will <nl> / / / still start , and ignore the data loss . 
<nl> / / / <nl> / / / @ RESTBODYPARAM { verbose , boolean , optional , } <nl> void RestReplicationHandler : : handleCommandDump ( ) { <nl> / / / problems only . <nl> / / / <nl> / / / @ RESTDESCRIPTION <nl> - / / / Starts a full data synchronization from a remote endpoint into the local <nl> - / / / ArangoDB <nl> + / / / Starts a full data synchronization from a remote endpoint into the local ArangoDB <nl> / / / database and afterwards starts the continuous replication . <nl> / / / The operation works on a per - database level . <nl> / / / <nl> / / / All local database data will be removed prior to the synchronization . <nl> / / / <nl> - / / / In case of success , the body of the response is a JSON object with the <nl> - / / / following <nl> + / / / In case of success , the body of the response is a JSON object with the following <nl> / / / attributes : <nl> / / / <nl> / / / - * state * : a JSON object with the following sub - attributes : <nl> void RestReplicationHandler : : handleCommandDump ( ) { <nl> / / / replication log the applier has processed . <nl> / / / <nl> / / / Regularly , the last applied and last processed tick values should be <nl> - / / / identical . For transactional operations , the replication applier will <nl> - / / / first <nl> + / / / identical . For transactional operations , the replication applier will first <nl> / / / process incoming log events before applying them , so the processed tick <nl> / / / value might be higher than the applied tick value . This will be the case <nl> / / / until the applier encounters the * transaction commit * log event for the <nl> void RestReplicationHandler : : handleCommandDump ( ) { <nl> / / / <nl> / / / - * time * : the time on the applier server . <nl> / / / <nl> - / / / - * totalRequests * : the total number of requests the applier has made to <nl> - / / / the <nl> + / / / - * totalRequests * : the total number of requests the applier has made to the <nl> / / / endpoint . <nl> / / / <nl> - / / / - * totalFailedConnects * : the total number of failed connection attempts <nl> - / / / the <nl> + / / / - * totalFailedConnects * : the total number of failed connection attempts the <nl> / / / applier has made . <nl> / / / <nl> / / / - * totalEvents * : the total number of log events the applier has processed . <nl> / / / <nl> - / / / - * totalOperationsExcluded * : the total number of log events excluded <nl> - / / / because <nl> + / / / - * totalOperationsExcluded * : the total number of log events excluded because <nl> / / / of * restrictCollections * . <nl> / / / <nl> - / / / - * progress * : a JSON object with details about the replication applier <nl> - / / / progress . <nl> + / / / - * progress * : a JSON object with details about the replication applier progress . <nl> / / / It contains the following sub - attributes if there is progress to report : <nl> / / / <nl> / / / - * message * : a textual description of the progress <nl> void RestReplicationHandler : : handleCommandDump ( ) { <nl> / / / <nl> / / / - * failedConnects * : the current number of failed connection attempts <nl> / / / <nl> - / / / - * lastError * : a JSON object with details about the last error that <nl> - / / / happened on <nl> - / / / the applier . It contains the following sub - attributes if there was an <nl> - / / / error : <nl> + / / / - * lastError * : a JSON object with details about the last error that happened on <nl> + / / / the applier . 
It contains the following sub - attributes if there was an error : <nl> / / / <nl> / / / - * errorNum * : a numerical error code <nl> / / / <nl> void RestReplicationHandler : : handleCommandDump ( ) { <nl> / / / - * endpoint * : the endpoint the applier is connected to ( if applier is <nl> / / / active ) or will connect to ( if applier is currently inactive ) <nl> / / / <nl> - / / / - * database * : the name of the database the applier is connected to ( if <nl> - / / / applier is <nl> + / / / - * database * : the name of the database the applier is connected to ( if applier is <nl> / / / active ) or will connect to ( if applier is currently inactive ) <nl> / / / <nl> / / / WARNING : calling this method will synchronize data from the collections found <nl> void RestReplicationHandler : : handleCommandDump ( ) { <nl> / / / Use with caution ! <nl> / / / <nl> / / / Please also keep in mind that this command may take a long time to complete <nl> - / / / and return . This is because it will first do a full data synchronization <nl> - / / / with <nl> + / / / and return . This is because it will first do a full data synchronization with <nl> / / / the master , which will take time roughly proportional to the amount of data . <nl> / / / <nl> / / / * * Note * * : this method is not supported on a coordinator in a cluster . <nl> void RestReplicationHandler : : handleCommandMakeSlave ( ) { <nl> / / / @ startDocuBlock JSF_put_api_replication_synchronize <nl> / / / @ brief start a replication <nl> / / / <nl> - / / / @ RESTHEADER { PUT / _api / replication / sync , Synchronize data from a remote <nl> - / / / endpoint } <nl> + / / / @ RESTHEADER { PUT / _api / replication / sync , Synchronize data from a remote endpoint } <nl> / / / <nl> / / / @ RESTBODYPARAM { endpoint , string , required , string } <nl> / / / the master endpoint to connect to ( e . g . " tcp : / / 192 . 168 . 173 . 13 : 8529 " ) . <nl> void RestReplicationHandler : : handleCommandMakeSlave ( ) { <nl> / / / <nl> / / / @ RESTBODYPARAM { restrictCollections , array , optional , string } <nl> / / / an optional array of collections for use with <nl> - / / / * restrictType * . If * restrictType * is * include * , only the specified <nl> - / / / collections <nl> + / / / * restrictType * . If * restrictType * is * include * , only the specified collections <nl> / / / will be synchronized . If * restrictType * is * exclude * , all but the specified <nl> / / / collections will be synchronized . <nl> / / / <nl> / / / @ RESTBODYPARAM { initialSyncMaxWaitTime , integer , optional , int64 } <nl> / / / the maximum wait time ( in seconds ) that the initial synchronization will <nl> / / / wait for a response from the master when fetching initial collection data . <nl> - / / / This wait time can be used to control after what time the initial <nl> - / / / synchronization <nl> + / / / This wait time can be used to control after what time the initial synchronization <nl> / / / will give up waiting for a response and fail . <nl> / / / This value will be ignored if set to * 0 * . <nl> / / / <nl> void RestReplicationHandler : : handleCommandMakeSlave ( ) { <nl> / / / Starts a full data synchronization from a remote endpoint into the local <nl> / / / ArangoDB database . <nl> / / / <nl> - / / / The * sync * method can be used by replication clients to connect an ArangoDB <nl> - / / / database <nl> - / / / to a remote endpoint , fetch the remote list of collections and indexes , and <nl> - / / / collection <nl> - / / / data . 
It will thus create a local backup of the state of data at the remote <nl> - / / / ArangoDB <nl> + / / / The * sync * method can be used by replication clients to connect an ArangoDB database <nl> + / / / to a remote endpoint , fetch the remote list of collections and indexes , and collection <nl> + / / / data . It will thus create a local backup of the state of data at the remote ArangoDB <nl> / / / database . * sync * works on a per - database level . <nl> / / / <nl> - / / / * sync * will first fetch the list of collections and indexes from the remote <nl> - / / / endpoint . <nl> - / / / It does so by calling the * inventory * API of the remote database . It will <nl> - / / / then purge <nl> - / / / data in the local ArangoDB database , and after start will transfer <nl> - / / / collection data <nl> - / / / from the remote database to the local ArangoDB database . It will extract <nl> - / / / data from the <nl> - / / / remote database by calling the remote database ' s * dump * API until all data <nl> - / / / are fetched . <nl> - / / / <nl> - / / / In case of success , the body of the response is a JSON object with the <nl> - / / / following <nl> + / / / * sync * will first fetch the list of collections and indexes from the remote endpoint . <nl> + / / / It does so by calling the * inventory * API of the remote database . It will then purge <nl> + / / / data in the local ArangoDB database , and after start will transfer collection data <nl> + / / / from the remote database to the local ArangoDB database . It will extract data from the <nl> + / / / remote database by calling the remote database ' s * dump * API until all data are fetched . <nl> + / / / <nl> + / / / In case of success , the body of the response is a JSON object with the following <nl> / / / attributes : <nl> / / / <nl> - / / / - * collections * : an array of collections that were transferred from the <nl> - / / / endpoint <nl> + / / / - * collections * : an array of collections that were transferred from the endpoint <nl> / / / <nl> / / / - * lastLogTick * : the last log tick on the endpoint at the time the transfer <nl> - / / / was started . Use this value as the * from * value when starting the <nl> - / / / continuous <nl> + / / / was started . Use this value as the * from * value when starting the continuous <nl> / / / synchronization later . <nl> / / / <nl> / / / WARNING : calling this method will synchronize data from the collections found <nl> void RestReplicationHandler : : handleCommandServerId ( ) { <nl> / / / @ startDocuBlock JSF_put_api_replication_applier <nl> / / / @ brief fetch the current replication configuration <nl> / / / <nl> - / / / @ RESTHEADER { GET / _api / replication / applier - config , Return configuration of <nl> - / / / replication applier } <nl> + / / / @ RESTHEADER { GET / _api / replication / applier - config , Return configuration of replication applier } <nl> / / / <nl> / / / @ RESTDESCRIPTION <nl> / / / Returns the configuration of the replication applier . <nl> void RestReplicationHandler : : handleCommandServerId ( ) { <nl> / / / The body of the response is a JSON object with the configuration . The <nl> / / / following attributes may be present in the configuration : <nl> / / / <nl> - / / / - * endpoint * : the logger server to connect to ( e . g . <nl> - / / / " tcp : / / 192 . 168 . 173 . 13 : 8529 " ) . <nl> + / / / - * endpoint * : the logger server to connect to ( e . g . " tcp : / / 192 . 168 . 173 . 13 : 8529 " ) . 
<nl> / / / <nl> / / / - * database * : the name of the database to connect to ( e . g . " _system " ) . <nl> / / / <nl> - / / / - * username * : an optional ArangoDB username to use when connecting to the <nl> - / / / endpoint . <nl> + / / / - * username * : an optional ArangoDB username to use when connecting to the endpoint . <nl> / / / <nl> / / / - * password * : the password to use when connecting to the endpoint . <nl> / / / <nl> void RestReplicationHandler : : handleCommandServerId ( ) { <nl> / / / will make in a row . If the applier cannot establish a connection to the <nl> / / / endpoint in this number of attempts , it will stop itself . <nl> / / / <nl> - / / / - * connectTimeout * : the timeout ( in seconds ) when attempting to connect to <nl> - / / / the <nl> + / / / - * connectTimeout * : the timeout ( in seconds ) when attempting to connect to the <nl> / / / endpoint . This value is used for each connection attempt . <nl> / / / <nl> - / / / - * requestTimeout * : the timeout ( in seconds ) for individual requests to the <nl> - / / / endpoint . <nl> + / / / - * requestTimeout * : the timeout ( in seconds ) for individual requests to the endpoint . <nl> / / / <nl> / / / - * chunkSize * : the requested maximum size for log transfer packets that <nl> / / / is used when the endpoint is contacted . <nl> void RestReplicationHandler : : handleCommandServerId ( ) { <nl> / / / - * adaptivePolling * : whether or not the replication applier will use <nl> / / / adaptive polling . <nl> / / / <nl> - / / / - * includeSystem * : whether or not system collection operations will be <nl> - / / / applied <nl> + / / / - * includeSystem * : whether or not system collection operations will be applied <nl> / / / <nl> / / / - * autoResync * : whether or not the slave should perform a full automatic <nl> / / / resynchronization with the master in case the master cannot serve log data <nl> void RestReplicationHandler : : handleCommandServerId ( ) { <nl> / / / value <nl> / / / can be found . <nl> / / / <nl> - / / / - * autoResyncRetries * : number of resynchronization retries that will be <nl> - / / / performed <nl> - / / / in a row when automatic resynchronization is enabled and kicks in . Setting <nl> - / / / this <nl> - / / / to * 0 * will effectively disable * autoResync * . Setting it to some other <nl> - / / / value <nl> - / / / will limit the number of retries that are performed . This helps preventing <nl> - / / / endless <nl> + / / / - * autoResyncRetries * : number of resynchronization retries that will be performed <nl> + / / / in a row when automatic resynchronization is enabled and kicks in . Setting this <nl> + / / / to * 0 * will effectively disable * autoResync * . Setting it to some other value <nl> + / / / will limit the number of retries that are performed . This helps prevent endless <nl> / / / retries in case resynchronizations always fail . <nl> / / / <nl> - / / / - * initialSyncMaxWaitTime * : the maximum wait time ( in seconds ) that the <nl> - / / / initial <nl> - / / / synchronization will wait for a response from the master when fetching <nl> - / / / initial <nl> + / / / - * initialSyncMaxWaitTime * : the maximum wait time ( in seconds ) that the initial <nl> + / / / synchronization will wait for a response from the master when fetching initial <nl> / / / collection data . 
<nl> - / / / This wait time can be used to control after what time the initial <nl> - / / / synchronization <nl> + / / / This wait time can be used to control after what time the initial synchronization <nl> / / / will give up waiting for a response and fail . This value is relevant even <nl> / / / for continuous replication when * autoResync * is set to * true * because this <nl> / / / may re - start the initial synchronization when the master cannot provide <nl> void RestReplicationHandler : : handleCommandServerId ( ) { <nl> / / / connection problems . <nl> / / / This value will be ignored if set to * 0 * . <nl> / / / <nl> - / / / - * idleMinWaitTime * : the minimum wait time ( in seconds ) that the applier <nl> - / / / will <nl> + / / / - * idleMinWaitTime * : the minimum wait time ( in seconds ) that the applier will <nl> / / / intentionally idle before fetching more log data from the master in case <nl> / / / the master has already sent all its log data . This wait time can be used <nl> / / / to control the frequency with which the replication applier sends HTTP log <nl> - / / / fetch requests to the master in case there is no write activity on the <nl> - / / / master . <nl> + / / / fetch requests to the master in case there is no write activity on the master . <nl> / / / This value will be ignored if set to * 0 * . <nl> / / / <nl> - / / / - * idleMaxWaitTime * : the maximum wait time ( in seconds ) that the applier <nl> - / / / will <nl> - / / / intentionally idle before fetching more log data from the master in case <nl> - / / / the <nl> + / / / - * idleMaxWaitTime * : the maximum wait time ( in seconds ) that the applier will <nl> + / / / intentionally idle before fetching more log data from the master in case the <nl> / / / master has already sent all its log data and there have been previous log <nl> - / / / fetch attempts that resulted in no more log data . This wait time can be <nl> - / / / used <nl> - / / / to control the maximum frequency with which the replication applier sends <nl> - / / / HTTP <nl> + / / / fetch attempts that resulted in no more log data . This wait time can be used <nl> + / / / to control the maximum frequency with which the replication applier sends HTTP <nl> / / / log fetch requests to the master in case there is no write activity on the <nl> - / / / master for longer periods . This configuration value will only be used if <nl> - / / / the <nl> + / / / master for longer periods . This configuration value will only be used if the <nl> / / / option * adaptivePolling * is set to * true * . <nl> / / / This value will be ignored if set to * 0 * . <nl> / / / <nl> - / / / - * requireFromPresent * : if set to * true * , then the replication applier will <nl> - / / / check <nl> - / / / at start whether the start tick from which it starts or resumes <nl> - / / / replication is <nl> + / / / - * requireFromPresent * : if set to * true * , then the replication applier will check <nl> + / / / at start whether the start tick from which it starts or resumes replication is <nl> / / / still present on the master . If not , then there would be data loss . If <nl> / / / * requireFromPresent * is * true * , the replication applier will abort with an <nl> - / / / appropriate error message . If set to * false * , then the replication applier <nl> - / / / will <nl> + / / / appropriate error message . If set to * false * , then the replication applier will <nl> / / / still start , and ignore the data loss . 
<nl> / / / <nl> - / / / - * verbose * : if set to * true * , then a log line will be emitted for all <nl> - / / / operations <nl> + / / / - * verbose * : if set to * true * , then a log line will be emitted for all operations <nl> / / / performed by the replication applier . This should be used for debugging <nl> / / / replication <nl> / / / problems only . <nl> / / / <nl> / / / - * restrictType * : the configuration for * restrictCollections * <nl> / / / <nl> - / / / - * restrictCollections * : the optional array of collections to include or <nl> - / / / exclude , <nl> + / / / - * restrictCollections * : the optional array of collections to include or exclude , <nl> / / / based on the setting of * restrictType * <nl> / / / <nl> / / / @ RESTRETURNCODES <nl> void RestReplicationHandler : : handleCommandApplierGetConfig ( ) { <nl> / / / @ startDocuBlock JSF_put_api_replication_applier_adjust <nl> / / / @ brief set configuration values of an applier <nl> / / / <nl> - / / / @ RESTHEADER { PUT / _api / replication / applier - config , Adjust configuration of <nl> - / / / replication applier } <nl> + / / / @ RESTHEADER { PUT / _api / replication / applier - config , Adjust configuration of replication applier } <nl> / / / <nl> / / / @ RESTBODYPARAM { endpoint , string , required , string } <nl> - / / / the logger server to connect to ( e . g . " tcp : / / 192 . 168 . 173 . 13 : 8529 " ) . The <nl> - / / / endpoint must be specified . <nl> + / / / the logger server to connect to ( e . g . " tcp : / / 192 . 168 . 173 . 13 : 8529 " ) . The endpoint must be specified . <nl> / / / <nl> / / / @ RESTBODYPARAM { database , string , required , string } <nl> - / / / the name of the database on the endpoint . If not specified , defaults to the <nl> - / / / current local database name . <nl> + / / / the name of the database on the endpoint . If not specified , defaults to the current local database name . <nl> / / / <nl> / / / @ RESTBODYPARAM { username , string , optional , string } <nl> / / / an optional ArangoDB username to use when connecting to the endpoint . <nl> void RestReplicationHandler : : handleCommandApplierGetConfig ( ) { <nl> / / / This value will be ignored if set to * 0 * . <nl> / / / <nl> / / / @ RESTBODYPARAM { idleMaxWaitTime , integer , optional , int64 } <nl> - / / / the maximum wait time ( in seconds ) that the applier will intentionally idle <nl> - / / / before fetching more log data from the master in case the master has <nl> - / / / already sent all its log data and there have been previous log fetch <nl> - / / / attempts <nl> + / / / the maximum wait time ( in seconds ) that the applier will intentionally idle <nl> + / / / before fetching more log data from the master in case the master has <nl> + / / / already sent all its log data and there have been previous log fetch attempts <nl> / / / that resulted in no more log data . This wait time can be used to control the <nl> / / / maximum frequency with which the replication applier sends HTTP log fetch <nl> / / / requests to the master in case there is no write activity on the master for <nl> void RestReplicationHandler : : handleCommandApplierGetConfig ( ) { <nl> / / / <nl> / / / @ RESTBODYPARAM { requireFromPresent , boolean , required , } <nl> / / / if set to * true * , then the replication applier will check <nl> - / / / at start whether the start tick from which it starts or resumes replication <nl> - / / / is <nl> - / / / still present on the master . If not , then there would be data loss . 
If <nl> + / / / at start whether the start tick from which it starts or resumes replication is <nl> + / / / still present on the master . If not , then there would be data loss . If <nl> / / / * requireFromPresent * is * true * , the replication applier will abort with an <nl> - / / / appropriate error message . If set to * false * , then the replication applier <nl> - / / / will <nl> + / / / appropriate error message . If set to * false * , then the replication applier will <nl> / / / still start , and ignore the data loss . <nl> / / / <nl> / / / @ RESTBODYPARAM { verbose , boolean , required , } <nl> - / / / if set to * true * , then a log line will be emitted for all operations <nl> - / / / performed by the replication applier . This should be used for debugging <nl> - / / / replication <nl> + / / / if set to * true * , then a log line will be emitted for all operations <nl> + / / / performed by the replication applier . This should be used for debugging replication <nl> / / / problems only . <nl> / / / <nl> / / / @ RESTBODYPARAM { restrictType , string , required , string } <nl> - / / / the configuration for * restrictCollections * ; Has to be either * include * or <nl> - / / / * exclude * <nl> + / / / the configuration for * restrictCollections * ; Has to be either * include * or * exclude * <nl> / / / <nl> / / / @ RESTBODYPARAM { restrictCollections , array , optional , string } <nl> / / / the array of collections to include or exclude , <nl> void RestReplicationHandler : : handleCommandApplierGetConfig ( ) { <nl> / / / will be saved immediately but only become active with the next start of the <nl> / / / applier . <nl> / / / <nl> - / / / In case of success , the body of the response is a JSON object with the <nl> - / / / updated <nl> + / / / In case of success , the body of the response is a JSON object with the updated <nl> / / / configuration . <nl> / / / <nl> / / / @ RESTRETURNCODES <nl> void RestReplicationHandler : : handleCommandApplierSetConfig ( ) { <nl> / / / @ RESTQUERYPARAMETERS <nl> / / / <nl> / / / @ RESTQUERYPARAM { from , string , optional } <nl> - / / / The remote * lastLogTick * value from which to start applying . If not <nl> - / / / specified , <nl> + / / / The remote * lastLogTick * value from which to start applying . If not specified , <nl> / / / the last saved tick from the previous applier run is used . If there is no <nl> / / / previous applier state saved , the applier will start at the beginning of the <nl> / / / logger server ' s log . <nl> void RestReplicationHandler : : handleCommandApplierStop ( ) { <nl> / / / @ startDocuBlock JSF_get_api_replication_applier_state <nl> / / / @ brief output the current status of the replication <nl> / / / <nl> - / / / @ RESTHEADER { GET / _api / replication / applier - state , State of the replication <nl> - / / / applier } <nl> + / / / @ RESTHEADER { GET / _api / replication / applier - state , State of the replication applier } <nl> / / / <nl> / / / @ RESTDESCRIPTION <nl> / / / Returns the state of the replication applier , regardless of whether the <nl> void RestReplicationHandler : : handleCommandApplierStop ( ) { <nl> / / / replication log the applier has processed . <nl> / / / <nl> / / / Regularly , the last applied and last processed tick values should be <nl> - / / / identical . For transactional operations , the replication applier will <nl> - / / / first <nl> + / / / identical . 
For transactional operations , the replication applier will first <nl> / / / process incoming log events before applying them , so the processed tick <nl> / / / value might be higher than the applied tick value . This will be the case <nl> / / / until the applier encounters the * transaction commit * log event for the <nl> void RestReplicationHandler : : handleCommandApplierStop ( ) { <nl> / / / <nl> / / / - * time * : the time on the applier server . <nl> / / / <nl> - / / / - * totalRequests * : the total number of requests the applier has made to <nl> - / / / the <nl> + / / / - * totalRequests * : the total number of requests the applier has made to the <nl> / / / endpoint . <nl> / / / <nl> - / / / - * totalFailedConnects * : the total number of failed connection attempts <nl> - / / / the <nl> + / / / - * totalFailedConnects * : the total number of failed connection attempts the <nl> / / / applier has made . <nl> / / / <nl> / / / - * totalEvents * : the total number of log events the applier has processed . <nl> / / / <nl> - / / / - * totalOperationsExcluded * : the total number of log events excluded <nl> - / / / because <nl> + / / / - * totalOperationsExcluded * : the total number of log events excluded because <nl> / / / of * restrictCollections * . <nl> / / / <nl> - / / / - * progress * : a JSON object with details about the replication applier <nl> - / / / progress . <nl> + / / / - * progress * : a JSON object with details about the replication applier progress . <nl> / / / It contains the following sub - attributes if there is progress to report : <nl> / / / <nl> / / / - * message * : a textual description of the progress <nl> void RestReplicationHandler : : handleCommandApplierStop ( ) { <nl> / / / <nl> / / / - * failedConnects * : the current number of failed connection attempts <nl> / / / <nl> - / / / - * lastError * : a JSON object with details about the last error that <nl> - / / / happened on <nl> - / / / the applier . It contains the following sub - attributes if there was an <nl> - / / / error : <nl> + / / / - * lastError * : a JSON object with details about the last error that happened on <nl> + / / / the applier . It contains the following sub - attributes if there was an error : <nl> / / / <nl> / / / - * errorNum * : a numerical error code <nl> / / / <nl> void RestReplicationHandler : : handleCommandApplierStop ( ) { <nl> / / / - * endpoint * : the endpoint the applier is connected to ( if applier is <nl> / / / active ) or will connect to ( if applier is currently inactive ) <nl> / / / <nl> - / / / - * database * : the name of the database the applier is connected to ( if <nl> - / / / applier is <nl> + / / / - * database * : the name of the database the applier is connected to ( if applier is <nl> / / / active ) or will connect to ( if applier is currently inactive ) <nl> / / / <nl> / / / @ RESTRETURNCODES <nl> mmm a / arangod / RestHandler / RestSimpleHandler . cpp <nl> ppp b / arangod / RestHandler / RestSimpleHandler . cpp <nl> bool RestSimpleHandler : : wasCanceled ( ) { <nl> / / / <nl> / / / The body of the response contains a JSON object with information how many <nl> / / / documents were removed ( and how many were not ) . The * removed * attribute will <nl> - / / / contain the number of actually removed documents . The * ignored * attribute <nl> - / / / will contain the number of keys in the request for which no matching <nl> - / / / document <nl> + / / / contain the number of actually removed documents . 
The * ignored * attribute <nl> + / / / will contain the number of keys in the request for which no matching document <nl> / / / could be found . <nl> / / / <nl> / / / @ RESTRETURNCODES <nl> / / / <nl> / / / @ RESTRETURNCODE { 200 } <nl> - / / / is returned if the operation was carried out successfully . The number of <nl> - / / / removed <nl> + / / / is returned if the operation was carried out successfully . The number of removed <nl> / / / documents may still be 0 in this case if none of the specified document keys <nl> / / / were found in the collection . <nl> / / / <nl> bool RestSimpleHandler : : wasCanceled ( ) { <nl> / / / The response body contains an error document in this case . <nl> / / / <nl> / / / @ RESTRETURNCODE { 405 } <nl> - / / / is returned if the operation was called with a different HTTP METHOD than <nl> - / / / PUT . <nl> + / / / is returned if the operation was called with a different HTTP METHOD than PUT . <nl> / / / <nl> / / / @ EXAMPLES <nl> / / / <nl> void RestSimpleHandler : : removeByKeys ( VPackSlice const & slice ) { <nl> / / / The response body contains an error document in this case . <nl> / / / <nl> / / / @ RESTRETURNCODE { 405 } <nl> - / / / is returned if the operation was called with a different HTTP METHOD than <nl> - / / / PUT . <nl> + / / / is returned if the operation was called with a different HTTP METHOD than PUT . <nl> / / / <nl> / / / @ EXAMPLES <nl> / / / <nl> mmm a / arangod / RestHandler / RestSimpleQueryHandler . cpp <nl> ppp b / arangod / RestHandler / RestSimpleQueryHandler . cpp <nl> HttpHandler : : status_t RestSimpleQueryHandler : : execute ( ) { <nl> / / / - * limit * : The maximal amount of documents to return . The * skip * <nl> / / / is applied before the * limit * restriction . ( optional ) <nl> / / / <nl> - / / / Returns a cursor containing the result , see [ Http <nl> - / / / Cursor ] ( . . / HttpAqlQueryCursor / README . md ) for details . <nl> + / / / Returns a cursor containing the result , see [ Http Cursor ] ( . . / HttpAqlQueryCursor / README . md ) for details . <nl> / / / <nl> / / / @ RESTRETURNCODES <nl> / / / <nl> mmm a / arangod / V8Server / v8 - collection . cpp <nl> ppp b / arangod / V8Server / v8 - collection . cpp <nl> static TRI_doc_collection_info_t * GetFigures ( TRI_vocbase_col_t * collection ) { <nl> / / / ` collection . figures ( ) ` <nl> / / / <nl> / / / Returns an object containing statistics about the collection . <nl> - / / / * * Note * * : Retrieving the figures will always load the collection into <nl> + / / / * * Note * * : Retrieving the figures will always load the collection into <nl> / / / memory . <nl> / / / <nl> - / / / * * alive . count * : The number of currently active documents in all datafiles <nl> - / / / and <nl> + / / / * * alive . count * : The number of currently active documents in all datafiles and <nl> / / / journals of the collection . Documents that are contained in the <nl> / / / write - ahead log only are not reported in this figure . <nl> / / / * * alive . size * : The total size in bytes used by all active documents of the <nl> static TRI_doc_collection_info_t * GetFigures ( TRI_vocbase_col_t * collection ) { <nl> / / / deprecated and kept for compatibility reasons only . The value will always <nl> / / / be 0 since ArangoDB 2 . 0 and higher . <nl> / / / * * shapefiles . fileSize * : The total filesize of the shape files . This <nl> - / / / value is deprecated and kept for compatibility reasons only . 
The value <nl> - / / / will <nl> + / / / value is deprecated and kept for compatibility reasons only . The value will <nl> / / / always be 0 in ArangoDB 2 . 0 and higher . <nl> / / / * * shapes . count * : The total number of shapes used in the collection . <nl> - / / / This includes shapes that are not in use anymore . Shapes that are <nl> - / / / contained <nl> + / / / This includes shapes that are not in use anymore . Shapes that are contained <nl> / / / in the write - ahead log only are not reported in this figure . <nl> / / / * * shapes . size * : The total size of all shapes ( in bytes ) . This includes <nl> / / / shapes that are not in use anymore . Shapes that are contained in the <nl> / / / write - ahead log only are not reported in this figure . <nl> / / / * * attributes . count * : The total number of attributes used in the <nl> - / / / collection . Note : the value includes data of attributes that are not in <nl> - / / / use <nl> + / / / collection . Note : the value includes data of attributes that are not in use <nl> / / / anymore . Attributes that are contained in the write - ahead log only are <nl> / / / not reported in this figure . <nl> / / / * * attributes . size * : The total size of the attribute data ( in bytes ) . <nl> / / / Note : the value includes data of attributes that are not in use anymore . <nl> - / / / Attributes that are contained in the write - ahead log only are not <nl> + / / / Attributes that are contained in the write - ahead log only are not <nl> / / / reported in this figure . <nl> / / / * * indexes . count * : The total number of indexes defined for the <nl> / / / collection , including the pre - defined indexes ( e . g . primary index ) . <nl> static TRI_doc_collection_info_t * GetFigures ( TRI_vocbase_col_t * collection ) { <nl> / / / that JavaScript code currently holds . This information can be used for <nl> / / / debugging compaction and unload issues . <nl> / / / * * waitingFor * : An optional string value that contains information about <nl> - / / / which object type is at the head of the collection ' s cleanup queue . This <nl> + / / / which object type is at the head of the collection ' s cleanup queue . This <nl> / / / information can be used for debugging compaction and unload issues . <nl> - / / / * * compactionStatus . time * : The point in time the compaction for the <nl> - / / / collection <nl> + / / / * * compactionStatus . time * : The point in time the compaction for the collection <nl> / / / was last executed . This information can be used for debugging compaction <nl> / / / issues . <nl> - / / / * * compactionStatus . message * : The action that was performed when the <nl> - / / / compaction <nl> - / / / was last run for the collection . This information can be used for <nl> - / / / debugging <nl> + / / / * * compactionStatus . message * : The action that was performed when the compaction <nl> + / / / was last run for the collection . This information can be used for debugging <nl> / / / compaction issues . <nl> / / / <nl> / / / * * Note * * : collection data that are stored in the write - ahead log only are <nl> - / / / not reported in the results . When the write - ahead log is collected , <nl> - / / / documents <nl> - / / / might be added to journals and datafiles of the collection , which may modify <nl> - / / / the figures of the collection . Also note that ` waitingFor ` and <nl> - / / / ` compactionStatus ` <nl> + / / / not reported in the results . 
When the write - ahead log is collected , documents <nl> + / / / might be added to journals and datafiles of the collection , which may modify <nl> + / / / the figures of the collection . Also note that ` waitingFor ` and ` compactionStatus ` <nl> / / / may be empty when called on a coordinator in a cluster . <nl> / / / <nl> / / / Additionally , the filesizes of collection and index parameter JSON files are <nl> static TRI_doc_collection_info_t * GetFigures ( TRI_vocbase_col_t * collection ) { <nl> / / / <nl> / / / That means that the figures reported do not reflect the actual disk <nl> / / / usage of the collection with 100 % accuracy . The actual disk usage of <nl> - / / / a collection is normally slightly higher than the sum of the reported <nl> - / / / * fileSize * values . Still the sum of the * fileSize * values can still be <nl> + / / / a collection is normally slightly higher than the sum of the reported <nl> + / / / * fileSize * values . Still the sum of the * fileSize * values can still be <nl> / / / used as a lower bound approximation of the disk usage . <nl> / / / <nl> / / / @ EXAMPLES <nl> static void JS_RenameVocbaseCol ( <nl> / / / document is overwritten . <nl> / / / <nl> / / / ` collection . replace ( document , data , true , waitForSync ) ` or <nl> - / / / ` collection . replace ( document , data , overwrite : true , waitForSync : true or <nl> - / / / false ) ` <nl> + / / / ` collection . replace ( document , data , overwrite : true , waitForSync : true or false ) ` <nl> / / / <nl> / / / The optional * waitForSync * parameter can be used to force <nl> / / / synchronization of the document replacement operation to disk even in case <nl> static void JS_RotateVocbaseCol ( <nl> / / / @ startDocuBlock documentsCollectionUpdate <nl> / / / ` collection . update ( document , data , overwrite , keepNull , waitForSync ) ` or <nl> / / / ` collection . update ( document , data , <nl> - / / / overwrite : true or false , keepNull : true or false , waitForSync : true or <nl> - / / / false ) ` <nl> + / / / overwrite : true or false , keepNull : true or false , waitForSync : true or false ) ` <nl> / / / <nl> / / / Updates an existing * document * . The * document * must be a document in <nl> / / / the current collection . This document is then patched with the <nl> static void JS_CompletionsVocbase ( <nl> / / / deleted . <nl> / / / <nl> / / / ` db . _remove ( document , true , waitForSync ) ` or <nl> - / / / ` db . _remove ( document , { overwrite : true or false , waitForSync : true or <nl> - / / / false } ) ` <nl> + / / / ` db . _remove ( document , { overwrite : true or false , waitForSync : true or false } ) ` <nl> / / / <nl> / / / The optional * waitForSync * parameter can be used to force synchronization <nl> / / / of the document deletion operation to disk even in case that the <nl> mmm a / arangod / V8Server / v8 - query . cpp <nl> ppp b / arangod / V8Server / v8 - query . cpp <nl> static void FulltextQuery ( SingleCollectionReadOnlyTransaction & trx , <nl> / / / @ startDocuBlock collectionFulltext <nl> / / / ` collection . fulltext ( attribute , query ) ` <nl> / / / <nl> - / / / The * fulltext * simple query functions performs a fulltext search on the <nl> - / / / specified <nl> + / / / The * fulltext * simple query functions performs a fulltext search on the specified <nl> / / / * attribute * and the specified * query * . <nl> / / / <nl> / / / Details about the fulltext query syntax can be found below . 
<nl> / / / <nl> - / / / Note : the * fulltext * simple query function is * * deprecated * * as of ArangoDB <nl> - / / / 2 . 6 . <nl> + / / / Note : the * fulltext * simple query function is * * deprecated * * as of ArangoDB 2 . 6 . <nl> / / / The function may be removed in future versions of ArangoDB . The preferred <nl> - / / / way for executing fulltext queries is to use an AQL query using the <nl> - / / / * FULLTEXT * <nl> + / / / way for executing fulltext queries is to use an AQL query using the * FULLTEXT * <nl> / / / [ AQL function ] ( . . / Aql / FulltextFunctions . md ) as follows : <nl> / / / <nl> - / / / FOR doc IN FULLTEXT ( @ @ collection , @ attributeName , @ queryString , @ limit ) <nl> + / / / FOR doc IN FULLTEXT ( @ @ collection , @ attributeName , @ queryString , @ limit ) <nl> / / / RETURN doc <nl> / / / <nl> / / / @ EXAMPLES <nl> static void JS_WithinQuery ( const v8 : : FunctionCallbackInfo < v8 : : Value > & args ) { <nl> / / / <nl> / / / Looks up the documents in the specified collection using the array of keys <nl> / / / provided . All documents for which a matching key was specified in the * keys * <nl> - / / / array and that exist in the collection will be returned . <nl> - / / / Keys for which no document can be found in the underlying collection are <nl> - / / / ignored , <nl> + / / / array and that exist in the collection will be returned . <nl> + / / / Keys for which no document can be found in the underlying collection are ignored , <nl> / / / and no exception will be thrown for them . <nl> / / / <nl> / / / @ EXAMPLES <nl> mmm a / arangod / V8Server / v8 - vocbase . cpp <nl> ppp b / arangod / V8Server / v8 - vocbase . cpp <nl> static void JS_IsSystemDatabase ( <nl> / / / Changing the database might be disallowed in some contexts , for example <nl> / / / server - side actions ( including Foxx ) . <nl> / / / <nl> - / / / When performing this command from arangosh , the current credentials <nl> - / / / ( username <nl> + / / / When performing this command from arangosh , the current credentials ( username <nl> / / / and password ) will be re - used . These credentials might not be valid to <nl> / / / connect to the database specified by * name * . Additionally , the database <nl> / / / only be accessed from certain endpoints only . In this case , switching the <nl> static void CreateDatabaseCoordinator ( <nl> / / / object can contain the following attributes : <nl> / / / <nl> / / / * * username * : the user name as a string . This attribute is mandatory . <nl> - / / / * * passwd * : the user password as a string . If not specified , then it <nl> - / / / defaults <nl> + / / / * * passwd * : the user password as a string . If not specified , then it defaults <nl> / / / to the empty string . <nl> / / / * * active * : a boolean flag indicating whether the user account should be <nl> / / / active or not . The default value is * true * . <nl> static void CreateDatabaseCoordinator ( <nl> / / / with an empty string password . This ensures that the new database will be <nl> / / / accessible via HTTP after it is created . <nl> / / / <nl> - / / / You can create users in a database if no initial user is specified . Switch <nl> - / / / into the new database ( username and password must be identical to the <nl> - / / / current <nl> + / / / You can create users in a database if no initial user is specified . 
Switch <nl> + / / / into the new database ( username and password must be identical to the current <nl> / / / session ) and add or modify users with the following commands . <nl> / / / <nl> / / / ` ` ` js <nl> static void CreateDatabaseCoordinator ( <nl> / / / Alternatively , you can specify user data directly . For example : <nl> / / / <nl> / / / ` ` ` js <nl> - / / / db . _createDatabase ( " newDB " , [ ] , [ { username : " newUser " , passwd : " 123456 " , <nl> - / / / active : true } ] ) <nl> + / / / db . _createDatabase ( " newDB " , [ ] , [ { username : " newUser " , passwd : " 123456 " , active : true } ] ) <nl> / / / ` ` ` <nl> / / / <nl> / / / Those methods can only be used from within the * _system * database . <nl> mmm a / arangod / V8Server / v8 - vocindex . cpp <nl> ppp b / arangod / V8Server / v8 - vocindex . cpp <nl> static void CreateCollectionCoordinator ( <nl> / / / <nl> / / / * * sparse * * can be * true * or * false * . <nl> / / / <nl> - / / / For * hash * , and * skiplist * the sparsity can be controlled , * fulltext * and <nl> - / / / * geo * <nl> + / / / For * hash * , and * skiplist * the sparsity can be controlled , * fulltext * and * geo * <nl> / / / are [ sparse ] ( WhichIndex . md ) by definition . <nl> / / / <nl> / / / * * unique * * can be * true * or * false * and is supported by * hash * or * skiplist * <nl> static void CreateVocBase ( const v8 : : FunctionCallbackInfo < v8 : : Value > & args , <nl> / / / target shard for documents . Documents are sent to shards based on the <nl> / / / values they have in their shard key attributes . The values of all shard <nl> / / / key attributes in a document are hashed , and the hash value is used to <nl> - / / / determine the target shard . Note that values of shard key attributes <nl> - / / / cannot <nl> + / / / determine the target shard . Note that values of shard key attributes cannot <nl> / / / be changed once set . <nl> / / / This option is meaningless in a single server setup . <nl> / / / <nl> static void CreateVocBase ( const v8 : : FunctionCallbackInfo < v8 : : Value > & args , <nl> / / / <nl> / / / ` db . _create ( collection - name , properties , type ) ` <nl> / / / <nl> - / / / Specifies the optional * type * of the collection , it can either be * document * <nl> - / / / or * edge * . On default it is document . Instead of giving a type you can also <nl> - / / / use <nl> + / / / Specifies the optional * type * of the collection , it can either be * document * <nl> + / / / or * edge * . On default it is document . Instead of giving a type you can also use <nl> / / / * db . _createEdgeCollection * or * db . _createDocumentCollection * . <nl> / / / <nl> / / / @ EXAMPLES <nl> static void JS_CreateDocumentCollectionVocbase ( <nl> / / / <nl> / / / * * waitForSync * ( optional , default * false * ) : If * true * creating <nl> / / / a document will only return after the data was synced to disk . <nl> - / / / * * journalSize * ( optional , default is <nl> + / / / * * journalSize * ( optional , default is <nl> / / / " configuration parameter " ) : The maximal size of <nl> / / / a journal or datafile . Note that this also limits the maximal <nl> / / / size of a single object and must be at least 1MB . <nl> mmm a / lib / V8 / v8 - utils . cpp <nl> ppp b / lib / V8 / v8 - utils . cpp <nl> static void JS_MakeAbsolute ( const v8 : : FunctionCallbackInfo < v8 : : Value > & args ) { <nl> / / / lexically sorted order . Throws an exception if the directory cannot be <nl> / / / traversed ( or path is not a directory ) . 
<nl> / / / <nl> - / / / * * Note * * : this means that list ( " x " ) of a directory containing " a " and " b " <nl> - / / / would <nl> + / / / * * Note * * : this means that list ( " x " ) of a directory containing " a " and " b " would <nl> / / / return [ " a " , " b " ] , not [ " x / a " , " x / b " ] . <nl> / / / @ endDocuBlock <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> static void JS_MarkNonce ( const v8 : : FunctionCallbackInfo < v8 : : Value > & args ) { <nl> / / / ` fs . mtime ( filename ) ` <nl> / / / <nl> / / / Returns the last modification date of the specified file . The date is <nl> - / / / returned as a Unix timestamp ( number of seconds elapsed since January 1 <nl> - / / / 1970 ) . <nl> + / / / returned as a Unix timestamp ( number of seconds elapsed since January 1 1970 ) . <nl> / / / @ endDocuBlock <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> <nl>
Revert formattings that break documentation parts .
arangodb/arangodb
7fc4b356e4824c8795372d29b860a629fe355723
2016-01-07T14:12:04Z
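The hunks above reflow the REST documentation for ArangoDB's replication endpoints. As a quick illustration of the API those comments describe — not part of the commit — here is a minimal client sketch. The endpoint paths and attribute names (`endpoint`, `requireFromPresent`, `autoResyncRetries`) come straight from the doc comments; the host, database, credentials, and use of the `requests` library are assumptions for the example.

```python
# Minimal sketch (not from the commit): reading and adjusting the replication
# applier configuration over the HTTP API documented in the diff above.
# Host, database, and credentials are illustrative assumptions; the endpoint
# paths and attribute names come from the doc comments.
import requests

BASE = "http://localhost:8529/_db/_system"  # assumed local server
AUTH = ("root", "")                         # assumed credentials

# GET /_api/replication/applier-config returns the current configuration
# as a JSON object, per the docs above.
config = requests.get(f"{BASE}/_api/replication/applier-config", auth=AUTH).json()

# PUT /_api/replication/applier-config adjusts it. The endpoint must be
# specified in the body.
config.update({
    "endpoint": "tcp://192.168.173.13:8529",  # example endpoint from the docs
    "requireFromPresent": True,               # abort instead of silently losing data
    "autoResyncRetries": 2,                   # bound resync attempts; 0 disables autoResync
})
requests.put(f"{BASE}/_api/replication/applier-config", json=config, auth=AUTH)
```

Per the documentation above, a configuration written while the applier is running is saved immediately but only becomes active with the next start of the applier.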
mmm a / BUILD . gn <nl> ppp b / BUILD . gn <nl> v8_source_set ( " cppgc_base " ) { <nl> " include / cppgc / custom - space . h " , <nl> " include / cppgc / garbage - collected . h " , <nl> " include / cppgc / heap . h " , <nl> - " include / cppgc / internal / accessors . h " , <nl> " include / cppgc / internal / api - contants . h " , <nl> " include / cppgc / internal / atomic - entry - flag . h " , <nl> " include / cppgc / internal / compiler - specific . h " , <nl> v8_source_set ( " cppgc_base " ) { <nl> " src / heap / cppgc / heap - object - header - inl . h " , <nl> " src / heap / cppgc / heap - object - header . cc " , <nl> " src / heap / cppgc / heap - object - header . h " , <nl> + " src / heap / cppgc / heap - page - inl . h " , <nl> " src / heap / cppgc / heap - page . cc " , <nl> " src / heap / cppgc / heap - page . h " , <nl> " src / heap / cppgc / heap - space . cc " , <nl> deleted file mode 100644 <nl> index ee0a0042fe0 . . 00000000000 <nl> mmm a / include / cppgc / internal / accessors . h <nl> ppp / dev / null <nl> <nl> - / / Copyright 2020 the V8 project authors . All rights reserved . <nl> - / / Use of this source code is governed by a BSD - style license that can be <nl> - / / found in the LICENSE file . <nl> - <nl> - # ifndef INCLUDE_CPPGC_INTERNAL_ACCESSORS_H_ <nl> - # define INCLUDE_CPPGC_INTERNAL_ACCESSORS_H_ <nl> - <nl> - # include " cppgc / internal / api - constants . h " <nl> - <nl> - namespace cppgc { <nl> - <nl> - class Heap ; <nl> - <nl> - namespace internal { <nl> - <nl> - inline cppgc : : Heap * GetHeapFromPayload ( const void * payload ) { <nl> - return * reinterpret_cast < cppgc : : Heap * * > ( <nl> - ( ( reinterpret_cast < uintptr_t > ( payload ) & api_constants : : kPageBaseMask ) + <nl> - api_constants : : kGuardPageSize ) + <nl> - api_constants : : kHeapOffset ) ; <nl> - } <nl> - <nl> - } / / namespace internal <nl> - } / / namespace cppgc <nl> - <nl> - # endif / / INCLUDE_CPPGC_INTERNAL_ACCESSORS_H_ <nl> mmm a / include / cppgc / internal / api - constants . h <nl> ppp b / include / cppgc / internal / api - constants . h <nl> static constexpr size_t kFullyConstructedBitFieldOffsetFromPayload = <nl> / / Mask for in - construction bit . <nl> static constexpr size_t kFullyConstructedBitMask = size_t { 1 } ; <nl> <nl> - / / Page constants used to align pointers to page begin . <nl> static constexpr size_t kPageSize = size_t { 1 } < < 17 ; <nl> - static constexpr size_t kPageAlignment = kPageSize ; <nl> - static constexpr size_t kPageBaseMask = ~ ( kPageAlignment - 1 ) ; <nl> - static constexpr size_t kGuardPageSize = 4096 ; <nl> - <nl> - / / Offset of the Heap backref . <nl> - static constexpr size_t kHeapOffset = 0 ; <nl> <nl> static constexpr size_t kLargeObjectSizeThreshold = kPageSize / 2 ; <nl> <nl> mmm a / include / cppgc / internal / prefinalizer - handler . h <nl> ppp b / include / cppgc / internal / prefinalizer - handler . 
h <nl> class V8_EXPORT PreFinalizerRegistrationDispatcher final { <nl> public : <nl> using PreFinalizerCallback = bool ( * ) ( const LivenessBroker & , void * ) ; <nl> struct PreFinalizer { <nl> - void * object_ ; <nl> - PreFinalizerCallback callback_ ; <nl> + void * object ; <nl> + PreFinalizerCallback callback ; <nl> <nl> bool operator = = ( const PreFinalizer & other ) ; <nl> } ; <nl> <nl> - static void RegisterPrefinalizer ( cppgc : : Heap * heap , <nl> - PreFinalizer prefinalzier ) ; <nl> + static void RegisterPrefinalizer ( PreFinalizer pre_finalizer ) ; <nl> } ; <nl> <nl> } / / namespace internal <nl> mmm a / include / cppgc / prefinalizer . h <nl> ppp b / include / cppgc / prefinalizer . h <nl> <nl> # ifndef INCLUDE_CPPGC_PREFINALIZER_H_ <nl> # define INCLUDE_CPPGC_PREFINALIZER_H_ <nl> <nl> - # include " cppgc / internal / accessors . h " <nl> # include " cppgc / internal / compiler - specific . h " <nl> # include " cppgc / internal / prefinalizer - handler . h " <nl> # include " cppgc / liveness - broker . h " <nl> class PrefinalizerRegistration final { <nl> " USING_PRE_FINALIZER ( T ) must be defined . " ) ; <nl> <nl> cppgc : : internal : : PreFinalizerRegistrationDispatcher : : RegisterPrefinalizer ( <nl> - internal : : GetHeapFromPayload ( self ) , { self , T : : InvokePreFinalizer } ) ; <nl> + { self , T : : InvokePreFinalizer } ) ; <nl> } <nl> <nl> void * operator new ( size_t , void * location ) = delete ; <nl> new file mode 100644 <nl> index 00000000000 . . a416a62e492 <nl> mmm / dev / null <nl> ppp b / src / heap / cppgc / heap - page - inl . h <nl> <nl> + / / Copyright 2020 the V8 project authors . All rights reserved . <nl> + / / Use of this source code is governed by a BSD - style license that can be <nl> + / / found in the LICENSE file . <nl> + <nl> + # ifndef V8_HEAP_CPPGC_HEAP_PAGE_INL_H_ <nl> + # define V8_HEAP_CPPGC_HEAP_PAGE_INL_H_ <nl> + <nl> + # include " src / heap / cppgc / heap - page . h " <nl> + <nl> + namespace cppgc { <nl> + namespace internal { <nl> + <nl> + / / static <nl> + BasePage * BasePage : : FromPayload ( void * payload ) { <nl> + return reinterpret_cast < BasePage * > ( <nl> + ( reinterpret_cast < uintptr_t > ( payload ) & kPageBaseMask ) + kGuardPageSize ) ; <nl> + } <nl> + <nl> + / / static <nl> + const BasePage * BasePage : : FromPayload ( const void * payload ) { <nl> + return reinterpret_cast < const BasePage * > ( <nl> + ( reinterpret_cast < uintptr_t > ( const_cast < void * > ( payload ) ) & <nl> + kPageBaseMask ) + <nl> + kGuardPageSize ) ; <nl> + } <nl> + <nl> + } / / namespace internal <nl> + } / / namespace cppgc <nl> + <nl> + # endif / / V8_HEAP_CPPGC_HEAP_PAGE_INL_H_ <nl> mmm a / src / heap / cppgc / heap - page . cc <nl> ppp b / src / heap / cppgc / heap - page . 
cc <nl> const HeapObjectHeader * ObjectHeaderFromInnerAddressImpl ( const BasePage * page , <nl> <nl> } / / namespace <nl> <nl> - STATIC_ASSERT ( kPageSize = = api_constants : : kPageAlignment ) ; <nl> - <nl> - / / static <nl> - BasePage * BasePage : : FromPayload ( void * payload ) { <nl> - return reinterpret_cast < BasePage * > ( <nl> - ( reinterpret_cast < uintptr_t > ( payload ) & kPageBaseMask ) + kGuardPageSize ) ; <nl> - } <nl> - <nl> - / / static <nl> - const BasePage * BasePage : : FromPayload ( const void * payload ) { <nl> - return reinterpret_cast < const BasePage * > ( <nl> - ( reinterpret_cast < uintptr_t > ( const_cast < void * > ( payload ) ) & <nl> - kPageBaseMask ) + <nl> - kGuardPageSize ) ; <nl> - } <nl> - <nl> / / static <nl> BasePage * BasePage : : FromInnerAddress ( const Heap * heap , void * address ) { <nl> return const_cast < BasePage * > ( <nl> BasePage : : BasePage ( Heap * heap , BaseSpace * space , PageType type ) <nl> : heap_ ( heap ) , space_ ( space ) , type_ ( type ) { <nl> DCHECK_EQ ( 0u , ( reinterpret_cast < uintptr_t > ( this ) - kGuardPageSize ) & <nl> kPageOffsetMask ) ; <nl> - DCHECK_EQ ( reinterpret_cast < void * > ( & heap_ ) , <nl> - FromPayload ( this ) + api_constants : : kHeapOffset ) ; <nl> DCHECK_EQ ( & heap_ - > raw_heap ( ) , space_ - > raw_heap ( ) ) ; <nl> } <nl> <nl> mmm a / src / heap / cppgc / heap - page . h <nl> ppp b / src / heap / cppgc / heap - page . h <nl> class PageBackend ; <nl> <nl> class V8_EXPORT_PRIVATE BasePage { <nl> public : <nl> - static BasePage * FromPayload ( void * ) ; <nl> - static const BasePage * FromPayload ( const void * ) ; <nl> + static inline BasePage * FromPayload ( void * ) ; <nl> + static inline const BasePage * FromPayload ( const void * ) ; <nl> <nl> static BasePage * FromInnerAddress ( const Heap * , void * ) ; <nl> static const BasePage * FromInnerAddress ( const Heap * , const void * ) ; <nl> mmm a / src / heap / cppgc / heap - space . cc <nl> ppp b / src / heap / cppgc / heap - space . cc <nl> <nl> <nl> # include " src / base / logging . h " <nl> # include " src / base / platform / mutex . h " <nl> - # include " src / heap / cppgc / heap - page . h " <nl> + # include " src / heap / cppgc / heap - page - inl . h " <nl> # include " src / heap / cppgc / object - start - bitmap - inl . h " <nl> <nl> namespace cppgc { <nl> mmm a / src / heap / cppgc / heap . cc <nl> ppp b / src / heap / cppgc / heap . cc <nl> <nl> # include " src / heap / cppgc / gc - invoker . h " <nl> # include " src / heap / cppgc / heap - object - header - inl . h " <nl> # include " src / heap / cppgc / heap - object - header . h " <nl> - # include " src / heap / cppgc / heap - page . h " <nl> + # include " src / heap / cppgc / heap - page - inl . h " <nl> # include " src / heap / cppgc / heap - visitor . h " <nl> # include " src / heap / cppgc / page - memory . h " <nl> # include " src / heap / cppgc / stack . h " <nl> mmm a / src / heap / cppgc / marker . cc <nl> ppp b / src / heap / cppgc / marker . cc <nl> <nl> <nl> # include " include / cppgc / internal / process - heap . h " <nl> # include " src / heap / cppgc / heap - object - header - inl . h " <nl> + # include " src / heap / cppgc / heap - page - inl . h " <nl> # include " src / heap / cppgc / heap - visitor . h " <nl> # include " src / heap / cppgc / heap . h " <nl> # include " src / heap / cppgc / marking - visitor . h " <nl> mmm a / src / heap / cppgc / marking - visitor . cc <nl> ppp b / src / heap / cppgc / marking - visitor . 
cc <nl> <nl> # include " src / heap / cppgc / marking - visitor . h " <nl> <nl> # include " include / cppgc / garbage - collected . h " <nl> - # include " include / cppgc / internal / accessors . h " <nl> # include " src / heap / cppgc / heap - object - header - inl . h " <nl> - # include " src / heap / cppgc / heap - page . h " <nl> + # include " src / heap / cppgc / heap - page - inl . h " <nl> # include " src / heap / cppgc / heap . h " <nl> # include " src / heap / cppgc / page - memory - inl . h " <nl> # include " src / heap / cppgc / sanitizers . h " <nl> mmm a / src / heap / cppgc / object - allocator - inl . h <nl> ppp b / src / heap / cppgc / object - allocator - inl . h <nl> <nl> # include " src / base / logging . h " <nl> # include " src / heap / cppgc / heap - object - header - inl . h " <nl> # include " src / heap / cppgc / heap - object - header . h " <nl> - # include " src / heap / cppgc / heap - page . h " <nl> + # include " src / heap / cppgc / heap - page - inl . h " <nl> # include " src / heap / cppgc / object - allocator . h " <nl> # include " src / heap / cppgc / object - start - bitmap - inl . h " <nl> # include " src / heap / cppgc / object - start - bitmap . h " <nl> mmm a / src / heap / cppgc / pointer - policies . cc <nl> ppp b / src / heap / cppgc / pointer - policies . cc <nl> <nl> / / found in the LICENSE file . <nl> <nl> # include " include / cppgc / internal / pointer - policies . h " <nl> - # include " include / cppgc / internal / persistent - node . h " <nl> <nl> + # include " include / cppgc / internal / persistent - node . h " <nl> # include " src / base / macros . h " <nl> - # include " src / heap / cppgc / heap - page . h " <nl> + # include " src / heap / cppgc / heap - page - inl . h " <nl> # include " src / heap / cppgc / heap . h " <nl> <nl> namespace cppgc { <nl> mmm a / src / heap / cppgc / prefinalizer - handler . cc <nl> ppp b / src / heap / cppgc / prefinalizer - handler . cc <nl> <nl> # include < memory > <nl> <nl> # include " src / base / platform / platform . h " <nl> + # include " src / heap / cppgc / heap - page - inl . h " <nl> # include " src / heap / cppgc / heap . h " <nl> <nl> namespace cppgc { <nl> namespace internal { <nl> <nl> / / static <nl> void PreFinalizerRegistrationDispatcher : : RegisterPrefinalizer ( <nl> - cppgc : : Heap * heap , PreFinalizer prefinalzier ) { <nl> - internal : : Heap : : From ( heap ) - > prefinalizer_handler ( ) - > RegisterPrefinalizer ( <nl> - prefinalzier ) ; <nl> + PreFinalizer pre_finalizer ) { <nl> + BasePage : : FromPayload ( pre_finalizer . object ) <nl> + - > heap ( ) <nl> + - > prefinalizer_handler ( ) <nl> + - > RegisterPrefinalizer ( pre_finalizer ) ; <nl> } <nl> <nl> bool PreFinalizerRegistrationDispatcher : : PreFinalizer : : operator = = ( <nl> const PreFinalizer & other ) { <nl> - return ( object_ = = other . object_ ) & & ( callback_ = = other . callback_ ) ; <nl> + return ( object = = other . object ) & & ( callback = = other . callback ) ; <nl> } <nl> <nl> PreFinalizerHandler : : PreFinalizerHandler ( ) <nl> PreFinalizerHandler : : PreFinalizerHandler ( ) <nl> { <nl> } <nl> <nl> - void PreFinalizerHandler : : RegisterPrefinalizer ( PreFinalizer prefinalizer ) { <nl> + void PreFinalizerHandler : : RegisterPrefinalizer ( PreFinalizer pre_finalizer ) { <nl> DCHECK ( CurrentThreadIsCreationThread ( ) ) ; <nl> DCHECK_EQ ( ordered_pre_finalizers_ . end ( ) , <nl> std : : find ( ordered_pre_finalizers_ . begin ( ) , <nl> - ordered_pre_finalizers_ . 
end ( ) , prefinalizer ) ) ; <nl> - ordered_pre_finalizers_ . push_back ( prefinalizer ) ; <nl> + ordered_pre_finalizers_ . end ( ) , pre_finalizer ) ) ; <nl> + ordered_pre_finalizers_ . push_back ( pre_finalizer ) ; <nl> } <nl> <nl> void PreFinalizerHandler : : InvokePreFinalizers ( ) { <nl> void PreFinalizerHandler : : InvokePreFinalizers ( ) { <nl> std : : remove_if ( ordered_pre_finalizers_ . rbegin ( ) , <nl> ordered_pre_finalizers_ . rend ( ) , <nl> [ liveness_broker ] ( const PreFinalizer & pf ) { <nl> - return ( pf . callback_ ) ( liveness_broker , pf . object_ ) ; <nl> + return ( pf . callback ) ( liveness_broker , pf . object ) ; <nl> } ) <nl> . base ( ) ) ; <nl> ordered_pre_finalizers_ . shrink_to_fit ( ) ; <nl> mmm a / src / heap / cppgc / prefinalizer - handler . h <nl> ppp b / src / heap / cppgc / prefinalizer - handler . h <nl> class PreFinalizerHandler final { <nl> <nl> PreFinalizerHandler ( ) ; <nl> <nl> - void RegisterPrefinalizer ( PreFinalizer prefinalzier ) ; <nl> + void RegisterPrefinalizer ( PreFinalizer pre_finalizer ) ; <nl> <nl> void InvokePreFinalizers ( ) ; <nl> <nl> mmm a / src / heap / cppgc / write - barrier . cc <nl> ppp b / src / heap / cppgc / write - barrier . cc <nl> <nl> # include " include / cppgc / internal / pointer - policies . h " <nl> # include " src / heap / cppgc / heap - object - header - inl . h " <nl> # include " src / heap / cppgc / heap - object - header . h " <nl> - # include " src / heap / cppgc / heap - page . h " <nl> + # include " src / heap / cppgc / heap - page - inl . h " <nl> # include " src / heap / cppgc / heap . h " <nl> # include " src / heap / cppgc / marker . h " <nl> # include " src / heap / cppgc / marking - visitor . h " <nl> mmm a / test / unittests / heap / cppgc / concurrent - sweeper - unittest . cc <nl> ppp b / test / unittests / heap / cppgc / concurrent - sweeper - unittest . cc <nl> <nl> # include " include / v8 - platform . h " <nl> # include " src / heap / cppgc / globals . h " <nl> # include " src / heap / cppgc / heap - object - header - inl . h " <nl> - # include " src / heap / cppgc / heap - page . h " <nl> + # include " src / heap / cppgc / heap - page - inl . h " <nl> # include " src / heap / cppgc / heap - space . h " <nl> # include " src / heap / cppgc / heap - visitor . h " <nl> # include " src / heap / cppgc / page - memory - inl . h " <nl> mmm a / test / unittests / heap / cppgc / custom - spaces - unittest . cc <nl> ppp b / test / unittests / heap / cppgc / custom - spaces - unittest . cc <nl> <nl> <nl> # include " include / cppgc / allocation . h " <nl> # include " include / cppgc / custom - space . h " <nl> - # include " src / heap / cppgc / heap - page . h " <nl> + # include " src / heap / cppgc / heap - page - inl . h " <nl> # include " src / heap / cppgc / raw - heap . h " <nl> # include " test / unittests / heap / cppgc / tests . h " <nl> <nl> mmm a / test / unittests / heap / cppgc / heap - page - unittest . cc <nl> ppp b / test / unittests / heap / cppgc / heap - page - unittest . cc <nl> <nl> # include < algorithm > <nl> <nl> # include " include / cppgc / allocation . h " <nl> - # include " include / cppgc / internal / accessors . h " <nl> # include " include / cppgc / persistent . h " <nl> # include " src / base / macros . h " <nl> # include " src / heap / cppgc / globals . h " <nl> # include " src / heap / cppgc / heap - object - header - inl . h " <nl> # include " src / heap / cppgc / heap - object - header . h " <nl> + # include " src / heap / cppgc / heap - page - inl . 
h " <nl> # include " src / heap / cppgc / page - memory - inl . h " <nl> # include " src / heap / cppgc / page - memory . h " <nl> # include " src / heap / cppgc / raw - heap . h " <nl> class GCed : public GarbageCollected < GCed < Size > > { <nl> <nl> } / / namespace <nl> <nl> - TEST_F ( PageTest , GetHeapForAllocatedObject ) { <nl> - auto * gced = MakeGarbageCollected < GCed < 1 > > ( GetAllocationHandle ( ) ) ; <nl> - EXPECT_EQ ( GetHeap ( ) , GetHeapFromPayload ( gced ) ) ; <nl> - } <nl> - <nl> TEST_F ( PageTest , SpaceIndexing ) { <nl> RawHeap & heap = GetRawHeap ( ) ; <nl> size_t space = 0u ; <nl> mmm a / test / unittests / heap / cppgc / sweeper - unittest . cc <nl> ppp b / test / unittests / heap / cppgc / sweeper - unittest . cc <nl> <nl> # include " src / heap / cppgc / globals . h " <nl> # include " src / heap / cppgc / heap - object - header - inl . h " <nl> # include " src / heap / cppgc / heap - object - header . h " <nl> - # include " src / heap / cppgc / heap - page . h " <nl> + # include " src / heap / cppgc / heap - page - inl . h " <nl> # include " src / heap / cppgc / heap - visitor . h " <nl> # include " src / heap / cppgc / heap . h " <nl> # include " src / heap / cppgc / page - memory - inl . h " <nl>
cppgc : Rework pre - finalizer registration
v8/v8
154627bff4a1634ed32890c77e2a8f6490171561
2020-06-11T20:12:21Z
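The commit above drops the `kHeapOffset` back-reference (and the `internal/accessors.h` header) and instead recovers the heap from the object pointer via `BasePage::FromPayload`, which is pure address arithmetic: mask the payload address down to its 2^17-aligned page base, then step over the guard page to the `BasePage` header. Here is a small Python model of that arithmetic, for illustration only — the constants mirror `api-constants.h` and `heap-page-inl.h` from the diff, while the concrete addresses are made up:

```python
# Sketch of the address arithmetic behind BasePage::FromPayload in the diff
# above (a Python model, not the C++ implementation itself).
kPageSize = 1 << 17               # pages are 128 KiB and page-aligned
kPageBaseMask = ~(kPageSize - 1)  # clears the low 17 bits of an address
kGuardPageSize = 4096             # the BasePage header sits after the guard page

def base_page_from_payload(payload_addr: int) -> int:
    """Map an object (payload) address to the address of its BasePage header."""
    page_base = payload_addr & kPageBaseMask
    return page_base + kGuardPageSize

# Any payload inside the same 128 KiB page resolves to the same BasePage
# (addresses here are invented for the example):
assert base_page_from_payload(0x7F0000020040) == base_page_from_payload(0x7F000002FF00)
```

This is why `RegisterPrefinalizer` no longer needs a `cppgc::Heap*` argument: any payload pointer identifies its page, and the page already knows its heap.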
mmm a / python / mxnet / base . py <nl> ppp b / python / mxnet / base . py <nl> <nl> <nl> import sys <nl> import ctypes <nl> - from builtins import bytes <nl> import numpy as np <nl> import atexit <nl> from . import libinfo <nl> def c_str ( string ) : <nl> str : c_char_p <nl> A char pointer that can be passed to C API <nl> " " " <nl> - return ctypes . c_char_p ( bytes ( string , ' utf - 8 ' ) ) <nl> + return ctypes . c_char_p ( string . encode ( ' utf - 8 ' ) ) <nl> <nl> <nl> def c_array ( ctype , values ) : <nl> mmm a / python / mxnet / recordio . py <nl> ppp b / python / mxnet / recordio . py <nl> def write ( self , buf ) : <nl> <nl> Parameters <nl> mmmmmmmmm - <nl> - buf : string <nl> + buf : string ( python2 ) , bytes ( python3 ) <nl> buffer to write . <nl> " " " <nl> assert self . writable <nl> check_call ( _LIB . MXRecordIOWriterWriteRecord ( self . handle , <nl> - c_str ( buf ) , <nl> + ctypes . c_char_p ( buf ) , <nl> ctypes . c_size_t ( len ( buf ) ) ) ) <nl> <nl> def read ( self ) : <nl> mmm a / tests / python / unittest / test_recordio . py <nl> ppp b / tests / python / unittest / test_recordio . py <nl> <nl> # pylint : skip - file <nl> + import sys <nl> import mxnet as mx <nl> import tempfile <nl> import random <nl> - from builtins import bytes <nl> <nl> def test_recordio ( ) : <nl> frec = tempfile . mktemp ( ) <nl> - N = 10 <nl> + N = 255 <nl> <nl> writer = mx . recordio . MXRecordIO ( frec , ' w ' ) <nl> for i in range ( N ) : <nl> - writer . write ( str ( i ) ) <nl> + if sys . version_info [ 0 ] < 3 : <nl> + writer . write ( str ( chr ( i ) ) ) <nl> + else : <nl> + writer . write ( bytes ( str ( chr ( i ) ) , ' utf - 8 ' ) ) <nl> del writer <nl> <nl> reader = mx . recordio . MXRecordIO ( frec , ' r ' ) <nl> for i in range ( N ) : <nl> res = reader . read ( ) <nl> - assert res = = bytes ( str ( i ) , ' utf - 8 ' ) <nl> - <nl> + if sys . version_info [ 0 ] < 3 : <nl> + assert res = = str ( chr ( i ) ) <nl> + else : <nl> + assert res = = bytes ( str ( chr ( i ) ) , ' utf - 8 ' ) <nl> <nl> def test_indexed_recordio ( ) : <nl> fidx = tempfile . mktemp ( ) <nl> frec = tempfile . mktemp ( ) <nl> - N = 10 <nl> + N = 255 <nl> <nl> writer = mx . recordio . MXIndexedRecordIO ( fidx , frec , ' w ' ) <nl> for i in range ( N ) : <nl> - writer . write_idx ( i , str ( i ) ) <nl> + if sys . version_info [ 0 ] < 3 : <nl> + writer . write_idx ( i , str ( chr ( i ) ) ) <nl> + else : <nl> + writer . write_idx ( i , bytes ( str ( chr ( i ) ) , ' utf - 8 ' ) ) <nl> del writer <nl> <nl> reader = mx . recordio . MXIndexedRecordIO ( fidx , frec , ' r ' ) <nl> keys = reader . keys ( ) <nl> assert sorted ( keys ) = = [ i for i in range ( N ) ] <nl> random . shuffle ( keys ) <nl> - for k in keys : <nl> - res = reader . read_idx ( k ) <nl> - assert res = = bytes ( str ( k ) , ' utf - 8 ' ) <nl> + for i in keys : <nl> + res = reader . read_idx ( i ) <nl> + if sys . version_info [ 0 ] < 3 : <nl> + assert res = = str ( chr ( i ) ) <nl> + else : <nl> + assert res = = bytes ( str ( chr ( i ) ) , ' utf - 8 ' ) <nl> <nl> if __name__ = = ' __main__ ' : <nl> test_recordio ( ) <nl>
fix test
apache/incubator-mxnet
c8989e86ac3bb0905d164e9b98bb0d99b31a5168
2016-08-01T01:54:03Z
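The test fix above pins down the `MXRecordIO` write contract across Python versions: `write()` takes `str` on Python 2 but raw `bytes` on Python 3, matching the updated docstring in `recordio.py`. A minimal usage sketch follows (the file path is an assumption; the API calls are the ones exercised by the test):

```python
# Usage sketch for the str/bytes contract the test above pins down
# (illustrative; the file path is an assumption).
import sys
import mxnet as mx

record = mx.recordio.MXRecordIO("tmp.rec", "w")
payload = "hello"
if sys.version_info[0] < 3:
    record.write(payload)                   # Python 2: write() takes str
else:
    record.write(payload.encode("utf-8"))   # Python 3: write() needs bytes
del record                                  # close the writer before reading back

reader = mx.recordio.MXRecordIO("tmp.rec", "r")
expected = payload if sys.version_info[0] < 3 else payload.encode("utf-8")
assert reader.read() == expected
```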
mmm a / contrib / Python / cntk / context . py <nl> ppp b / contrib / Python / cntk / context . py <nl> <nl> CNTK_TEMPLATE_DIR , " cntk_train_template . cntk " ) <nl> CNTK_TEST_TEMPLATE_PATH = os . path . join ( <nl> CNTK_TEMPLATE_DIR , " cntk_test_template . cntk " ) <nl> - CNTK_PREDICT_TEMPLATE_PATH = os . path . join ( <nl> - CNTK_TEMPLATE_DIR , " cntk_predict_template . cntk " ) <nl> + CNTK_INFER_TEMPLATE_PATH = os . path . join ( <nl> + CNTK_TEMPLATE_DIR , " cntk_infer_template . cntk " ) <nl> CNTK_EVAL_TEMPLATE_PATH = os . path . join ( <nl> CNTK_TEMPLATE_DIR , " cntk_eval_template . cntk " ) <nl> CNTK_TRAIN_CONFIG_FILENAME = " train . cntk " <nl> CNTK_TEST_CONFIG_FILENAME = " test . cntk " <nl> - CNTK_PREDICT_CONFIG_FILENAME = " predict . cntk " <nl> + CNTK_INFER_CONFIG_FILENAME = " infer . cntk " <nl> CNTK_EVAL_CONFIG_FILENAME = " eval . cntk " <nl> CNTK_OUTPUT_FILENAME = " out . txt " <nl> <nl> def test ( self , input_reader = None ) : <nl> pass <nl> <nl> @ abstractmethod <nl> - def predict ( self , input_reader = None ) : <nl> + def infer ( self , input_reader = None ) : <nl> ' ' ' <nl> Abstract method for the action write . It evaluated the trained model on <nl> the data provided by the reader . <nl> : param input_reader : map from input nodes to readers <nl> <nl> - Returns the predicted output <nl> + Returns the inferred output <nl> ' ' ' <nl> pass <nl> <nl> def _generate_test_config ( self , input_reader ) : <nl> } <nl> return tmpl % tmpl_dict <nl> <nl> - def _generate_predict_config ( self , input_reader ) : <nl> + def _generate_infer_config ( self , input_reader ) : <nl> ' ' ' <nl> Generates the configuration file for the write action . <nl> It uses the context ' s trained model . <nl> : param input_reader : a map from input nodes to their readers <nl> ' ' ' <nl> - tmpl = open ( CNTK_PREDICT_TEMPLATE_PATH , " r " ) . read ( ) <nl> + tmpl = open ( CNTK_INFER_TEMPLATE_PATH , " r " ) . read ( ) <nl> model_filename = os . path . join ( self . directory , ' Models ' , self . name ) <nl> output_filename_base = os . path . join ( <nl> self . directory , ' Outputs ' , self . name ) <nl> def test ( self , input_reader = None ) : <nl> return Context . _parse_test_result ( output ) <nl> <nl> <nl> - def predict ( self , input_reader = None ) : <nl> + def infer ( self , input_reader = None ) : <nl> ' ' ' <nl> Run the write action locally , use the trained model of this context . <nl> : param input_reader : map from input nodes to readers <nl> <nl> - Returns the predicted output <nl> + Returns the inferred output <nl> ' ' ' <nl> - config_content = self . _generate_predict_config ( input_reader ) <nl> - return self . _call_cntk ( CNTK_PREDICT_CONFIG_FILENAME , config_content ) <nl> + config_content = self . _generate_infer_config ( input_reader ) <nl> + return self . _call_cntk ( CNTK_INFER_CONFIG_FILENAME , config_content ) <nl> <nl> def eval ( self , node , input_reader = None , backward_pass = False , input_name = None ) : <nl> ' ' ' <nl> mmm a / contrib / Python / cntk / examples / LogReg / logreg . py <nl> ppp b / contrib / Python / cntk / examples / LogReg / logreg . py <nl> <nl> + # Copyright ( c ) Microsoft . All rights reserved . <nl> + <nl> + # Licensed under the MIT license . See LICENSE . md file in the project root <nl> + # for full license information . 
<nl> + # = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = <nl> + <nl> + " " " <nl> + Example of logictic regression implementation <nl> + " " " <nl> + <nl> import os <nl> import sys <nl> sys . path . insert ( 0 , os . path . join ( os . path . dirname ( __file__ ) , ' . . ' , ' . . ' , ' . . ' ) ) <nl> <nl> test_file = os . path . join ( cur_dir , " Test - 3Classes . txt " ) <nl> mapping_file = os . path . join ( cur_dir , " SimpleMapping - 3Classes . txt " ) <nl> <nl> - def train_eval_logreg ( criterion_name = None , eval_name = None ) : <nl> + def train_eval_logistic_regression ( criterion_name = None , eval_name = None ) : <nl> X = Input ( 2 ) <nl> y = Input ( 3 ) <nl> <nl> def train_eval_logreg ( criterion_name = None , eval_name = None ) : <nl> <nl> out = Times ( W , X ) + b <nl> out . tag = ' output ' <nl> - ce = CrossEntropyWithSoftmax ( y , out , var_name = criterion_name ) <nl> + ce = CrossEntropyWithSoftmax ( y , out ) <nl> + ce . var_name = criterion_name <nl> ce . tag = ' criterion ' <nl> - eval = SquareError ( y , out , var_name = eval_name ) <nl> + eval = SquareError ( y , out ) <nl> eval . tag = ' eval ' <nl> + eval . var_name = eval_name <nl> <nl> - my_sgd = SGD ( <nl> + my_sgd = SGDParams ( <nl> epoch_size = 0 , minibatch_size = 25 , learning_ratesPerMB = 0 . 1 , max_epochs = 3 ) <nl> <nl> with Context ( ' demo ' , clean_up = False ) as ctx : <nl> def train_eval_logreg ( criterion_name = None , eval_name = None ) : <nl> <nl> return result <nl> <nl> - def test_logreg ( ) : <nl> - result = train_eval_logreg ( ' crit_node ' , ' eval_node ' ) <nl> + def test_logistic_regression ( ) : <nl> + result = train_eval_logistic_regression ( ' crit_node ' , ' eval_node ' ) <nl> <nl> assert result [ ' SamplesSeen ' ] = = 500 <nl> assert np . allclose ( result [ ' Perplexity ' ] , 1 . 2216067 ) <nl> def test_logreg ( ) : <nl> assert np . allclose ( result [ ' eval_node ' ] , 27 . 558445 ) <nl> <nl> if __name__ = = " __main__ " : <nl> - print ( train_eval_logreg ( ) ) <nl> + print ( train_eval_logistic_regression ( ) ) <nl> mmm a / contrib / Python / cntk / examples / MNIST / mnist_one_layer . py <nl> ppp b / contrib / Python / cntk / examples / MNIST / mnist_one_layer . py <nl> <nl> + # Copyright ( c ) Microsoft . All rights reserved . <nl> + <nl> + # Licensed under the MIT license . See LICENSE . md file in the project root <nl> + # for full license information . <nl> + # = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = <nl> + <nl> + " " " <nl> + Example of a one layer neural network for MNNIST classification problem <nl> + " " " <nl> + <nl> import sys <nl> import os <nl> sys . path . insert ( 0 , os . path . join ( os . path . dirname ( __file__ ) , ' . . ' , ' . . ' , ' . . ' ) ) <nl> def add_dnn_layer ( in_dim , out_dim , x , param_scale ) : <nl> ec . tag = ' criterion ' <nl> <nl> # Build the optimizer ( settings are scaled down ) <nl> - my_sgd = SGD ( epoch_size = 600 , minibatch_size = 32 , <nl> + my_sgd = SGDParams ( epoch_size = 600 , minibatch_size = 32 , <nl> learning_ratesPerMB = 0 . 1 , max_epochs = 5 , momentum_per_mb = 0 ) <nl> <nl> # Create a context or re - use if already there <nl> with Context ( ' mnist_one_layer ' , clean_up = False ) as ctx : <nl> # CNTK actions <nl> ctx . train ( ec , my_sgd , { features : f_reader , labels : l_reader } ) <nl> - ctx . 
predict ( { features : f_reader_t , labels : l_reader_t } ) <nl> + ctx . infer ( { features : f_reader_t , labels : l_reader_t } ) <nl> print ( ctx . test ( { features : f_reader_t , labels : l_reader_t } ) ) <nl> <nl> mmm a / contrib / Python / cntk / graph . py <nl> ppp b / contrib / Python / cntk / graph . py <nl> def __init__ ( self , name , params = None , var_name = None , reader = None ) : <nl> def _is_input ( self ) : <nl> return isinstance ( self , InputComputationNodeBase ) <nl> <nl> + # operator overload for ( + ) where self is the left operand <nl> def __add__ ( self , other ) : <nl> - if not isinstance ( other , ComputationNode ) : <nl> - # TODO : in case of non - scalars we have to pull in a reader <nl> + if not isinstance ( other , ComputationNode ) : <nl> other = constant ( other ) <nl> return Plus ( self , other ) <nl> <nl> + # operator overload for ( + ) where self is the right operand <nl> def __radd__ ( self , other ) : <nl> - if not isinstance ( other , ComputationNode ) : <nl> - # TODO : in case of non - scalars we have to pull in a reader <nl> + if not isinstance ( other , ComputationNode ) : <nl> other = constant ( other ) <nl> return Plus ( other , self ) <nl> - <nl> + <nl> + # operator overload for ( - ) where self is the left operand <nl> def __sub__ ( self , other ) : <nl> if not isinstance ( other , ComputationNode ) : <nl> - # TODO : in case of non - scalars we have to pull in a reader <nl> other = constant ( other ) <nl> return Minus ( self , other ) <nl> <nl> + # operator overload for ( - ) where self is the right operand <nl> def __rsub__ ( self , other ) : <nl> - if not isinstance ( other , ComputationNode ) : <nl> - # TODO : in case of non - scalars we have to pull in a reader <nl> + if not isinstance ( other , ComputationNode ) : <nl> other = constant ( other ) <nl> return Minus ( other , self ) <nl> <nl> + # operator overload for ( * ) where self is the left operand <nl> def __mul__ ( self , other ) : <nl> if not isinstance ( other , ComputationNode ) : <nl> - # TODO : in case of non - scalars we have to pull in a reader <nl> other = constant ( other ) <nl> return ElementTimes ( self , other ) <nl> <nl> + # operator overload for ( * ) where self is the right operand <nl> def __rmul__ ( self , other ) : <nl> if not isinstance ( other , ComputationNode ) : <nl> - # TODO : in case of non - scalars we have to pull in a reader <nl> other = constant ( other ) <nl> return ElementTimes ( other , self ) <nl> <nl> + # operator overload for ( @ ) where self is the left operand <nl> def __matmul__ ( self , other ) : <nl> if not isinstance ( other , ComputationNode ) : <nl> - # TODO : in case of non - scalars we have to pull in a reader <nl> other = constant ( other ) <nl> # NOTE supported in Python 3 . 5 <nl> return Times ( self , other ) <nl> <nl> + # operator overload for ( @ ) where self is the right operand <nl> def __rmatmul__ ( self , other ) : <nl> - if not isinstance ( other , ComputationNode ) : <nl> - # TODO : in case of non - scalars we have to pull in a reader <nl> + if not isinstance ( other , ComputationNode ) : <nl> other = constant ( other ) <nl> # NOTE supported in Python 3 . 5 <nl> return Times ( other , self ) <nl> <nl> + # operator overload for ( \ ) where self is the left operand <nl> def __truediv__ ( self , other ) : <nl> if not isinstance ( other , ComputationNode ) : <nl> - # TODO : in case of non - scalars we have to pull in a reader <nl> other = constant ( other ) <nl> self . __div__ = self . 
__truediv__ <nl> return ElementDivide ( self , other ) <nl> <nl> + # operator overload for ( \ ) where self is the right operand <nl> def __rtruediv__ ( self , other ) : <nl> if not isinstance ( other , ComputationNode ) : <nl> - # TODO : in case of non - scalars we have to pull in a reader <nl> other = constant ( other ) <nl> self . __rdiv__ = self . __rtruediv__ <nl> return ElementDivide ( other , self ) <nl> mmm a / contrib / Python / cntk / ops / cntk1 . py <nl> ppp b / contrib / Python / cntk / ops / cntk1 . py <nl> def __init__ ( self , aMatrix , anotherMatrix , name = ' ElementDivide ' , var_name = None ) : <nl> self . aMatrix = aMatrix <nl> self . anotherMatrix = anotherMatrix <nl> <nl> - <nl> - class Print ( ComputationNode ) : <nl> - <nl> - def __init__ ( self , value , format = ' ' , name = ' Print ' , var_name = None ) : <nl> - super ( Print , self ) . __init__ ( <nl> - params = [ ' value ' , ' format ' ] , name = name , var_name = var_name ) <nl> - self . value = value <nl> - self . format = format <nl> - self . params_with_defaults = [ ' format ' ] <nl> - <nl> - <nl> - class Fail ( ComputationNode ) : <nl> - <nl> - def __init__ ( self , what , name = ' Fail ' , var_name = None ) : <nl> - super ( Fail , self ) . __init__ ( <nl> - params = [ ' what ' ] , name = name , var_name = var_name ) <nl> - self . what = what <nl> - self . params_with_defaults = [ ] <nl> - <nl> - <nl> - class Format ( ComputationNode ) : <nl> - <nl> - def __init__ ( self , value , format , name = ' Format ' , var_name = None ) : <nl> - super ( Format , self ) . __init__ ( <nl> - params = [ ' value ' , ' format ' ] , name = name , var_name = var_name ) <nl> - self . value = value <nl> - self . format = format <nl> - self . params_with_defaults = [ ] <nl> - <nl> - <nl> - class Replace ( ComputationNode ) : <nl> - <nl> - def __init__ ( self , s , from_ , to , name = ' Replace ' , var_name = None ) : <nl> - super ( Replace , self ) . __init__ ( <nl> - params = [ ' s ' , ' from_ ' , ' to ' ] , name = name , var_name = var_name ) <nl> - self . s = s <nl> - self . from_ = from_ <nl> - self . to = to <nl> - self . params_with_defaults = [ ] <nl> - <nl> - <nl> - class Substr ( ComputationNode ) : <nl> - <nl> - def __init__ ( self , s , begin , num , name = ' Substr ' , var_name = None ) : <nl> - super ( Substr , self ) . __init__ ( <nl> - params = [ ' s ' , ' begin ' , ' num ' ] , name = name , var_name = var_name ) <nl> - self . s = s <nl> - self . begin = begin <nl> - self . num = num <nl> - self . params_with_defaults = [ ] <nl> - <nl> - <nl> - class Chr ( ComputationNode ) : <nl> - <nl> - def __init__ ( self , c , name = ' Chr ' , var_name = None ) : <nl> - super ( Chr , self ) . __init__ ( params = [ ' c ' ] , name = name , var_name = var_name ) <nl> - self . c = c <nl> - self . params_with_defaults = [ ] <nl> - <nl> - <nl> - class Length ( ComputationNode ) : <nl> - <nl> - def __init__ ( self , x , name = ' Length ' , var_name = None ) : <nl> - super ( Length , self ) . __init__ ( <nl> - params = [ ' x ' ] , name = name , var_name = var_name ) <nl> - self . x = x <nl> - self . params_with_defaults = [ ] <nl> - <nl> - <nl> class Ceil ( ComputationNode ) : <nl> <nl> def __init__ ( self , x , name = ' Ceil ' , var_name = None ) : <nl> mmm a / contrib / Python / cntk / ops / linear . py <nl> ppp b / contrib / Python / cntk / ops / linear . 
py <nl> <nl> # = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = <nl> <nl> " " " <nl> - Linear algebra operations <nl> + Linear algebra operations . For every operation we explain how the forward and backward <nl> + passes are computed . For the backward pass we just explain the scalar case which is the building <nl> + block for computing tensor gradients using the chain rule . For tensors , the backward pass of a node <nl> + is computed as follows : for each element in the output tensor , its gradient with respect to the <nl> + given input tensor is computed , then , the resulting tensors are added up . <nl> " " " <nl> <nl> from cntk . ops . cntk1 import Times , Plus , ElementDivide , ElementTimes <nl> def plus ( left_operand , right_operand , name = None ) : <nl> " " " <nl> Tensor addition operation . The output of this operation is the sum of the <nl> two input tensors . It supports broadcasting . In case of scalars its backward <nl> - pass returns 1 . In case of tensors , the backward pass is computed as follows : <nl> - for each element in the output tensor , it computes the its gradient with <nl> - respect to the given input tensor , then it sums all the resulting tensors . <nl> + pass propagates the received gradient . <nl> <nl> Args : <nl> left_operand : left side tensor <nl> def element_times ( left_operand , right_operand , name = None ) : <nl> " " " <nl> Element - wise multiplication operation . The output of this operation is the <nl> element - wise product of the two input tensors . It supports broadcasting . In <nl> - case of scalars its backward pass to left_operand returns right_operand and <nl> - vice versa . In case of tensors , the backward pass is computed as follows : <nl> - for each element in the output tensor , it computes the its gradient with <nl> - respect to the given input tensor , then it sums all the resulting tensors . <nl> - <nl> + case of scalars its backward pass to left_operand propagates right_operand <nl> + times the received gradient and vice versa . <nl> Args : <nl> left_operand : left side tensor <nl> right_operand : right side tensor <nl> def element_divide ( left_operand , right_operand , name = None ) : <nl> " " " <nl> Element - wise division operation . The output of this operation is the <nl> element - wise division of the two input tensors . It supports broadcasting . In <nl> - case of scalars its backward pass to left_operand returns 1 / right_operand and <nl> - the backward pass to right_operand returns ( - left_operand / right_operand ^ 2 ) . <nl> - In case of tensors , the backward pass is computed as follows : <nl> - for each element in the output tensor , it computes the its gradient with <nl> - respect to the given input tensor , then it sums all the resulting tensors . <nl> + case of scalars its backward pass to left_operand propagates 1 / right_operand <nl> + times the received gradient , and the backward pass to right_operand propagates <nl> + ( - left_operand / right_operand ^ 2 ) times the received gradient . <nl> <nl> Args : <nl> left_operand : left side tensor <nl> def times ( left_operand , right_operand , name = None ) : <nl> " " " <nl> Tensor times operation . The output of this operation is the <nl> tensor product of the two input tensors . It supports broadcasting . In <nl> - case of scalars its backward pass to left_operand returns right_operand and <nl> - vice versa . 
In case of tensors , the backward pass is computed as follows : <nl> - for each element in the output tensor , it computes the its gradient with <nl> - respect to the given input tensor , then it sums all the resulting tensors . <nl> + case of scalars its backward pass to left_operand propagates right_operand <nl> + times the received gradient and vice versa . <nl> <nl> Args : <nl> left_operand : left side tensor <nl> mmm a / contrib / Python / cntk / optimizer . py <nl> ppp b / contrib / Python / cntk / optimizer . py <nl> <nl> - class SGD ( dict ) : <nl> + class SGDParams ( dict ) : <nl> <nl> " " " This is the Stochastic Gradien Descent optimizer used to train the networks <nl> " " " <nl> class SGD ( dict ) : <nl> def __init__ ( self , epoch_size = 0 , minibatch_size = 1 , learning_ratesPerMB = " 0 . 1 " , <nl> learning_rates_per_sample = None , momentum_per_mb = " 0 . 9 " , <nl> momentum_per_sample = None , max_epochs = 5 , dropout_rate = None ) : <nl> - " " " SGD constructor <nl> + " " " SGDParmas constructor <nl> <nl> : param epoch_size : the number of samples to use in each epoch . An intermediate <nl> model and other check point information are saved for each epoch . When set <nl> def __init__ ( self , epoch_size = 0 , minibatch_size = 1 , learning_ratesPerMB = " 0 . 1 " , <nl> self [ " dropoutRate " ] = dropout_rate <nl> <nl> def generate_config ( self ) : <nl> - " " " Generate the SGD configuration block <nl> + " " " Generate the SGDParams configuration block <nl> " " " <nl> <nl> config = [ ] <nl> similarity index 100 % <nl> rename from contrib / Python / cntk / templates / cntk_predict_template . cntk <nl> rename to contrib / Python / cntk / templates / cntk_infer_template . cntk <nl> mmm a / contrib / Python / cntk / utils / _fetch_ops . py <nl> ppp b / contrib / Python / cntk / utils / _fetch_ops . py <nl> <nl> REGEX_COMMENT = re . compile ( r ' / \ * . * \ * / ' ) <nl> <nl> OPERANDS_TO_IGNORE = { " tag = ' ' " } <nl> - OPERATORS_TO_IGNORE = { ' ConstantFromString ' , ' ElementDivide ' } <nl> + OPERATORS_TO_IGNORE = { ' Print ' , ' Fail ' , ' Format ' , ' Replace ' , ' Substr ' , ' Chr ' , ' Length ' , ' ConstantFromString ' , ' ElementDivide ' } <nl> <nl> INPUT_NODES = [ ' Input ' , ' SparseInput ' ] <nl> IMAGE_INPUT_NODES = [ ' ImageInput ' , ' SparseImageInput ' ] <nl>
address CR comments num4
microsoft/CNTK
43b992a0d13e6cf8721bb34e322e980c3834fe24
2016-04-14T14:25:14Z
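The graph.py hunk above replaces TODO markers with comments documenting each operator overload; the underlying pattern is that a binary operator accepts either another computation node or a raw scalar, and scalars are wrapped into constant nodes before the op node is built. Below is a minimal C++ analogue of that pattern, with hypothetical Node/Constant/Plus types rather than anything from the CNTK API:

```cpp
#include <memory>
#include <string>
#include <vector>

// Hypothetical computation-graph node mirroring the pattern in graph.py:
// binary operators accept either another node or a raw scalar, and raw
// scalars are wrapped in a Constant node before the op node is built.
struct Node {
    std::string op;                              // e.g. "Plus", "Constant"
    double value = 0.0;                          // payload for constants
    std::vector<std::shared_ptr<Node>> inputs;   // operands
};

using NodePtr = std::shared_ptr<Node>;

NodePtr Constant(double v) {
    auto n = std::make_shared<Node>();
    n->op = "Constant";
    n->value = v;
    return n;
}

NodePtr Plus(NodePtr a, NodePtr b) {
    auto n = std::make_shared<Node>();
    n->op = "Plus";
    n->inputs = {a, b};
    return n;
}

// operator overload for (+) where the left operand is a node (__add__)
NodePtr operator+(NodePtr lhs, double rhs) { return Plus(lhs, Constant(rhs)); }
// operator overload for (+) where the right operand is a node (__radd__)
NodePtr operator+(double lhs, NodePtr rhs) { return Plus(Constant(lhs), rhs); }

int main() {
    NodePtr x = Constant(2.0);
    NodePtr y = 1.0 + x + 3.0;  // both overloads fire; scalars become Constant nodes
    return y->inputs.size() == 2 ? 0 : 1;
}
```

Defining both operand orders mirrors Python's `__add__`/`__radd__` pair, which is exactly what the commented-up methods in graph.py implement.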
mmm a / dbms / src / Dictionaries / HashedDictionary . h <nl> ppp b / dbms / src / Dictionaries / HashedDictionary . h <nl> <nl> # include " IDictionary . h " <nl> # include " IDictionarySource . h " <nl> <nl> + / * * This dictionary stores all content in a hash table in memory <nl> + * ( a separate Key - > Value map for each attribute ) <nl> + * Two variants of hash table is supported : a fast HashMap and memory efficient sparse_hash_map . <nl> + * / <nl> <nl> namespace DB <nl> { <nl>
Comments are a must .
ClickHouse/ClickHouse
ab9a7be45ad647e7be832a61f31f03ae90ff9a40
2019-09-22T02:09:40Z
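The new header comment describes one Key -> Value hash table per attribute, with either a fast HashMap or a memory-efficient sparse_hash_map as the table type. A rough sketch of that layout, using std::unordered_map as a stand-in for both ClickHouse table types (none of these names are ClickHouse API):

```cpp
#include <cstdint>
#include <string>
#include <unordered_map>
#include <variant>

// One Key -> Value map per attribute: a dictionary with N attributes holds
// N independent hash tables keyed by the same UInt64 key. std::unordered_map
// stands in for ClickHouse's HashMap; a sparse hash map could be swapped in
// as the memory-efficient variant the comment mentions.
struct HashedDictionarySketch {
    using Key = uint64_t;
    using Value = std::variant<int64_t, double, std::string>;
    using AttributeMap = std::unordered_map<Key, Value>;

    std::unordered_map<std::string, AttributeMap> attributes;  // name -> table

    void set(const std::string& attr, Key key, Value value) {
        attributes[attr][key] = std::move(value);
    }
};

int main() {
    HashedDictionarySketch dict;
    dict.set("population", 42, int64_t{1000000});
    dict.set("name", 42, std::string{"Answerville"});
    return 0;
}
```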
mmm a / src / bootstrapper . cc <nl> ppp b / src / bootstrapper . cc <nl> void Genesis : : InitializeGlobal ( Handle < JSGlobalObject > global_object , <nl> Handle < JSFunction > boolean_fun = <nl> InstallFunction ( global , " Boolean " , JS_VALUE_TYPE , JSValue : : kSize , <nl> isolate - > initial_object_prototype ( ) , <nl> - Builtins : : kIllegal ) ; <nl> + Builtins : : kBooleanConstructor ) ; <nl> + boolean_fun - > shared ( ) - > DontAdaptArguments ( ) ; <nl> + boolean_fun - > shared ( ) - > set_construct_stub ( <nl> + * isolate - > builtins ( ) - > BooleanConstructor_ConstructStub ( ) ) ; <nl> + boolean_fun - > shared ( ) - > set_length ( 1 ) ; <nl> InstallWithIntrinsicDefaultProto ( isolate , boolean_fun , <nl> Context : : BOOLEAN_FUNCTION_INDEX ) ; <nl> } <nl> mmm a / src / builtins . cc <nl> ppp b / src / builtins . cc <nl> BUILTIN ( ReflectSetPrototypeOf ) { <nl> } <nl> <nl> <nl> + / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl> + / / ES6 section 19 . 3 Boolean Objects <nl> + <nl> + <nl> + / / ES6 section 19 . 3 . 1 . 1 Boolean ( value ) for the [ [ Call ] ] case . <nl> + BUILTIN ( BooleanConstructor ) { <nl> + HandleScope scope ( isolate ) ; <nl> + Handle < Object > value = args . atOrUndefined ( isolate , 1 ) ; <nl> + return isolate - > heap ( ) - > ToBoolean ( value - > BooleanValue ( ) ) ; <nl> + } <nl> + <nl> + <nl> + / / ES6 section 19 . 3 . 1 . 1 Boolean ( value ) for the [ [ Construct ] ] case . <nl> + BUILTIN ( BooleanConstructor_ConstructStub ) { <nl> + HandleScope scope ( isolate ) ; <nl> + Handle < Object > value = args . atOrUndefined ( isolate , 1 ) ; <nl> + Handle < JSFunction > target = args . target < JSFunction > ( ) ; <nl> + Handle < JSReceiver > new_target = Handle < JSReceiver > : : cast ( args . new_target ( ) ) ; <nl> + DCHECK ( * target = = target - > native_context ( ) - > boolean_function ( ) ) ; <nl> + Handle < Map > initial_map ; <nl> + ASSIGN_RETURN_FAILURE_ON_EXCEPTION ( <nl> + isolate , initial_map , <nl> + JSFunction : : GetDerivedMap ( isolate , target , new_target ) ) ; <nl> + Handle < JSValue > result = Handle < JSValue > : : cast ( <nl> + isolate - > factory ( ) - > NewJSObjectFromMap ( initial_map ) ) ; <nl> + result - > set_value ( isolate - > heap ( ) - > ToBoolean ( value - > BooleanValue ( ) ) ) ; <nl> + return * result ; <nl> + } <nl> + <nl> + <nl> / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl> / / ES6 section 20 . 3 Date Objects <nl> <nl> mmm a / src / builtins . h <nl> ppp b / src / builtins . h <nl> inline bool operator & ( BuiltinExtraArguments lhs , BuiltinExtraArguments rhs ) { <nl> V ( ArrayBufferConstructor_ConstructStub , kTargetAndNewTarget ) \ <nl> V ( ArrayBufferIsView , kNone ) \ <nl> \ <nl> + V ( BooleanConstructor , kNone ) \ <nl> + V ( BooleanConstructor_ConstructStub , kTargetAndNewTarget ) \ <nl> + \ <nl> V ( DateConstructor , kNone ) \ <nl> V ( DateConstructor_ConstructStub , kTargetAndNewTarget ) \ <nl> V ( DateNow , kNone ) \ <nl> mmm a / src / compiler / js - intrinsic - lowering . cc <nl> ppp b / src / compiler / js - intrinsic - lowering . 
cc <nl> Reduction JSIntrinsicLowering : : Reduce ( Node * node ) { <nl> return ReduceIsJSReceiver ( node ) ; <nl> case Runtime : : kInlineIsSmi : <nl> return ReduceIsSmi ( node ) ; <nl> - case Runtime : : kInlineJSValueGetValue : <nl> - return ReduceJSValueGetValue ( node ) ; <nl> case Runtime : : kInlineMathClz32 : <nl> return ReduceMathClz32 ( node ) ; <nl> case Runtime : : kInlineMathFloor : <nl> Reduction JSIntrinsicLowering : : ReduceIsSmi ( Node * node ) { <nl> } <nl> <nl> <nl> - Reduction JSIntrinsicLowering : : ReduceJSValueGetValue ( Node * node ) { <nl> - Node * value = NodeProperties : : GetValueInput ( node , 0 ) ; <nl> - Node * effect = NodeProperties : : GetEffectInput ( node ) ; <nl> - Node * control = NodeProperties : : GetControlInput ( node ) ; <nl> - return Change ( node , simplified ( ) - > LoadField ( AccessBuilder : : ForValue ( ) ) , value , <nl> - effect , control ) ; <nl> - } <nl> - <nl> - <nl> Reduction JSIntrinsicLowering : : ReduceMathClz32 ( Node * node ) { <nl> return Change ( node , machine ( ) - > Word32Clz ( ) ) ; <nl> } <nl> mmm a / src / compiler / js - intrinsic - lowering . h <nl> ppp b / src / compiler / js - intrinsic - lowering . h <nl> class JSIntrinsicLowering final : public AdvancedReducer { <nl> Reduction ReduceIsInstanceType ( Node * node , InstanceType instance_type ) ; <nl> Reduction ReduceIsJSReceiver ( Node * node ) ; <nl> Reduction ReduceIsSmi ( Node * node ) ; <nl> - Reduction ReduceJSValueGetValue ( Node * node ) ; <nl> Reduction ReduceMathClz32 ( Node * node ) ; <nl> Reduction ReduceMathFloor ( Node * node ) ; <nl> Reduction ReduceMathSqrt ( Node * node ) ; <nl> mmm a / src / crankshaft / hydrogen . cc <nl> ppp b / src / crankshaft / hydrogen . cc <nl> void HOptimizedGraphBuilder : : GenerateValueOf ( CallRuntime * call ) { <nl> } <nl> <nl> <nl> - void HOptimizedGraphBuilder : : GenerateJSValueGetValue ( CallRuntime * call ) { <nl> - DCHECK ( call - > arguments ( ) - > length ( ) = = 1 ) ; <nl> - CHECK_ALIVE ( VisitForValue ( call - > arguments ( ) - > at ( 0 ) ) ) ; <nl> - HValue * value = Pop ( ) ; <nl> - HInstruction * result = Add < HLoadNamedField > ( <nl> - value , nullptr , <nl> - HObjectAccess : : ForObservableJSObjectOffset ( JSValue : : kValueOffset ) ) ; <nl> - return ast_context ( ) - > ReturnInstruction ( result , call - > id ( ) ) ; <nl> - } <nl> - <nl> - <nl> void HOptimizedGraphBuilder : : GenerateIsDate ( CallRuntime * call ) { <nl> DCHECK_EQ ( 1 , call - > arguments ( ) - > length ( ) ) ; <nl> CHECK_ALIVE ( VisitForValue ( call - > arguments ( ) - > at ( 0 ) ) ) ; <nl> void HOptimizedGraphBuilder : : GenerateTwoByteSeqStringSetChar ( <nl> } <nl> <nl> <nl> - void HOptimizedGraphBuilder : : GenerateSetValueOf ( CallRuntime * call ) { <nl> - DCHECK ( call - > arguments ( ) - > length ( ) = = 2 ) ; <nl> - CHECK_ALIVE ( VisitForValue ( call - > arguments ( ) - > at ( 0 ) ) ) ; <nl> - CHECK_ALIVE ( VisitForValue ( call - > arguments ( ) - > at ( 1 ) ) ) ; <nl> - HValue * value = Pop ( ) ; <nl> - HValue * object = Pop ( ) ; <nl> - <nl> - / / Check if object is a JSValue . <nl> - IfBuilder if_objectisvalue ( this ) ; <nl> - if_objectisvalue . If < HHasInstanceTypeAndBranch > ( object , JS_VALUE_TYPE ) ; <nl> - if_objectisvalue . Then ( ) ; <nl> - { <nl> - / / Create in - object property store to kValueOffset . <nl> - Add < HStoreNamedField > ( object , <nl> - HObjectAccess : : ForObservableJSObjectOffset ( JSValue : : kValueOffset ) , <nl> - value ) ; <nl> - if ( ! 
ast_context ( ) - > IsEffect ( ) ) { <nl> - Push ( value ) ; <nl> - } <nl> - Add < HSimulate > ( call - > id ( ) , FIXED_SIMULATE ) ; <nl> - } <nl> - if_objectisvalue . Else ( ) ; <nl> - { <nl> - / / Nothing to do in this case . <nl> - if ( ! ast_context ( ) - > IsEffect ( ) ) { <nl> - Push ( value ) ; <nl> - } <nl> - Add < HSimulate > ( call - > id ( ) , FIXED_SIMULATE ) ; <nl> - } <nl> - if_objectisvalue . End ( ) ; <nl> - if ( ! ast_context ( ) - > IsEffect ( ) ) { <nl> - Drop ( 1 ) ; <nl> - } <nl> - return ast_context ( ) - > ReturnValue ( value ) ; <nl> - } <nl> - <nl> - <nl> / / Fast support for charCodeAt ( n ) . <nl> void HOptimizedGraphBuilder : : GenerateStringCharCodeAt ( CallRuntime * call ) { <nl> DCHECK ( call - > arguments ( ) - > length ( ) = = 2 ) ; <nl> mmm a / src / crankshaft / hydrogen . h <nl> ppp b / src / crankshaft / hydrogen . h <nl> class HOptimizedGraphBuilder : public HGraphBuilder , public AstVisitor { <nl> F ( IsJSProxy ) \ <nl> F ( Call ) \ <nl> F ( ValueOf ) \ <nl> - F ( SetValueOf ) \ <nl> F ( IsDate ) \ <nl> F ( StringCharFromCode ) \ <nl> F ( StringCharAt ) \ <nl> class HOptimizedGraphBuilder : public HGraphBuilder , public AstVisitor { <nl> / * ES6 Iterators * / \ <nl> F ( CreateIterResultObject ) \ <nl> / * Arrays * / \ <nl> - F ( HasFastPackedElements ) \ <nl> - / * JSValue * / \ <nl> - F ( JSValueGetValue ) <nl> + F ( HasFastPackedElements ) <nl> <nl> # define GENERATOR_DECLARATION ( Name ) void Generate # # Name ( CallRuntime * call ) ; <nl> FOR_EACH_HYDROGEN_INTRINSIC ( GENERATOR_DECLARATION ) <nl> mmm a / src / full - codegen / arm / full - codegen - arm . cc <nl> ppp b / src / full - codegen / arm / full - codegen - arm . cc <nl> void FullCodeGenerator : : EmitTwoByteSeqStringSetChar ( CallRuntime * expr ) { <nl> } <nl> <nl> <nl> - void FullCodeGenerator : : EmitSetValueOf ( CallRuntime * expr ) { <nl> - ZoneList < Expression * > * args = expr - > arguments ( ) ; <nl> - DCHECK ( args - > length ( ) = = 2 ) ; <nl> - VisitForStackValue ( args - > at ( 0 ) ) ; / / Load the object . <nl> - VisitForAccumulatorValue ( args - > at ( 1 ) ) ; / / Load the value . <nl> - __ pop ( r1 ) ; / / r0 = value . r1 = object . <nl> - <nl> - Label done ; <nl> - / / If the object is a smi , return the value . <nl> - __ JumpIfSmi ( r1 , & done ) ; <nl> - <nl> - / / If the object is not a value type , return the value . <nl> - __ CompareObjectType ( r1 , r2 , r2 , JS_VALUE_TYPE ) ; <nl> - __ b ( ne , & done ) ; <nl> - <nl> - / / Store the value . <nl> - __ str ( r0 , FieldMemOperand ( r1 , JSValue : : kValueOffset ) ) ; <nl> - / / Update the write barrier . Save the value as it will be <nl> - / / overwritten by the write barrier code and is needed afterward . <nl> - __ mov ( r2 , r0 ) ; <nl> - __ RecordWriteField ( <nl> - r1 , JSValue : : kValueOffset , r2 , r3 , kLRHasBeenSaved , kDontSaveFPRegs ) ; <nl> - <nl> - __ bind ( & done ) ; <nl> - context ( ) - > Plug ( r0 ) ; <nl> - } <nl> - <nl> - <nl> void FullCodeGenerator : : EmitToInteger ( CallRuntime * expr ) { <nl> ZoneList < Expression * > * args = expr - > arguments ( ) ; <nl> DCHECK_EQ ( 1 , args - > length ( ) ) ; <nl> mmm a / src / full - codegen / arm64 / full - codegen - arm64 . cc <nl> ppp b / src / full - codegen / arm64 / full - codegen - arm64 . 
cc <nl> void FullCodeGenerator : : EmitTwoByteSeqStringSetChar ( CallRuntime * expr ) { <nl> } <nl> <nl> <nl> - void FullCodeGenerator : : EmitSetValueOf ( CallRuntime * expr ) { <nl> - ZoneList < Expression * > * args = expr - > arguments ( ) ; <nl> - DCHECK ( args - > length ( ) = = 2 ) ; <nl> - VisitForStackValue ( args - > at ( 0 ) ) ; / / Load the object . <nl> - VisitForAccumulatorValue ( args - > at ( 1 ) ) ; / / Load the value . <nl> - __ Pop ( x1 ) ; <nl> - / / x0 = value . <nl> - / / x1 = object . <nl> - <nl> - Label done ; <nl> - / / If the object is a smi , return the value . <nl> - __ JumpIfSmi ( x1 , & done ) ; <nl> - <nl> - / / If the object is not a value type , return the value . <nl> - __ JumpIfNotObjectType ( x1 , x10 , x11 , JS_VALUE_TYPE , & done ) ; <nl> - <nl> - / / Store the value . <nl> - __ Str ( x0 , FieldMemOperand ( x1 , JSValue : : kValueOffset ) ) ; <nl> - / / Update the write barrier . Save the value as it will be <nl> - / / overwritten by the write barrier code and is needed afterward . <nl> - __ Mov ( x10 , x0 ) ; <nl> - __ RecordWriteField ( <nl> - x1 , JSValue : : kValueOffset , x10 , x11 , kLRHasBeenSaved , kDontSaveFPRegs ) ; <nl> - <nl> - __ Bind ( & done ) ; <nl> - context ( ) - > Plug ( x0 ) ; <nl> - } <nl> - <nl> - <nl> void FullCodeGenerator : : EmitToInteger ( CallRuntime * expr ) { <nl> ZoneList < Expression * > * args = expr - > arguments ( ) ; <nl> DCHECK_EQ ( 1 , args - > length ( ) ) ; <nl> mmm a / src / full - codegen / full - codegen . h <nl> ppp b / src / full - codegen / full - codegen . h <nl> class FullCodeGenerator : public AstVisitor { <nl> F ( IsJSProxy ) \ <nl> F ( Call ) \ <nl> F ( ValueOf ) \ <nl> - F ( SetValueOf ) \ <nl> F ( IsDate ) \ <nl> F ( StringCharFromCode ) \ <nl> F ( StringCharAt ) \ <nl> mmm a / src / full - codegen / ia32 / full - codegen - ia32 . cc <nl> ppp b / src / full - codegen / ia32 / full - codegen - ia32 . cc <nl> void FullCodeGenerator : : EmitTwoByteSeqStringSetChar ( CallRuntime * expr ) { <nl> } <nl> <nl> <nl> - void FullCodeGenerator : : EmitSetValueOf ( CallRuntime * expr ) { <nl> - ZoneList < Expression * > * args = expr - > arguments ( ) ; <nl> - DCHECK ( args - > length ( ) = = 2 ) ; <nl> - <nl> - VisitForStackValue ( args - > at ( 0 ) ) ; / / Load the object . <nl> - VisitForAccumulatorValue ( args - > at ( 1 ) ) ; / / Load the value . <nl> - __ pop ( ebx ) ; / / eax = value . ebx = object . <nl> - <nl> - Label done ; <nl> - / / If the object is a smi , return the value . <nl> - __ JumpIfSmi ( ebx , & done , Label : : kNear ) ; <nl> - <nl> - / / If the object is not a value type , return the value . <nl> - __ CmpObjectType ( ebx , JS_VALUE_TYPE , ecx ) ; <nl> - __ j ( not_equal , & done , Label : : kNear ) ; <nl> - <nl> - / / Store the value . <nl> - __ mov ( FieldOperand ( ebx , JSValue : : kValueOffset ) , eax ) ; <nl> - <nl> - / / Update the write barrier . Save the value as it will be <nl> - / / overwritten by the write barrier code and is needed afterward . <nl> - __ mov ( edx , eax ) ; <nl> - __ RecordWriteField ( ebx , JSValue : : kValueOffset , edx , ecx , kDontSaveFPRegs ) ; <nl> - <nl> - __ bind ( & done ) ; <nl> - context ( ) - > Plug ( eax ) ; <nl> - } <nl> - <nl> - <nl> void FullCodeGenerator : : EmitToInteger ( CallRuntime * expr ) { <nl> ZoneList < Expression * > * args = expr - > arguments ( ) ; <nl> DCHECK_EQ ( 1 , args - > length ( ) ) ; <nl> mmm a / src / full - codegen / mips / full - codegen - mips . cc <nl> ppp b / src / full - codegen / mips / full - codegen - mips . 
cc <nl> void FullCodeGenerator : : EmitTwoByteSeqStringSetChar ( CallRuntime * expr ) { <nl> } <nl> <nl> <nl> - void FullCodeGenerator : : EmitSetValueOf ( CallRuntime * expr ) { <nl> - ZoneList < Expression * > * args = expr - > arguments ( ) ; <nl> - DCHECK ( args - > length ( ) = = 2 ) ; <nl> - <nl> - VisitForStackValue ( args - > at ( 0 ) ) ; / / Load the object . <nl> - VisitForAccumulatorValue ( args - > at ( 1 ) ) ; / / Load the value . <nl> - __ pop ( a1 ) ; / / v0 = value . a1 = object . <nl> - <nl> - Label done ; <nl> - / / If the object is a smi , return the value . <nl> - __ JumpIfSmi ( a1 , & done ) ; <nl> - <nl> - / / If the object is not a value type , return the value . <nl> - __ GetObjectType ( a1 , a2 , a2 ) ; <nl> - __ Branch ( & done , ne , a2 , Operand ( JS_VALUE_TYPE ) ) ; <nl> - <nl> - / / Store the value . <nl> - __ sw ( v0 , FieldMemOperand ( a1 , JSValue : : kValueOffset ) ) ; <nl> - / / Update the write barrier . Save the value as it will be <nl> - / / overwritten by the write barrier code and is needed afterward . <nl> - __ mov ( a2 , v0 ) ; <nl> - __ RecordWriteField ( <nl> - a1 , JSValue : : kValueOffset , a2 , a3 , kRAHasBeenSaved , kDontSaveFPRegs ) ; <nl> - <nl> - __ bind ( & done ) ; <nl> - context ( ) - > Plug ( v0 ) ; <nl> - } <nl> - <nl> - <nl> void FullCodeGenerator : : EmitToInteger ( CallRuntime * expr ) { <nl> ZoneList < Expression * > * args = expr - > arguments ( ) ; <nl> DCHECK_EQ ( 1 , args - > length ( ) ) ; <nl> mmm a / src / full - codegen / mips64 / full - codegen - mips64 . cc <nl> ppp b / src / full - codegen / mips64 / full - codegen - mips64 . cc <nl> void FullCodeGenerator : : EmitTwoByteSeqStringSetChar ( CallRuntime * expr ) { <nl> } <nl> <nl> <nl> - void FullCodeGenerator : : EmitSetValueOf ( CallRuntime * expr ) { <nl> - ZoneList < Expression * > * args = expr - > arguments ( ) ; <nl> - DCHECK ( args - > length ( ) = = 2 ) ; <nl> - <nl> - VisitForStackValue ( args - > at ( 0 ) ) ; / / Load the object . <nl> - VisitForAccumulatorValue ( args - > at ( 1 ) ) ; / / Load the value . <nl> - __ pop ( a1 ) ; / / v0 = value . a1 = object . <nl> - <nl> - Label done ; <nl> - / / If the object is a smi , return the value . <nl> - __ JumpIfSmi ( a1 , & done ) ; <nl> - <nl> - / / If the object is not a value type , return the value . <nl> - __ GetObjectType ( a1 , a2 , a2 ) ; <nl> - __ Branch ( & done , ne , a2 , Operand ( JS_VALUE_TYPE ) ) ; <nl> - <nl> - / / Store the value . <nl> - __ sd ( v0 , FieldMemOperand ( a1 , JSValue : : kValueOffset ) ) ; <nl> - / / Update the write barrier . Save the value as it will be <nl> - / / overwritten by the write barrier code and is needed afterward . <nl> - __ mov ( a2 , v0 ) ; <nl> - __ RecordWriteField ( <nl> - a1 , JSValue : : kValueOffset , a2 , a3 , kRAHasBeenSaved , kDontSaveFPRegs ) ; <nl> - <nl> - __ bind ( & done ) ; <nl> - context ( ) - > Plug ( v0 ) ; <nl> - } <nl> - <nl> - <nl> void FullCodeGenerator : : EmitToInteger ( CallRuntime * expr ) { <nl> ZoneList < Expression * > * args = expr - > arguments ( ) ; <nl> DCHECK_EQ ( 1 , args - > length ( ) ) ; <nl> mmm a / src / full - codegen / ppc / full - codegen - ppc . cc <nl> ppp b / src / full - codegen / ppc / full - codegen - ppc . 
cc <nl> void FullCodeGenerator : : EmitTwoByteSeqStringSetChar ( CallRuntime * expr ) { <nl> } <nl> <nl> <nl> - void FullCodeGenerator : : EmitSetValueOf ( CallRuntime * expr ) { <nl> - ZoneList < Expression * > * args = expr - > arguments ( ) ; <nl> - DCHECK ( args - > length ( ) = = 2 ) ; <nl> - VisitForStackValue ( args - > at ( 0 ) ) ; / / Load the object . <nl> - VisitForAccumulatorValue ( args - > at ( 1 ) ) ; / / Load the value . <nl> - __ pop ( r4 ) ; / / r3 = value . r4 = object . <nl> - <nl> - Label done ; <nl> - / / If the object is a smi , return the value . <nl> - __ JumpIfSmi ( r4 , & done ) ; <nl> - <nl> - / / If the object is not a value type , return the value . <nl> - __ CompareObjectType ( r4 , r5 , r5 , JS_VALUE_TYPE ) ; <nl> - __ bne ( & done ) ; <nl> - <nl> - / / Store the value . <nl> - __ StoreP ( r3 , FieldMemOperand ( r4 , JSValue : : kValueOffset ) , r0 ) ; <nl> - / / Update the write barrier . Save the value as it will be <nl> - / / overwritten by the write barrier code and is needed afterward . <nl> - __ mr ( r5 , r3 ) ; <nl> - __ RecordWriteField ( r4 , JSValue : : kValueOffset , r5 , r6 , kLRHasBeenSaved , <nl> - kDontSaveFPRegs ) ; <nl> - <nl> - __ bind ( & done ) ; <nl> - context ( ) - > Plug ( r3 ) ; <nl> - } <nl> - <nl> - <nl> void FullCodeGenerator : : EmitToInteger ( CallRuntime * expr ) { <nl> ZoneList < Expression * > * args = expr - > arguments ( ) ; <nl> DCHECK_EQ ( 1 , args - > length ( ) ) ; <nl> mmm a / src / full - codegen / x64 / full - codegen - x64 . cc <nl> ppp b / src / full - codegen / x64 / full - codegen - x64 . cc <nl> void FullCodeGenerator : : EmitTwoByteSeqStringSetChar ( CallRuntime * expr ) { <nl> } <nl> <nl> <nl> - void FullCodeGenerator : : EmitSetValueOf ( CallRuntime * expr ) { <nl> - ZoneList < Expression * > * args = expr - > arguments ( ) ; <nl> - DCHECK ( args - > length ( ) = = 2 ) ; <nl> - <nl> - VisitForStackValue ( args - > at ( 0 ) ) ; / / Load the object . <nl> - VisitForAccumulatorValue ( args - > at ( 1 ) ) ; / / Load the value . <nl> - __ Pop ( rbx ) ; / / rax = value . rbx = object . <nl> - <nl> - Label done ; <nl> - / / If the object is a smi , return the value . <nl> - __ JumpIfSmi ( rbx , & done ) ; <nl> - <nl> - / / If the object is not a value type , return the value . <nl> - __ CmpObjectType ( rbx , JS_VALUE_TYPE , rcx ) ; <nl> - __ j ( not_equal , & done ) ; <nl> - <nl> - / / Store the value . <nl> - __ movp ( FieldOperand ( rbx , JSValue : : kValueOffset ) , rax ) ; <nl> - / / Update the write barrier . Save the value as it will be <nl> - / / overwritten by the write barrier code and is needed afterward . <nl> - __ movp ( rdx , rax ) ; <nl> - __ RecordWriteField ( rbx , JSValue : : kValueOffset , rdx , rcx , kDontSaveFPRegs ) ; <nl> - <nl> - __ bind ( & done ) ; <nl> - context ( ) - > Plug ( rax ) ; <nl> - } <nl> - <nl> - <nl> void FullCodeGenerator : : EmitToInteger ( CallRuntime * expr ) { <nl> ZoneList < Expression * > * args = expr - > arguments ( ) ; <nl> DCHECK_EQ ( 1 , args - > length ( ) ) ; <nl> mmm a / src / full - codegen / x87 / full - codegen - x87 . cc <nl> ppp b / src / full - codegen / x87 / full - codegen - x87 . cc <nl> void FullCodeGenerator : : EmitTwoByteSeqStringSetChar ( CallRuntime * expr ) { <nl> } <nl> <nl> <nl> - void FullCodeGenerator : : EmitSetValueOf ( CallRuntime * expr ) { <nl> - ZoneList < Expression * > * args = expr - > arguments ( ) ; <nl> - DCHECK ( args - > length ( ) = = 2 ) ; <nl> - <nl> - VisitForStackValue ( args - > at ( 0 ) ) ; / / Load the object . 
<nl> - VisitForAccumulatorValue ( args - > at ( 1 ) ) ; / / Load the value . <nl> - __ pop ( ebx ) ; / / eax = value . ebx = object . <nl> - <nl> - Label done ; <nl> - / / If the object is a smi , return the value . <nl> - __ JumpIfSmi ( ebx , & done , Label : : kNear ) ; <nl> - <nl> - / / If the object is not a value type , return the value . <nl> - __ CmpObjectType ( ebx , JS_VALUE_TYPE , ecx ) ; <nl> - __ j ( not_equal , & done , Label : : kNear ) ; <nl> - <nl> - / / Store the value . <nl> - __ mov ( FieldOperand ( ebx , JSValue : : kValueOffset ) , eax ) ; <nl> - <nl> - / / Update the write barrier . Save the value as it will be <nl> - / / overwritten by the write barrier code and is needed afterward . <nl> - __ mov ( edx , eax ) ; <nl> - __ RecordWriteField ( ebx , JSValue : : kValueOffset , edx , ecx , kDontSaveFPRegs ) ; <nl> - <nl> - __ bind ( & done ) ; <nl> - context ( ) - > Plug ( eax ) ; <nl> - } <nl> - <nl> - <nl> void FullCodeGenerator : : EmitToInteger ( CallRuntime * expr ) { <nl> ZoneList < Expression * > * args = expr - > arguments ( ) ; <nl> DCHECK_EQ ( 1 , args - > length ( ) ) ; <nl> mmm a / src / js / v8natives . js <nl> ppp b / src / js / v8natives . js <nl> utils . InstallFunctions ( GlobalObject , DONT_ENUM , [ <nl> / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - <nl> / / Boolean <nl> <nl> - function BooleanConstructor ( x ) { <nl> - / / TODO ( bmeurer ) : Move this to toplevel . <nl> - " use strict " ; <nl> - if ( ! IS_UNDEFINED ( new . target ) ) { <nl> - % _SetValueOf ( this , TO_BOOLEAN ( x ) ) ; <nl> - } else { <nl> - return TO_BOOLEAN ( x ) ; <nl> - } <nl> - } <nl> - <nl> - <nl> function BooleanToString ( ) { <nl> / / NOTE : Both Boolean objects and values can enter here as <nl> / / ' this ' . This is not as dictated by ECMA - 262 . <nl> function BooleanValueOf ( ) { <nl> <nl> / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - <nl> <nl> - % SetCode ( GlobalBoolean , BooleanConstructor ) ; <nl> % FunctionSetPrototype ( GlobalBoolean , new GlobalBoolean ( false ) ) ; <nl> % AddNamedProperty ( GlobalBoolean . prototype , " constructor " , GlobalBoolean , <nl> DONT_ENUM ) ; <nl> mmm a / src / runtime / runtime - object . cc <nl> ppp b / src / runtime / runtime - object . cc <nl> RUNTIME_FUNCTION ( Runtime_ValueOf ) { <nl> } <nl> <nl> <nl> - RUNTIME_FUNCTION ( Runtime_SetValueOf ) { <nl> - SealHandleScope shs ( isolate ) ; <nl> - DCHECK ( args . length ( ) = = 2 ) ; <nl> - CONVERT_ARG_CHECKED ( Object , obj , 0 ) ; <nl> - CONVERT_ARG_CHECKED ( Object , value , 1 ) ; <nl> - if ( ! obj - > IsJSValue ( ) ) return value ; <nl> - JSValue : : cast ( obj ) - > set_value ( value ) ; <nl> - return value ; <nl> - } <nl> - <nl> - <nl> - RUNTIME_FUNCTION ( Runtime_JSValueGetValue ) { <nl> - SealHandleScope shs ( isolate ) ; <nl> - DCHECK ( args . length ( ) = = 1 ) ; <nl> - CONVERT_ARG_CHECKED ( JSValue , obj , 0 ) ; <nl> - return JSValue : : cast ( obj ) - > value ( ) ; <nl> - } <nl> - <nl> - <nl> RUNTIME_FUNCTION ( Runtime_IsJSReceiver ) { <nl> SealHandleScope shs ( isolate ) ; <nl> DCHECK ( args . length ( ) = = 1 ) ; <nl> mmm a / src / runtime / runtime . h <nl> ppp b / src / runtime / runtime . 
h <nl> namespace internal { <nl> F ( GetDataProperty , 2 , 1 ) \ <nl> F ( HasFastPackedElements , 1 , 1 ) \ <nl> F ( ValueOf , 1 , 1 ) \ <nl> - F ( SetValueOf , 2 , 1 ) \ <nl> - F ( JSValueGetValue , 1 , 1 ) \ <nl> F ( IsJSReceiver , 1 , 1 ) \ <nl> F ( IsStrong , 1 , 1 ) \ <nl> F ( ClassOf , 1 , 1 ) \ <nl> mmm a / test / cctest / compiler / test - run - intrinsics . cc <nl> ppp b / test / cctest / compiler / test - run - intrinsics . cc <nl> TEST ( OneByteSeqStringSetChar ) { <nl> } <nl> <nl> <nl> - TEST ( SetValueOf ) { <nl> - FunctionTester T ( " ( function ( a , b ) { return % _SetValueOf ( a , b ) ; } ) " , flags ) ; <nl> - <nl> - T . CheckCall ( T . Val ( " a " ) , T . NewObject ( " ( new String ) " ) , T . Val ( " a " ) ) ; <nl> - T . CheckCall ( T . Val ( 123 ) , T . NewObject ( " ( new Number ) " ) , T . Val ( 123 ) ) ; <nl> - T . CheckCall ( T . Val ( " x " ) , T . undefined ( ) , T . Val ( " x " ) ) ; <nl> - } <nl> - <nl> - <nl> TEST ( StringAdd ) { <nl> FunctionTester T ( " ( function ( a , b ) { return % _StringAdd ( a , b ) ; } ) " , flags ) ; <nl> <nl> deleted file mode 100644 <nl> index 8c42c8a20ba . . 00000000000 <nl> mmm a / test / mjsunit / regress / setvalueof - deopt . js <nl> ppp / dev / null <nl> <nl> - / / Copyright 2014 the V8 project authors . All rights reserved . <nl> - / / Redistribution and use in source and binary forms , with or without <nl> - / / modification , are permitted provided that the following conditions are <nl> - / / met : <nl> - / / <nl> - / / * Redistributions of source code must retain the above copyright <nl> - / / notice , this list of conditions and the following disclaimer . <nl> - / / * Redistributions in binary form must reproduce the above <nl> - / / copyright notice , this list of conditions and the following <nl> - / / disclaimer in the documentation and / or other materials provided <nl> - / / with the distribution . <nl> - / / * Neither the name of Google Inc . nor the names of its <nl> - / / contributors may be used to endorse or promote products derived <nl> - / / from this software without specific prior written permission . <nl> - / / <nl> - / / THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS <nl> - / / " AS IS " AND ANY EXPRESS OR IMPLIED WARRANTIES , INCLUDING , BUT NOT <nl> - / / LIMITED TO , THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR <nl> - / / A PARTICULAR PURPOSE ARE DISCLAIMED . IN NO EVENT SHALL THE COPYRIGHT <nl> - / / OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT , INDIRECT , INCIDENTAL , <nl> - / / SPECIAL , EXEMPLARY , OR CONSEQUENTIAL DAMAGES ( INCLUDING , BUT NOT <nl> - / / LIMITED TO , PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES ; LOSS OF USE , <nl> - / / DATA , OR PROFITS ; OR BUSINESS INTERRUPTION ) HOWEVER CAUSED AND ON ANY <nl> - / / THEORY OF LIABILITY , WHETHER IN CONTRACT , STRICT LIABILITY , OR TORT <nl> - / / ( INCLUDING NEGLIGENCE OR OTHERWISE ) ARISING IN ANY WAY OUT OF THE USE <nl> - / / OF THIS SOFTWARE , EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE . <nl> - <nl> - / / Flags : - - allow - natives - syntax <nl> - <nl> - function g ( x , y ) { <nl> - return y ; <nl> - } <nl> - <nl> - function f ( deopt ) { <nl> - return g ( % _SetValueOf ( 1 , 1 ) , deopt + 0 ) ; <nl> - } <nl> - <nl> - f ( 0 ) ; <nl> - f ( 0 ) ; <nl> - f ( 0 ) ; <nl> - % OptimizeFunctionOnNextCall ( f ) ; <nl> - assertEquals ( " result0 " , f ( " result " ) ) ; <nl> mmm a / test / unittests / compiler / js - intrinsic - lowering - unittest . 
cc <nl> ppp b / test / unittests / compiler / js - intrinsic - lowering - unittest . cc <nl> TEST_F ( JSIntrinsicLoweringTest , InlineIsJSReceiver ) { <nl> } <nl> <nl> <nl> - / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl> - / / % _JSValueGetValue <nl> - <nl> - <nl> - TEST_F ( JSIntrinsicLoweringTest , InlineJSValueGetValue ) { <nl> - Node * const input = Parameter ( 0 ) ; <nl> - Node * const context = Parameter ( 1 ) ; <nl> - Node * const effect = graph ( ) - > start ( ) ; <nl> - Node * const control = graph ( ) - > start ( ) ; <nl> - Reduction const r = Reduce ( graph ( ) - > NewNode ( <nl> - javascript ( ) - > CallRuntime ( Runtime : : kInlineJSValueGetValue , 1 ) , input , <nl> - context , effect , control ) ) ; <nl> - ASSERT_TRUE ( r . Changed ( ) ) ; <nl> - EXPECT_THAT ( r . replacement ( ) , <nl> - IsLoadField ( AccessBuilder : : ForValue ( ) , input , effect , control ) ) ; <nl> - } <nl> - <nl> - <nl> / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl> / / % _MathFloor <nl> <nl>
[ builtins ] Move the Boolean constructor to C + + .
v8/v8
8f87c0acb7f241654cac9574b7ec61ef83a428da
2016-02-16T14:03:07Z
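The commit moves Boolean from a JS builtin to two C++ builtins: the [[Call]] path returns the primitive ToBoolean(value), while the [[Construct]] path allocates a JSValue wrapper holding that primitive. A simplified model of the split, with invented types and a number-only ToBoolean, not V8 internals:

```cpp
#include <memory>

// The wrapper corresponds to the JSValue whose value slot the diff sets.
struct BooleanWrapper {
    bool value;
};

// ES6 19.3.1.1 Boolean(value) for the [[Call]] case: return ToBoolean(value).
bool BooleanCall(double value) {
    return value != 0.0;  // simplified ToBoolean, numbers only
}

// ES6 19.3.1.1 Boolean(value) for the [[Construct]] case: wrap the result
// in a freshly allocated object instead of returning the primitive.
std::unique_ptr<BooleanWrapper> BooleanConstruct(double value) {
    return std::make_unique<BooleanWrapper>(BooleanWrapper{BooleanCall(value)});
}

int main() {
    bool primitive = BooleanCall(3.14);   // Boolean(3.14) === true
    auto object = BooleanConstruct(0.0);  // new Boolean(0): wrapper, value false
    return (primitive && !object->value) ? 0 : 1;
}
```

This call/construct asymmetry is also why the JS-side `%_SetValueOf` plumbing and its full-codegen stubs become dead code and are deleted across the architectures above.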
mmm a / xbmc / cores / VideoPlayer / VideoPlayerVideo . cpp <nl> ppp b / xbmc / cores / VideoPlayer / VideoPlayerVideo . cpp <nl> void CVideoPlayerVideo : : Process ( ) <nl> } <nl> <nl> m_renderManager . DiscardBuffer ( ) ; <nl> + FlushMessages ( ) ; <nl> } <nl> else if ( pMsg - > IsType ( CDVDMsg : : PLAYER_SETSPEED ) ) <nl> { <nl> void CVideoPlayerVideo : : Flush ( bool sync ) <nl> / * flush using message as this get ' s called from VideoPlayer thread * / <nl> / * and any demux packet that has been taken out of queue need to * / <nl> / * be disposed of before we flush * / <nl> - FlushMessages ( ) ; <nl> SendMessage ( new CDVDMsgBool ( CDVDMsg : : GENERAL_FLUSH , sync ) , 1 ) ; <nl> m_bAbortOutput = true ; <nl> } <nl>
[ VideoPlayer ] Ensure queue is not in an intermediate state when flushing
xbmc/xbmc
d7b381b85aff8bb552076bb9736034f2fade97da
2019-03-11T08:00:38Z
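The fix relocates FlushMessages from the producer-side Flush() into the consumer's GENERAL_FLUSH handler, so the queue is cleared only when the flush message is actually processed. A self-contained sketch of why that ordering matters, using an invented string-based queue rather than Kodi's CDVDMsg types:

```cpp
#include <deque>
#include <mutex>
#include <string>

// If the producer clears the queue and then posts a flush message, stale
// messages can be queued in the window between the two steps. Clearing
// inside the consumer's flush handler removes that window.
class MessageQueue {
public:
    void Post(std::string msg) {
        std::lock_guard<std::mutex> lock(m_mutex);
        m_queue.push_back(std::move(msg));
    }

    // Consumer loop body: returns false when there is nothing to process.
    bool ProcessOne() {
        std::string msg;
        {
            std::lock_guard<std::mutex> lock(m_mutex);
            if (m_queue.empty()) return false;
            msg = m_queue.front();
            m_queue.pop_front();
        }
        if (msg == "GENERAL_FLUSH") {
            // Flush *inside* the handler, as the commit does, so the queue
            // is never left in an intermediate state by the producer side.
            std::lock_guard<std::mutex> lock(m_mutex);
            m_queue.clear();
        }
        return true;
    }

private:
    std::mutex m_mutex;
    std::deque<std::string> m_queue;
};

int main() {
    MessageQueue q;
    q.Post("PACKET");
    q.Post("GENERAL_FLUSH");
    q.Post("STALE_PACKET");      // queued after flush was requested
    while (q.ProcessOne()) {}    // flush handler discards the stale packet
    return 0;
}
```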
mmm a / programs / bios - boot - tutorial / README . md <nl> ppp b / programs / bios - boot - tutorial / README . md <nl> The ` bios - boot - tutorial . py ` script simulates the EOSIO bios boot sequence . <nl> <nl> The script can be run with no arguments directly from the ` programs / bios - boot - tutorial ` directory . <nl> <nl> - ` ` ` <nl> - cd programs / bios - boot - tutorial <nl> + ` ` ` bash <nl> + $ cd programs / bios - boot - tutorial <nl> <nl> - . / bios - boot - tutorial . py <nl> + $ . / bios - boot - tutorial . py <nl> ` ` ` <nl> <nl> - See [ EOSIO Documentation Wiki , Tutorial - Bios Boot ] ( https : / / github . com / EOSIO / eos / wiki ) for additional information . <nl> + See [ EOSIO Documentation Wiki : Tutorial - Bios Boot ] ( https : / / github . com / EOSIO / eos / wiki / Tutorial - Bios - Boot - Sequence ) for additional information . <nl>
Link to wiki / Tutorial - Bios - Boot - Sequence directly
EOSIO/eos
4f0188ce808e0913f673629c593e7d53c2fb5da5
2018-05-19T10:26:26Z
mmm a / trunk / src / kernel / srs_kernel_error . hpp <nl> ppp b / trunk / src / kernel / srs_kernel_error . hpp <nl> CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE . <nl> # define ERROR_OpenSslSha256Final 2035 <nl> # define ERROR_OpenSslSha256EvpDigest 2036 <nl> # define ERROR_OpenSslSha256DigestSize 2037 <nl> + # define ERROR_OpenSslGetPeerPublicKey 2038 <nl> + # define ERROR_OpenSslComputeSharedKey 2039 <nl> / / <nl> / / system control message , <nl> / / not an error , but special control logic . <nl> mmm a / trunk / src / rtmp / srs_protocol_handshake . cpp <nl> ppp b / trunk / src / rtmp / srs_protocol_handshake . cpp <nl> namespace _srs_internal <nl> <nl> return ret ; <nl> } <nl> + int __openssl_compute_key ( DH * pdh , const char * peer_pub_key , int ppk_size , char * secret ) <nl> + { <nl> + int ret = ERROR_SUCCESS ; <nl> + <nl> + int32_t bits_count = 1024 ; <nl> + <nl> + / / 2 . generate the g , p , private / public key . <nl> + if ( ( ret = __openssl_initialize_dh ( pdh , bits_count ) ) ! = ERROR_SUCCESS ) { <nl> + return ret ; <nl> + } <nl> + <nl> + / / copy public key to bytes . <nl> + srs_assert ( BN_num_bytes ( pdh - > pub_key ) = = ppk_size ) ; <nl> + <nl> + BIGNUM * ppk = NULL ; <nl> + if ( ( ppk = BN_bin2bn ( ( const unsigned char * ) peer_pub_key , ppk_size , 0 ) ) = = NULL ) { <nl> + ret = ERROR_OpenSslGetPeerPublicKey ; <nl> + return ret ; <nl> + } <nl> + <nl> + / / if failed , donot return , do cleanup . <nl> + if ( DH_compute_key ( ( unsigned char * ) secret , ppk , pdh ) < 0 ) { <nl> + ret = ERROR_OpenSslComputeSharedKey ; <nl> + } <nl> + <nl> + if ( ppk ) { <nl> + BN_free ( ppk ) ; <nl> + } <nl> + <nl> + return ret ; <nl> + } <nl> void __openssl_free ( DH * pdh ) <nl> { <nl> if ( pdh ! = NULL ) { <nl>
add __openssl_compute_key to calc the shared key
ossrs/srs
d4c2aa1e8e084385df9822a5469c741c3fa703f6
2014-08-08T03:34:17Z
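The added __openssl_compute_key parses the peer's raw public key with BN_bin2bn and derives the shared secret with DH_compute_key. Below is a standalone version of just that step, against the same OpenSSL DH API the diff uses; `pdh` is assumed to already hold a generated key pair, and `secret` must have room for DH_size(pdh) bytes:

```cpp
#include <openssl/bn.h>
#include <openssl/dh.h>

// Derive the DH shared secret from a peer's raw big-endian public key.
// Returns 0 on success, -1 on failure.
int compute_shared_key(DH* pdh, const unsigned char* peer_pub_key,
                       int ppk_size, unsigned char* secret) {
    // Parse the peer's raw public key bytes into a BIGNUM.
    BIGNUM* ppk = BN_bin2bn(peer_pub_key, ppk_size, nullptr);
    if (ppk == nullptr) {
        return -1;
    }

    // DH_compute_key returns the secret length in bytes, or negative on error.
    int ret = DH_compute_key(secret, ppk, pdh);

    BN_free(ppk);
    return ret < 0 ? -1 : 0;
}
```

As in the diff, the BIGNUM is freed even when DH_compute_key fails, which is why the error is recorded first and cleanup runs unconditionally before the status is returned.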
mmm a / src / app / crash / read_document . cpp <nl> ppp b / src / app / crash / read_document . cpp <nl> class Reader : public SubObjectsIO { <nl> return spr . release ( ) ; <nl> } <nl> <nl> + / / TODO could we use doc : : read_layer ( ) here ? <nl> Layer * readLayer ( std : : ifstream & s ) { <nl> LayerFlags flags = ( LayerFlags ) read32 ( s ) ; <nl> ObjectType type = ( ObjectType ) read16 ( s ) ; <nl> class Reader : public SubObjectsIO { <nl> lay - > setName ( name ) ; <nl> lay - > setFlags ( flags ) ; <nl> <nl> + / / Blend mode & opacity <nl> + lay - > setBlendMode ( ( BlendMode ) read16 ( s ) ) ; <nl> + lay - > setOpacity ( read8 ( s ) ) ; <nl> + <nl> / / Cels <nl> int ncels = read32 ( s ) ; <nl> for ( int i = 0 ; i < ncels ; + + i ) { <nl> mmm a / src / app / crash / write_document . cpp <nl> ppp b / src / app / crash / write_document . cpp <nl> class Writer { <nl> CelConstIterator it , begin = static_cast < const LayerImage * > ( lay ) - > getCelBegin ( ) ; <nl> CelConstIterator end = static_cast < const LayerImage * > ( lay ) - > getCelEnd ( ) ; <nl> <nl> + / / Blend mode & opacity <nl> + write16 ( s , ( int ) static_cast < const LayerImage * > ( lay ) - > blendMode ( ) ) ; <nl> + write8 ( s , static_cast < const LayerImage * > ( lay ) - > opacity ( ) ) ; <nl> + <nl> / / Cels <nl> write32 ( s , static_cast < const LayerImage * > ( lay ) - > getCelsCount ( ) ) ; <nl> for ( it = begin ; it ! = end ; + + it ) { <nl>
Save / restore opacity & blend mode correctly from recovery data
aseprite/aseprite
fe5d3236a3d33e7356e640c7d12c00fbd691212b
2017-04-13T19:09:29Z
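The bug here is an asymmetry: the writer and reader of the recovery format must append and consume fields at the same offsets, so adding blend mode and opacity on one side only would shift every later field. A small round-trip sketch with simplified little-endian helpers standing in for aseprite's write16/read8 family:

```cpp
#include <cstdint>
#include <sstream>

// Simplified little-endian stream helpers; the field widths mirror the
// diff (16-bit blend mode, 8-bit opacity).
static void write16(std::ostream& s, uint16_t v) {
    s.put(char(v & 0xff));
    s.put(char(v >> 8));
}
static void write8(std::ostream& s, uint8_t v) { s.put(char(v)); }

static uint16_t read16(std::istream& s) {
    uint16_t lo = uint8_t(s.get());
    uint16_t hi = uint8_t(s.get());
    return uint16_t(lo | (hi << 8));
}
static uint8_t read8(std::istream& s) { return uint8_t(s.get()); }

int main() {
    std::stringstream s;
    write16(s, 3);    // blend mode, as in write_document.cpp
    write8(s, 255);   // opacity
    // The reader consumes the fields in the same order and width, as in
    // read_document.cpp; break the pairing and every later field misparses.
    uint16_t blend = read16(s);
    uint8_t opacity = read8(s);
    return (blend == 3 && opacity == 255) ? 0 : 1;
}
```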
mmm a / cmake / modules / FindBluray . cmake <nl> ppp b / cmake / modules / FindBluray . cmake <nl> <nl> # Bluray : : Bluray - The libbluray library <nl> <nl> if ( PKG_CONFIG_FOUND ) <nl> - pkg_check_modules ( PC_BLURAY libbluray > = 0 . 7 . 0 QUIET ) <nl> + pkg_check_modules ( PC_BLURAY libbluray > = 0 . 9 . 3 QUIET ) <nl> endif ( ) <nl> <nl> find_path ( BLURAY_INCLUDE_DIR libbluray / bluray . h <nl> mmm a / docs / README . linux <nl> ppp b / docs / README . linux <nl> external libraries enabled ) . <nl> Build - Depends : autoconf , automake , autopoint , autotools - dev , cmake , curl , <nl> default - jre , gawk , gperf , libao - dev , libasound2 - dev , <nl> libass - dev ( > = 0 . 9 . 8 ) , libavahi - client - dev , libavahi - common - dev , libbluetooth - dev , <nl> - libbluray - dev ( > = 0 . 7 . 0 ) , libbz2 - dev , libcap - dev , <nl> + libbluray - dev ( > = 0 . 9 . 3 ) , libbz2 - dev , libcap - dev , <nl> libcdio - dev , libcec - dev , libcurl4 - openssl - dev | libcurl4 - gnutls - dev | libcurl - dev , <nl> libcwiid - dev , libdbus - 1 - dev , libegl1 - mesa - dev , libfmt3 - dev , libfontconfig - dev , libfreetype6 - dev , <nl> libfribidi - dev , libgif - dev ( > = 4 . 1 . 6 ) , libgl1 - mesa - dev | libgl - dev , libglu1 - mesa - dev | libglu - dev , <nl> mmm a / docs / README . opensuse <nl> ppp b / docs / README . opensuse <nl> libunistring - devel <nl> Build - Depends : autoconf , automake , autopoint , autotools - dev , cmake , curl , <nl> default - jre , gawk , gperf , libao - dev , libasound2 - dev , <nl> libass - dev ( > = 0 . 9 . 8 ) , libavahi - client - dev , libavahi - common - dev , libbluetooth - dev , <nl> - libbluray - dev ( > = 0 . 7 . 0 ) , libbz2 - dev , libcap - dev , <nl> + libbluray - dev ( > = 0 . 9 . 3 ) , libbz2 - dev , libcap - dev , <nl> libcdio - dev , libcec - dev , libcurl4 - openssl - dev | libcurl4 - gnutls - dev | libcurl - dev , <nl> libcwiid - dev , libdbus - 1 - dev , libegl1 - mesa - dev , libfontconfig - dev , libfreetype6 - dev , <nl> libfribidi - dev , libgif - dev ( > = 4 . 1 . 6 ) , libgl1 - mesa - dev | libgl - dev , libglu1 - mesa - dev | libglu - dev , <nl> mmm a / lib / DllLibbluray . h <nl> ppp b / lib / DllLibbluray . 
h <nl> extern " C " <nl> } <nl> <nl> typedef int ( * read_blocks_f ) ( void * handle , void * buf , int lba , int num_blocks ) ; <nl> + typedef struct bd_dir_s * ( * open_dir_f ) ( void * handle , const char * rel_path ) ; <nl> + typedef struct bd_file_s * ( * open_file_f ) ( void * handle , const char * rel_path ) ; <nl> <nl> class DllLibblurayInterface <nl> { <nl> class DllLibblurayInterface <nl> virtual BLURAY * bd_open ( const char * device_path , const char * keyfile_path ) = 0 ; <nl> virtual int bd_open_disc ( BLURAY * bd , const char * device_path , const char * keyfile_path ) = 0 ; <nl> virtual int bd_open_stream ( BLURAY * bd , void * read_blocks_handle , read_blocks_f func ) = 0 ; <nl> + virtual int bd_open_files ( BLURAY * bd , void * handle , open_dir_f dir_func , open_file_f file_func ) = 0 ; <nl> virtual BLURAY * bd_init ( void ) = 0 ; <nl> virtual void bd_close ( BLURAY * bd ) = 0 ; <nl> virtual int64_t bd_seek ( BLURAY * bd , uint64_t pos ) = 0 ; <nl> class DllLibbluray : public DllDynamic , DllLibblurayInterface <nl> DEFINE_METHOD2 ( BLURAY * , bd_open , ( const char * p1 , const char * p2 ) ) <nl> DEFINE_METHOD3 ( int , bd_open_disc , ( BLURAY * p1 , const char * p2 , const char * p3 ) ) <nl> DEFINE_METHOD3 ( int , bd_open_stream , ( BLURAY * p1 , void * p2 , read_blocks_f p3 ) ) <nl> + DEFINE_METHOD4 ( int , bd_open_files , ( BLURAY * p1 , void * p2 , open_dir_f p3 , open_file_f p4 ) ) <nl> DEFINE_METHOD0 ( BLURAY * , bd_init ) <nl> DEFINE_METHOD1 ( void , bd_close , ( BLURAY * p1 ) ) <nl> DEFINE_METHOD2 ( int64_t , bd_seek , ( BLURAY * p1 , uint64_t p2 ) ) <nl> class DllLibbluray : public DllDynamic , DllLibblurayInterface <nl> RESOLVE_METHOD_RENAME ( bd_open , bd_open ) <nl> RESOLVE_METHOD ( bd_open_disc ) <nl> RESOLVE_METHOD ( bd_open_stream ) <nl> + RESOLVE_METHOD ( bd_open_files ) <nl> RESOLVE_METHOD ( bd_init ) <nl> RESOLVE_METHOD_RENAME ( bd_close , bd_close ) <nl> RESOLVE_METHOD_RENAME ( bd_seek , bd_seek ) <nl> class DllLibbluray : public DllDynamic , DllLibblurayInterface <nl> static int file_eof ( BD_FILE_H * file ) ; <nl> static int64_t file_read ( BD_FILE_H * file , uint8_t * buf , int64_t size ) ; <nl> static int64_t file_write ( BD_FILE_H * file , const uint8_t * buf , int64_t size ) ; <nl> - static BD_FILE_H * file_open ( const char * filename , const char * mode ) ; <nl> static void dir_close ( BD_DIR_H * dir ) ; <nl> static int dir_read ( BD_DIR_H * dir , BD_DIRENT * entry ) ; <nl> - static BD_DIR_H * dir_open ( const char * dirname ) ; <nl> + static BD_FILE_H * file_open ( void * handle , const char * rel_path ) ; <nl> + static BD_DIR_H * dir_open ( void * handle , const char * rel_path ) ; <nl> static void bluray_logger ( const char * msg ) ; <nl> } ; <nl> mmm a / xbmc / cores / VideoPlayer / DVDInputStreams / DVDInputStreamBluray . cpp <nl> ppp b / xbmc / cores / VideoPlayer / DVDInputStreams / DVDInputStreamBluray . 
cpp <nl> int64_t DllLibbluray : : file_write ( BD_FILE_H * file , const uint8_t * buf , int64_t si <nl> return static_cast < int64_t > ( static_cast < CFile * > ( file - > internal ) - > Write ( buf , static_cast < size_t > ( size ) ) ) ; <nl> } <nl> <nl> - BD_FILE_H * DllLibbluray : : file_open ( const char * filename , const char * mode ) <nl> - { <nl> - BD_FILE_H * file = new BD_FILE_H ; <nl> - <nl> - file - > close = file_close ; <nl> - file - > seek = file_seek ; <nl> - file - > read = file_read ; <nl> - file - > write = file_write ; <nl> - file - > tell = file_tell ; <nl> - file - > eof = file_eof ; <nl> - <nl> - CFile * fp = new CFile ( ) ; <nl> - if ( mode ! = nullptr & & StringUtils : : EqualsNoCase ( mode , " wb " ) & & fp - > OpenForWrite ( filename , true ) ) <nl> - { <nl> - file - > internal = ( void * ) fp ; <nl> - return file ; <nl> - } <nl> - else if ( fp - > Open ( filename ) ) <nl> - { <nl> - file - > internal = ( void * ) fp ; <nl> - return file ; <nl> - } <nl> - <nl> - CLog : : Log ( LOGDEBUG , " CDVDInputStreamBluray - Error opening file ! ( % s ) " , CURL : : GetRedacted ( filename ) . c_str ( ) ) ; <nl> - <nl> - delete fp ; <nl> - delete file ; <nl> - <nl> - return NULL ; <nl> - } <nl> - <nl> struct SDirState <nl> { <nl> SDirState ( ) <nl> void DllLibbluray : : dir_close ( BD_DIR_H * dir ) <nl> } <nl> } <nl> <nl> - <nl> int DllLibbluray : : dir_read ( BD_DIR_H * dir , BD_DIRENT * entry ) <nl> { <nl> SDirState * state = static_cast < SDirState * > ( dir - > internal ) ; <nl> int DllLibbluray : : dir_read ( BD_DIR_H * dir , BD_DIRENT * entry ) <nl> return 0 ; <nl> } <nl> <nl> - BD_DIR_H * DllLibbluray : : dir_open ( const char * dirname ) <nl> + BD_DIR_H * DllLibbluray : : dir_open ( void * handle , const char * rel_path ) <nl> { <nl> - CLog : : Log ( LOGDEBUG , " CDVDInputStreamBluray - Opening dir % s \ n " , dirname ) ; <nl> - SDirState * st = new SDirState ( ) ; <nl> + CLog : : Log ( LOGDEBUG , " CDVDInputStreamBluray - Opening dir % s \ n " , rel_path ) ; <nl> <nl> - std : : string strDirname ( dirname ) ; <nl> + std : : string strRelPath ( rel_path ) ; <nl> + std : : string * strBasePath = reinterpret_cast < std : : string * > ( handle ) ; <nl> + if ( ! strBasePath ) <nl> + { <nl> + CLog : : Log ( LOGDEBUG , " CDVDInputStreamBluray - Error opening dir , null handle ! " ) ; <nl> + return NULL ; <nl> + } <nl> <nl> - if ( ! CDirectory : : GetDirectory ( strDirname , st - > list ) ) <nl> - { <nl> - CLog : : Log ( LOGDEBUG , " CDVDInputStreamBluray - Error opening dir ! ( % s ) \ n " , dirname ) ; <nl> - delete st ; <nl> - return NULL ; <nl> - } <nl> + std : : string strDirname = URIUtils : : AddFileToFolder ( * strBasePath , strRelPath ) ; <nl> + URIUtils : : RemoveSlashAtEnd ( strDirname ) ; <nl> <nl> - BD_DIR_H * dir = new BD_DIR_H ; <nl> - dir - > close = dir_close ; <nl> - dir - > read = dir_read ; <nl> - dir - > internal = ( void * ) st ; <nl> + SDirState * st = new SDirState ( ) ; <nl> + if ( ! CDirectory : : GetDirectory ( strDirname , st - > list ) ) <nl> + { <nl> + CLog : : Log ( LOGDEBUG , " CDVDInputStreamBluray - Error opening dir ! 
( % s ) \ n " , rel_path ) ; <nl> + delete st ; <nl> + return NULL ; <nl> + } <nl> <nl> - return dir ; <nl> + BD_DIR_H * dir = new BD_DIR_H ; <nl> + dir - > close = DllLibbluray : : dir_close ; <nl> + dir - > read = DllLibbluray : : dir_read ; <nl> + dir - > internal = ( void * ) st ; <nl> + <nl> + return dir ; <nl> + } <nl> + BD_FILE_H * DllLibbluray : : file_open ( void * handle , const char * rel_path ) <nl> + { <nl> + <nl> + std : : string strRelPath ( rel_path ) ; <nl> + std : : string * strBasePath = reinterpret_cast < std : : string * > ( handle ) ; <nl> + if ( ! strBasePath ) <nl> + { <nl> + CLog : : Log ( LOGDEBUG , " CDVDInputStreamBluray - Error opening dir , null handle ! " ) ; <nl> + return NULL ; <nl> + } <nl> + <nl> + std : : string strFilename = URIUtils : : AddFileToFolder ( * strBasePath , strRelPath ) ; <nl> + <nl> + BD_FILE_H * file = new BD_FILE_H ; <nl> + <nl> + file - > close = DllLibbluray : : file_close ; <nl> + file - > seek = DllLibbluray : : file_seek ; <nl> + file - > read = DllLibbluray : : file_read ; <nl> + file - > write = DllLibbluray : : file_write ; <nl> + file - > tell = DllLibbluray : : file_tell ; <nl> + file - > eof = DllLibbluray : : file_eof ; <nl> + <nl> + CFile * fp = new CFile ( ) ; <nl> + if ( fp - > Open ( strFilename ) ) <nl> + { <nl> + file - > internal = ( void * ) fp ; <nl> + return file ; <nl> + } <nl> + <nl> + CLog : : Log ( LOGDEBUG , " CDVDInputStreamBluray - Error opening file ! ( % s ) " , CURL : : GetRedacted ( strFilename ) . c_str ( ) ) ; <nl> + <nl> + delete fp ; <nl> + delete file ; <nl> + <nl> + return NULL ; <nl> } <nl> <nl> void DllLibbluray : : bluray_logger ( const char * msg ) <nl> void bluray_overlay_argb_cb ( void * this_gen , const struct bd_argb_overlay_s * co <nl> # endif <nl> <nl> CDVDInputStreamBluray : : CDVDInputStreamBluray ( IVideoPlayer * player , const CFileItem & fileitem ) : <nl> - CDVDInputStream ( DVDSTREAM_TYPE_BLURAY , fileitem ) , m_pstream ( nullptr ) <nl> + CDVDInputStream ( DVDSTREAM_TYPE_BLURAY , fileitem ) , m_pstream ( nullptr ) , m_rootPath ( " " ) <nl> { <nl> m_title = NULL ; <nl> m_clip = ( uint32_t ) - 1 ; <nl> bool CDVDInputStreamBluray : : Open ( ) <nl> if ( ! m_dll ) <nl> return false ; <nl> <nl> - m_dll - > bd_register_dir ( DllLibbluray : : dir_open ) ; <nl> - m_dll - > bd_register_file ( DllLibbluray : : file_open ) ; <nl> m_dll - > bd_set_debug_handler ( DllLibbluray : : bluray_logger ) ; <nl> m_dll - > bd_set_debug_mask ( DBG_CRIT | DBG_BLURAY | DBG_NAV ) ; <nl> <nl> bool CDVDInputStreamBluray : : Open ( ) <nl> return false ; <nl> } <nl> } <nl> - else if ( ! m_dll - > bd_open_disc ( m_bd , root . c_str ( ) , NULL ) ) <nl> + else <nl> { <nl> - CLog : : Log ( LOGERROR , " CDVDInputStreamBluray : : Open - failed to open % s " , root . c_str ( ) ) ; <nl> - return false ; <nl> + m_rootPath = root ; <nl> + if ( ! m_dll - > bd_open_files ( m_bd , & m_rootPath , DllLibbluray : : dir_open , DllLibbluray : : file_open ) ) <nl> + { <nl> + CLog : : Log ( LOGERROR , " CDVDInputStreamBluray : : Open - failed to open % s " , CURL : : GetRedacted ( root ) . c_str ( ) ) ; <nl> + return false ; <nl> + } <nl> } <nl> <nl> m_dll - > bd_get_event ( m_bd , NULL ) ; <nl> void CDVDInputStreamBluray : : Close ( ) <nl> m_bd = NULL ; <nl> m_title = NULL ; <nl> m_pstream . reset ( ) ; <nl> + m_rootPath . clear ( ) ; <nl> } <nl> <nl> void CDVDInputStreamBluray : : ProcessEvent ( ) { <nl> mmm a / xbmc / cores / VideoPlayer / DVDInputStreams / DVDInputStreamBluray . 
h <nl> ppp b / xbmc / cores / VideoPlayer / DVDInputStreams / DVDInputStreamBluray . h <nl> class CDVDInputStreamBluray <nl> bool OpenStream ( CFileItem & item ) ; <nl> void SetupPlayerSettings ( ) ; <nl> std : : unique_ptr < CDVDInputStreamFile > m_pstream ; <nl> + std : : string m_rootPath ; <nl> } ; <nl> mmm a / xbmc / filesystem / BlurayDirectory . cpp <nl> ppp b / xbmc / filesystem / BlurayDirectory . cpp <nl> bool CBlurayDirectory : : GetDirectory ( const CURL & url , CFileItemList & items ) <nl> return false ; <nl> } <nl> <nl> - m_dll - > bd_register_dir ( DllLibbluray : : dir_open ) ; <nl> - m_dll - > bd_register_file ( DllLibbluray : : file_open ) ; <nl> m_dll - > bd_set_debug_handler ( DllLibbluray : : bluray_logger ) ; <nl> m_dll - > bd_set_debug_mask ( DBG_CRIT | DBG_BLURAY | DBG_NAV ) ; <nl> <nl> - m_bd = m_dll - > bd_open ( root . c_str ( ) , NULL ) ; <nl> + m_bd = m_dll - > bd_init ( ) ; <nl> + std : : unique_ptr < std : : string > rootPath ( new std : : string ( root ) ) ; <nl> + m_dll - > bd_open_files ( m_bd , rootPath . get ( ) , DllLibbluray : : dir_open , DllLibbluray : : file_open ) ; <nl> <nl> if ( ! m_bd ) <nl> { <nl>
[ bluray ] Use bd_open_files . This makes BD - J menus functional for Blu - rays accessed via a protocol like smb : / / .
xbmc/xbmc
25d755c0365b2699161bc303bd55afae895fc2ec
2017-07-07T07:02:53Z
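The xbmc commit above swaps libbluray's process-global bd_register_dir / bd_register_file hooks for per-instance callbacks registered through bd_open_files, passing the VFS root path as the opaque handle. Below is a minimal C++ sketch of that wiring, assuming a libbluray build that exposes bd_open_files (as the diff does); OpenDisc, MyDirOpen, MyFileOpen and s_rootPath are hypothetical stand-ins for the DllLibbluray / CDVDInputStreamBluray code, not the actual Kodi implementation:

    #include <string>
    #include <libbluray/bluray.h>      // bd_init(), bd_open_files(), bd_close()
    #include <libbluray/filesystem.h>  // BD_FILE_H / BD_DIR_H callback types

    // Hypothetical adapters; in the commit these wrap Kodi's CFile/CDirectory
    // and resolve rel_path against the root path carried in `handle`.
    static BD_FILE_H* MyFileOpen(void* handle, const char* rel_path) {
      // Real code joins *static_cast<std::string*>(handle) with rel_path and
      // fills the BD_FILE_H read/seek/tell/eof/close hooks.
      return nullptr;  // stub
    }

    static BD_DIR_H* MyDirOpen(void* handle, const char* rel_path) {
      // Real code lists the joined directory through the VFS.
      return nullptr;  // stub
    }

    // The opaque handle points at the VFS root, so the callbacks can build
    // full URLs (smb://..., nfs://...) instead of touching the local
    // filesystem only — which is what unblocks BD-J menus here.
    static std::string s_rootPath;  // must outlive the BLURAY instance

    bool OpenDisc(const std::string& root) {
      BLURAY* bd = bd_init();
      if (bd == nullptr)
        return false;

      s_rootPath = root;
      if (!bd_open_files(bd, &s_rootPath, MyDirOpen, MyFileOpen)) {
        bd_close(bd);
        return false;
      }
      // ... bd_get_event(bd, nullptr), playback, etc. ...
      return true;
    }

CBlurayDirectory::GetDirectory in the same diff reuses the identical registration, which is why both call sites drop the global bd_register_* calls.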
mmm a / tensorflow / contrib / boosted_trees / lib / learner / batch / ordinal_split_handler . py <nl> ppp b / tensorflow / contrib / boosted_trees / lib / learner / batch / ordinal_split_handler . py <nl> def __init__ ( self , <nl> name = " StatsAccumulator / { } " . format ( self . _name ) ) <nl> # Allocate both stats accumulator and quantile accumulator on the same <nl> # device so that we can build splits with fewer RPCs . <nl> - with ops . colocate_with ( self . _stats_accumulator . resource ( ) ) : <nl> + with ops . colocate_with ( self . _stats_accumulator . resource_handle ) : <nl> self . _quantile_accumulator = quantile_ops . QuantileAccumulator ( <nl> init_stamp_token , <nl> epsilon = epsilon , <nl> def make_splits ( self , stamp_token , next_stamp_token , class_id ) : <nl> handler = make_dense_split_tensor <nl> <nl> are_splits_ready , partition_ids , gains , split_infos = ( <nl> - handler ( self . _quantile_accumulator . resource ( ) , <nl> - self . _stats_accumulator . resource ( ) , stamp_token , <nl> + handler ( self . _quantile_accumulator . resource_handle , <nl> + self . _stats_accumulator . resource_handle , stamp_token , <nl> next_stamp_token , self . _multiclass_strategy , class_id , <nl> self . _feature_column_group_id , self . _l1_regularization , <nl> self . _l2_regularization , self . _tree_complexity_regularization , <nl> def make_splits ( self , stamp_token , next_stamp_token , class_id ) : <nl> handler = make_sparse_split_tensor <nl> <nl> are_splits_ready , partition_ids , gains , split_infos = ( <nl> - handler ( self . _quantile_accumulator . resource ( ) , <nl> - self . _stats_accumulator . resource ( ) , stamp_token , <nl> + handler ( self . _quantile_accumulator . resource_handle , <nl> + self . _stats_accumulator . resource_handle , stamp_token , <nl> next_stamp_token , self . _multiclass_strategy , class_id , <nl> self . _feature_column_group_id , self . _l1_regularization , <nl> self . _l2_regularization , self . _tree_complexity_regularization , <nl> mmm a / tensorflow / contrib / boosted_trees / python / kernel_tests / stats_accumulator_ops_test . py <nl> ppp b / tensorflow / contrib / boosted_trees / python / kernel_tests / stats_accumulator_ops_test . py <nl> def testSimpleAcculumator ( self ) : <nl> stamp_token = 0 , <nl> gradient_shape = tensor_shape . scalar ( ) , <nl> hessian_shape = tensor_shape . scalar ( ) ) <nl> - with ops . control_dependencies ( [ accumulator . _create_op ] ) : <nl> + with ops . control_dependencies ( [ accumulator . initializer ] ) : <nl> op1 = accumulator . add ( <nl> stamp_token = 0 , <nl> partition_ids = [ 1 , 2 ] , <nl> def testMultidimensionalAcculumator ( self ) : <nl> stamp_token = 0 , <nl> gradient_shape = tensor_shape . scalar ( ) , <nl> hessian_shape = tensor_shape . scalar ( ) ) <nl> - with ops . control_dependencies ( [ accumulator . _create_op ] ) : <nl> + with ops . control_dependencies ( [ accumulator . initializer ] ) : <nl> op1 = accumulator . add ( <nl> stamp_token = 0 , <nl> partition_ids = [ 1 , 2 , 1 ] , <nl> def testDropStaleUpdate ( self ) : <nl> stamp_token = 0 , <nl> gradient_shape = tensor_shape . scalar ( ) , <nl> hessian_shape = tensor_shape . scalar ( ) ) <nl> - with ops . control_dependencies ( [ accumulator . _create_op ] ) : <nl> + with ops . control_dependencies ( [ accumulator . initializer ] ) : <nl> op1 = accumulator . add ( <nl> stamp_token = 0 , <nl> partition_ids = [ 1 , 2 ] , <nl> def testSerialize ( self ) : <nl> stamp_token = 0 , <nl> gradient_shape = tensor_shape . 
scalar ( ) , <nl> hessian_shape = tensor_shape . scalar ( ) ) <nl> - with ops . control_dependencies ( [ accumulator . _create_op ] ) : <nl> + with ops . control_dependencies ( [ accumulator . initializer ] ) : <nl> op1 = accumulator . add ( <nl> stamp_token = 0 , <nl> partition_ids = [ 1 , 2 ] , <nl> def testSerialize ( self ) : <nl> <nl> with ops . control_dependencies ( [ op1 ] ) : <nl> ( stamp_token , num_updates , partition_1 , feature_1 , grads_1 , <nl> - hessians_1 ) = accumulator . serialize ( ) <nl> + hessians_1 ) = accumulator . saveable . serialize ( ) <nl> # Make sure that the accumulator hasn ' t changed during serialization . <nl> with ops . control_dependencies ( [ stamp_token ] ) : <nl> num_updates_2 , partition_2 , feature_2 , grads_2 , hessians_2 = ( <nl> def testDeserialize ( self ) : <nl> stamp_token = 0 , <nl> gradient_shape = tensor_shape . scalar ( ) , <nl> hessian_shape = tensor_shape . scalar ( ) ) <nl> - with ops . control_dependencies ( [ accumulator . _create_op ] ) : <nl> + with ops . control_dependencies ( [ accumulator . initializer ] ) : <nl> # These will be deleted due to deserialize call . <nl> op1 = accumulator . add ( <nl> stamp_token = 0 , <nl> def testDeserialize ( self ) : <nl> <nl> with ops . control_dependencies ( [ op1 ] ) : <nl> deserialize = ( <nl> - accumulator . deserialize ( <nl> + accumulator . saveable . deserialize ( <nl> stamp_token = 2 , <nl> num_updates = 3 , <nl> partition_ids = [ 3 , 4 ] , <nl> def testSimpleAcculumator ( self ) : <nl> stamp_token = 0 , <nl> gradient_shape = tensor_shape . TensorShape ( [ 2 ] ) , <nl> hessian_shape = tensor_shape . TensorShape ( [ 2 , 2 ] ) ) <nl> - with ops . control_dependencies ( [ accumulator . _create_op ] ) : <nl> + with ops . control_dependencies ( [ accumulator . initializer ] ) : <nl> op1 = accumulator . add ( <nl> stamp_token = 0 , <nl> partition_ids = [ 1 , 2 ] , <nl> def testMultidimensionalAcculumator ( self ) : <nl> stamp_token = 0 , <nl> gradient_shape = tensor_shape . TensorShape ( [ 2 ] ) , <nl> hessian_shape = tensor_shape . TensorShape ( [ 2 , 2 ] ) ) <nl> - with ops . control_dependencies ( [ accumulator . _create_op ] ) : <nl> + with ops . control_dependencies ( [ accumulator . initializer ] ) : <nl> op1 = accumulator . add ( <nl> stamp_token = 0 , <nl> partition_ids = [ 1 , 2 ] , <nl> def testDropStaleUpdate ( self ) : <nl> stamp_token = 0 , <nl> gradient_shape = tensor_shape . TensorShape ( [ 2 ] ) , <nl> hessian_shape = tensor_shape . TensorShape ( [ 2 , 2 ] ) ) <nl> - with ops . control_dependencies ( [ accumulator . _create_op ] ) : <nl> + with ops . control_dependencies ( [ accumulator . initializer ] ) : <nl> op1 = accumulator . add ( <nl> stamp_token = 0 , <nl> partition_ids = [ 1 , 2 ] , <nl> def testSerialize ( self ) : <nl> stamp_token = 0 , <nl> gradient_shape = tensor_shape . TensorShape ( [ 2 ] ) , <nl> hessian_shape = tensor_shape . TensorShape ( [ 2 , 2 ] ) ) <nl> - with ops . control_dependencies ( [ accumulator . _create_op ] ) : <nl> + with ops . control_dependencies ( [ accumulator . initializer ] ) : <nl> op1 = accumulator . add ( <nl> stamp_token = 0 , <nl> partition_ids = [ 1 , 2 ] , <nl> def testSerialize ( self ) : <nl> <nl> with ops . control_dependencies ( [ op1 ] ) : <nl> ( stamp_token , num_updates_1 , partition_1 , feature_1 , grads_1 , <nl> - hessians_1 ) = accumulator . serialize ( ) <nl> + hessians_1 ) = accumulator . saveable . serialize ( ) <nl> # Make sure that the accumulator hasn ' t changed during serialization . <nl> with ops . 
control_dependencies ( [ stamp_token ] ) : <nl> num_updates_2 , partition_2 , feature_2 , grads_2 , hessians_2 = ( <nl> def testDeserialize ( self ) : <nl> stamp_token = 0 , <nl> gradient_shape = tensor_shape . TensorShape ( [ 2 ] ) , <nl> hessian_shape = tensor_shape . TensorShape ( [ 2 , 2 ] ) ) <nl> - with ops . control_dependencies ( [ accumulator . _create_op ] ) : <nl> + with ops . control_dependencies ( [ accumulator . initializer ] ) : <nl> # These will be deleted due to deserialize call . <nl> op1 = accumulator . add ( <nl> stamp_token = 0 , <nl> def testDeserialize ( self ) : <nl> 0 . 08 ] ] ] ) <nl> <nl> with ops . control_dependencies ( [ op1 ] ) : <nl> - deserialize = accumulator . deserialize ( <nl> + deserialize = accumulator . saveable . deserialize ( <nl> stamp_token = 2 , <nl> num_updates = 3 , <nl> partition_ids = [ 3 , 4 ] , <nl> mmm a / tensorflow / contrib / boosted_trees / python / ops / model_ops . py <nl> ppp b / tensorflow / contrib / boosted_trees / python / ops / model_ops . py <nl> <nl> from __future__ import division <nl> from __future__ import print_function <nl> <nl> + import functools <nl> + <nl> # pylint : disable = unused - import <nl> from tensorflow . contrib . boosted_trees . python . ops import boosted_trees_ops_loader <nl> # pylint : enable = unused - import <nl> <nl> from tensorflow . python . framework import ops <nl> from tensorflow . python . ops import resources <nl> from tensorflow . python . training import saver <nl> + from tensorflow . python . training . checkpointable import tracking <nl> <nl> ops . NotDifferentiable ( " TreeEnsembleVariable " ) <nl> ops . NotDifferentiable ( " TreeEnsembleSerialize " ) <nl> def restore ( self , restored_tensors , unused_restored_shapes ) : <nl> tree_ensemble_config = restored_tensors [ 1 ] ) <nl> <nl> <nl> + class TreeEnsembleVariable ( tracking . TrackableResource ) : <nl> + " " " A Tree ensemble model . " " " <nl> + <nl> + def __init__ ( self , stamp_token , tree_ensemble_config , name , container = None ) : <nl> + self . _stamp_token = stamp_token <nl> + self . _tree_ensemble_config = tree_ensemble_config <nl> + self . _name = name <nl> + self . _container = container <nl> + self . _init_op = None <nl> + super ( TreeEnsembleVariable , self ) . __init__ ( ) <nl> + <nl> + def create_resource ( self ) : <nl> + return gen_model_ops . decision_tree_ensemble_resource_handle_op ( <nl> + self . _container , shared_name = self . _name , name = self . _name ) <nl> + <nl> + def initialize ( self ) : <nl> + return gen_model_ops . create_tree_ensemble_variable ( <nl> + self . resource_handle , self . _stamp_token , self . _tree_ensemble_config ) <nl> + <nl> + @ property <nl> + def initializer ( self ) : <nl> + if self . _init_op is None : <nl> + self . _init_op = self . initialize ( ) <nl> + return self . _init_op <nl> + <nl> + def is_initialized ( self ) : <nl> + return gen_model_ops . tree_ensemble_is_initialized_op ( self . resource_handle ) <nl> + <nl> + def _gather_saveables_for_checkpoint ( self ) : <nl> + return { <nl> + " tree_ensemble_variable " : <nl> + functools . partial ( <nl> + TreeEnsembleVariableSavable , <nl> + tree_ensemble_handle = self . resource_handle , <nl> + create_op = self . initializer ) <nl> + } <nl> + <nl> + <nl> def tree_ensemble_variable ( stamp_token , <nl> tree_ensemble_config , <nl> name , <nl> def tree_ensemble_variable ( stamp_token , <nl> A ` Tensor ` of type mutable ` string ` . The handle to the tree ensemble . <nl> " " " <nl> with ops . 
name_scope ( name , " TreeEnsembleVariable " ) as name : <nl> - resource_handle = gen_model_ops . decision_tree_ensemble_resource_handle_op ( <nl> - container , shared_name = name , name = name ) <nl> - create_op = gen_model_ops . create_tree_ensemble_variable ( <nl> - resource_handle , stamp_token , tree_ensemble_config ) <nl> - is_initialized_op = gen_model_ops . tree_ensemble_is_initialized_op ( <nl> - resource_handle ) <nl> + tree_ensemble_var = TreeEnsembleVariable ( stamp_token , tree_ensemble_config , <nl> + name , container ) <nl> + resource_handle = tree_ensemble_var . resource_handle <nl> + create_op = tree_ensemble_var . initializer <nl> + is_initialized_op = tree_ensemble_var . is_initialized ( ) <nl> # Adds the variable to the savable list . <nl> saveable = TreeEnsembleVariableSavable ( resource_handle , create_op , <nl> resource_handle . name ) <nl> mmm a / tensorflow / contrib / boosted_trees / python / ops / quantile_ops . py <nl> ppp b / tensorflow / contrib / boosted_trees / python / ops / quantile_ops . py <nl> <nl> from tensorflow . python . framework import sparse_tensor <nl> from tensorflow . python . ops import resources <nl> from tensorflow . python . training import saver <nl> + from tensorflow . python . training . checkpointable import tracking <nl> <nl> # Pattern to remove all non alpha numeric from a string . <nl> _PATTERN = re . compile ( r " [ \ W_ ] + " ) <nl> <nl> <nl> - class QuantileAccumulator ( saver . BaseSaverBuilder . SaveableObject ) : <nl> - " " " A resource that allows distributed quantile computation . " " " <nl> - <nl> - def __init__ ( self , <nl> - init_stamp_token , <nl> - epsilon , <nl> - num_quantiles , <nl> - max_elements = None , <nl> - name = None , <nl> - container = None , <nl> - generate_quantiles = False ) : <nl> - " " " Creates a QuantileAccumulator object . <nl> - <nl> - Args : <nl> - init_stamp_token : The initial value for the stamp token . <nl> - epsilon : Error bound on the quantile computation . <nl> - num_quantiles : Number of quantiles to produce from the final summary . <nl> - max_elements : Maximum number of elements added to the accumulator . <nl> - name : the name to save the accumulator under . <nl> - container : An optional ` string ` . Defaults to ` " " ` <nl> - generate_quantiles : Generate quantiles instead of approximate boundaries . <nl> - If true , exactly ` num_quantiles ` will be produced in the final summary . <nl> - " " " <nl> - self . _epsilon = epsilon <nl> - self . _generate_quantiles = generate_quantiles <nl> + class QuantileAccumulatorSaveable ( saver . BaseSaverBuilder . SaveableObject ) : <nl> + " " " SaveableObject implementation for QuantileAccumulator . " " " <nl> <nl> - name = _PATTERN . sub ( " " , name ) <nl> - with ops . name_scope ( name , " QuantileAccumulator " ) as name : <nl> - self . _quantile_accumulator_handle = ( <nl> - gen_quantile_ops . quantile_stream_resource_handle_op ( <nl> - container = container , shared_name = name , name = name ) ) <nl> - self . _create_op = gen_quantile_ops . create_quantile_accumulator ( <nl> - self . _quantile_accumulator_handle , <nl> - init_stamp_token , <nl> - epsilon = epsilon , <nl> - max_elements = max_elements , <nl> - num_quantiles = num_quantiles , <nl> - generate_quantiles = generate_quantiles ) <nl> - is_initialized_op = gen_quantile_ops . quantile_accumulator_is_initialized ( <nl> - self . _quantile_accumulator_handle ) <nl> - resources . register_resource ( self . _quantile_accumulator_handle , <nl> - self . 
_create_op , is_initialized_op ) <nl> - self . _make_savable ( name ) <nl> - <nl> - def _make_savable ( self , name ) : <nl> + def __init__ ( self , resource_handle , create_op , name ) : <nl> + self . _resource_handle = resource_handle <nl> + self . _create_op = create_op <nl> stamp_token , state , are_buckets_ready , buckets = ( <nl> - gen_quantile_ops . quantile_accumulator_serialize ( <nl> - self . _quantile_accumulator_handle ) ) <nl> + gen_quantile_ops . quantile_accumulator_serialize ( resource_handle ) ) <nl> # slice_spec is useful for saving a slice from a variable . <nl> # It ' s not meaningful in quantile accumulator . <nl> slice_spec = " " <nl> def make_save_spec ( tensor , suffix ) : <nl> specs + = [ make_save_spec ( state , " _state " ) ] <nl> specs + = [ make_save_spec ( are_buckets_ready , " _are_buckets_ready " ) ] <nl> specs + = [ make_save_spec ( buckets , " buckets " ) ] <nl> - super ( QuantileAccumulator , <nl> - self ) . __init__ ( self . _quantile_accumulator_handle , specs , name ) <nl> - ops . add_to_collection ( ops . GraphKeys . SAVEABLE_OBJECTS , self ) <nl> + super ( QuantileAccumulatorSaveable , self ) . __init__ ( self . _resource_handle , <nl> + specs , name ) <nl> <nl> def restore ( self , restored_tensors , unused_restored_shapes ) : <nl> " " " Restores the associated quantile accumulator from ' restored_tensors ' . <nl> def restore ( self , restored_tensors , unused_restored_shapes ) : <nl> buckets = restored_tensors [ 3 ] <nl> with ops . control_dependencies ( [ self . _create_op ] ) : <nl> return gen_quantile_ops . quantile_accumulator_deserialize ( <nl> - self . _quantile_accumulator_handle , <nl> + self . _resource_handle , <nl> stamp_token = stamp_token , <nl> stream_state = state , <nl> are_buckets_ready = are_buckets_ready , <nl> buckets = buckets ) <nl> <nl> + <nl> + class QuantileAccumulator ( tracking . TrackableResource ) : <nl> + " " " A resource that allows distributed quantile computation . " " " <nl> + <nl> + def __init__ ( self , <nl> + init_stamp_token , <nl> + epsilon , <nl> + num_quantiles , <nl> + max_elements = None , <nl> + name = None , <nl> + container = None , <nl> + generate_quantiles = False ) : <nl> + " " " Creates a QuantileAccumulator object . <nl> + <nl> + Args : <nl> + init_stamp_token : The initial value for the stamp token . <nl> + epsilon : Error bound on the quantile computation . <nl> + num_quantiles : Number of quantiles to produce from the final summary . <nl> + max_elements : Maximum number of elements added to the accumulator . <nl> + name : the name to save the accumulator under . <nl> + container : An optional ` string ` . Defaults to ` " " ` <nl> + generate_quantiles : Generate quantiles instead of approximate boundaries . <nl> + If true , exactly ` num_quantiles ` will be produced in the final summary . <nl> + " " " <nl> + self . _init_stamp_token = init_stamp_token <nl> + self . _epsilon = epsilon <nl> + self . _num_quantiles = num_quantiles <nl> + self . _max_elements = max_elements <nl> + self . _container = container <nl> + self . _generate_quantiles = generate_quantiles <nl> + super ( QuantileAccumulator , self ) . __init__ ( ) <nl> + <nl> + name = _PATTERN . sub ( " " , name ) <nl> + with ops . name_scope ( name , " QuantileAccumulator " ) as name : <nl> + self . _name = name <nl> + self . _resource_handle = self . create_resource ( ) <nl> + self . _init_op = self . initialize ( ) <nl> + is_initialized_op = self . is_initialized ( ) <nl> + resources . register_resource ( self . resource_handle , self . 
_init_op , <nl> + is_initialized_op ) <nl> + self . _saveable = QuantileAccumulatorSaveable ( self . resource_handle , <nl> + self . _init_op , name ) <nl> + ops . add_to_collection ( ops . GraphKeys . SAVEABLE_OBJECTS , self . _saveable ) <nl> + <nl> + def create_resource ( self ) : <nl> + return gen_quantile_ops . quantile_stream_resource_handle_op ( <nl> + container = self . _container , shared_name = self . _name , name = self . _name ) <nl> + <nl> + def initialize ( self ) : <nl> + return gen_quantile_ops . create_quantile_accumulator ( <nl> + self . resource_handle , <nl> + self . _init_stamp_token , <nl> + epsilon = self . _epsilon , <nl> + max_elements = self . _max_elements , <nl> + num_quantiles = self . _num_quantiles , <nl> + generate_quantiles = self . _generate_quantiles ) <nl> + <nl> + @ property <nl> + def initializer ( self ) : <nl> + if self . _init_op is None : <nl> + self . _init_op = self . initialize ( ) <nl> + return self . _init_op <nl> + <nl> + def is_initialized ( self ) : <nl> + return gen_quantile_ops . quantile_accumulator_is_initialized ( <nl> + self . resource_handle ) <nl> + <nl> + def _gather_saveables_for_checkpoint ( self ) : <nl> + return { " quantile_accumulator " , self . saveable } <nl> + <nl> def get_buckets ( self , stamp_token ) : <nl> " " " Returns quantile buckets created during previous flush . " " " <nl> are_buckets_ready , buckets = ( <nl> gen_quantile_ops . quantile_accumulator_get_buckets ( <nl> - quantile_accumulator_handles = [ self . _quantile_accumulator_handle ] , <nl> + quantile_accumulator_handles = [ self . resource_handle ] , <nl> stamp_token = stamp_token ) ) <nl> return are_buckets_ready [ 0 ] , buckets [ 0 ] <nl> <nl> def schedule_get_buckets ( self ) : <nl> " " " Returns a scheduled read of buckets created during previous flush . " " " <nl> return batch_ops_utils . ScheduledStampedResourceOp ( <nl> - resource_handle = self . _quantile_accumulator_handle , <nl> + resource_handle = self . resource_handle , <nl> op = gen_quantile_ops . quantile_accumulator_get_buckets ) <nl> <nl> def _make_summary ( self , column , example_weights ) : <nl> def add_summary ( self , stamp_token , column , example_weights ) : <nl> " " " Adds quantile summary to its stream in resource . " " " <nl> summary = self . _make_summary ( column , example_weights ) <nl> return gen_quantile_ops . quantile_accumulator_add_summaries ( <nl> - quantile_accumulator_handles = [ self . _quantile_accumulator_handle ] , <nl> + quantile_accumulator_handles = [ self . resource_handle ] , <nl> stamp_token = stamp_token , <nl> summaries = [ summary ] ) <nl> <nl> def add_prebuilt_summary ( self , stamp_token , summary ) : <nl> " " " Adds quantile summary to its stream in resource . " " " <nl> return gen_quantile_ops . quantile_accumulator_add_summaries ( <nl> - quantile_accumulator_handles = [ self . _quantile_accumulator_handle ] , <nl> + quantile_accumulator_handles = [ self . resource_handle ] , <nl> stamp_token = stamp_token , <nl> summaries = [ summary ] ) <nl> <nl> def schedule_add_summary ( self , stamp_token , column , example_weights ) : <nl> summary = self . _make_summary ( column , example_weights ) <nl> return batch_ops_utils . ScheduledStampedResourceOp ( <nl> op = gen_quantile_ops . quantile_accumulator_add_summaries , <nl> - resource_handle = self . _quantile_accumulator_handle , <nl> + resource_handle = self . 
resource_handle , <nl> summaries = summary ) <nl> <nl> def flush ( self , stamp_token , next_stamp_token ) : <nl> def flush ( self , stamp_token , next_stamp_token ) : <nl> The flush operation . <nl> " " " <nl> return gen_quantile_ops . quantile_accumulator_flush ( <nl> - quantile_accumulator_handle = self . _quantile_accumulator_handle , <nl> + quantile_accumulator_handle = self . resource_handle , <nl> stamp_token = stamp_token , <nl> next_stamp_token = next_stamp_token ) <nl> <nl> def flush_summary ( self , stamp_token , next_stamp_token ) : <nl> " " " Finalizes quantile summary stream and resets it for next iteration . " " " <nl> result = gen_quantile_ops . quantile_accumulator_flush_summary ( <nl> - quantile_accumulator_handle = self . _quantile_accumulator_handle , <nl> + quantile_accumulator_handle = self . resource_handle , <nl> stamp_token = stamp_token , <nl> next_stamp_token = next_stamp_token ) <nl> return result <nl> - <nl> - def resource ( self ) : <nl> - return self . _quantile_accumulator_handle <nl> mmm a / tensorflow / contrib / boosted_trees / python / ops / stats_accumulator_ops . py <nl> ppp b / tensorflow / contrib / boosted_trees / python / ops / stats_accumulator_ops . py <nl> <nl> from tensorflow . python . framework import tensor_shape <nl> from tensorflow . python . ops import resources <nl> from tensorflow . python . training import saver <nl> + from tensorflow . python . training . checkpointable import tracking <nl> <nl> # Pattern to remove all non alpha numeric from a string . <nl> _PATTERN = re . compile ( r " [ \ W_ ] + " ) <nl> <nl> <nl> - class StatsAccumulator ( saver . BaseSaverBuilder . SaveableObject ) : <nl> + class StatsAccumulatorSaveable ( saver . BaseSaverBuilder . SaveableObject ) : <nl> + " " " SaveableObject implementation for StatsAccumulator . " " " <nl> + <nl> + def __init__ ( self , resource_handle , create_op , is_scalar , name ) : <nl> + self . _create_op = create_op <nl> + self . _resource_handle = resource_handle <nl> + self . _is_scalar = is_scalar <nl> + slice_spec = " " <nl> + saver_name = self . _resource_handle . name <nl> + ( stamp_token , num_updates , partition_ids , feature_ids , gradients , <nl> + hessians ) = self . serialize ( ) <nl> + specs = [ <nl> + saver . BaseSaverBuilder . SaveSpec ( stamp_token , slice_spec , <nl> + saver_name + " _stamp " ) , <nl> + saver . BaseSaverBuilder . SaveSpec ( num_updates , slice_spec , <nl> + saver_name + " _num_updates " ) , <nl> + saver . BaseSaverBuilder . SaveSpec ( partition_ids , slice_spec , <nl> + saver_name + " _partition_ids " ) , <nl> + saver . BaseSaverBuilder . SaveSpec ( feature_ids , slice_spec , <nl> + saver_name + " _feature_ids " ) , <nl> + saver . BaseSaverBuilder . SaveSpec ( gradients , slice_spec , <nl> + saver_name + " _gradients " ) , <nl> + saver . BaseSaverBuilder . SaveSpec ( hessians , slice_spec , <nl> + saver_name + " hessians " ) , <nl> + ] <nl> + super ( StatsAccumulatorSaveable , self ) . __init__ ( self . _resource_handle , specs , <nl> + name ) <nl> + <nl> + def serialize ( self ) : <nl> + " " " Serializes the stats accumulator state . " " " <nl> + if self . _is_scalar : <nl> + return gen_stats_accumulator_ops . stats_accumulator_scalar_serialize ( <nl> + self . _resource_handle ) <nl> + else : <nl> + return gen_stats_accumulator_ops . stats_accumulator_tensor_serialize ( <nl> + self . 
_resource_handle ) <nl> + <nl> + def deserialize ( self , stamp_token , num_updates , partition_ids , feature_ids , <nl> + gradients , hessians ) : <nl> + " " " Resets the stats accumulator with the serialized state . " " " <nl> + if self . _is_scalar : <nl> + return gen_stats_accumulator_ops . stats_accumulator_scalar_deserialize ( <nl> + self . _resource_handle , stamp_token , num_updates , partition_ids , <nl> + feature_ids , gradients , hessians ) <nl> + else : <nl> + return gen_stats_accumulator_ops . stats_accumulator_tensor_deserialize ( <nl> + self . _resource_handle , stamp_token , num_updates , partition_ids , <nl> + feature_ids , gradients , hessians ) <nl> + <nl> + def restore ( self , restored_tensors , unused_restored_shapes ) : <nl> + " " " Restores the associated tree ensemble from ' restored_tensors ' . <nl> + <nl> + Args : <nl> + restored_tensors : the tensors that were loaded from a checkpoint . <nl> + unused_restored_shapes : the shapes this object should conform to after <nl> + restore . Not meaningful for trees . <nl> + <nl> + Returns : <nl> + The operation that restores the state of the tree ensemble variable . <nl> + " " " <nl> + with ops . control_dependencies ( [ self . _create_op ] ) : <nl> + return self . deserialize ( <nl> + stamp_token = restored_tensors [ 0 ] , <nl> + num_updates = restored_tensors [ 1 ] , <nl> + partition_ids = restored_tensors [ 2 ] , <nl> + feature_ids = restored_tensors [ 3 ] , <nl> + gradients = restored_tensors [ 4 ] , <nl> + hessians = restored_tensors [ 5 ] ) <nl> + <nl> + <nl> + class StatsAccumulator ( tracking . TrackableResource ) : <nl> " " " A resource that allows to accumulate gradients and hessians . <nl> <nl> For consistency guarantees , we use read and write stamp tokens . <nl> def __init__ ( self , <nl> Returns : <nl> A ` Tensor ` of type mutable ` string ` . The handle to the stats accumulator . <nl> " " " <nl> + self . _stamp_token = stamp_token <nl> + self . _gradient_shape = gradient_shape <nl> + self . _hessian_shape = hessian_shape <nl> + self . _container = container <nl> + <nl> + if ( gradient_shape = = tensor_shape . scalar ( ) and <nl> + hessian_shape = = tensor_shape . scalar ( ) ) : <nl> + self . _is_scalar = True <nl> + else : <nl> + self . _is_scalar = False <nl> + <nl> if name is not None : <nl> name = _PATTERN . sub ( " " , name ) <nl> with ops . name_scope ( name , " StatsAccumulator " ) as name : <nl> - # Both values are scalars . <nl> - if ( gradient_shape = = tensor_shape . scalar ( ) and <nl> - hessian_shape = = tensor_shape . scalar ( ) ) : <nl> - self . _is_scalar = True <nl> - self . _resource_handle = ( gen_stats_accumulator_ops . <nl> - stats_accumulator_scalar_resource_handle_op ( <nl> - container , name , name = name ) ) <nl> - <nl> - create_op = gen_stats_accumulator_ops . create_stats_accumulator_scalar ( <nl> - self . _resource_handle , stamp_token ) <nl> - is_initialized_op = ( <nl> - gen_stats_accumulator_ops . stats_accumulator_scalar_is_initialized ( <nl> - self . _resource_handle ) ) <nl> - else : <nl> - self . _is_scalar = False <nl> - self . _resource_handle = ( gen_stats_accumulator_ops . <nl> - stats_accumulator_tensor_resource_handle_op ( <nl> - container , name , name = name ) ) <nl> - create_op = gen_stats_accumulator_ops . create_stats_accumulator_tensor ( <nl> - self . _resource_handle , stamp_token , gradient_shape . as_list ( ) , <nl> - hessian_shape . as_list ( ) ) <nl> - is_initialized_op = ( <nl> - gen_stats_accumulator_ops . 
stats_accumulator_tensor_is_initialized ( <nl> - self . _resource_handle ) ) <nl> + self . _name = name <nl> + self . _resource_handle = self . create_resource ( ) <nl> + self . _init_op = self . initialize ( ) <nl> + is_initialized_op = self . is_initialized ( ) <nl> + resources . register_resource ( self . resource_handle , self . initializer , <nl> + is_initialized_op ) <nl> + self . _saveable = StatsAccumulatorSaveable ( <nl> + self . resource_handle , self . initializer , self . _is_scalar , name ) <nl> + ops . add_to_collection ( ops . GraphKeys . SAVEABLE_OBJECTS , self . _saveable ) <nl> <nl> - self . _create_op = create_op <nl> - slice_spec = " " <nl> - saver_name = self . _resource_handle . name <nl> - ( stamp_token , num_updates , partition_ids , feature_ids , gradients , <nl> - hessians ) = self . serialize ( ) <nl> - specs = [ <nl> - saver . BaseSaverBuilder . SaveSpec ( stamp_token , slice_spec , <nl> - saver_name + " _stamp " ) , <nl> - saver . BaseSaverBuilder . SaveSpec ( num_updates , slice_spec , <nl> - saver_name + " _num_updates " ) , <nl> - saver . BaseSaverBuilder . SaveSpec ( partition_ids , slice_spec , <nl> - saver_name + " _partition_ids " ) , <nl> - saver . BaseSaverBuilder . SaveSpec ( feature_ids , slice_spec , <nl> - saver_name + " _feature_ids " ) , <nl> - saver . BaseSaverBuilder . SaveSpec ( gradients , slice_spec , <nl> - saver_name + " _gradients " ) , <nl> - saver . BaseSaverBuilder . SaveSpec ( hessians , slice_spec , <nl> - saver_name + " hessians " ) , <nl> - ] <nl> + def create_resource ( self ) : <nl> + if self . _is_scalar : <nl> + return ( <nl> + gen_stats_accumulator_ops . stats_accumulator_scalar_resource_handle_op ( <nl> + self . _container , self . _name , name = self . _name ) ) <nl> + else : <nl> + return ( <nl> + gen_stats_accumulator_ops . stats_accumulator_tensor_resource_handle_op ( <nl> + self . _container , self . _name , name = self . _name ) ) <nl> <nl> - super ( StatsAccumulator , self ) . __init__ ( self . _resource_handle , specs , name ) <nl> - resources . register_resource ( self . _resource_handle , create_op , <nl> - is_initialized_op ) <nl> - ops . add_to_collection ( ops . GraphKeys . SAVEABLE_OBJECTS , self ) <nl> + def initialize ( self ) : <nl> + if self . _is_scalar : <nl> + return gen_stats_accumulator_ops . create_stats_accumulator_scalar ( <nl> + self . resource_handle , self . _stamp_token ) <nl> + else : <nl> + return gen_stats_accumulator_ops . create_stats_accumulator_tensor ( <nl> + self . resource_handle , self . _stamp_token , <nl> + self . _gradient_shape . as_list ( ) , self . _hessian_shape . as_list ( ) ) <nl> + <nl> + @ property <nl> + def initializer ( self ) : <nl> + if self . _init_op is None : <nl> + self . _init_op = self . initialize ( ) <nl> + return self . _init_op <nl> + <nl> + def is_initialized ( self ) : <nl> + if self . _is_scalar : <nl> + return gen_stats_accumulator_ops . stats_accumulator_scalar_is_initialized ( <nl> + self . resource_handle ) <nl> + else : <nl> + return gen_stats_accumulator_ops . stats_accumulator_tensor_is_initialized ( <nl> + self . resource_handle ) <nl> + <nl> + @ property <nl> + def saveable ( self ) : <nl> + return self . _saveable <nl> + <nl> + def _gather_saveables_for_checkpoint ( self ) : <nl> + return { " stats_accumulator " , self . saveable } <nl> <nl> def add ( self , stamp_token , partition_ids , feature_ids , gradients , hessians ) : <nl> " " " Updates the stats accumulator . 
" " " <nl> def add ( self , stamp_token , partition_ids , feature_ids , gradients , hessians ) : <nl> partition_ids , feature_ids , gradients , hessians ) ) <nl> if self . _is_scalar : <nl> return gen_stats_accumulator_ops . stats_accumulator_scalar_add ( <nl> - [ self . _resource_handle ] , stamp_token , [ partition_ids ] , [ feature_ids ] , <nl> + [ self . resource_handle ] , stamp_token , [ partition_ids ] , [ feature_ids ] , <nl> [ gradients ] , [ hessians ] ) <nl> else : <nl> return gen_stats_accumulator_ops . stats_accumulator_tensor_add ( <nl> - [ self . _resource_handle ] , stamp_token , [ partition_ids ] , [ feature_ids ] , <nl> + [ self . resource_handle ] , stamp_token , [ partition_ids ] , [ feature_ids ] , <nl> [ gradients ] , [ hessians ] ) <nl> <nl> def schedule_add ( self , partition_ids , feature_ids , gradients , hessians ) : <nl> def schedule_add ( self , partition_ids , feature_ids , gradients , hessians ) : <nl> if self . _is_scalar : <nl> return batch_ops_utils . ScheduledStampedResourceOp ( <nl> op = gen_stats_accumulator_ops . stats_accumulator_scalar_add , <nl> - resource_handle = self . _resource_handle , <nl> + resource_handle = self . resource_handle , <nl> partition_ids = partition_ids , <nl> feature_ids = feature_ids , <nl> gradients = gradients , <nl> def schedule_add ( self , partition_ids , feature_ids , gradients , hessians ) : <nl> else : <nl> return batch_ops_utils . ScheduledStampedResourceOp ( <nl> op = gen_stats_accumulator_ops . stats_accumulator_tensor_add , <nl> - resource_handle = self . _resource_handle , <nl> + resource_handle = self . resource_handle , <nl> partition_ids = partition_ids , <nl> feature_ids = feature_ids , <nl> gradients = gradients , <nl> def _make_summary ( self , partition_ids , feature_ids , gradients , hessians ) : <nl> return gen_stats_accumulator_ops . stats_accumulator_tensor_make_summary ( <nl> partition_ids , feature_ids , gradients , hessians ) <nl> <nl> - def deserialize ( self , stamp_token , num_updates , partition_ids , feature_ids , <nl> - gradients , hessians ) : <nl> - " " " Resets the stats accumulator with the serialized state . " " " <nl> - if self . _is_scalar : <nl> - return gen_stats_accumulator_ops . stats_accumulator_scalar_deserialize ( <nl> - self . _resource_handle , stamp_token , num_updates , partition_ids , <nl> - feature_ids , gradients , hessians ) <nl> - else : <nl> - return gen_stats_accumulator_ops . stats_accumulator_tensor_deserialize ( <nl> - self . _resource_handle , stamp_token , num_updates , partition_ids , <nl> - feature_ids , gradients , hessians ) <nl> - <nl> def flush ( self , stamp_token , next_stamp_token ) : <nl> " " " Flushes the stats accumulator . " " " <nl> if self . _is_scalar : <nl> return gen_stats_accumulator_ops . stats_accumulator_scalar_flush ( <nl> - self . _resource_handle , stamp_token , next_stamp_token ) <nl> + self . resource_handle , stamp_token , next_stamp_token ) <nl> else : <nl> return gen_stats_accumulator_ops . stats_accumulator_tensor_flush ( <nl> - self . _resource_handle , stamp_token , next_stamp_token ) <nl> - <nl> - def serialize ( self ) : <nl> - " " " Serializes the stats accumulator state . " " " <nl> - if self . _is_scalar : <nl> - return gen_stats_accumulator_ops . stats_accumulator_scalar_serialize ( <nl> - self . _resource_handle ) <nl> - else : <nl> - return gen_stats_accumulator_ops . stats_accumulator_tensor_serialize ( <nl> - self . 
_resource_handle ) <nl> - <nl> - def restore ( self , restored_tensors , unused_restored_shapes ) : <nl> - " " " Restores the associated tree ensemble from ' restored_tensors ' . <nl> - <nl> - Args : <nl> - restored_tensors : the tensors that were loaded from a checkpoint . <nl> - unused_restored_shapes : the shapes this object should conform to after <nl> - restore . Not meaningful for trees . <nl> - <nl> - Returns : <nl> - The operation that restores the state of the tree ensemble variable . <nl> - " " " <nl> - with ops . control_dependencies ( [ self . _create_op ] ) : <nl> - return self . deserialize ( <nl> - stamp_token = restored_tensors [ 0 ] , <nl> - num_updates = restored_tensors [ 1 ] , <nl> - partition_ids = restored_tensors [ 2 ] , <nl> - feature_ids = restored_tensors [ 3 ] , <nl> - gradients = restored_tensors [ 4 ] , <nl> - hessians = restored_tensors [ 5 ] ) <nl> - <nl> - def resource ( self ) : <nl> - return self . _resource_handle <nl> + self . resource_handle , stamp_token , next_stamp_token ) <nl> mmm a / tensorflow / contrib / boosted_trees / python / training / functions / gbdt_batch . py <nl> ppp b / tensorflow / contrib / boosted_trees / python / training / functions / gbdt_batch . py <nl> def increment_step_counter_and_maybe_update_ensemble ( self , predictions_dict , <nl> <nl> # Get accumulated steps and examples for the current layer . <nl> _ , _ , _ , _ , acc_examples , acc_steps = ( <nl> - steps_accumulator . serialize ( ) ) <nl> + steps_accumulator . saveable . serialize ( ) ) <nl> acc_examples = math_ops . cast ( acc_examples [ 0 ] , dtypes . int64 ) <nl> acc_steps = math_ops . cast ( acc_steps [ 0 ] , dtypes . int64 ) <nl> ensemble_update_ops . append ( <nl> mmm a / tensorflow / python / ops / boosted_trees_ops . py <nl> ppp b / tensorflow / python / ops / boosted_trees_ops . py <nl> <nl> # pylint : enable = unused - import <nl> <nl> from tensorflow . python . training import saver <nl> + from tensorflow . python . training . checkpointable import tracking <nl> <nl> <nl> class PruningMode ( object ) : <nl> def restore ( self , restored_tensors , unused_restored_shapes ) : <nl> tree_ensemble_serialized = restored_tensors [ 1 ] ) <nl> <nl> <nl> - class TreeEnsemble ( object ) : <nl> + class TreeEnsemble ( tracking . TrackableResource ) : <nl> " " " Creates TreeEnsemble resource . " " " <nl> <nl> def __init__ ( self , name , stamp_token = 0 , is_local = False , serialized_proto = ' ' ) : <nl> + self . _stamp_token = stamp_token <nl> + self . _serialized_proto = serialized_proto <nl> + self . _is_local = is_local <nl> with ops . name_scope ( name , ' TreeEnsemble ' ) as name : <nl> - self . _resource_handle = ( <nl> - gen_boosted_trees_ops . boosted_trees_ensemble_resource_handle_op ( <nl> - container = ' ' , shared_name = name , name = name ) ) <nl> - create_op = gen_boosted_trees_ops . boosted_trees_create_ensemble ( <nl> - self . resource_handle , <nl> - stamp_token , <nl> - tree_ensemble_serialized = serialized_proto ) <nl> - is_initialized_op = ( <nl> - gen_boosted_trees_ops . is_boosted_trees_ensemble_initialized ( <nl> - self . _resource_handle ) ) <nl> + self . _name = name <nl> + self . _resource_handle = self . create_resource ( ) <nl> + self . _init_op = self . initialize ( ) <nl> + is_initialized_op = self . is_initialized ( ) <nl> # Adds the variable to the savable list . <nl> if not is_local : <nl> - saveable = _TreeEnsembleSavable ( self . resource_handle , create_op , <nl> - self . resource_handle . name ) <nl> - ops . add_to_collection ( ops . 
GraphKeys . SAVEABLE_OBJECTS , saveable ) <nl> + self . _saveable = _TreeEnsembleSavable ( <nl> + self . resource_handle , self . initializer , self . resource_handle . name ) <nl> + ops . add_to_collection ( ops . GraphKeys . SAVEABLE_OBJECTS , self . _saveable ) <nl> resources . register_resource ( <nl> self . resource_handle , <nl> - create_op , <nl> + self . initializer , <nl> is_initialized_op , <nl> is_shared = not is_local ) <nl> <nl> + def create_resource ( self ) : <nl> + return gen_boosted_trees_ops . boosted_trees_ensemble_resource_handle_op ( <nl> + container = ' ' , shared_name = self . _name , name = self . _name ) <nl> + <nl> + def initialize ( self ) : <nl> + return gen_boosted_trees_ops . boosted_trees_create_ensemble ( <nl> + self . resource_handle , <nl> + self . _stamp_token , <nl> + tree_ensemble_serialized = self . _serialized_proto ) <nl> + <nl> @ property <nl> - def resource_handle ( self ) : <nl> - return self . _resource_handle <nl> + def initializer ( self ) : <nl> + if self . _init_op is None : <nl> + self . _init_op = self . initialize ( ) <nl> + return self . _init_op <nl> + <nl> + def is_initialized ( self ) : <nl> + return gen_boosted_trees_ops . is_boosted_trees_ensemble_initialized ( <nl> + self . resource_handle ) <nl> + <nl> + def _gather_saveables_for_checkpoint ( self ) : <nl> + if not self . _is_local : <nl> + return { ' tree_ensemble ' : self . _saveable } <nl> <nl> def get_stamp_token ( self ) : <nl> " " " Returns the current stamp token of the resource . " " " <nl>
Making all the boosted tree resources subclass TrackableResource . This is needed for tracking resources in the absence of collections and helps with saving / loading in TF 2 . 0 land .
tensorflow/tensorflow
6c776aa01e48313c1c3322c2f40de24355489172
2018-11-06T00:47:17Z
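The tensorflow commit above converts each boosted-trees resource into a checkpointable object. Below is a minimal, hedged sketch of the recurring pattern it applies (create_resource / initialize / cached initializer / is_initialized / _gather_saveables_for_checkpoint); gen_my_ops and MyResource are hypothetical placeholders for the generated op modules (gen_quantile_ops, gen_stats_accumulator_ops, gen_model_ops) and the concrete classes, and the import path is the one used in the diff:

    # gen_my_ops is a hypothetical placeholder for the generated kernel
    # bindings (gen_quantile_ops, gen_stats_accumulator_ops, gen_model_ops).
    from tensorflow.python.training.checkpointable import tracking


    class MyResource(tracking.TrackableResource):
      """Minimal resource following the pattern this commit applies."""

      def __init__(self, stamp_token, name):
        self._stamp_token = stamp_token
        self._name = name
        self._init_op = None
        # A saver.BaseSaverBuilder.SaveableObject in the real classes.
        self._saveable = None
        super(MyResource, self).__init__()
        self._resource_handle = self.create_resource()
        self._init_op = self.initialize()

      def create_resource(self):
        # Returns the handle tensor for the resource.
        return gen_my_ops.my_resource_handle_op(
            shared_name=self._name, name=self._name)

      def initialize(self):
        # Emits the op that creates/stamps the resource behind the handle.
        return gen_my_ops.create_my_resource(self.resource_handle,
                                             self._stamp_token)

      @property
      def initializer(self):
        # Cached so the graph gets exactly one create op per resource.
        if self._init_op is None:
          self._init_op = self.initialize()
        return self._init_op

      def is_initialized(self):
        return gen_my_ops.my_resource_is_initialized(self.resource_handle)

      def _gather_saveables_for_checkpoint(self):
        # Object-based checkpointing hook; expected to return a dict of
        # name -> saveable (note: the accumulator versions in the diff
        # above write `{"name", self.saveable}`, a set literal).
        return {"my_resource": self._saveable}

With this shape, ops.control_dependencies([accumulator.initializer]) replaces the old accumulator._create_op, which is exactly the substitution the updated tests make.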
mmm a / . bazelrc <nl> ppp b / . bazelrc <nl> build : clang - - action_env = BAZEL_COMPILER = clang <nl> build : clang - - linkopt = - fuse - ld = lld <nl> <nl> # Basic ASAN / UBSAN that works for gcc <nl> + build : asan - - action_env = ENVOY_ASAN = 1 <nl> build : asan - - config = sanitizer <nl> # ASAN install its signal handler , disable ours so the stacktrace will be printed by ASAN <nl> build : asan - - define signal_trace = disabled <nl> build : macos - asan - - copt - DGRPC_BAZEL_BUILD <nl> build : macos - asan - - dynamic_mode = off <nl> <nl> # Clang TSAN <nl> + build : clang - tsan - - action_env = ENVOY_TSAN = 1 <nl> build : clang - tsan - - config = sanitizer <nl> build : clang - tsan - - define ENVOY_CONFIG_TSAN = 1 <nl> build : clang - tsan - - copt - fsanitize = thread <nl> build : clang - tsan - - copt - DEVENT__DISABLE_DEBUG_MODE <nl> <nl> # Clang MSAN - broken today since we need to rebuild lib [ std ] c + + and external deps with MSAN <nl> # support ( see https : / / github . com / envoyproxy / envoy / issues / 443 ) . <nl> + build : clang - msan - - action_env = ENVOY_MSAN = 1 <nl> build : clang - msan - - config = sanitizer <nl> build : clang - msan - - define ENVOY_CONFIG_MSAN = 1 <nl> build : clang - msan - - copt - fsanitize = memory <nl> build : clang - msan - - copt - fsanitize - memory - track - origins = 2 <nl> # and update stats_integration_test with appropriate m_per_cluster value <nl> build : libc + + - - config = clang <nl> build : libc + + - - action_env = CXXFLAGS = - stdlib = libc + + <nl> + build : libc + + - - action_env = LDFLAGS = - stdlib = libc + + <nl> build : libc + + - - action_env = BAZEL_CXXOPTS = - stdlib = libc + + <nl> build : libc + + - - action_env = BAZEL_LINKLIBS = - l % : libc + + . a : - l % : libc + + abi . a : - lm <nl> build : libc + + - - define force_libcpp = enabled <nl> build : rbe - toolchain - clang - libc + + - - config = rbe - toolchain <nl> build : rbe - toolchain - clang - libc + + - - crosstool_top = @ rbe_ubuntu_clang_libcxx / / cc : toolchain <nl> build : rbe - toolchain - clang - libc + + - - extra_toolchains = @ rbe_ubuntu_clang_libcxx / / config : cc - toolchain <nl> build : rbe - toolchain - clang - libc + + - - action_env = CC = clang - - action_env = CXX = clang + + - - action_env = PATH = / usr / sbin : / usr / bin : / sbin : / bin : / opt / llvm / bin <nl> + build : rbe - toolchain - clang - libc + + - - action_env = CXXFLAGS = - stdlib = libc + + <nl> + build : rbe - toolchain - clang - libc + + - - action_env = LDFLAGS = - stdlib = libc + + <nl> build : rbe - toolchain - clang - libc + + - - define force_libcpp = enabled <nl> <nl> build : rbe - toolchain - gcc - - config = rbe - toolchain <nl> new file mode 100644 <nl> index 00000000000 . . 4794b9e8f6f <nl> mmm / dev / null <nl> ppp b / bazel / external / wee8 . BUILD <nl> <nl> + licenses ( [ " notice " ] ) # Apache 2 <nl> + <nl> + load ( " : genrule_cmd . bzl " , " genrule_cmd " ) <nl> + <nl> + cc_library ( <nl> + name = " wee8 " , <nl> + srcs = [ <nl> + " libwee8 . a " , <nl> + ] , <nl> + hdrs = [ <nl> + " wee8 / third_party / wasm - api / wasm . hh " , <nl> + ] , <nl> + includes = [ " wee8 / third_party " ] , <nl> + visibility = [ " / / visibility : public " ] , <nl> + ) <nl> + <nl> + genrule ( <nl> + name = " build " , <nl> + srcs = glob ( [ " wee8 / * * " ] ) , <nl> + outs = [ <nl> + " libwee8 . a " , <nl> + ] , <nl> + cmd = genrule_cmd ( " @ envoy / / bazel / external : wee8 . genrule_cmd " ) , <nl> + ) <nl> new file mode 100644 <nl> index 00000000000 . . 
5a37d85b04d <nl> mmm / dev / null <nl> ppp b / bazel / external / wee8 . genrule_cmd <nl> <nl> + # ! / bin / bash <nl> + <nl> + set - e <nl> + <nl> + # This works only on Linux - x86_64 and macOS - x86_64 . <nl> + if [ [ ( ` uname ` ! = " Linux " & & ` uname ` ! = " Darwin " ) | | ` uname - m ` ! = " x86_64 " ] ] ; then <nl> + echo " ERROR : wee8 is currently supported only on Linux - x86_64 and macOS - x86_64 . " <nl> + exit 1 <nl> + fi <nl> + <nl> + # Bazel magic . <nl> + ROOT = $ $ ( dirname $ ( rootpath wee8 / BUILD . gn ) ) / . . <nl> + pushd $ $ ROOT / wee8 <nl> + <nl> + # Clean after previous build . <nl> + rm - rf out / wee8 <nl> + <nl> + # Export compiler configuration . <nl> + if [ [ ( ` uname ` = = " Darwin " & & $ $ { CXX - } = = " " ) | | $ $ { CXX - } = = * " clang " * ] ] ; then <nl> + export IS_CLANG = true <nl> + export CC = $ $ { CC : - clang } <nl> + export CXX = $ $ { CXX : - clang + + } <nl> + else <nl> + export IS_CLANG = false <nl> + export CC = $ $ { CC : - gcc } <nl> + export CXX = $ $ { CXX : - g + + } <nl> + fi <nl> + <nl> + export AR = $ $ { AR : - ar } <nl> + export NM = $ $ { NM : - nm } <nl> + <nl> + # Hook sanitizers . <nl> + if [ [ $ $ { ENVOY_ASAN - } = = " 1 " ] ] ; then <nl> + WEE8_BUILD_ARGS + = " is_asan = true " <nl> + WEE8_BUILD_ARGS + = " is_lsan = true " <nl> + fi <nl> + if [ [ $ $ { ENVOY_MSAN - } = = " 1 " ] ] ; then <nl> + WEE8_BUILD_ARGS + = " is_msan = true " <nl> + fi <nl> + if [ [ $ $ { ENVOY_TSAN - } = = " 1 " ] ] ; then <nl> + WEE8_BUILD_ARGS + = " is_tsan = true " <nl> + fi <nl> + <nl> + # Release build . <nl> + WEE8_BUILD_ARGS + = " is_debug = false " <nl> + # Clang or not Clang , that is the question . <nl> + WEE8_BUILD_ARGS + = " is_clang = $ $ IS_CLANG " <nl> + # Hack to disable bleeding - edge compiler flags . <nl> + WEE8_BUILD_ARGS + = " use_xcode_clang = true " <nl> + # Use local toolchain . <nl> + WEE8_BUILD_ARGS + = " custom_toolchain = \ " / / build / toolchain / linux / unbundle : default \ " " <nl> + # Use local stdlibc + + / libc + + . <nl> + WEE8_BUILD_ARGS + = " use_custom_libcxx = false " <nl> + # Use local sysroot . <nl> + WEE8_BUILD_ARGS + = " use_sysroot = false " <nl> + # Disable unused GLib2 dependency . <nl> + WEE8_BUILD_ARGS + = " use_glib = false " <nl> + # Expose debug symbols . <nl> + WEE8_BUILD_ARGS + = " v8_expose_symbols = true " <nl> + # Build monolithic library . <nl> + WEE8_BUILD_ARGS + = " is_component_build = false " <nl> + WEE8_BUILD_ARGS + = " v8_enable_i18n_support = false " <nl> + WEE8_BUILD_ARGS + = " v8_enable_gdbjit = false " <nl> + WEE8_BUILD_ARGS + = " v8_use_external_startup_data = false " <nl> + # Disable read - only heap , since it ' s leaky and HEAPCHECK complains about it . <nl> + # TODO ( PiotrSikora ) : remove when fixed upstream . <nl> + WEE8_BUILD_ARGS + = " v8_enable_shared_ro_heap = false " <nl> + <nl> + # Build wee8 . <nl> + third_party / depot_tools / gn gen out / wee8 - - args = " $ $ WEE8_BUILD_ARGS " <nl> + third_party / depot_tools / ninja - C out / wee8 wee8 <nl> + <nl> + # Move compiled library to the expected destinations . <nl> + popd <nl> + mv $ $ ROOT / wee8 / out / wee8 / obj / libwee8 . a $ ( execpath libwee8 . a ) <nl> new file mode 100644 <nl> index 00000000000 . . 97f0e14b3b9 <nl> mmm / dev / null <nl> ppp b / bazel / external / wee8 . patch <nl> <nl> + # 1 . Fix linking with unbundled toolchain on macOS . <nl> + # 2 . Increase VSZ limit to 4TiB ( allows us to start up to 370 VMs ) . <nl> + mmm wee8 / build / toolchain / gcc_toolchain . 
gni <nl> ppp + wee8 / build / toolchain / gcc_toolchain . gni <nl> + template ( " gcc_toolchain " ) { <nl> + # AIX does not support either - D ( deterministic output ) or response <nl> + # files . <nl> + command = " $ ar - X64 { { arflags } } - r - c - s { { output } } { { inputs } } " <nl> + + } else if ( current_os = = " mac " ) { <nl> + + command = " \ " $ ar \ " { { arflags } } - r - c - s { { output } } { { inputs } } " <nl> + } else { <nl> + rspfile = " { { output } } . rsp " <nl> + rspfile_content = " { { inputs } } " <nl> + template ( " gcc_toolchain " ) { <nl> + <nl> + start_group_flag = " " <nl> + end_group_flag = " " <nl> + - if ( current_os ! = " aix " ) { <nl> + + if ( current_os ! = " aix " & & current_os ! = " mac " ) { <nl> + # the " - - start - group . . - - end - group " feature isn ' t available on the aix ld . <nl> + start_group_flag = " - Wl , - - start - group " <nl> + end_group_flag = " - Wl , - - end - group " <nl> + mmm wee8 / src / wasm / wasm - memory . cc <nl> ppp + wee8 / src / wasm / wasm - memory . cc <nl> + void * TryAllocateBackingStore ( WasmMemoryTracker * memory_tracker , Heap * heap , <nl> + / / address space limits needs to be smaller . <nl> + constexpr size_t kAddressSpaceLimit = 0x8000000000L ; / / 512 GiB <nl> + # elif V8_TARGET_ARCH_64_BIT <nl> + - constexpr size_t kAddressSpaceLimit = 0x10100000000L ; / / 1 TiB + 4 GiB <nl> + + constexpr size_t kAddressSpaceLimit = 0x40100000000L ; / / 4 TiB + 4 GiB <nl> + # else <nl> + constexpr size_t kAddressSpaceLimit = 0xC0000000 ; / / 3 GiB <nl> + # endif <nl> mmm a / bazel / repositories . bzl <nl> ppp b / bazel / repositories . bzl <nl> def envoy_dependencies ( skip_targets = [ ] ) : <nl> _io_opencensus_cpp ( ) <nl> _com_github_curl ( ) <nl> _com_github_envoyproxy_sqlparser ( ) <nl> + _com_googlesource_chromium_v8 ( ) <nl> _com_googlesource_quiche ( ) <nl> _com_lightstep_tracer_cpp ( ) <nl> _io_opentracing_cpp ( ) <nl> cc_library ( name = " curl " , visibility = [ " / / visibility : public " ] , deps = [ " @ envoy / <nl> actual = " @ envoy / / bazel / foreign_cc : curl " , <nl> ) <nl> <nl> + def _com_googlesource_chromium_v8 ( ) : <nl> + location = REPOSITORY_LOCATIONS [ " com_googlesource_chromium_v8 " ] <nl> + genrule_repository ( <nl> + name = " com_googlesource_chromium_v8 " , <nl> + genrule_cmd_file = " @ envoy / / bazel / external : wee8 . genrule_cmd " , <nl> + build_file = " @ envoy / / bazel / external : wee8 . BUILD " , <nl> + patches = [ " @ envoy / / bazel / external : wee8 . patch " ] , <nl> + * * location <nl> + ) <nl> + native . bind ( <nl> + name = " wee8 " , <nl> + actual = " @ com_googlesource_chromium_v8 / / : wee8 " , <nl> + ) <nl> + <nl> def _com_googlesource_quiche ( ) : <nl> location = REPOSITORY_LOCATIONS [ " com_googlesource_quiche " ] <nl> genrule_repository ( <nl> mmm a / bazel / repository_locations . bzl <nl> ppp b / bazel / repository_locations . bzl <nl> REPOSITORY_LOCATIONS = dict ( <nl> strip_prefix = " curl - 7 . 66 . 0 " , <nl> urls = [ " https : / / github . com / curl / curl / releases / download / curl - 7_66_0 / curl - 7 . 66 . 0 . tar . gz " ] , <nl> ) , <nl> + com_googlesource_chromium_v8 = dict ( <nl> + # This archive was created using https : / / storage . googleapis . com / envoyproxy - wee8 / wee8 - archive . sh <nl> + # and contains complete checkout of V8 with all dependencies necessary to build wee8 . <nl> + sha256 = " 7c897863d31569b4a4e16277d94415f1c42f3e130c1ff3573b048b76b15b635f " , <nl> + urls = [ " https : / / storage . googleapis . 
com / envoyproxy - wee8 / wee8 - 7 . 9 . 317 . 14 . tar . gz " ] , <nl> + ) , <nl> com_googlesource_quiche = dict ( <nl> # Static snapshot of https : / / quiche . googlesource . com / quiche / + archive / 4abb566fbbc63df8fe7c1ac30b21632b9eb18d0c . tar . gz <nl> sha256 = " c60bca3cf7f58b91394a89da96080657ff0fbe4d5675be9b21e90da8f68bc06f " , <nl> mmm a / ci / README . md <nl> ppp b / ci / README . md <nl> Dependencies are installed by the ` ci / mac_ci_setup . sh ` script , via [ Homebrew ] ( ht <nl> which is pre - installed on the CircleCI macOS image . The dependencies are cached are re - installed <nl> on every build . The ` ci / mac_ci_steps . sh ` script executes the specific commands that <nl> build and test Envoy . If Envoy cannot be built ( ` error : / Library / Developer / CommandLineTools / usr / bin / libtool : no output file specified ( specify with - o output ) ` ) , <nl> - ensure that XCode is installed . <nl> + ensure that Xcode is installed . <nl> <nl> # Coverity Scan Build Flow <nl> <nl> mmm a / source / extensions / common / wasm / BUILD <nl> ppp b / source / extensions / common / wasm / BUILD <nl> envoy_cc_library ( <nl> " : wasm_vm_interface " , <nl> " / / source / common / common : assert_lib " , <nl> " / / source / extensions / common / wasm / null : null_lib " , <nl> + " / / source / extensions / common / wasm / v8 : v8_lib " , <nl> ] , <nl> ) <nl> new file mode 100644 <nl> index 00000000000 . . fb5a4730264 <nl> mmm / dev / null <nl> ppp b / source / extensions / common / wasm / v8 / BUILD <nl> <nl> + licenses ( [ " notice " ] ) # Apache 2 <nl> + <nl> + load ( <nl> + " / / bazel : envoy_build_system . bzl " , <nl> + " envoy_cc_library " , <nl> + " envoy_package " , <nl> + ) <nl> + <nl> + envoy_package ( ) <nl> + <nl> + envoy_cc_library ( <nl> + name = " v8_lib " , <nl> + srcs = [ " v8 . cc " ] , <nl> + hdrs = [ " v8 . h " ] , <nl> + external_deps = [ <nl> + " wee8 " , <nl> + ] , <nl> + deps = [ <nl> + " / / source / common / common : assert_lib " , <nl> + " / / source / extensions / common / wasm : wasm_vm_interface " , <nl> + " / / source / extensions / common / wasm : well_known_names " , <nl> + ] , <nl> + ) <nl> new file mode 100644 <nl> index 00000000000 . . 44e4a46f198 <nl> mmm / dev / null <nl> ppp b / source / extensions / common / wasm / v8 / v8 . cc <nl> <nl> + # include " extensions / common / wasm / v8 / v8 . h " <nl> + <nl> + # include < memory > <nl> + # include < utility > <nl> + # include < vector > <nl> + <nl> + # include " common / common / assert . h " <nl> + <nl> + # include " extensions / common / wasm / well_known_names . h " <nl> + <nl> + # include " absl / container / flat_hash_map . h " <nl> + # include " absl / strings / match . h " <nl> + # include " wasm - api / wasm . hh " <nl> + <nl> + namespace Envoy { <nl> + namespace Extensions { <nl> + namespace Common { <nl> + namespace Wasm { <nl> + namespace V8 { <nl> + <nl> + wasm : : Engine * engine ( ) { <nl> + static const auto engine = wasm : : Engine : : make ( ) ; <nl> + return engine . 
get ( ) ; <nl> + } <nl> + <nl> + struct FuncData { <nl> + FuncData ( std : : string name ) : name_ ( std : : move ( name ) ) { } <nl> + <nl> + std : : string name_ ; <nl> + wasm : : own < wasm : : Func > callback_ ; <nl> + void * raw_func_ ; <nl> + } ; <nl> + <nl> + using FuncDataPtr = std : : unique_ptr < FuncData > ; <nl> + <nl> + class V8 : public WasmVm { <nl> + public : <nl> + V8 ( ) = default ; <nl> + <nl> + / / Extensions : : Common : : Wasm : : WasmVm <nl> + absl : : string_view runtime ( ) override { return WasmRuntimeNames : : get ( ) . V8 ; } <nl> + <nl> + bool load ( const std : : string & code , bool allow_precompiled ) override ; <nl> + absl : : string_view getCustomSection ( absl : : string_view name ) override ; <nl> + void link ( absl : : string_view debug_name ) override ; <nl> + <nl> + / / V8 is currently not cloneable . <nl> + bool cloneable ( ) override { return false ; } <nl> + WasmVmPtr clone ( ) override { return nullptr ; } <nl> + <nl> + uint64_t getMemorySize ( ) override ; <nl> + absl : : optional < absl : : string_view > getMemory ( uint64_t pointer , uint64_t size ) override ; <nl> + bool setMemory ( uint64_t pointer , uint64_t size , const void * data ) override ; <nl> + bool getWord ( uint64_t pointer , Word * word ) override ; <nl> + bool setWord ( uint64_t pointer , Word word ) override ; <nl> + <nl> + # define _REGISTER_HOST_FUNCTION ( T ) \ <nl> + void registerCallback ( absl : : string_view module_name , absl : : string_view function_name , T , \ <nl> + typename ConvertFunctionTypeWordToUint32 < T > : : type f ) override { \ <nl> + registerHostFunctionImpl ( module_name , function_name , f ) ; \ <nl> + } ; <nl> + FOR_ALL_WASM_VM_IMPORTS ( _REGISTER_HOST_FUNCTION ) <nl> + # undef _REGISTER_HOST_FUNCTION <nl> + <nl> + # define _GET_MODULE_FUNCTION ( T ) \ <nl> + void getFunction ( absl : : string_view function_name , T * f ) override { \ <nl> + getModuleFunctionImpl ( function_name , f ) ; \ <nl> + } ; <nl> + FOR_ALL_WASM_VM_EXPORTS ( _GET_MODULE_FUNCTION ) <nl> + # undef _GET_MODULE_FUNCTION <nl> + <nl> + private : <nl> + template < typename . . . Args > <nl> + void registerHostFunctionImpl ( absl : : string_view module_name , absl : : string_view function_name , <nl> + void ( * function ) ( void * , Args . . . ) ) ; <nl> + <nl> + template < typename R , typename . . . Args > <nl> + void registerHostFunctionImpl ( absl : : string_view module_name , absl : : string_view function_name , <nl> + R ( * function ) ( void * , Args . . . ) ) ; <nl> + <nl> + template < typename . . . Args > <nl> + void getModuleFunctionImpl ( absl : : string_view function_name , <nl> + std : : function < void ( Context * , Args . . . ) > * function ) ; <nl> + <nl> + template < typename R , typename . . . Args > <nl> + void getModuleFunctionImpl ( absl : : string_view function_name , <nl> + std : : function < R ( Context * , Args . . . ) > * function ) ; <nl> + <nl> + wasm : : vec < byte_t > source_ = wasm : : vec < byte_t > : : invalid ( ) ; <nl> + wasm : : own < wasm : : Store > store_ ; <nl> + wasm : : own < wasm : : Module > module_ ; <nl> + wasm : : own < wasm : : Instance > instance_ ; <nl> + wasm : : own < wasm : : Memory > memory_ ; <nl> + wasm : : own < wasm : : Table > table_ ; <nl> + <nl> + absl : : flat_hash_map < std : : string , FuncDataPtr > host_functions_ ; <nl> + absl : : flat_hash_map < std : : string , wasm : : own < wasm : : Func > > module_functions_ ; <nl> + } ; <nl> + <nl> + / / Helper functions . 
<nl> + <nl> + static std : : string printValue ( const wasm : : Val & value ) { <nl> + switch ( value . kind ( ) ) { <nl> + case wasm : : I32 : <nl> + return std : : to_string ( value . get < uint32_t > ( ) ) ; <nl> + case wasm : : I64 : <nl> + return std : : to_string ( value . get < uint64_t > ( ) ) ; <nl> + case wasm : : F32 : <nl> + return std : : to_string ( value . get < float > ( ) ) ; <nl> + case wasm : : F64 : <nl> + return std : : to_string ( value . get < double > ( ) ) ; <nl> + default : <nl> + return " unknown " ; <nl> + } <nl> + } <nl> + <nl> + static std : : string printValues ( const wasm : : Val values [ ] , size_t size ) { <nl> + if ( size = = 0 ) { <nl> + return " " ; <nl> + } <nl> + <nl> + std : : string s ; <nl> + for ( size_t i = 0 ; i < size ; i + + ) { <nl> + if ( i ) { <nl> + s . append ( " , " ) ; <nl> + } <nl> + s . append ( printValue ( values [ i ] ) ) ; <nl> + } <nl> + return s ; <nl> + } <nl> + <nl> + static const char * printValKind ( wasm : : ValKind kind ) { <nl> + switch ( kind ) { <nl> + case wasm : : I32 : <nl> + return " i32 " ; <nl> + case wasm : : I64 : <nl> + return " i64 " ; <nl> + case wasm : : F32 : <nl> + return " f32 " ; <nl> + case wasm : : F64 : <nl> + return " f64 " ; <nl> + case wasm : : ANYREF : <nl> + return " anyref " ; <nl> + case wasm : : FUNCREF : <nl> + return " funcref " ; <nl> + default : <nl> + return " unknown " ; <nl> + } <nl> + } <nl> + <nl> + static std : : string printValTypes ( const wasm : : ownvec < wasm : : ValType > & types ) { <nl> + if ( types . size ( ) = = 0 ) { <nl> + return " void " ; <nl> + } <nl> + <nl> + std : : string s ; <nl> + s . reserve ( types . size ( ) * 8 / * max size + " " * / - 1 ) ; <nl> + for ( size_t i = 0 ; i < types . size ( ) ; i + + ) { <nl> + if ( i ) { <nl> + s . append ( " " ) ; <nl> + } <nl> + s . append ( printValKind ( types [ i ] - > kind ( ) ) ) ; <nl> + } <nl> + return s ; <nl> + } <nl> + <nl> + static bool equalValTypes ( const wasm : : ownvec < wasm : : ValType > & left , <nl> + const wasm : : ownvec < wasm : : ValType > & right ) { <nl> + if ( left . size ( ) ! = right . size ( ) ) { <nl> + return false ; <nl> + } <nl> + for ( size_t i = 0 ; i < left . size ( ) ; i + + ) { <nl> + if ( left [ i ] - > kind ( ) ! = right [ i ] - > kind ( ) ) { <nl> + return false ; <nl> + } <nl> + } <nl> + return true ; <nl> + } <nl> + <nl> + static uint32_t parseVarint ( const byte_t * & pos , const byte_t * end ) { <nl> + uint32_t n = 0 ; <nl> + uint32_t shift = 0 ; <nl> + byte_t b ; <nl> + do { <nl> + if ( pos + 1 > end ) { <nl> + throw WasmVmException ( " Failed to parse corrupted WASM module " ) ; <nl> + } <nl> + b = * pos + + ; <nl> + n + = ( b & 0x7f ) < < shift ; <nl> + shift + = 7 ; <nl> + } while ( ( b & 0x80 ) ! = 0 ) ; <nl> + return n ; <nl> + } <nl> + <nl> + / / Template magic . <nl> + <nl> + template < typename T > struct ConvertWordType { <nl> + using type = T ; / / NOLINT ( readability - identifier - naming ) <nl> + } ; <nl> + template < > struct ConvertWordType < Word > { <nl> + using type = uint32_t ; / / NOLINT ( readability - identifier - naming ) <nl> + } ; <nl> + <nl> + template < typename T > wasm : : Val makeVal ( T t ) { return wasm : : Val : : make ( t ) ; } <nl> + template < > wasm : : Val makeVal ( Word t ) { return wasm : : Val : : make ( static_cast < uint32_t > ( t . 
u64_ ) ) ; } <nl> + <nl> + template < typename T > constexpr auto convertArgToValKind ( ) ; <nl> + template < > constexpr auto convertArgToValKind < Word > ( ) { return wasm : : I32 ; } ; <nl> + template < > constexpr auto convertArgToValKind < int32_t > ( ) { return wasm : : I32 ; } ; <nl> + template < > constexpr auto convertArgToValKind < uint32_t > ( ) { return wasm : : I32 ; } ; <nl> + template < > constexpr auto convertArgToValKind < int64_t > ( ) { return wasm : : I64 ; } ; <nl> + template < > constexpr auto convertArgToValKind < uint64_t > ( ) { return wasm : : I64 ; } ; <nl> + template < > constexpr auto convertArgToValKind < float > ( ) { return wasm : : F32 ; } ; <nl> + template < > constexpr auto convertArgToValKind < double > ( ) { return wasm : : F64 ; } ; <nl> + <nl> + template < typename T , std : : size_t . . . I > <nl> + constexpr auto convertArgsTupleToValTypesImpl ( absl : : index_sequence < I . . . > ) { <nl> + return wasm : : ownvec < wasm : : ValType > : : make ( <nl> + wasm : : ValType : : make ( convertArgToValKind < typename std : : tuple_element < I , T > : : type > ( ) ) . . . ) ; <nl> + } <nl> + <nl> + template < typename T > constexpr auto convertArgsTupleToValTypes ( ) { <nl> + return convertArgsTupleToValTypesImpl < T > ( absl : : make_index_sequence < std : : tuple_size < T > : : value > ( ) ) ; <nl> + } <nl> + <nl> + template < typename T , typename U , std : : size_t . . . I > <nl> + constexpr T convertValTypesToArgsTupleImpl ( const U & arr , absl : : index_sequence < I . . . > ) { <nl> + return std : : make_tuple ( <nl> + ( arr [ I ] <nl> + . template get < <nl> + typename ConvertWordType < typename std : : tuple_element < I , T > : : type > : : type > ( ) ) . . . ) ; <nl> + } <nl> + <nl> + template < typename T , typename U > constexpr T convertValTypesToArgsTuple ( const U & arr ) { <nl> + return convertValTypesToArgsTupleImpl < T > ( arr , <nl> + absl : : make_index_sequence < std : : tuple_size < T > : : value > ( ) ) ; <nl> + } <nl> + <nl> + / / V8 implementation . <nl> + <nl> + bool V8 : : load ( const std : : string & code , bool / * allow_precompiled * / ) { <nl> + ENVOY_LOG ( trace , " load ( ) " ) ; <nl> + store_ = wasm : : Store : : make ( engine ( ) ) ; <nl> + RELEASE_ASSERT ( store_ ! = nullptr , " " ) ; <nl> + <nl> + source_ = wasm : : vec < byte_t > : : make_uninitialized ( code . size ( ) ) ; <nl> + : : memcpy ( source_ . get ( ) , code . data ( ) , code . size ( ) ) ; <nl> + <nl> + module_ = wasm : : Module : : make ( store_ . get ( ) , source_ ) ; <nl> + return module_ ! = nullptr ; <nl> + } <nl> + <nl> + absl : : string_view V8 : : getCustomSection ( absl : : string_view name ) { <nl> + ENVOY_LOG ( trace , " getCustomSection ( \ " { } \ " ) " , name ) ; <nl> + ASSERT ( source_ . get ( ) ! = nullptr ) ; <nl> + <nl> + const byte_t * end = source_ . get ( ) + source_ . size ( ) ; <nl> + const byte_t * pos = source_ . 
get ( ) + 8 ; / / skip header <nl> + while ( pos < end ) { <nl> + if ( pos + 1 > end ) { <nl> + throw WasmVmException ( " Failed to parse corrupted WASM module " ) ; <nl> + } <nl> + auto type = * pos + + ; <nl> + auto rest = parseVarint ( pos , end ) ; <nl> + if ( pos + rest > end ) { <nl> + throw WasmVmException ( " Failed to parse corrupted WASM module " ) ; <nl> + } <nl> + if ( type = = 0 / * custom section * / ) { <nl> + auto start = pos ; <nl> + auto len = parseVarint ( pos , end ) ; <nl> + if ( pos + len > end ) { <nl> + throw WasmVmException ( " Failed to parse corrupted WASM module " ) ; <nl> + } <nl> + pos + = len ; <nl> + rest - = ( pos - start ) ; <nl> + if ( len = = name . size ( ) & & : : memcmp ( pos - len , name . data ( ) , len ) = = 0 ) { <nl> + ENVOY_LOG ( trace , " getCustomSection ( \ " { } \ " ) found , size : { } " , name , rest ) ; <nl> + return { pos , rest } ; <nl> + } <nl> + } <nl> + pos + = rest ; <nl> + } <nl> + return " " ; <nl> + } <nl> + <nl> + void V8 : : link ( absl : : string_view debug_name ) { <nl> + ENVOY_LOG ( trace , " link ( \ " { } \ " ) " , debug_name ) ; <nl> + ASSERT ( module_ ! = nullptr ) ; <nl> + <nl> + const auto import_types = module_ . get ( ) - > imports ( ) ; <nl> + std : : vector < const wasm : : Extern * > imports ; <nl> + <nl> + for ( size_t i = 0 ; i < import_types . size ( ) ; i + + ) { <nl> + absl : : string_view module ( import_types [ i ] - > module ( ) . get ( ) , import_types [ i ] - > module ( ) . size ( ) ) ; <nl> + absl : : string_view name ( import_types [ i ] - > name ( ) . get ( ) , import_types [ i ] - > name ( ) . size ( ) ) ; <nl> + auto import_type = import_types [ i ] - > type ( ) ; <nl> + <nl> + switch ( import_type - > kind ( ) ) { <nl> + <nl> + case wasm : : EXTERN_FUNC : { <nl> + ENVOY_LOG ( trace , " link ( ) , export host func : { } . { } ( { } - > { } ) " , module , name , <nl> + printValTypes ( import_type - > func ( ) - > params ( ) ) , <nl> + printValTypes ( import_type - > func ( ) - > results ( ) ) ) ; <nl> + <nl> + auto it = host_functions_ . find ( absl : : StrCat ( module , " . " , name ) ) ; <nl> + if ( it = = host_functions_ . end ( ) ) { <nl> + throw WasmVmException ( <nl> + fmt : : format ( " Failed to load WASM module due to a missing import : { } . { } " , module , name ) ) ; <nl> + } <nl> + auto func = it - > second . get ( ) - > callback_ . get ( ) ; <nl> + if ( ! equalValTypes ( import_type - > func ( ) - > params ( ) , func - > type ( ) - > params ( ) ) | | <nl> + ! equalValTypes ( import_type - > func ( ) - > results ( ) , func - > type ( ) - > results ( ) ) ) { <nl> + throw WasmVmException ( fmt : : format ( <nl> + " Failed to load WASM module due to an import type mismatch : { } . { } , " <nl> + " want : { } - > { } , but host exports : { } - > { } " , <nl> + module , name , printValTypes ( import_type - > func ( ) - > params ( ) ) , <nl> + printValTypes ( import_type - > func ( ) - > results ( ) ) , printValTypes ( func - > type ( ) - > params ( ) ) , <nl> + printValTypes ( func - > type ( ) - > results ( ) ) ) ) ; <nl> + } <nl> + imports . push_back ( func ) ; <nl> + } break ; <nl> + <nl> + case wasm : : EXTERN_GLOBAL : { <nl> + / / TODO ( PiotrSikora ) : add support when / if needed . <nl> + ENVOY_LOG ( trace , " link ( ) , export host global : { } . { } ( { } ) " , module , name , <nl> + printValKind ( import_type - > global ( ) - > content ( ) - > kind ( ) ) ) ; <nl> + <nl> + throw WasmVmException ( <nl> + fmt : : format ( " Failed to load WASM module due to a missing import : { } . 
{ } " , module , name ) ) ; <nl> + } break ; <nl> + <nl> + case wasm : : EXTERN_MEMORY : { <nl> + ENVOY_LOG ( trace , " link ( ) , export host memory : { } . { } ( min : { } max : { } ) " , module , name , <nl> + import_type - > memory ( ) - > limits ( ) . min , import_type - > memory ( ) - > limits ( ) . max ) ; <nl> + <nl> + ASSERT ( memory_ = = nullptr ) ; <nl> + auto type = wasm : : MemoryType : : make ( import_type - > memory ( ) - > limits ( ) ) ; <nl> + memory_ = wasm : : Memory : : make ( store_ . get ( ) , type . get ( ) ) ; <nl> + imports . push_back ( memory_ . get ( ) ) ; <nl> + } break ; <nl> + <nl> + case wasm : : EXTERN_TABLE : { <nl> + ENVOY_LOG ( trace , " link ( ) , export host table : { } . { } ( min : { } max : { } ) " , module , name , <nl> + import_type - > table ( ) - > limits ( ) . min , import_type - > table ( ) - > limits ( ) . max ) ; <nl> + <nl> + ASSERT ( table_ = = nullptr ) ; <nl> + auto type = <nl> + wasm : : TableType : : make ( wasm : : ValType : : make ( import_type - > table ( ) - > element ( ) - > kind ( ) ) , <nl> + import_type - > table ( ) - > limits ( ) ) ; <nl> + table_ = wasm : : Table : : make ( store_ . get ( ) , type . get ( ) ) ; <nl> + imports . push_back ( table_ . get ( ) ) ; <nl> + } break ; <nl> + } <nl> + } <nl> + <nl> + ASSERT ( import_types . size ( ) = = imports . size ( ) ) ; <nl> + <nl> + instance_ = wasm : : Instance : : make ( store_ . get ( ) , module_ . get ( ) , imports . data ( ) ) ; <nl> + RELEASE_ASSERT ( instance_ ! = nullptr , " " ) ; <nl> + <nl> + const auto export_types = module_ . get ( ) - > exports ( ) ; <nl> + const auto exports = instance_ . get ( ) - > exports ( ) ; <nl> + ASSERT ( export_types . size ( ) = = exports . size ( ) ) ; <nl> + <nl> + for ( size_t i = 0 ; i < export_types . size ( ) ; i + + ) { <nl> + absl : : string_view name ( export_types [ i ] - > name ( ) . get ( ) , export_types [ i ] - > name ( ) . size ( ) ) ; <nl> + auto export_type = export_types [ i ] - > type ( ) ; <nl> + auto export_item = exports [ i ] . get ( ) ; <nl> + ASSERT ( export_type - > kind ( ) = = export_item - > kind ( ) ) ; <nl> + <nl> + switch ( export_type - > kind ( ) ) { <nl> + <nl> + case wasm : : EXTERN_FUNC : { <nl> + ENVOY_LOG ( trace , " link ( ) , import module func : { } ( { } - > { } ) " , name , <nl> + printValTypes ( export_type - > func ( ) - > params ( ) ) , <nl> + printValTypes ( export_type - > func ( ) - > results ( ) ) ) ; <nl> + <nl> + ASSERT ( export_item - > func ( ) ! = nullptr ) ; <nl> + module_functions_ . insert_or_assign ( name , export_item - > func ( ) - > copy ( ) ) ; <nl> + } break ; <nl> + <nl> + case wasm : : EXTERN_GLOBAL : { <nl> + / / TODO ( PiotrSikora ) : add support when / if needed . <nl> + ENVOY_LOG ( trace , " link ( ) , import module global : { } ( { } ) mmm IGNORED " , name , <nl> + printValKind ( export_type - > global ( ) - > content ( ) - > kind ( ) ) ) ; <nl> + } break ; <nl> + <nl> + case wasm : : EXTERN_MEMORY : { <nl> + ENVOY_LOG ( trace , " link ( ) , import module memory : { } ( min : { } max : { } ) " , name , <nl> + export_type - > memory ( ) - > limits ( ) . min , export_type - > memory ( ) - > limits ( ) . max ) ; <nl> + <nl> + ASSERT ( export_item - > memory ( ) ! = nullptr ) ; <nl> + ASSERT ( memory_ = = nullptr ) ; <nl> + memory_ = exports [ i ] - > memory ( ) - > copy ( ) ; <nl> + } break ; <nl> + <nl> + case wasm : : EXTERN_TABLE : { <nl> + / / TODO ( PiotrSikora ) : add support when / if needed . 
<nl> + ENVOY_LOG ( trace , " link ( ) , import module table : { } ( min : { } max : { } ) mmm IGNORED " , name , <nl> + export_type - > table ( ) - > limits ( ) . min , export_type - > table ( ) - > limits ( ) . max ) ; <nl> + } break ; <nl> + } <nl> + } <nl> + } <nl> + <nl> + uint64_t V8 : : getMemorySize ( ) { <nl> + ENVOY_LOG ( trace , " getMemorySize ( ) " ) ; <nl> + return memory_ - > data_size ( ) ; <nl> + } <nl> + <nl> + absl : : optional < absl : : string_view > V8 : : getMemory ( uint64_t pointer , uint64_t size ) { <nl> + ENVOY_LOG ( trace , " getMemory ( { } , { } ) " , pointer , size ) ; <nl> + ASSERT ( memory_ ! = nullptr ) ; <nl> + if ( pointer + size > memory_ - > data_size ( ) ) { <nl> + return absl : : nullopt ; <nl> + } <nl> + return absl : : string_view ( memory_ - > data ( ) + pointer , size ) ; <nl> + } <nl> + <nl> + bool V8 : : setMemory ( uint64_t pointer , uint64_t size , const void * data ) { <nl> + ENVOY_LOG ( trace , " setMemory ( { } , { } ) " , pointer , size ) ; <nl> + ASSERT ( memory_ ! = nullptr ) ; <nl> + if ( pointer + size > memory_ - > data_size ( ) ) { <nl> + return false ; <nl> + } <nl> + : : memcpy ( memory_ - > data ( ) + pointer , data , size ) ; <nl> + return true ; <nl> + } <nl> + <nl> + bool V8 : : getWord ( uint64_t pointer , Word * word ) { <nl> + ENVOY_LOG ( trace , " getWord ( { } ) " , pointer ) ; <nl> + constexpr auto size = sizeof ( uint32_t ) ; <nl> + if ( pointer + size > memory_ - > data_size ( ) ) { <nl> + return false ; <nl> + } <nl> + uint32_t word32 ; <nl> + : : memcpy ( & word32 , memory_ - > data ( ) + pointer , size ) ; <nl> + word - > u64_ = word32 ; <nl> + return true ; <nl> + } <nl> + <nl> + bool V8 : : setWord ( uint64_t pointer , Word word ) { <nl> + ENVOY_LOG ( trace , " setWord ( { } , { } ) " , pointer , word . u64_ ) ; <nl> + constexpr auto size = sizeof ( uint32_t ) ; <nl> + if ( pointer + size > memory_ - > data_size ( ) ) { <nl> + return false ; <nl> + } <nl> + uint32_t word32 = word . u32 ( ) ; <nl> + : : memcpy ( memory_ - > data ( ) + pointer , & word32 , size ) ; <nl> + return true ; <nl> + } <nl> + <nl> + template < typename . . . Args > <nl> + void V8 : : registerHostFunctionImpl ( absl : : string_view module_name , absl : : string_view function_name , <nl> + void ( * function ) ( void * , Args . . . ) ) { <nl> + ENVOY_LOG ( trace , " registerHostFunction ( \ " { } . { } \ " ) " , module_name , function_name ) ; <nl> + auto data = std : : make_unique < FuncData > ( absl : : StrCat ( module_name , " . " , function_name ) ) ; <nl> + auto type = wasm : : FuncType : : make ( convertArgsTupleToValTypes < std : : tuple < Args . . . > > ( ) , <nl> + convertArgsTupleToValTypes < std : : tuple < > > ( ) ) ; <nl> + auto func = wasm : : Func : : make ( <nl> + store_ . get ( ) , type . get ( ) , <nl> + [ ] ( void * data , const wasm : : Val params [ ] , wasm : : Val [ ] ) - > wasm : : own < wasm : : Trap > { <nl> + auto func_data = reinterpret_cast < FuncData * > ( data ) ; <nl> + ENVOY_LOG ( trace , " [ vm - > host ] { } ( { } ) " , func_data - > name_ , <nl> + printValues ( params , std : : tuple_size < std : : tuple < Args . . . > > : : value ) ) ; <nl> + auto args_tuple = convertValTypesToArgsTuple < std : : tuple < Args . . . > > ( params ) ; <nl> + auto args = std : : tuple_cat ( std : : make_tuple ( current_context_ ) , args_tuple ) ; <nl> + auto function = reinterpret_cast < void ( * ) ( void * , Args . . . 
) > ( func_data - > raw_func_ ) ; <nl> + absl : : apply ( function , args ) ; <nl> + ENVOY_LOG ( trace , " [ vm < - host ] { } return : void " , func_data - > name_ ) ; <nl> + return nullptr ; <nl> + } , <nl> + data . get ( ) ) ; <nl> + data - > callback_ = std : : move ( func ) ; <nl> + data - > raw_func_ = reinterpret_cast < void * > ( function ) ; <nl> + host_functions_ . insert_or_assign ( absl : : StrCat ( module_name , " . " , function_name ) , std : : move ( data ) ) ; <nl> + } <nl> + <nl> + template < typename R , typename . . . Args > <nl> + void V8 : : registerHostFunctionImpl ( absl : : string_view module_name , absl : : string_view function_name , <nl> + R ( * function ) ( void * , Args . . . ) ) { <nl> + ENVOY_LOG ( trace , " registerHostFunction ( \ " { } . { } \ " ) " , module_name , function_name ) ; <nl> + auto data = std : : make_unique < FuncData > ( absl : : StrCat ( module_name , " . " , function_name ) ) ; <nl> + auto type = wasm : : FuncType : : make ( convertArgsTupleToValTypes < std : : tuple < Args . . . > > ( ) , <nl> + convertArgsTupleToValTypes < std : : tuple < R > > ( ) ) ; <nl> + auto func = wasm : : Func : : make ( <nl> + store_ . get ( ) , type . get ( ) , <nl> + [ ] ( void * data , const wasm : : Val params [ ] , wasm : : Val results [ ] ) - > wasm : : own < wasm : : Trap > { <nl> + auto func_data = reinterpret_cast < FuncData * > ( data ) ; <nl> + ENVOY_LOG ( trace , " [ vm - > host ] { } ( { } ) " , func_data - > name_ , <nl> + printValues ( params , sizeof . . . ( Args ) ) ) ; <nl> + auto args_tuple = convertValTypesToArgsTuple < std : : tuple < Args . . . > > ( params ) ; <nl> + auto args = std : : tuple_cat ( std : : make_tuple ( current_context_ ) , args_tuple ) ; <nl> + auto function = reinterpret_cast < R ( * ) ( void * , Args . . . ) > ( func_data - > raw_func_ ) ; <nl> + R rvalue = absl : : apply ( function , args ) ; <nl> + results [ 0 ] = makeVal ( rvalue ) ; <nl> + ENVOY_LOG ( trace , " [ vm < - host ] { } return : { } " , func_data - > name_ , rvalue ) ; <nl> + return nullptr ; <nl> + } , <nl> + data . get ( ) ) ; <nl> + data - > callback_ = std : : move ( func ) ; <nl> + data - > raw_func_ = reinterpret_cast < void * > ( function ) ; <nl> + host_functions_ . insert_or_assign ( absl : : StrCat ( module_name , " . " , function_name ) , std : : move ( data ) ) ; <nl> + } <nl> + <nl> + template < typename . . . Args > <nl> + void V8 : : getModuleFunctionImpl ( absl : : string_view function_name , <nl> + std : : function < void ( Context * , Args . . . ) > * function ) { <nl> + ENVOY_LOG ( trace , " getModuleFunction ( \ " { } \ " ) " , function_name ) ; <nl> + auto it = module_functions_ . find ( function_name ) ; <nl> + if ( it = = module_functions_ . end ( ) ) { <nl> + * function = nullptr ; <nl> + return ; <nl> + } <nl> + const wasm : : Func * func = it - > second . get ( ) ; <nl> + if ( ! equalValTypes ( func - > type ( ) - > params ( ) , convertArgsTupleToValTypes < std : : tuple < Args . . . > > ( ) ) | | <nl> + ! equalValTypes ( func - > type ( ) - > results ( ) , convertArgsTupleToValTypes < std : : tuple < > > ( ) ) ) { <nl> + throw WasmVmException ( fmt : : format ( " Bad function signature for : { } " , function_name ) ) ; <nl> + } <nl> + * function = [ func , function_name ] ( Context * context , Args . . . args ) - > void { <nl> + wasm : : Val params [ ] = { makeVal ( args ) . . . } ; <nl> + ENVOY_LOG ( trace , " [ host - > vm ] { } ( { } ) " , function_name , printValues ( params , sizeof . . . 
( Args ) ) ) ; <nl> + SaveRestoreContext saved_context ( context ) ; <nl> + auto trap = func - > call ( params , nullptr ) ; <nl> + if ( trap ) { <nl> + throw WasmException ( <nl> + fmt : : format ( " Function : { } failed : { } " , function_name , <nl> + absl : : string_view ( trap - > message ( ) . get ( ) , trap - > message ( ) . size ( ) ) ) ) ; <nl> + } <nl> + ENVOY_LOG ( trace , " [ host < - vm ] { } return : void " , function_name ) ; <nl> + } ; <nl> + } <nl> + <nl> + template < typename R , typename . . . Args > <nl> + void V8 : : getModuleFunctionImpl ( absl : : string_view function_name , <nl> + std : : function < R ( Context * , Args . . . ) > * function ) { <nl> + ENVOY_LOG ( trace , " getModuleFunction ( \ " { } \ " ) " , function_name ) ; <nl> + auto it = module_functions_ . find ( function_name ) ; <nl> + if ( it = = module_functions_ . end ( ) ) { <nl> + * function = nullptr ; <nl> + return ; <nl> + } <nl> + const wasm : : Func * func = it - > second . get ( ) ; <nl> + if ( ! equalValTypes ( func - > type ( ) - > params ( ) , convertArgsTupleToValTypes < std : : tuple < Args . . . > > ( ) ) | | <nl> + ! equalValTypes ( func - > type ( ) - > results ( ) , convertArgsTupleToValTypes < std : : tuple < R > > ( ) ) ) { <nl> + throw WasmVmException ( fmt : : format ( " Bad function signature for : { } " , function_name ) ) ; <nl> + } <nl> + * function = [ func , function_name ] ( Context * context , Args . . . args ) - > R { <nl> + wasm : : Val params [ ] = { makeVal ( args ) . . . } ; <nl> + wasm : : Val results [ 1 ] ; <nl> + ENVOY_LOG ( trace , " [ host - > vm ] { } ( { } ) " , function_name , printValues ( params , sizeof . . . ( Args ) ) ) ; <nl> + SaveRestoreContext saved_context ( context ) ; <nl> + auto trap = func - > call ( params , results ) ; <nl> + if ( trap ) { <nl> + throw WasmException ( <nl> + fmt : : format ( " Function : { } failed : { } " , function_name , <nl> + absl : : string_view ( trap - > message ( ) . get ( ) , trap - > message ( ) . size ( ) ) ) ) ; <nl> + } <nl> + R rvalue = results [ 0 ] . get < typename ConvertWordTypeToUint32 < R > : : type > ( ) ; <nl> + ENVOY_LOG ( trace , " [ host < - vm ] { } return : { } " , function_name , rvalue ) ; <nl> + return rvalue ; <nl> + } ; <nl> + } <nl> + <nl> + WasmVmPtr createVm ( ) { return std : : make_unique < V8 > ( ) ; } <nl> + <nl> + } / / namespace V8 <nl> + } / / namespace Wasm <nl> + } / / namespace Common <nl> + } / / namespace Extensions <nl> + } / / namespace Envoy <nl> new file mode 100644 <nl> index 00000000000 . . 3650f190a73 <nl> mmm / dev / null <nl> ppp b / source / extensions / common / wasm / v8 / v8 . h <nl> <nl> + # pragma once <nl> + <nl> + # include < memory > <nl> + <nl> + # include " extensions / common / wasm / wasm_vm . h " <nl> + <nl> + namespace Envoy { <nl> + namespace Extensions { <nl> + namespace Common { <nl> + namespace Wasm { <nl> + namespace V8 { <nl> + <nl> + WasmVmPtr createVm ( ) ; <nl> + <nl> + } / / namespace V8 <nl> + } / / namespace Wasm <nl> + } / / namespace Common <nl> + } / / namespace Extensions <nl> + } / / namespace Envoy <nl> mmm a / source / extensions / common / wasm / wasm_vm . cc <nl> ppp b / source / extensions / common / wasm / wasm_vm . cc <nl> <nl> # include < memory > <nl> <nl> # include " extensions / common / wasm / null / null . h " <nl> + # include " extensions / common / wasm / v8 / v8 . h " <nl> # include " extensions / common / wasm / well_known_names . 
h " <nl> <nl> namespace Envoy { <nl> WasmVmPtr createWasmVm ( absl : : string_view runtime ) { <nl> throw WasmVmException ( " Failed to create WASM VM with unspecified runtime . " ) ; <nl> } else if ( runtime = = WasmRuntimeNames : : get ( ) . Null ) { <nl> return Null : : createVm ( ) ; <nl> + } else if ( runtime = = WasmRuntimeNames : : get ( ) . V8 ) { <nl> + return V8 : : createVm ( ) ; <nl> } else { <nl> throw WasmVmException ( fmt : : format ( <nl> " Failed to create WASM VM using { } runtime . Envoy was compiled without support for it . " , <nl> mmm a / source / extensions / common / wasm / wasm_vm . h <nl> ppp b / source / extensions / common / wasm / wasm_vm . h <nl> struct Word { <nl> uint64_t u64_ ; <nl> } ; <nl> <nl> + inline std : : ostream & operator < < ( std : : ostream & os , const Word & w ) { return os < < w . u64_ ; } <nl> + <nl> / / Convert Word type for use by 32 - bit VMs . <nl> template < typename T > struct ConvertWordTypeToUint32 { <nl> using type = T ; / / NOLINT ( readability - identifier - naming ) <nl> template < typename R , typename . . . Args > struct ConvertFunctionTypeWordToUint32 < R <nl> typename ConvertWordTypeToUint32 < Args > : : type . . . ) ; <nl> } ; <nl> <nl> + template < typename T > inline auto convertWordToUint32 ( T t ) { return t ; } <nl> + template < > inline auto convertWordToUint32 < Word > ( Word t ) { return static_cast < uint32_t > ( t . u64_ ) ; } <nl> + <nl> + / / Convert a function of the form Word ( Word . . . ) to one of the form uint32_t ( uint32_t . . . ) . <nl> + template < typename F , F * fn > struct ConvertFunctionWordToUint32 { <nl> + static void convertFunctionWordToUint32 ( ) { } <nl> + } ; <nl> + template < typename R , typename . . . Args , auto ( * F ) ( Args . . . ) - > R > <nl> + struct ConvertFunctionWordToUint32 < R ( Args . . . ) , F > { <nl> + static typename ConvertWordTypeToUint32 < R > : : type <nl> + convertFunctionWordToUint32 ( typename ConvertWordTypeToUint32 < Args > : : type . . . args ) { <nl> + return convertWordToUint32 ( F ( std : : forward < Args > ( args ) . . . ) ) ; <nl> + } <nl> + } ; <nl> + template < typename . . . Args , auto ( * F ) ( Args . . . ) - > void > <nl> + struct ConvertFunctionWordToUint32 < void ( Args . . . ) , F > { <nl> + static void convertFunctionWordToUint32 ( typename ConvertWordTypeToUint32 < Args > : : type . . . args ) { <nl> + F ( std : : forward < Args > ( args ) . . . ) ; <nl> + } <nl> + } ; <nl> + <nl> + # define CONVERT_FUNCTION_WORD_TO_UINT32 ( _f ) \ <nl> + & ConvertFunctionWordToUint32 < decltype ( _f ) , _f > : : convertFunctionWordToUint32 <nl> + <nl> / / These are templates and its helper for constructing signatures of functions calling into and out <nl> / / of WASM VMs . <nl> / / - WasmFuncTypeHelper is a helper for WasmFuncType and shouldn ' t be used anywhere else than <nl> template < size_t N > using WasmCallWord = std : : function < WasmFuncType < N , Word , Con <nl> # define FOR_ALL_WASM_VM_EXPORTS ( _f ) \ <nl> _f ( WasmCallVoid < 0 > ) _f ( WasmCallVoid < 1 > ) _f ( WasmCallVoid < 2 > ) _f ( WasmCallVoid < 3 > ) \ <nl> _f ( WasmCallVoid < 4 > ) _f ( WasmCallVoid < 5 > ) _f ( WasmCallVoid < 8 > ) _f ( WasmCallWord < 0 > ) \ <nl> - _f ( WasmCallWord < 1 > ) _f ( WasmCallWord < 3 > ) <nl> + _f ( WasmCallWord < 1 > ) _f ( WasmCallWord < 2 > ) _f ( WasmCallWord < 3 > ) <nl> <nl> / / Calls out of the WASM VM . <nl> / / 1st arg is always a pointer to raw_context ( void * ) . 
<nl> template < size_t N > using WasmCallbackWord = WasmFuncType < N , Word , void * , Word > * <nl> / / Z = void , j = uint32_t , l = int64_t , m = uint64_t <nl> using WasmCallback_WWl = Word ( * ) ( void * , Word , int64_t ) ; <nl> using WasmCallback_WWm = Word ( * ) ( void * , Word , uint64_t ) ; <nl> + using WasmCallback_dd = double ( * ) ( void * , double ) ; <nl> <nl> # define FOR_ALL_WASM_VM_IMPORTS ( _f ) \ <nl> _f ( WasmCallbackVoid < 0 > ) _f ( WasmCallbackVoid < 1 > ) _f ( WasmCallbackVoid < 2 > ) _f ( WasmCallbackVoid < 3 > ) \ <nl> using WasmCallback_WWm = Word ( * ) ( void * , Word , uint64_t ) ; <nl> _f ( WasmCallbackWord < 2 > ) _f ( WasmCallbackWord < 3 > ) _f ( WasmCallbackWord < 4 > ) \ <nl> _f ( WasmCallbackWord < 5 > ) _f ( WasmCallbackWord < 6 > ) _f ( WasmCallbackWord < 7 > ) \ <nl> _f ( WasmCallbackWord < 8 > ) _f ( WasmCallbackWord < 9 > ) _f ( WasmCallback_WWl ) \ <nl> - _f ( WasmCallback_WWm ) <nl> + _f ( WasmCallback_WWm ) _f ( WasmCallback_dd ) <nl> <nl> / / Wasm VM instance . Provides the low level WASM interface . <nl> class WasmVm : public Logger : : Loggable < Logger : : Id : : wasm > { <nl> mmm a / source / extensions / common / wasm / well_known_names . h <nl> ppp b / source / extensions / common / wasm / well_known_names . h <nl> class WasmRuntimeValues { <nl> / / Null sandbox : modules must be compiled into envoy and registered name is given in the <nl> / / DataSource . inline_string . <nl> const std : : string Null = " envoy . wasm . runtime . null " ; <nl> + / / V8 - based ( https : / / v8 . dev ) WebAssembly runtime . <nl> + const std : : string V8 = " envoy . wasm . runtime . v8 " ; <nl> } ; <nl> <nl> using WasmRuntimeNames = ConstSingleton < WasmRuntimeValues > ; <nl> mmm a / test / extensions / common / wasm / BUILD <nl> ppp b / test / extensions / common / wasm / BUILD <nl> envoy_package ( ) <nl> envoy_cc_test ( <nl> name = " wasm_vm_test " , <nl> srcs = [ " wasm_vm_test . cc " ] , <nl> + data = [ <nl> + " / / test / extensions / common / wasm / test_data : modules " , <nl> + ] , <nl> deps = [ <nl> " / / source / extensions / common / wasm : wasm_vm_lib " , <nl> + " / / test / test_common : environment_lib " , <nl> " / / test / test_common : utility_lib " , <nl> ] , <nl> ) <nl> new file mode 100644 <nl> index 00000000000 . . ef4f3738628 <nl> mmm / dev / null <nl> ppp b / test / extensions / common / wasm / test_data / BUILD <nl> <nl> + licenses ( [ " notice " ] ) # Apache 2 <nl> + <nl> + load ( <nl> + " / / bazel : envoy_build_system . bzl " , <nl> + " envoy_package " , <nl> + ) <nl> + <nl> + envoy_package ( ) <nl> + <nl> + filegroup ( <nl> + name = " modules " , <nl> + srcs = glob ( [ " * . wasm " ] ) , <nl> + ) <nl> new file mode 100644 <nl> index 00000000000 . . 304161e5061 <nl> mmm / dev / null <nl> ppp b / test / extensions / common / wasm / test_data / test_rust . rs <nl> <nl> + / / Build using : <nl> + / / $ rustc - C lto - C opt - level = 3 - C panic = abort - C link - arg = - S - C link - arg = - zstack - size = 32768 - - crate - type cdylib - - target wasm32 - unknown - unknown test_rust . rs <nl> + <nl> + / / Import functions exported from the host environment . 
<nl> + extern " C " { <nl> + fn pong ( value : u32 ) ; <nl> + fn random ( ) - > u32 ; <nl> + } <nl> + <nl> + # [ no_mangle ] <nl> + extern " C " fn ping ( value : u32 ) { <nl> + unsafe { pong ( value ) } <nl> + } <nl> + <nl> + # [ no_mangle ] <nl> + extern " C " fn lucky ( number : u32 ) - > bool { <nl> + unsafe { number = = random ( ) } <nl> + } <nl> + <nl> + # [ no_mangle ] <nl> + extern " C " fn sum ( a : u32 , b : u32 , c : u32 ) - > u32 { <nl> + a + b + c <nl> + } <nl> + <nl> + # [ no_mangle ] <nl> + extern " C " fn div ( a : u32 , b : u32 ) - > u32 { <nl> + a / b <nl> + } <nl> + <nl> + # [ no_mangle ] <nl> + extern " C " fn abort ( ) { <nl> + panic ! ( " abort " ) <nl> + } <nl> new file mode 100755 <nl> index 00000000000 . . e7b1ce902a1 <nl> Binary files / dev / null and b / test / extensions / common / wasm / test_data / test_rust . wasm differ <nl> mmm a / test / extensions / common / wasm / wasm_vm_test . cc <nl> ppp b / test / extensions / common / wasm / wasm_vm_test . cc <nl> <nl> # include " extensions / common / wasm / null / null_vm_plugin . h " <nl> # include " extensions / common / wasm / wasm_vm . h " <nl> <nl> + # include " test / test_common / environment . h " <nl> # include " test / test_common / utility . h " <nl> <nl> # include " gmock / gmock . h " <nl> # include " gtest / gtest . h " <nl> <nl> + using testing : : HasSubstr ; <nl> + using testing : : Return ; <nl> + <nl> namespace Envoy { <nl> namespace Extensions { <nl> namespace Common { <nl> std : : unique_ptr < Null : : NullVmPlugin > PluginFactory : : create ( ) const { <nl> return result ; <nl> } <nl> <nl> - TEST ( WasmVmTest , NoRuntime ) { <nl> + TEST ( BadVmTest , NoRuntime ) { <nl> EXPECT_THROW_WITH_MESSAGE ( createWasmVm ( " " ) , WasmVmException , <nl> " Failed to create WASM VM with unspecified runtime . " ) ; <nl> } <nl> <nl> - TEST ( WasmVmTest , BadRuntime ) { <nl> + TEST ( BadVmTest , BadRuntime ) { <nl> EXPECT_THROW_WITH_MESSAGE ( createWasmVm ( " envoy . wasm . runtime . invalid " ) , WasmVmException , <nl> " Failed to create WASM VM using envoy . wasm . runtime . invalid runtime . " <nl> " Envoy was compiled without support for it . " ) ; <nl> } <nl> <nl> - TEST ( WasmVmTest , NullVmStartup ) { <nl> + TEST ( NullVmTest , NullVmStartup ) { <nl> auto wasm_vm = createWasmVm ( " envoy . wasm . runtime . null " ) ; <nl> EXPECT_TRUE ( wasm_vm ! = nullptr ) ; <nl> EXPECT_TRUE ( wasm_vm - > runtime ( ) = = " envoy . wasm . runtime . null " ) ; <nl> TEST ( WasmVmTest , NullVmStartup ) { <nl> EXPECT_TRUE ( wasm_vm - > getCustomSection ( " user " ) . empty ( ) ) ; <nl> } <nl> <nl> - TEST ( WasmVmTest , NullVmMemory ) { <nl> + TEST ( NullVmTest , NullVmMemory ) { <nl> auto wasm_vm = createWasmVm ( " envoy . wasm . runtime . null " ) ; <nl> EXPECT_EQ ( wasm_vm - > getMemorySize ( ) , std : : numeric_limits < uint64_t > : : max ( ) ) ; <nl> std : : string d = " data " ; <nl> TEST ( WasmVmTest , NullVmMemory ) { <nl> EXPECT_FALSE ( wasm_vm - > getWord ( 0 / * nullptr * / , & w2 ) ) ; <nl> } <nl> <nl> + class MockHostFunctions { <nl> + public : <nl> + MOCK_CONST_METHOD1 ( pong , void ( uint32_t ) ) ; <nl> + MOCK_CONST_METHOD0 ( random , uint32_t ( ) ) ; <nl> + } ; <nl> + <nl> + MockHostFunctions * g_host_functions ; <nl> + <nl> + void pong ( void * , Word value ) { g_host_functions - > pong ( convertWordToUint32 ( value ) ) ; } <nl> + <nl> + Word random ( void * ) { return Word ( g_host_functions - > random ( ) ) ; } <nl> + <nl> + / / pong ( ) with wrong number of arguments . 
<nl> + void bad_pong1 ( void * ) { return ; } <nl> + <nl> + / / pong ( ) with wrong return type . <nl> + Word bad_pong2 ( void * , Word ) { return 2 ; } <nl> + <nl> + / / pong ( ) with wrong argument type . <nl> + double bad_pong3 ( void * , double ) { return 3 ; } <nl> + <nl> + class WasmVmTest : public testing : : Test { <nl> + public : <nl> + void SetUp ( ) override { g_host_functions = new MockHostFunctions ( ) ; } <nl> + void TearDown ( ) override { delete g_host_functions ; } <nl> + } ; <nl> + <nl> + TEST_F ( WasmVmTest , V8BadCode ) { <nl> + auto wasm_vm = createWasmVm ( " envoy . wasm . runtime . v8 " ) ; <nl> + ASSERT_TRUE ( wasm_vm ! = nullptr ) ; <nl> + <nl> + EXPECT_FALSE ( wasm_vm - > load ( " bad code " , false ) ) ; <nl> + } <nl> + <nl> + TEST_F ( WasmVmTest , V8Code ) { <nl> + auto wasm_vm = createWasmVm ( " envoy . wasm . runtime . v8 " ) ; <nl> + ASSERT_TRUE ( wasm_vm ! = nullptr ) ; <nl> + <nl> + EXPECT_TRUE ( wasm_vm - > runtime ( ) = = " envoy . wasm . runtime . v8 " ) ; <nl> + EXPECT_FALSE ( wasm_vm - > cloneable ( ) ) ; <nl> + EXPECT_TRUE ( wasm_vm - > clone ( ) = = nullptr ) ; <nl> + <nl> + auto code = TestEnvironment : : readFileToStringForTest ( TestEnvironment : : substitute ( <nl> + " { { test_rundir } } / test / extensions / common / wasm / test_data / test_rust . wasm " ) ) ; <nl> + EXPECT_TRUE ( wasm_vm - > load ( code , false ) ) ; <nl> + <nl> + EXPECT_THAT ( wasm_vm - > getCustomSection ( " producers " ) , HasSubstr ( " rustc " ) ) ; <nl> + EXPECT_TRUE ( wasm_vm - > getCustomSection ( " emscripten_metadata " ) . empty ( ) ) ; <nl> + } <nl> + <nl> + TEST_F ( WasmVmTest , V8BadHostFunctions ) { <nl> + auto wasm_vm = createWasmVm ( " envoy . wasm . runtime . v8 " ) ; <nl> + ASSERT_TRUE ( wasm_vm ! = nullptr ) ; <nl> + <nl> + auto code = TestEnvironment : : readFileToStringForTest ( TestEnvironment : : substitute ( <nl> + " { { test_rundir } } / test / extensions / common / wasm / test_data / test_rust . wasm " ) ) ; <nl> + EXPECT_TRUE ( wasm_vm - > load ( code , false ) ) ; <nl> + <nl> + wasm_vm - > registerCallback ( " env " , " random " , & random , CONVERT_FUNCTION_WORD_TO_UINT32 ( random ) ) ; <nl> + EXPECT_THROW_WITH_MESSAGE ( wasm_vm - > link ( " test " ) , WasmVmException , <nl> + " Failed to load WASM module due to a missing import : env . pong " ) ; <nl> + <nl> + wasm_vm - > registerCallback ( " env " , " pong " , & bad_pong1 , CONVERT_FUNCTION_WORD_TO_UINT32 ( bad_pong1 ) ) ; <nl> + EXPECT_THROW_WITH_MESSAGE ( wasm_vm - > link ( " test " ) , WasmVmException , <nl> + " Failed to load WASM module due to an import type mismatch : env . pong , " <nl> + " want : i32 - > void , but host exports : void - > void " ) ; <nl> + <nl> + wasm_vm - > registerCallback ( " env " , " pong " , & bad_pong2 , CONVERT_FUNCTION_WORD_TO_UINT32 ( bad_pong2 ) ) ; <nl> + EXPECT_THROW_WITH_MESSAGE ( wasm_vm - > link ( " test " ) , WasmVmException , <nl> + " Failed to load WASM module due to an import type mismatch : env . pong , " <nl> + " want : i32 - > void , but host exports : i32 - > i32 " ) ; <nl> + <nl> + wasm_vm - > registerCallback ( " env " , " pong " , & bad_pong3 , CONVERT_FUNCTION_WORD_TO_UINT32 ( bad_pong3 ) ) ; <nl> + EXPECT_THROW_WITH_MESSAGE ( wasm_vm - > link ( " test " ) , WasmVmException , <nl> + " Failed to load WASM module due to an import type mismatch : env . pong , " <nl> + " want : i32 - > void , but host exports : f64 - > f64 " ) ; <nl> + } <nl> + <nl> + TEST_F ( WasmVmTest , V8BadModuleFunctions ) { <nl> + auto wasm_vm = createWasmVm ( " envoy . 
wasm . runtime . v8 " ) ; <nl> + ASSERT_TRUE ( wasm_vm ! = nullptr ) ; <nl> + <nl> + auto code = TestEnvironment : : readFileToStringForTest ( TestEnvironment : : substitute ( <nl> + " { { test_rundir } } / test / extensions / common / wasm / test_data / test_rust . wasm " ) ) ; <nl> + EXPECT_TRUE ( wasm_vm - > load ( code , false ) ) ; <nl> + <nl> + wasm_vm - > registerCallback ( " env " , " pong " , & pong , CONVERT_FUNCTION_WORD_TO_UINT32 ( pong ) ) ; <nl> + wasm_vm - > registerCallback ( " env " , " random " , & random , CONVERT_FUNCTION_WORD_TO_UINT32 ( random ) ) ; <nl> + wasm_vm - > link ( " test " ) ; <nl> + <nl> + WasmCallVoid < 1 > ping ; <nl> + WasmCallWord < 3 > sum ; <nl> + <nl> + wasm_vm - > getFunction ( " nonexistent " , & ping ) ; <nl> + EXPECT_TRUE ( ping = = nullptr ) ; <nl> + <nl> + wasm_vm - > getFunction ( " nonexistent " , & sum ) ; <nl> + EXPECT_TRUE ( sum = = nullptr ) ; <nl> + <nl> + EXPECT_THROW_WITH_MESSAGE ( wasm_vm - > getFunction ( " ping " , & sum ) , WasmVmException , <nl> + " Bad function signature for : ping " ) ; <nl> + <nl> + EXPECT_THROW_WITH_MESSAGE ( wasm_vm - > getFunction ( " sum " , & ping ) , WasmVmException , <nl> + " Bad function signature for : sum " ) ; <nl> + } <nl> + <nl> + TEST_F ( WasmVmTest , V8FunctionCalls ) { <nl> + auto wasm_vm = createWasmVm ( " envoy . wasm . runtime . v8 " ) ; <nl> + ASSERT_TRUE ( wasm_vm ! = nullptr ) ; <nl> + <nl> + auto code = TestEnvironment : : readFileToStringForTest ( TestEnvironment : : substitute ( <nl> + " { { test_rundir } } / test / extensions / common / wasm / test_data / test_rust . wasm " ) ) ; <nl> + EXPECT_TRUE ( wasm_vm - > load ( code , false ) ) ; <nl> + <nl> + wasm_vm - > registerCallback ( " env " , " pong " , & pong , CONVERT_FUNCTION_WORD_TO_UINT32 ( pong ) ) ; <nl> + wasm_vm - > registerCallback ( " env " , " random " , & random , CONVERT_FUNCTION_WORD_TO_UINT32 ( random ) ) ; <nl> + wasm_vm - > link ( " test " ) ; <nl> + <nl> + WasmCallVoid < 1 > ping ; <nl> + wasm_vm - > getFunction ( " ping " , & ping ) ; <nl> + EXPECT_CALL ( * g_host_functions , pong ( 42 ) ) ; <nl> + ping ( nullptr / * no context * / , 42 ) ; <nl> + <nl> + WasmCallWord < 1 > lucky ; <nl> + wasm_vm - > getFunction ( " lucky " , & lucky ) ; <nl> + EXPECT_CALL ( * g_host_functions , random ( ) ) . WillRepeatedly ( Return ( 42 ) ) ; <nl> + EXPECT_EQ ( 0 , lucky ( nullptr / * no context * / , 1 ) . u64_ ) ; <nl> + EXPECT_EQ ( 1 , lucky ( nullptr / * no context * / , 42 ) . u64_ ) ; <nl> + <nl> + WasmCallWord < 3 > sum ; <nl> + wasm_vm - > getFunction ( " sum " , & sum ) ; <nl> + EXPECT_EQ ( 42 , sum ( nullptr / * no context * / , 13 , 14 , 15 ) . u64_ ) ; <nl> + <nl> + WasmCallWord < 2 > div ; <nl> + wasm_vm - > getFunction ( " div " , & div ) ; <nl> + EXPECT_THROW_WITH_MESSAGE ( div ( nullptr / * no context * / , 42 , 0 ) , WasmException , <nl> + " Function : div failed : Uncaught RuntimeError : unreachable " ) ; <nl> + <nl> + WasmCallVoid < 0 > abort ; <nl> + wasm_vm - > getFunction ( " abort " , & abort ) ; <nl> + EXPECT_THROW_WITH_MESSAGE ( abort ( nullptr / * no context * / ) , WasmException , <nl> + " Function : abort failed : Uncaught RuntimeError : unreachable " ) ; <nl> + } <nl> + <nl> + TEST_F ( WasmVmTest , V8Memory ) { <nl> + auto wasm_vm = createWasmVm ( " envoy . wasm . runtime . v8 " ) ; <nl> + ASSERT_TRUE ( wasm_vm ! 
= nullptr ) ; <nl> + <nl> + auto code = TestEnvironment : : readFileToStringForTest ( TestEnvironment : : substitute ( <nl> + " { { test_rundir } } / test / extensions / common / wasm / test_data / test_rust . wasm " ) ) ; <nl> + EXPECT_TRUE ( wasm_vm - > load ( code , false ) ) ; <nl> + <nl> + wasm_vm - > registerCallback ( " env " , " pong " , & pong , CONVERT_FUNCTION_WORD_TO_UINT32 ( pong ) ) ; <nl> + wasm_vm - > registerCallback ( " env " , " random " , & random , CONVERT_FUNCTION_WORD_TO_UINT32 ( random ) ) ; <nl> + wasm_vm - > link ( " test " ) ; <nl> + <nl> + EXPECT_EQ ( wasm_vm - > getMemorySize ( ) , 65536 / * stack size requested at the build - time * / ) ; <nl> + <nl> + const uint64_t test_addr = 128 ; <nl> + <nl> + std : : string set = " test " ; <nl> + EXPECT_TRUE ( wasm_vm - > setMemory ( test_addr , set . size ( ) , set . data ( ) ) ) ; <nl> + auto got = wasm_vm - > getMemory ( test_addr , set . size ( ) ) . value ( ) ; <nl> + EXPECT_EQ ( sizeof ( " test " ) - 1 , got . size ( ) ) ; <nl> + EXPECT_STREQ ( " test " , got . data ( ) ) ; <nl> + <nl> + EXPECT_FALSE ( wasm_vm - > setMemory ( 1024 * 1024 / * out of bound * / , 1 / * size * / , nullptr ) ) ; <nl> + EXPECT_FALSE ( wasm_vm - > getMemory ( 1024 * 1024 / * out of bound * / , 1 / * size * / ) . has_value ( ) ) ; <nl> + <nl> + Word word ( 0 ) ; <nl> + EXPECT_TRUE ( wasm_vm - > setWord ( test_addr , std : : numeric_limits < uint32_t > : : max ( ) ) ) ; <nl> + EXPECT_TRUE ( wasm_vm - > getWord ( test_addr , & word ) ) ; <nl> + EXPECT_EQ ( std : : numeric_limits < uint32_t > : : max ( ) , word . u64_ ) ; <nl> + <nl> + EXPECT_FALSE ( wasm_vm - > setWord ( 1024 * 1024 / * out of bound * / , 1 ) ) ; <nl> + EXPECT_FALSE ( wasm_vm - > getWord ( 1024 * 1024 / * out of bound * / , & word ) ) ; <nl> + } <nl> + <nl> } / / namespace <nl> } / / namespace Wasm <nl> } / / namespace Common <nl>
wasm : add V8 - based WebAssembly runtime . ( )
envoyproxy/envoy
6bbe6f3aa37514d126e16e8fbcbf469634f2305f
2019-11-05T01:08:56Z
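A minimal host-side sketch of the call flow this commit introduces, condensed from the wasm_vm_test.cc changes above. The RunV8Example wrapper and the fixed random() value are illustrative assumptions; the API names (createWasmVm, load, registerCallback, link, getFunction) are taken directly from the diff.

#include <string>

#include "extensions/common/wasm/wasm_vm.h"

using namespace Envoy::Extensions::Common::Wasm;

// Host callbacks matching the module's "env" imports (signatures as in the test).
void pong(void*, Word value) { (void)value; /* module pinged us back */ }
Word random(void*) { return Word(42); }  // assumption: fixed value, for illustration only

// Hypothetical helper showing the load -> register -> link -> call sequence.
void RunV8Example(const std::string& code) {
  auto wasm_vm = createWasmVm("envoy.wasm.runtime.v8");
  if (!wasm_vm->load(code, false /* allow_precompiled */)) {
    return;  // invalid module bytes
  }
  // All imports must be registered before link(), which instantiates the module
  // and throws WasmVmException on a missing import or a signature mismatch.
  wasm_vm->registerCallback("env", "pong", &pong, CONVERT_FUNCTION_WORD_TO_UINT32(pong));
  wasm_vm->registerCallback("env", "random", &random, CONVERT_FUNCTION_WORD_TO_UINT32(random));
  wasm_vm->link("example");
  WasmCallVoid<1> ping;
  wasm_vm->getFunction("ping", &ping);  // left null if the export does not exist
  if (ping) {
    ping(nullptr /* no context */, 42);  // traps surface as WasmException
  }
}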
mmm a / src / builtins / builtins - collections - gen . cc <nl> ppp b / src / builtins / builtins - collections - gen . cc <nl> TF_BUILTIN ( OrderedHashTableHealIndex , CollectionsBuiltinsAssembler ) { <nl> GotoIfNot ( SmiLessThan ( SmiConstant ( 0 ) , index ) , & return_zero ) ; <nl> <nl> / / Check if the { table } was cleared . <nl> + STATIC_ASSERT ( OrderedHashMap : : kNumberOfDeletedElementsOffset = = <nl> + OrderedHashSet : : kNumberOfDeletedElementsOffset ) ; <nl> Node * number_of_deleted_elements = LoadAndUntagObjectField ( <nl> - table , OrderedHashTableBase : : kNumberOfDeletedElementsOffset ) ; <nl> + table , OrderedHashMap : : kNumberOfDeletedElementsOffset ) ; <nl> + STATIC_ASSERT ( OrderedHashMap : : kClearedTableSentinel = = <nl> + OrderedHashSet : : kClearedTableSentinel ) ; <nl> GotoIf ( WordEqual ( number_of_deleted_elements , <nl> - IntPtrConstant ( OrderedHashTableBase : : kClearedTableSentinel ) ) , <nl> + IntPtrConstant ( OrderedHashMap : : kClearedTableSentinel ) ) , <nl> & return_zero ) ; <nl> <nl> VARIABLE ( var_i , MachineType : : PointerRepresentation ( ) , IntPtrConstant ( 0 ) ) ; <nl> TF_BUILTIN ( OrderedHashTableHealIndex , CollectionsBuiltinsAssembler ) { <nl> { <nl> Node * i = var_i . value ( ) ; <nl> GotoIfNot ( IntPtrLessThan ( i , number_of_deleted_elements ) , & return_index ) ; <nl> + STATIC_ASSERT ( OrderedHashMap : : kRemovedHolesIndex = = <nl> + OrderedHashSet : : kRemovedHolesIndex ) ; <nl> TNode < Smi > removed_index = CAST ( LoadFixedArrayElement ( <nl> - CAST ( table ) , i , <nl> - OrderedHashTableBase : : kRemovedHolesIndex * kPointerSize ) ) ; <nl> + CAST ( table ) , i , OrderedHashMap : : kRemovedHolesIndex * kPointerSize ) ) ; <nl> GotoIf ( SmiGreaterThanOrEqual ( removed_index , index ) , & return_index ) ; <nl> Decrement ( & var_index , 1 , SMI_PARAMETERS ) ; <nl> Increment ( & var_i ) ; <nl> mmm a / src / compiler / access - builder . cc <nl> ppp b / src / compiler / access - builder . cc <nl> FieldAccess AccessBuilder : : ForHashTableBaseCapacity ( ) { <nl> } <nl> <nl> / / static <nl> - FieldAccess AccessBuilder : : ForOrderedHashTableBaseNextTable ( ) { <nl> + FieldAccess AccessBuilder : : ForOrderedHashMapOrSetNextTable ( ) { <nl> / / TODO ( turbofan ) : This will be redundant with the HashTableBase <nl> / / methods above once the hash table unification is done . <nl> + STATIC_ASSERT ( OrderedHashMap : : kNextTableOffset = = <nl> + OrderedHashSet : : kNextTableOffset ) ; <nl> FieldAccess const access = { <nl> - kTaggedBase , OrderedHashTableBase : : kNextTableOffset , <nl> + kTaggedBase , OrderedHashMap : : kNextTableOffset , <nl> MaybeHandle < Name > ( ) , MaybeHandle < Map > ( ) , <nl> Type : : Any ( ) , MachineType : : AnyTagged ( ) , <nl> kFullWriteBarrier } ; <nl> FieldAccess AccessBuilder : : ForOrderedHashTableBaseNextTable ( ) { <nl> } <nl> <nl> / / static <nl> - FieldAccess AccessBuilder : : ForOrderedHashTableBaseNumberOfBuckets ( ) { <nl> + FieldAccess AccessBuilder : : ForOrderedHashMapOrSetNumberOfBuckets ( ) { <nl> / / TODO ( turbofan ) : This will be redundant with the HashTableBase <nl> / / methods above once the hash table unification is done . <nl> + STATIC_ASSERT ( OrderedHashMap : : kNumberOfBucketsOffset = = <nl> + OrderedHashSet : : kNumberOfBucketsOffset ) ; <nl> FieldAccess const access = { kTaggedBase , <nl> - OrderedHashTableBase : : kNumberOfBucketsOffset , <nl> + OrderedHashMap : : kNumberOfBucketsOffset , <nl> MaybeHandle < Name > ( ) , <nl> MaybeHandle < Map > ( ) , <nl> TypeCache : : Get ( ) . 
kFixedArrayLengthType , <nl> FieldAccess AccessBuilder : : ForOrderedHashTableBaseNumberOfBuckets ( ) { <nl> } <nl> <nl> / / static <nl> - FieldAccess AccessBuilder : : ForOrderedHashTableBaseNumberOfDeletedElements ( ) { <nl> + FieldAccess AccessBuilder : : ForOrderedHashMapOrSetNumberOfDeletedElements ( ) { <nl> / / TODO ( turbofan ) : This will be redundant with the HashTableBase <nl> / / methods above once the hash table unification is done . <nl> - FieldAccess const access = { <nl> - kTaggedBase , <nl> - OrderedHashTableBase : : kNumberOfDeletedElementsOffset , <nl> - MaybeHandle < Name > ( ) , <nl> - MaybeHandle < Map > ( ) , <nl> - TypeCache : : Get ( ) . kFixedArrayLengthType , <nl> - MachineType : : TaggedSigned ( ) , <nl> - kNoWriteBarrier } ; <nl> + STATIC_ASSERT ( OrderedHashMap : : kNumberOfDeletedElementsOffset = = <nl> + OrderedHashSet : : kNumberOfDeletedElementsOffset ) ; <nl> + FieldAccess const access = { kTaggedBase , <nl> + OrderedHashMap : : kNumberOfDeletedElementsOffset , <nl> + MaybeHandle < Name > ( ) , <nl> + MaybeHandle < Map > ( ) , <nl> + TypeCache : : Get ( ) . kFixedArrayLengthType , <nl> + MachineType : : TaggedSigned ( ) , <nl> + kNoWriteBarrier } ; <nl> return access ; <nl> } <nl> <nl> / / static <nl> - FieldAccess AccessBuilder : : ForOrderedHashTableBaseNumberOfElements ( ) { <nl> + FieldAccess AccessBuilder : : ForOrderedHashMapOrSetNumberOfElements ( ) { <nl> / / TODO ( turbofan ) : This will be redundant with the HashTableBase <nl> / / methods above once the hash table unification is done . <nl> + STATIC_ASSERT ( OrderedHashMap : : kNumberOfElementsOffset = = <nl> + OrderedHashSet : : kNumberOfElementsOffset ) ; <nl> FieldAccess const access = { kTaggedBase , <nl> - OrderedHashTableBase : : kNumberOfElementsOffset , <nl> + OrderedHashMap : : kNumberOfElementsOffset , <nl> MaybeHandle < Name > ( ) , <nl> MaybeHandle < Map > ( ) , <nl> TypeCache : : Get ( ) . kFixedArrayLengthType , <nl> mmm a / src / compiler / access - builder . h <nl> ppp b / src / compiler / access - builder . h <nl> class V8_EXPORT_PRIVATE AccessBuilder final <nl> static FieldAccess ForHashTableBaseNumberOfDeletedElement ( ) ; <nl> static FieldAccess ForHashTableBaseCapacity ( ) ; <nl> <nl> - / / Provides access to OrderedHashTableBase fields . <nl> - static FieldAccess ForOrderedHashTableBaseNextTable ( ) ; <nl> - static FieldAccess ForOrderedHashTableBaseNumberOfBuckets ( ) ; <nl> - static FieldAccess ForOrderedHashTableBaseNumberOfElements ( ) ; <nl> - static FieldAccess ForOrderedHashTableBaseNumberOfDeletedElements ( ) ; <nl> + / / Provides access to OrderedHashMapOrSet fields . <nl> + static FieldAccess ForOrderedHashMapOrSetNextTable ( ) ; <nl> + static FieldAccess ForOrderedHashMapOrSetNumberOfBuckets ( ) ; <nl> + static FieldAccess ForOrderedHashMapOrSetNumberOfElements ( ) ; <nl> + static FieldAccess ForOrderedHashMapOrSetNumberOfDeletedElements ( ) ; <nl> <nl> / / Provides access to OrderedHashMap elements . <nl> static ElementAccess ForOrderedHashMapEntryValue ( ) ; <nl> mmm a / src / compiler / effect - control - linearizer . cc <nl> ppp b / src / compiler / effect - control - linearizer . 
cc <nl> Node * EffectControlLinearizer : : LowerFindOrderedHashMapEntryForInt32Key ( <nl> Node * hash = ChangeUint32ToUintPtr ( ComputeUnseededHash ( key ) ) ; <nl> <nl> Node * number_of_buckets = ChangeSmiToIntPtr ( __ LoadField ( <nl> - AccessBuilder : : ForOrderedHashTableBaseNumberOfBuckets ( ) , table ) ) ; <nl> + AccessBuilder : : ForOrderedHashMapOrSetNumberOfBuckets ( ) , table ) ) ; <nl> hash = __ WordAnd ( hash , __ IntSub ( number_of_buckets , __ IntPtrConstant ( 1 ) ) ) ; <nl> Node * first_entry = ChangeSmiToIntPtr ( __ Load ( <nl> MachineType : : TaggedSigned ( ) , table , <nl> mmm a / src / compiler / js - call - reducer . cc <nl> ppp b / src / compiler / js - call - reducer . cc <nl> Reduction JSCallReducer : : ReduceCollectionPrototypeSize ( <nl> receiver , effect , control ) ; <nl> Node * value = effect = graph ( ) - > NewNode ( <nl> simplified ( ) - > LoadField ( <nl> - AccessBuilder : : ForOrderedHashTableBaseNumberOfElements ( ) ) , <nl> + AccessBuilder : : ForOrderedHashMapOrSetNumberOfElements ( ) ) , <nl> table , effect , control ) ; <nl> ReplaceWithValue ( node , value , effect , control ) ; <nl> return Replace ( value ) ; <nl> Reduction JSCallReducer : : ReduceCollectionIteratorPrototypeNext ( <nl> receiver , effect , control ) ; <nl> Node * next_table = effect = <nl> graph ( ) - > NewNode ( simplified ( ) - > LoadField ( <nl> - AccessBuilder : : ForOrderedHashTableBaseNextTable ( ) ) , <nl> + AccessBuilder : : ForOrderedHashMapOrSetNextTable ( ) ) , <nl> table , effect , control ) ; <nl> Node * check = graph ( ) - > NewNode ( simplified ( ) - > ObjectIsSmi ( ) , next_table ) ; <nl> control = <nl> Reduction JSCallReducer : : ReduceCollectionIteratorPrototypeNext ( <nl> / / Compute the currently used capacity . <nl> Node * number_of_buckets = effect = graph ( ) - > NewNode ( <nl> simplified ( ) - > LoadField ( <nl> - AccessBuilder : : ForOrderedHashTableBaseNumberOfBuckets ( ) ) , <nl> + AccessBuilder : : ForOrderedHashMapOrSetNumberOfBuckets ( ) ) , <nl> table , effect , control ) ; <nl> Node * number_of_elements = effect = graph ( ) - > NewNode ( <nl> simplified ( ) - > LoadField ( <nl> - AccessBuilder : : ForOrderedHashTableBaseNumberOfElements ( ) ) , <nl> + AccessBuilder : : ForOrderedHashMapOrSetNumberOfElements ( ) ) , <nl> table , effect , control ) ; <nl> Node * number_of_deleted_elements = effect = graph ( ) - > NewNode ( <nl> simplified ( ) - > LoadField ( <nl> - AccessBuilder : : ForOrderedHashTableBaseNumberOfDeletedElements ( ) ) , <nl> + AccessBuilder : : ForOrderedHashMapOrSetNumberOfDeletedElements ( ) ) , <nl> table , effect , control ) ; <nl> Node * used_capacity = <nl> graph ( ) - > NewNode ( simplified ( ) - > NumberAdd ( ) , number_of_elements , <nl> Reduction JSCallReducer : : ReduceCollectionIteratorPrototypeNext ( <nl> Node * etrue0 = effect ; <nl> { <nl> / / Load the key of the entry . 
<nl> + STATIC_ASSERT ( OrderedHashMap : : kHashTableStartIndex = = <nl> + OrderedHashSet : : kHashTableStartIndex ) ; <nl> Node * entry_start_position = graph ( ) - > NewNode ( <nl> simplified ( ) - > NumberAdd ( ) , <nl> graph ( ) - > NewNode ( <nl> Reduction JSCallReducer : : ReduceCollectionIteratorPrototypeNext ( <nl> graph ( ) - > NewNode ( simplified ( ) - > NumberMultiply ( ) , index , <nl> jsgraph ( ) - > Constant ( entry_size ) ) , <nl> number_of_buckets ) , <nl> - jsgraph ( ) - > Constant ( OrderedHashTableBase : : kHashTableStartIndex ) ) ; <nl> + jsgraph ( ) - > Constant ( OrderedHashMap : : kHashTableStartIndex ) ) ; <nl> Node * entry_key = etrue0 = graph ( ) - > NewNode ( <nl> simplified ( ) - > LoadElement ( AccessBuilder : : ForFixedArrayElement ( ) ) , <nl> table , entry_start_position , etrue0 , if_true0 ) ; <nl> mmm a / src / keys . cc <nl> ppp b / src / keys . cc <nl> void KeyAccumulator : : AddKey ( Handle < Object > key , AddKeyConversion convert ) { <nl> / / The keys_ Set is converted directly to a FixedArray in GetKeys which can <nl> / / be left - trimmer . Hence the previous Set should not keep a pointer to the <nl> / / new one . <nl> - keys_ - > set ( OrderedHashTableBase : : kNextTableIndex , Smi : : kZero ) ; <nl> + keys_ - > set ( OrderedHashSet : : kNextTableIndex , Smi : : kZero ) ; <nl> keys_ = new_set ; <nl> } <nl> } <nl> mmm a / src / objects / ordered - hash - table . h <nl> ppp b / src / objects / ordered - hash - table . h <nl> <nl> namespace v8 { <nl> namespace internal { <nl> <nl> - / / Non - templatized base class for { OrderedHashTable } s . <nl> - / / TODO ( hash ) : Unify this with the HashTableBase above . <nl> - class OrderedHashTableBase : public FixedArray { <nl> - public : <nl> - static const int kNotFound = - 1 ; <nl> - static const int kMinCapacity = 4 ; <nl> - <nl> - static const int kNumberOfElementsIndex = 0 ; <nl> - / / The next table is stored at the same index as the nof elements . <nl> - static const int kNextTableIndex = kNumberOfElementsIndex ; <nl> - static const int kNumberOfDeletedElementsIndex = kNumberOfElementsIndex + 1 ; <nl> - static const int kNumberOfBucketsIndex = kNumberOfDeletedElementsIndex + 1 ; <nl> - static const int kHashTableStartIndex = kNumberOfBucketsIndex + 1 ; <nl> - static const int kRemovedHolesIndex = kHashTableStartIndex ; <nl> - <nl> - static constexpr const int kNumberOfElementsOffset = <nl> - FixedArray : : OffsetOfElementAt ( kNumberOfElementsIndex ) ; <nl> - static constexpr const int kNextTableOffset = <nl> - FixedArray : : OffsetOfElementAt ( kNextTableIndex ) ; <nl> - static constexpr const int kNumberOfDeletedElementsOffset = <nl> - FixedArray : : OffsetOfElementAt ( kNumberOfDeletedElementsIndex ) ; <nl> - static constexpr const int kNumberOfBucketsOffset = <nl> - FixedArray : : OffsetOfElementAt ( kNumberOfBucketsIndex ) ; <nl> - static constexpr const int kHashTableStartOffset = <nl> - FixedArray : : OffsetOfElementAt ( kHashTableStartIndex ) ; <nl> - <nl> - static const int kLoadFactor = 2 ; <nl> - <nl> - / / NumberOfDeletedElements is set to kClearedTableSentinel when <nl> - / / the table is cleared , which allows iterator transitions to <nl> - / / optimize that case . <nl> - static const int kClearedTableSentinel = - 1 ; <nl> - } ; <nl> - <nl> / / OrderedHashTable is a HashTable with Object keys that preserves <nl> / / insertion order . There are Map and Set interfaces ( OrderedHashMap <nl> / / and OrderedHashTable , below ) . It is meant to be used by JSMap / JSSet . 
<nl> class OrderedHashTableBase : public FixedArray { <nl> / / [ 3 + NumberOfRemovedHoles ( ) . . length ] : Not used <nl> / / <nl> template < class Derived , int entrysize > <nl> - class OrderedHashTable : public OrderedHashTableBase { <nl> + class OrderedHashTable : public FixedArray { <nl> public : <nl> / / Returns an OrderedHashTable with a capacity of at least | capacity | . <nl> static Handle < Derived > Allocate ( Isolate * isolate , int capacity , <nl> class OrderedHashTable : public OrderedHashTableBase { <nl> static const int kEntrySize = entrysize + 1 ; <nl> static const int kChainOffset = entrysize ; <nl> <nl> + static const int kNotFound = - 1 ; <nl> + static const int kMinCapacity = 4 ; <nl> + <nl> + static const int kNumberOfElementsIndex = 0 ; <nl> + / / The next table is stored at the same index as the nof elements . <nl> + static const int kNextTableIndex = kNumberOfElementsIndex ; <nl> + static const int kNumberOfDeletedElementsIndex = kNumberOfElementsIndex + 1 ; <nl> + static const int kNumberOfBucketsIndex = kNumberOfDeletedElementsIndex + 1 ; <nl> + static const int kHashTableStartIndex = kNumberOfBucketsIndex + 1 ; <nl> + static const int kRemovedHolesIndex = kHashTableStartIndex ; <nl> + <nl> + static constexpr const int kNumberOfElementsOffset = <nl> + FixedArray : : OffsetOfElementAt ( kNumberOfElementsIndex ) ; <nl> + static constexpr const int kNextTableOffset = <nl> + FixedArray : : OffsetOfElementAt ( kNextTableIndex ) ; <nl> + static constexpr const int kNumberOfDeletedElementsOffset = <nl> + FixedArray : : OffsetOfElementAt ( kNumberOfDeletedElementsIndex ) ; <nl> + static constexpr const int kNumberOfBucketsOffset = <nl> + FixedArray : : OffsetOfElementAt ( kNumberOfBucketsIndex ) ; <nl> + static constexpr const int kHashTableStartOffset = <nl> + FixedArray : : OffsetOfElementAt ( kHashTableStartIndex ) ; <nl> + <nl> + static const int kLoadFactor = 2 ; <nl> + <nl> + / / NumberOfDeletedElements is set to kClearedTableSentinel when <nl> + / / the table is cleared , which allows iterator transitions to <nl> + / / optimize that case . <nl> + static const int kClearedTableSentinel = - 1 ; <nl> static const int kMaxCapacity = <nl> ( FixedArray : : kMaxLength - kHashTableStartIndex ) / <nl> ( 1 + ( kEntrySize * kLoadFactor ) ) ; <nl>
[ dict ] Remove ordered hash table base
v8/v8
f08e42e32d560f23156baa2c1857ab84dc92d810
2018-11-14T12:35:01Z
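A minimal standalone sketch (illustrative only, not V8 source) of the arithmetic behind the entry above: the constants folded into OrderedHashTable fix the backing FixedArray as [nof elements][nof deleted][nof buckets][buckets...][entries...], so the index where an entry starts is exactly the sum the NumberMultiply/NumberAdd nodes in ReduceCollectionIteratorPrototypeNext build. kHashTableStartIndex = kNumberOfBucketsIndex + 1 = 3, per the header in the diff.

```cpp
#include <cstdint>

// Backing-store layout, per the constants now defined on OrderedHashTable:
// [0] nof elements, [1] nof deleted elements, [2] nof buckets,
// then |number_of_buckets| hash-table slots, then the entries, each
// kEntrySize wide (entrysize data slots plus one chain slot).
constexpr int64_t kHashTableStartIndex = 3;  // kNumberOfBucketsIndex + 1

// Index at which entry |index| begins; the same sum the graph nodes compute:
// index * entry_size + number_of_buckets + kHashTableStartIndex.
int64_t EntryStartPosition(int64_t index, int64_t number_of_buckets,
                           int64_t entry_size) {
  return index * entry_size + number_of_buckets + kHashTableStartIndex;
}
```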
mmm a / caffe2 / python / onnx / backend . py <nl> ppp b / caffe2 / python / onnx / backend . py <nl> def _graph_to_net ( cls , onnx_graph , opset_version ) : <nl> c2ops = cls . _onnx_node_to_caffe2_op ( <nl> None , None , node , opset_version ) <nl> except Exception as e : <nl> - success = False <nl> print ( ' ONNX FATAL : ' , e ) <nl> continue <nl> net . op . extend ( c2ops . init_ops ) <nl>
Remove an unused variable found by linter
pytorch/pytorch
76da0b34c2bc5b77b603072839cf4ff71cc70ca7
2018-08-16T07:25:44Z
mmm a / src / php / ext / grpc / php_grpc . c <nl> ppp b / src / php / ext / grpc / php_grpc . c <nl> <nl> # include " call_credentials . h " <nl> # include " server_credentials . h " <nl> # include " completion_queue . h " <nl> + # include < grpc / support / alloc . h > <nl> + # include < grpc / support / log . h > <nl> + # include < grpc / support / string_util . h > <nl> + # include < grpc / support / time . h > <nl> # include < ext / spl / spl_exceptions . h > <nl> # include < zend_exceptions . h > <nl> <nl> ZEND_GET_MODULE ( grpc ) <nl> enable_fork_support , zend_grpc_globals , grpc_globals ) <nl> STD_PHP_INI_ENTRY ( " grpc . poll_strategy " , NULL , PHP_INI_SYSTEM , OnUpdateString , <nl> poll_strategy , zend_grpc_globals , grpc_globals ) <nl> + STD_PHP_INI_ENTRY ( " grpc . grpc_verbosity " , NULL , PHP_INI_SYSTEM , OnUpdateString , <nl> + grpc_verbosity , zend_grpc_globals , grpc_globals ) <nl> + STD_PHP_INI_ENTRY ( " grpc . grpc_trace " , NULL , PHP_INI_SYSTEM , OnUpdateString , <nl> + grpc_trace , zend_grpc_globals , grpc_globals ) <nl> + STD_PHP_INI_ENTRY ( " grpc . log_filename " , NULL , PHP_INI_SYSTEM , OnUpdateString , <nl> + log_filename , zend_grpc_globals , grpc_globals ) <nl> PHP_INI_END ( ) <nl> / * } } } * / <nl> <nl> void apply_ini_settings ( TSRMLS_D ) { <nl> strcat ( poll_str , GRPC_G ( poll_strategy ) ) ; <nl> putenv ( poll_str ) ; <nl> } <nl> + <nl> + if ( GRPC_G ( grpc_verbosity ) ) { <nl> + char * verbosity_str = malloc ( sizeof ( " GRPC_VERBOSITY = " ) + <nl> + strlen ( GRPC_G ( grpc_verbosity ) ) ) ; <nl> + strcpy ( verbosity_str , " GRPC_VERBOSITY = " ) ; <nl> + strcat ( verbosity_str , GRPC_G ( grpc_verbosity ) ) ; <nl> + putenv ( verbosity_str ) ; <nl> + } <nl> + <nl> + if ( GRPC_G ( grpc_trace ) ) { <nl> + char * trace_str = malloc ( sizeof ( " GRPC_TRACE = " ) + <nl> + strlen ( GRPC_G ( grpc_trace ) ) ) ; <nl> + strcpy ( trace_str , " GRPC_TRACE = " ) ; <nl> + strcat ( trace_str , GRPC_G ( grpc_trace ) ) ; <nl> + putenv ( trace_str ) ; <nl> + } <nl> + } <nl> + <nl> + static void custom_logger ( gpr_log_func_args * args ) { <nl> + TSRMLS_FETCH ( ) ; <nl> + <nl> + const char * final_slash ; <nl> + const char * display_file ; <nl> + char * prefix ; <nl> + char * final ; <nl> + gpr_timespec now = gpr_now ( GPR_CLOCK_REALTIME ) ; <nl> + <nl> + final_slash = strrchr ( args - > file , ' / ' ) ; <nl> + if ( final_slash ) { <nl> + display_file = final_slash + 1 ; <nl> + } else { <nl> + display_file = args - > file ; <nl> + } <nl> + <nl> + FILE * fp = fopen ( GRPC_G ( log_filename ) , " ab " ) ; <nl> + if ( ! fp ) { <nl> + return ; <nl> + } <nl> + <nl> + gpr_asprintf ( & prefix , " % s % ld . % 09 " PRId32 " % s : % d ] " , <nl> + gpr_log_severity_string ( args - > severity ) , now . tv_sec , <nl> + now . tv_nsec , display_file , args - > line ) ; <nl> + <nl> + gpr_asprintf ( & final , " % - 60s % s \ n " , prefix , args - > message ) ; <nl> + <nl> + fprintf ( fp , " % s " , final ) ; <nl> + fclose ( fp ) ; <nl> + gpr_free ( prefix ) ; <nl> + gpr_free ( final ) ; <nl> } <nl> <nl> / * { { { PHP_MINIT_FUNCTION <nl> PHP_MINFO_FUNCTION ( grpc ) { <nl> PHP_RINIT_FUNCTION ( grpc ) { <nl> if ( ! 
GRPC_G ( initialized ) ) { <nl> apply_ini_settings ( TSRMLS_C ) ; <nl> + if ( GRPC_G ( log_filename ) ) { <nl> + gpr_set_log_function ( custom_logger ) ; <nl> + } <nl> grpc_init ( ) ; <nl> register_fork_handlers ( ) ; <nl> grpc_php_init_completion_queue ( TSRMLS_C ) ; <nl> static PHP_GINIT_FUNCTION ( grpc ) { <nl> grpc_globals - > initialized = 0 ; <nl> grpc_globals - > enable_fork_support = 0 ; <nl> grpc_globals - > poll_strategy = NULL ; <nl> + grpc_globals - > grpc_verbosity = NULL ; <nl> + grpc_globals - > grpc_trace = NULL ; <nl> + grpc_globals - > log_filename = NULL ; <nl> } <nl> / * } } } * / <nl> <nl> mmm a / src / php / ext / grpc / php_grpc . h <nl> ppp b / src / php / ext / grpc / php_grpc . h <nl> ZEND_BEGIN_MODULE_GLOBALS ( grpc ) <nl> zend_bool initialized ; <nl> zend_bool enable_fork_support ; <nl> char * poll_strategy ; <nl> + char * grpc_verbosity ; <nl> + char * grpc_trace ; <nl> + char * log_filename ; <nl> ZEND_END_MODULE_GLOBALS ( grpc ) <nl> <nl> ZEND_EXTERN_MODULE_GLOBALS ( grpc ) ; <nl>
PHP : add custom logger
grpc/grpc
778c34d175a164163f8ad87a8bdde52980537834
2019-11-09T06:59:27Z
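A minimal sketch of the logging hook this commit installs, using only the gpr_* calls visible in the diff (gpr_set_log_function, gpr_log_severity_string, and the gpr_log_func_args fields). The filename here is a placeholder for the grpc.log_filename INI value; the timestamp formatting and path trimming done by the real custom_logger are omitted.

```cpp
#include <stdio.h>

#include <grpc/support/log.h>

// Placeholder for the grpc.log_filename INI value read by the extension.
static const char* g_log_filename = "/tmp/grpc.log";

// Appends one record per call, like custom_logger above but simplified.
// args carries the severity, source file and line, and the message.
static void file_logger(gpr_log_func_args* args) {
  FILE* fp = fopen(g_log_filename, "ab");
  if (!fp) return;  // drop the record if the log file cannot be opened
  fprintf(fp, "%s %s:%d] %s\n", gpr_log_severity_string(args->severity),
          args->file, args->line, args->message);
  fclose(fp);
}

// Installed once before grpc_init(), as PHP_RINIT does above:
//   gpr_set_log_function(file_logger);
```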
mmm a / etc / distributed_correctness . yml <nl> ppp b / etc / distributed_correctness . yml <nl> functions : <nl> set - e <nl> set - v <nl> cd . / clusters / $ { cluster } <nl> - cat ips . sh <nl> - . / update_run_config . sh <nl> - rm - rf . / reports <nl> - rm - f . . / . . / reports . tgz <nl> - echo " Run test for $ { test } with setup $ { setup } " <nl> - # run test <nl> + echo " Run test for $ { test } - $ { storageEngine } with setup $ { setup } " <nl> . . / . . / bin / run - $ { test } . sh $ { storageEngine } $ { setup } $ { cluster } <nl> - rm - f . . / perf . json <nl> - chmod 766 perf . json <nl> - cp . / perf . json . . <nl> - cd . . <nl> - pwd <nl> - cat perf . json <nl> echo " Done test for $ { test } with setup $ { setup } ! " <nl> - command : " json . send " <nl> params : <nl> tasks : <nl> <nl> - name : distributed_correctness_suite_1_MMAPv1 <nl> depends_on : <nl> - - name : distributed_correctness_suite_2_wiredTiger <nl> + - name : distributed_correctness_suite_1_wiredTiger <nl> status : " * " <nl> commands : <nl> - func : " prepare environment " <nl> tasks : <nl> - func : " upload log file " <nl> vars : <nl> test : " distributed_correctness_suite_1_MMAPv1 " <nl> - - func : " analyze " <nl> - <nl> - - name : distributed_correctness_suite_2_wiredTiger <nl> - depends_on : <nl> - - name : distributed_correctness_suite_1_wiredTiger <nl> - status : " * " <nl> - commands : <nl> - - func : " prepare environment " <nl> - - func : " restore cluster " <nl> - <nl> - - func : " run test " <nl> - vars : <nl> - storageEngine : " wiredTiger " <nl> - test : " distributed_correctness_suite_2 " <nl> - <nl> - - func : " make test log artifact " <nl> - - func : " upload log file " <nl> - vars : <nl> - test : " distributed_correctness_suite_2_wiredTiger " <nl> - - func : " analyze " <nl> - <nl> - - name : distributed_correctness_suite_2_MMAPv1 <nl> - depends_on : <nl> - - name : distributed_correctness_suite_1_MMAPv1 <nl> - status : " * " <nl> - commands : <nl> - - func : " prepare environment " <nl> - - func : " restore cluster " <nl> - <nl> - - func : " run test " <nl> - vars : <nl> - storageEngine : " mmapv1 " <nl> - test : " distributed_correctness_suite_2 " <nl> - <nl> - - func : " make test log artifact " <nl> - - func : " upload log file " <nl> - vars : <nl> - test : " distributed_correctness_suite_2_MMAPv1 " <nl> - func : " destroy cluster " <nl> - func : " analyze " <nl> <nl> buildvariants : <nl> distros : <nl> - rhel55 <nl> - name : distributed_correctness_suite_1_wiredTiger <nl> - - name : distributed_correctness_suite_2_wiredTiger <nl> - name : distributed_correctness_suite_1_MMAPv1 <nl> - - name : distributed_correctness_suite_2_MMAPv1 <nl> <nl> - name : linux - 3 - node - replSet <nl> display_name : Linux 3 - Node ReplSet <nl> buildvariants : <nl> - " rhel70 - perf - replset " <nl> tasks : <nl> - name : distributed_correctness_suite_1_wiredTiger <nl> - - name : distributed_correctness_suite_2_wiredTiger <nl> - name : distributed_correctness_suite_1_MMAPv1 <nl> - - name : distributed_correctness_suite_2_MMAPv1 <nl> mmm a / etc / longevity . yml <nl> ppp b / etc / longevity . yml <nl> functions : <nl> set - e <nl> set - v <nl> cd . / clusters / $ { cluster } <nl> - # show cluster details <nl> - cat ips . sh <nl> - . / update_run_config . sh <nl> - cat run - $ { test } . json <nl> - # clean up old artifact from the last test <nl> - rm - f . . / perf . json <nl> - rm - rf . / reports <nl> - rm - f . . / . . / reports . 
tgz <nl> - echo " Run test for $ { test } with setup $ { setup } " <nl> - # run test <nl> + echo " Run test for $ { test } - $ { storageEngine } with setup $ { setup } " <nl> . . / . . / bin / run - $ { test } . sh $ { storageEngine } $ { setup } $ { cluster } <nl> - mv perf . json . . <nl> cd . . <nl> cat perf . json > > perf_all . json <nl> echo " Complete test for $ { test } with setup $ { setup } ! " <nl> tasks : <nl> use_csrs : " true " <nl> - func : " run test " <nl> vars : <nl> + storageEngine : " wiredTiger " <nl> test : " ycsb " <nl> - func : " make test log artifact " <nl> - func : " upload log file " <nl> tasks : <nl> use_csrs : " true " <nl> - func : " run test " <nl> vars : <nl> - test : " ycsb - mmap " <nl> + storageEngine : " mmapv1 " <nl> + test : " ycsb " <nl> - func : " make test log artifact " <nl> - func : " upload log file " <nl> vars : <nl> mmm a / etc / system_perf . yml <nl> ppp b / etc / system_perf . yml <nl> functions : <nl> set - e <nl> set - v <nl> cd . / clusters / $ { cluster } <nl> - cat ips . sh <nl> - cat run - $ { test } . json <nl> - . / update_run_config . sh <nl> - cat run - $ { test } . json <nl> - rm - rf . / reports <nl> - rm - f . . / . . / reports . tgz <nl> - echo " Run test for $ { test } with setup $ { setup } " <nl> - # run test <nl> + echo " Run test for $ { test } - $ { storageEngine } with setup $ { setup } " <nl> . . / . . / bin / run - $ { test } . sh $ { storageEngine } $ { setup } $ { cluster } <nl> - rm - f . . / perf . json <nl> - chmod 766 perf . json <nl> - cp . / perf . json . . <nl> - cd . . <nl> - pwd <nl> - cat perf . json <nl> - echo " Done test for $ { test } with setup $ { setup } ! " <nl> + echo " Done test for $ { test } - $ { storageEngine } with setup $ { setup } ! " <nl> - command : " json . send " <nl> params : <nl> name : " perf " <nl> tasks : <nl> - func : " run test " <nl> vars : <nl> storageEngine : " mmapv1 " <nl> - test : " ycsb - mmap " <nl> + test : " ycsb " <nl> - func : " make test log artifact " <nl> - func : " upload log file " <nl> vars : <nl> tasks : <nl> - func : " run test " <nl> vars : <nl> storageEngine : " mmapv1 " <nl> - test : " benchRun - mmap " <nl> + test : " benchRun " <nl> - func : " make test log artifact " <nl> - func : " upload log file " <nl> vars : <nl>
SERVER - 23642 system_perf . yml refactoring
mongodb/mongo
3bead6a7dee0c224242463d75acda511e8e8bb09
2016-04-18T19:32:29Z
mmm a / Protobuf . podspec <nl> ppp b / Protobuf . podspec <nl> <nl> # dependent projects use the : git notation to refer to the library . <nl> Pod : : Spec . new do | s | <nl> s . name = ' Protobuf ' <nl> - s . version = ' 3 . 0 . 0 - alpha - 4 - pre ' <nl> + s . version = ' 3 . 0 . 0 - alpha - 4 ' <nl> s . summary = ' Protocol Buffers v . 3 runtime library for Objective - C . ' <nl> s . homepage = ' https : / / github . com / google / protobuf ' <nl> s . license = ' New BSD ' <nl> mmm a / configure . ac <nl> ppp b / configure . ac <nl> AC_PREREQ ( 2 . 59 ) <nl> # In the SVN trunk , the version should always be the next anticipated release <nl> # version with the " - pre " suffix . ( We used to use " - SNAPSHOT " but this pushed <nl> # the size of one file name in the dist tarfile over the 99 - char limit . ) <nl> - AC_INIT ( [ Protocol Buffers ] , [ 3 . 0 . 0 - alpha - 4 - pre ] , [ protobuf @ googlegroups . com ] , [ protobuf ] ) <nl> + AC_INIT ( [ Protocol Buffers ] , [ 3 . 0 . 0 - beta - 1 ] , [ protobuf @ googlegroups . com ] , [ protobuf ] ) <nl> <nl> AM_MAINTAINER_MODE ( [ enable ] ) <nl> <nl> mmm a / java / pom . xml <nl> ppp b / java / pom . xml <nl> <nl> < / parent > <nl> < groupId > com . google . protobuf < / groupId > <nl> < artifactId > protobuf - java < / artifactId > <nl> - < version > 3 . 0 . 0 - alpha - 4 - pre < / version > <nl> + < version > 3 . 0 . 0 - beta - 1 < / version > <nl> < packaging > bundle < / packaging > <nl> < name > Protocol Buffer Java API < / name > <nl> < description > <nl> <nl> < instructions > <nl> < Bundle - DocURL > https : / / developers . google . com / protocol - buffers / < / Bundle - DocURL > <nl> < Bundle - SymbolicName > com . google . protobuf < / Bundle - SymbolicName > <nl> - < Export - Package > com . google . protobuf ; version = 3 . 0 . 0 - alpha - 3 < / Export - Package > <nl> + < Export - Package > com . google . protobuf ; version = 3 . 0 . 0 - beta - 1 < / Export - Package > <nl> < / instructions > <nl> < / configuration > <nl> < / plugin > <nl> mmm a / java / util / pom . xml <nl> ppp b / java / util / pom . xml <nl> <nl> < / parent > <nl> < groupId > com . google . protobuf < / groupId > <nl> < artifactId > protobuf - java - util < / artifactId > <nl> - < version > 3 . 0 . 0 - alpha - 4 - pre < / version > <nl> + < version > 3 . 0 . 0 - beta - 1 < / version > <nl> < packaging > bundle < / packaging > <nl> < name > Protocol Buffer Java API < / name > <nl> < description > <nl> <nl> < dependency > <nl> < groupId > com . google . protobuf < / groupId > <nl> < artifactId > protobuf - java < / artifactId > <nl> - < version > 3 . 0 . 0 - alpha - 4 - pre < / version > <nl> + < version > 3 . 0 . 0 - beta - 1 < / version > <nl> < scope > compile < / scope > <nl> < / dependency > <nl> < dependency > <nl> <nl> < instructions > <nl> < Bundle - DocURL > https : / / developers . google . com / protocol - buffers / < / Bundle - DocURL > <nl> < Bundle - SymbolicName > com . google . protobuf . util < / Bundle - SymbolicName > <nl> - < Export - Package > com . google . protobuf . util ; version = 3 . 0 . 0 - alpha - 3 < / Export - Package > <nl> + < Export - Package > com . google . protobuf . util ; version = 3 . 0 . 0 - beta - 1 < / Export - Package > <nl> < / instructions > <nl> < / configuration > <nl> < / plugin > <nl> mmm a / javanano / pom . xml <nl> ppp b / javanano / pom . xml <nl> <nl> < / parent > <nl> < groupId > com . google . protobuf . 
nano < / groupId > <nl> < artifactId > protobuf - javanano < / artifactId > <nl> - < version > 3 . 0 . 0 - alpha - 4 - pre < / version > <nl> + < version > 3 . 0 . 0 - alpha - 4 < / version > <nl> < packaging > bundle < / packaging > <nl> < name > Protocol Buffer JavaNano API < / name > <nl> < description > <nl> <nl> < instructions > <nl> < Bundle - DocURL > https : / / developers . google . com / protocol - buffers / < / Bundle - DocURL > <nl> < Bundle - SymbolicName > com . google . protobuf < / Bundle - SymbolicName > <nl> - < Export - Package > com . google . protobuf ; version = 3 . 0 . 0 - alpha - 3 < / Export - Package > <nl> + < Export - Package > com . google . protobuf ; version = 3 . 0 . 0 - alpha - 4 < / Export - Package > <nl> < / instructions > <nl> < / configuration > <nl> < / plugin > <nl> mmm a / protoc - artifacts / pom . xml <nl> ppp b / protoc - artifacts / pom . xml <nl> <nl> < / parent > <nl> < groupId > com . google . protobuf < / groupId > <nl> < artifactId > protoc < / artifactId > <nl> - < version > 3 . 0 . 0 - alpha - 4 - pre < / version > <nl> + < version > 3 . 0 . 0 - beta - 1 < / version > <nl> < packaging > pom < / packaging > <nl> < name > Protobuf Compiler < / name > <nl> < description > <nl> mmm a / python / google / protobuf / __init__ . py <nl> ppp b / python / google / protobuf / __init__ . py <nl> <nl> <nl> # Copyright 2007 Google Inc . All Rights Reserved . <nl> <nl> - __version__ = ' 3 . 0 . 0a4 . dev0 ' <nl> + __version__ = ' 3 . 0 . 0a4 ' <nl> mmm a / ruby / Gemfile . lock <nl> ppp b / ruby / Gemfile . lock <nl> <nl> PATH <nl> remote : . <nl> specs : <nl> - google - protobuf ( 3 . 0 . 0 . alpha . 3 ) <nl> + google - protobuf ( 3 . 0 . 0 . alpha . 4 ) <nl> <nl> GEM <nl> remote : https : / / rubygems . org / <nl> mmm a / ruby / google - protobuf . gemspec <nl> ppp b / ruby / google - protobuf . gemspec <nl> <nl> Gem : : Specification . new do | s | <nl> s . name = " google - protobuf " <nl> - s . version = " 3 . 0 . 0 . alpha . 4 . 0 . pre " <nl> + s . version = " 3 . 0 . 0 . alpha . 4 . 0 " <nl> s . licenses = [ " BSD " ] <nl> s . summary = " Protocol Buffers " <nl> s . description = " Protocol Buffers are Google ' s data interchange format . " <nl> mmm a / ruby / pom . xml <nl> ppp b / ruby / pom . xml <nl> <nl> < dependency > <nl> < groupId > com . google . protobuf < / groupId > <nl> < artifactId > protobuf - java < / artifactId > <nl> - < version > 3 . 0 . 0 - alpha - 2 < / version > <nl> + < version > 3 . 0 . 0 - beta - 1 < / version > <nl> < / dependency > <nl> < / dependencies > <nl> < / project > <nl>
Merge pull request from xfxyjwf / version_number
protocolbuffers/protobuf
08da3d4997cbd96ff8f8685c5aa0413a42a14b04
2015-08-26T22:31:00Z
mmm a / CHANGELOG <nl> ppp b / CHANGELOG <nl> v2 . 6 . 0 ( XXXX - XX - XX ) <nl> <nl> * added ` extendible ` package . <nl> <nl> + * added Foxx model lifecycle events to repositories . See # 1257 . <nl> + <nl> <nl> v2 . 5 . 2 ( XXXX - XX - XX ) <nl> mmmmmmmmmmmmmmmmmm - <nl> mmm a / Documentation / Books / Users / Foxx / Develop / Model . mdpp <nl> ppp b / Documentation / Books / Users / Foxx / Develop / Model . mdpp <nl> var PersonModel = Foxx . Model . extend ( { <nl> exports . model = PersonModel ; <nl> ` ` ` <nl> <nl> + You can also use ` joi . object ` schemas directly : <nl> + <nl> + ` ` ` js <nl> + var PersonModel = Foxx . Model . extend ( { <nl> + schema : joi . object ( ) . keys ( { <nl> + name : joi . string ( ) . required ( ) , <nl> + age : joi . number ( ) . integer ( ) , <nl> + active : joi . boolean ( ) . default ( true ) <nl> + } ) <nl> + } ) ; <nl> + ` ` ` <nl> + <nl> This has two effects : On the one hand it provides documentation . If you annotated <nl> your model , you can use it in the * * bodyParam * * method for documentation . <nl> On the other hand it will influence the behavior of the constructor : If you provide <nl> The following events are emitted by a model : <nl> - beforeRemove <nl> - afterRemove <nl> <nl> + Equivalent events will also be emitted by the repository handling the model . <nl> + <nl> Model lifecycle : <nl> <nl> ` ` ` js <nl> mmm a / Documentation / Books / Users / Foxx / Develop / Repository . mdpp <nl> ppp b / Documentation / Books / Users / Foxx / Develop / Repository . mdpp <nl> var TodosRepository = Foxx . Repository . extend ( { <nl> exports . repository = TodosRepository ; <nl> ` ` ` <nl> <nl> + The following events are emitted by a repository : <nl> + <nl> + - beforeCreate <nl> + - afterCreate <nl> + - beforeSave <nl> + - afterSave <nl> + - beforeUpdate <nl> + - afterUpdate <nl> + - beforeRemove <nl> + - afterRemove <nl> + <nl> + Model lifecycle : <nl> + <nl> + ` ` ` js <nl> + var person = new PersonModel ( ) ; <nl> + person . on ( ' beforeCreate ' , function ( ) { <nl> + var model = this ; <nl> + model . fancyMethod ( ) ; / / Do something fancy with the model <nl> + } ) ; <nl> + var people = new Repository ( appContext . collection ( " people " ) , { model : PersonModel } ) ; <nl> + <nl> + people . save ( person ) ; <nl> + / / beforeCreate ( person ) <nl> + / / beforeSave ( person ) <nl> + / / The model is created at db <nl> + / / afterSave ( person ) <nl> + / / afterCreate ( person ) <nl> + <nl> + people . update ( person , data ) ; <nl> + / / beforeUpdate ( person , data ) <nl> + / / beforeSave ( person , data ) <nl> + / / The model is updated at db <nl> + / / afterSave ( person , data ) <nl> + / / afterUpdate ( person , data ) <nl> + <nl> + people . remove ( person ) ; <nl> + / / beforeRemove ( person ) <nl> + / / The model is deleted at db <nl> + / / afterRemove ( person ) <nl> + ` ` ` <nl> + <nl> ! SUBSECTION Initialize <nl> <nl> @ startDocuBlock JSF_foxx_repository_initializer <nl> mmm a / UnitTests / Makefile . unittests <nl> ppp b / UnitTests / Makefile . unittests <nl> SHELL_SERVER_ONLY = \ <nl> @ top_srcdir @ / js / server / tests / shell - database - noncluster . js \ <nl> @ top_srcdir @ / js / server / tests / shell - foxx . js \ <nl> @ top_srcdir @ / js / server / tests / shell - foxx - repository - spec . js \ <nl> + @ top_srcdir @ / js / server / tests / shell - foxx - repository - events - spec . js \ <nl> @ top_srcdir @ / js / server / tests / shell - foxx - query - spec . 
js \ <nl> @ top_srcdir @ / js / server / tests / shell - foxx - model . js \ <nl> @ top_srcdir @ / js / server / tests / shell - foxx - model - events - spec . js \ <nl> mmm a / js / server / modules / org / arangodb / foxx / model . js <nl> ppp b / js / server / modules / org / arangodb / foxx / model . js <nl> var Model , <nl> / / / @ endDocuBlock <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> <nl> - excludeExtraAttributes = function ( attributes , Model ) { <nl> + excludeExtraAttributes = function ( attributes , model ) { <nl> ' use strict ' ; <nl> - var extraAttributeNames ; <nl> - if ( Model . prototype . schema ) { <nl> - extraAttributeNames = _ . difference ( <nl> - _ . keys ( metadataSchema ) , <nl> - _ . keys ( Model . prototype . schema ) <nl> - ) ; <nl> + if ( ! model . schema ) { <nl> + return _ . clone ( attributes ) ; <nl> } <nl> - return _ . omit ( attributes , extraAttributeNames ) ; <nl> + return _ . omit ( attributes , _ . difference ( <nl> + _ . keys ( metadataSchema ) , <nl> + _ . keys ( model . schema ) <nl> + ) ) ; <nl> } ; <nl> <nl> Model = function ( attributes ) { <nl> Model = function ( attributes ) { <nl> <nl> this . errors = { } ; <nl> <nl> - var instance = this ; <nl> - if ( instance . schema ) { <nl> + if ( this . schema ) { <nl> + if ( this . schema . isJoi ) { <nl> + this . schema = _ . object ( _ . map ( this . schema . _inner . children , function ( prop ) { <nl> + return [ prop . key , prop . schema ] ; <nl> + } ) ) ; <nl> + } <nl> _ . each ( <nl> - _ . union ( _ . keys ( instance . schema ) , _ . keys ( attributes ) ) , <nl> - function ( attributeName ) { <nl> - instance . set ( attributeName , attributes ? attributes [ attributeName ] : undefined ) ; <nl> - } <nl> + _ . union ( _ . keys ( this . schema ) , _ . keys ( attributes ) ) , <nl> + function ( key ) { <nl> + this . set ( key , attributes & & attributes [ key ] ) ; <nl> + } , <nl> + this <nl> ) ; <nl> } else if ( attributes ) { <nl> - instance . attributes = _ . clone ( attributes ) ; <nl> + this . attributes = _ . clone ( attributes ) ; <nl> } <nl> - EventEmitter . call ( instance ) ; <nl> + EventEmitter . call ( this ) ; <nl> } ; <nl> <nl> util . inherits ( Model , EventEmitter ) ; <nl> <nl> Model . fromClient = function ( attributes ) { <nl> ' use strict ' ; <nl> - return new this ( excludeExtraAttributes ( attributes , this ) ) ; <nl> + var model = new this ( ) ; <nl> + model . set ( excludeExtraAttributes ( attributes , model ) ) ; <nl> + return model ; <nl> } ; <nl> <nl> / / Instance Properties <nl> _ . extend ( Model . prototype , { <nl> _ . each ( attributeName , function ( value , key ) { <nl> this . set ( key , value ) ; <nl> } , this ) ; <nl> - return ; <nl> + return this ; <nl> } <nl> <nl> if ( this . schema ) { <nl> - var schema = ( <nl> - this . schema [ attributeName ] | | <nl> - metadataSchema [ attributeName ] | | <nl> - joi . forbidden ( ) <nl> - ) , <nl> - result = ( <nl> - schema . isJoi ? schema : joi . object ( ) . keys ( schema ) <nl> - ) . validate ( value ) ; <nl> + var schema = this . schema [ attributeName ] | | metadataSchema [ attributeName ] | | joi . forbidden ( ) ; <nl> + var result = schema . validate ( value ) ; <nl> <nl> if ( result . error ) { <nl> this . errors [ attributeName ] = result . error ; <nl> _ . extend ( Model . prototype , { <nl> this . isValid = true ; <nl> } <nl> } <nl> + <nl> if ( result . 
value = = = undefined ) { <nl> delete this . attributes [ attributeName ] ; <nl> } else { <nl> _ . extend ( Model . prototype , { <nl> } else { <nl> this . attributes [ attributeName ] = value ; <nl> } <nl> + <nl> + return this ; <nl> } , <nl> <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> _ . extend ( Model . prototype , { <nl> <nl> forClient : function ( ) { <nl> ' use strict ' ; <nl> - return excludeExtraAttributes ( this . attributes , this . constructor ) ; <nl> + return excludeExtraAttributes ( this . attributes , this ) ; <nl> } <nl> } ) ; <nl> <nl> mmm a / js / server / modules / org / arangodb / foxx / repository . js <nl> ppp b / js / server / modules / org / arangodb / foxx / repository . js <nl> var Repository , <nl> ArangoError = arangodb . ArangoError , <nl> ArangoCollection = arangodb . ArangoCollection , <nl> errors = arangodb . errors , <nl> - extend = require ( ' extendible ' ) ; <nl> + extend = require ( ' extendible ' ) , <nl> + EventEmitter = require ( ' events ' ) . EventEmitter , <nl> + util = require ( ' util ' ) ; <nl> <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> / / / @ startDocuBlock JSF_foxx_repository_initializer <nl> Repository = function ( collection , opts ) { <nl> this . collection . ensureIndex ( index ) ; <nl> } , this ) ; <nl> } <nl> + <nl> + EventEmitter . call ( this ) ; <nl> } ; <nl> <nl> + util . inherits ( Repository , EventEmitter ) ; <nl> + <nl> / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl> / / - - SECTION - - Methods <nl> / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl> _ . extend ( Repository . prototype , { <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> save : function ( model ) { <nl> ' use strict ' ; <nl> + this . emit ( ' beforeCreate ' , model ) ; <nl> model . emit ( ' beforeCreate ' ) ; <nl> + this . emit ( ' beforeSave ' , model ) ; <nl> model . emit ( ' beforeSave ' ) ; <nl> var id_and_rev = this . collection . save ( model . forDB ( ) ) ; <nl> model . set ( id_and_rev ) ; <nl> + this . emit ( ' afterSave ' , model ) ; <nl> model . emit ( ' afterSave ' ) ; <nl> + this . emit ( ' afterCreate ' , model ) ; <nl> model . emit ( ' afterCreate ' ) ; <nl> return model ; <nl> } , <nl> _ . extend ( Repository . prototype , { <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> remove : function ( model ) { <nl> ' use strict ' ; <nl> + this . emit ( ' beforeRemove ' , model ) ; <nl> model . emit ( ' beforeRemove ' ) ; <nl> var id = model . get ( ' _id ' ) , <nl> result = this . collection . remove ( id ) ; <nl> + this . emit ( ' afterRemove ' , model ) ; <nl> model . emit ( ' afterRemove ' ) ; <nl> return result ; <nl> } , <nl> _ . extend ( Repository . prototype , { <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> update : function ( model , data ) { <nl> ' use strict ' ; <nl> + this . emit ( ' beforeUpdate ' , model , data ) ; <nl> model . 
emit ( ' beforeUpdate ' , data ) ; <nl> + this . emit ( ' beforeSave ' , model , data ) ; <nl> model . emit ( ' beforeSave ' , data ) ; <nl> var id = model . get ( " _id " ) | | model . get ( " _key " ) , <nl> id_and_rev = this . collection . update ( id , data ) ; <nl> model . set ( data ) ; <nl> model . set ( id_and_rev ) ; <nl> + this . emit ( ' afterSave ' , model , data ) ; <nl> model . emit ( ' afterSave ' , data ) ; <nl> + this . emit ( ' afterUpdate ' , model , data ) ; <nl> model . emit ( ' afterUpdate ' , data ) ; <nl> return model ; <nl> } , <nl> mmm a / js / server / tests / shell - foxx - model . js <nl> ppp b / js / server / tests / shell - foxx - model . js <nl> function ModelSpec ( ) { <nl> assertTrue ( instance . isValid ) ; <nl> } , <nl> <nl> + testJoiObject : function ( ) { <nl> + var Model = FoxxModel . extend ( { <nl> + schema : joi . object ( ) . keys ( { <nl> + lol : joi . string ( ) <nl> + } ) <nl> + } ) ; <nl> + <nl> + instance = new Model ( { lol : 5 } ) ; <nl> + assertEqual ( _ . keys ( instance . attributes ) . length , 1 ) ; <nl> + assertEqual ( instance . get ( " lol " ) , 5 ) ; <nl> + assertEqual ( _ . keys ( instance . errors ) . length , 1 ) ; <nl> + assertFalse ( instance . isValid ) ; <nl> + } , <nl> + <nl> + testAttributeDefaults : function ( ) { <nl> + var special = function ( ) { <nl> + return 42 ; <nl> + } ; <nl> + <nl> + var Model = FoxxModel . extend ( { <nl> + schema : { <nl> + aString : joi . any ( ) . default ( " potato " ) , <nl> + special : joi . any ( ) . default ( special , " current date " ) <nl> + } <nl> + } ) ; <nl> + <nl> + instance = new Model ( ) ; <nl> + assertEqual ( instance . get ( " aString " ) , " potato " ) ; <nl> + assertEqual ( instance . get ( " special " ) , special ( ) ) ; <nl> + } , <nl> + <nl> testCoerceAttributes : function ( ) { <nl> var Model = FoxxModel . extend ( { <nl> schema : { <nl> new file mode 100644 <nl> index 00000000000 . . 03e4ad32f45 <nl> mmm / dev / null <nl> ppp b / js / server / tests / shell - foxx - repository - events - spec . js <nl> <nl> + / * global require , describe , expect , it , beforeEach , createSpyObj * / <nl> + <nl> + var FoxxRepository = require ( " org / arangodb / foxx / repository " ) . Repository , <nl> + Model = require ( " org / arangodb / foxx / model " ) . Model ; <nl> + <nl> + describe ( ' Model Events ' , function ( ) { <nl> + ' use strict ' ; <nl> + <nl> + var collection , instance , repository ; <nl> + <nl> + beforeEach ( function ( ) { <nl> + collection = createSpyObj ( ' collection ' , [ <nl> + ' update ' , <nl> + ' save ' , <nl> + ' remove ' <nl> + ] ) ; <nl> + instance = new Model ( ) ; <nl> + repository = new FoxxRepository ( collection , { model : Model , random : ' ' , beforeCalled : false , afterCalled : false } ) ; <nl> + } ) ; <nl> + <nl> + it ( ' should be possible to subscribe and emit events ' , function ( ) { <nl> + expect ( repository . on ) . toBeDefined ( ) ; <nl> + expect ( repository . emit ) . toBeDefined ( ) ; <nl> + } ) ; <nl> + <nl> + it ( ' should emit beforeCreate and afterCreate events when creating the model ' , function ( ) { <nl> + addHooks ( repository , instance , ' Create ' ) ; <nl> + expect ( repository . save ( instance ) ) . toEqual ( instance ) ; <nl> + expect ( repository . beforeCalled ) . toBe ( true ) ; <nl> + expect ( repository . afterCalled ) . 
toBe ( true ) ; <nl> + } ) ; <nl> + <nl> + it ( ' should emit beforeSave and afterSave events when creating the model ' , function ( ) { <nl> + addHooks ( repository , instance , ' Save ' ) ; <nl> + expect ( repository . save ( instance ) ) . toEqual ( instance ) ; <nl> + expect ( repository . beforeCalled ) . toBe ( true ) ; <nl> + expect ( repository . afterCalled ) . toBe ( true ) ; <nl> + } ) ; <nl> + <nl> + it ( ' should emit beforeUpdate and afterUpdate events when updating the model ' , function ( ) { <nl> + var newData = { newAttribute : ' test ' } ; <nl> + addHooks ( repository , instance , ' Update ' , newData ) ; <nl> + expect ( repository . update ( instance , newData ) ) . toEqual ( instance ) ; <nl> + expect ( repository . beforeCalled ) . toBe ( true ) ; <nl> + expect ( repository . afterCalled ) . toBe ( true ) ; <nl> + } ) ; <nl> + <nl> + it ( ' should emit beforeSave and afterSave events when updating the model ' , function ( ) { <nl> + var newData = { newAttribute : ' test ' } ; <nl> + addHooks ( repository , instance , ' Save ' , newData ) ; <nl> + expect ( repository . update ( instance , newData ) ) . toEqual ( instance ) ; <nl> + expect ( repository . beforeCalled ) . toBe ( true ) ; <nl> + expect ( repository . afterCalled ) . toBe ( true ) ; <nl> + } ) ; <nl> + <nl> + it ( ' should emit beforeRemove and afterRemove events when removing the model ' , function ( ) { <nl> + addHooks ( repository , instance , ' Remove ' ) ; <nl> + repository . remove ( instance ) ; <nl> + expect ( repository . beforeCalled ) . toBe ( true ) ; <nl> + expect ( repository . afterCalled ) . toBe ( true ) ; <nl> + } ) ; <nl> + <nl> + } ) ; <nl> + <nl> + function addHooks ( repo , model , ev , dataToReceive ) { <nl> + ' use strict ' ; <nl> + <nl> + var random = String ( Math . floor ( Math . random ( ) * 1000 ) ) ; <nl> + <nl> + repo . on ( ' before ' + ev , function ( self , data ) { <nl> + expect ( this ) . toEqual ( repo ) ; <nl> + expect ( self ) . toEqual ( model ) ; <nl> + expect ( data ) . toEqual ( dataToReceive ) ; <nl> + this . random = random ; <nl> + this . beforeCalled = true ; <nl> + } ) ; <nl> + repo . on ( ' after ' + ev , function ( self , data ) { <nl> + expect ( this ) . toEqual ( repo ) ; <nl> + expect ( self ) . toEqual ( model ) ; <nl> + expect ( data ) . toEqual ( dataToReceive ) ; <nl> + this . afterCalled = true ; <nl> + expect ( this . beforeCalled ) . toBe ( true ) ; <nl> + expect ( this . random ) . toEqual ( random ) ; <nl> + } ) ; <nl> + } <nl> \ No newline at end of file <nl>
Merge pull request from arangodb / vulpine - grace
arangodb/arangodb
ecbc6f581f94b4accd02d5ae247d8a43f606bc4a
2015-04-08T01:23:55Z
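The pattern the commit above adds is worth isolating: the repository now emits its own before/after event pair around each model operation, with the "after" events unwound in reverse order. A language-agnostic C++ sketch of that bracketing (illustrative, not ArangoDB code; the Emitter class is a stand-in for Node's EventEmitter):

```cpp
#include <functional>
#include <map>
#include <string>
#include <vector>

// Minimal event emitter standing in for Node's EventEmitter.
class Emitter {
 public:
  void on(const std::string& ev, std::function<void()> fn) {
    handlers_[ev].push_back(std::move(fn));
  }
  void emit(const std::string& ev) {
    for (auto& fn : handlers_[ev]) fn();
  }

 private:
  std::map<std::string, std::vector<std::function<void()>>> handlers_;
};

// save() brackets the write with two event pairs, unwound in reverse,
// matching the emit calls added to Repository.prototype.save above.
struct Repository : Emitter {
  void save() {
    emit("beforeCreate");  // create-specific hook first
    emit("beforeSave");    // then the generic save hook
    // ... persist the model to the collection here ...
    emit("afterSave");
    emit("afterCreate");
  }
};
```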
mmm a / tensorflow / core / util / mkl_util . h <nl> ppp b / tensorflow / core / util / mkl_util . h <nl> class MklDnnData { <nl> } <nl> } ; <nl> <nl> - / / / Base class for operations with reuse of primitives <nl> + / / / Base class for operations with reuse of primitives <nl> / / / <nl> class MklPrimitive { <nl> public : <nl> class FactoryKeyCreator { <nl> static inline memory : : format get_desired_format ( int channel ) { <nl> memory : : format fmt_desired = memory : : format : : any ; <nl> <nl> - if ( port : : TestCPUFeature ( port : : CPUFeature : : AVX512F ) & & <nl> - ( channel % 16 ) = = 0 ) { <nl> - fmt_desired = memory : : format : : nChw16c ; <nl> - } else if ( port : : TestCPUFeature ( port : : CPUFeature : : AVX2 ) & & <nl> - ( channel % 8 ) = = 0 ) { <nl> - fmt_desired = memory : : format : : nChw8c ; <nl> + if ( port : : TestCPUFeature ( port : : CPUFeature : : AVX512F ) <nl> + & & ( channel % 16 ) = = 0 ) { <nl> + fmt_desired = memory : : format : : nChw16c ; <nl> + } else if ( port : : TestCPUFeature ( port : : CPUFeature : : AVX2 ) <nl> + & & ( channel % 8 ) = = 0 ) { <nl> + fmt_desired = memory : : format : : nChw8c ; <nl> } else { <nl> fmt_desired = memory : : format : : nchw ; <nl> } <nl>
minor code style fix
tensorflow/tensorflow
834f0fabf73bae7c3abc0ab0a37296dfe2848298
2018-05-23T05:00:47Z
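The restyled helper's selection logic, as a standalone sketch: the blocked layouts nChw16c and nChw8c pack 16 or 8 channels per block so one load fills an AVX-512 or AVX2 vector register, which only works when the channel count is a multiple of the block width. The enum and the boolean parameters are illustrative stand-ins for mkldnn's memory::format and the port::TestCPUFeature probes.

```cpp
// Stand-ins for mkldnn's memory::format values used by get_desired_format.
enum class Format { nchw, nChw8c, nChw16c };

// has_avx512f / has_avx2 stand in for the CPU-feature probes. Fall back to
// plain nchw when the channel count does not divide the block width.
Format DesiredFormat(int channels, bool has_avx512f, bool has_avx2) {
  if (has_avx512f && channels % 16 == 0) return Format::nChw16c;
  if (has_avx2 && channels % 8 == 0) return Format::nChw8c;
  return Format::nchw;
}
```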
mmm a / tools / CMakeLists . txt <nl> ppp b / tools / CMakeLists . txt <nl> foreach ( source $ { srcs } ) <nl> endif ( ) <nl> <nl> # Install <nl> - install ( TARGETS $ { name } DESTINATION bin ) <nl> + install ( TARGETS $ { name } DESTINATION $ { CMAKE_INSTALL_BINDIR } ) <nl> + <nl> endforeach ( source ) <nl>
fix install path with GNUInstallDir support
BVLC/caffe
581650b18d7580df726d1d6d54d83c397d1379bb
2016-05-30T04:22:42Z
mmm a / doc / bash_completion / aria2c <nl> ppp b / doc / bash_completion / aria2c <nl> _aria2c ( ) <nl> esac <nl> case $ cur in <nl> - * ) <nl> - COMPREPLY = ( $ ( compgen - W ' - - rpc - save - upload - metadata - - rpc - save - upload - metadata = false - - on - download - start - - metalink - language - - rpc - secret - - torrent - file - - enable - peer - exchange - - enable - peer - exchange = false - - http - proxy - passwd - - bt - tracker - timeout - - ftp - type - - seed - time - - keep - unfinished - download - result - - keep - unfinished - download - result = false - - bt - tracker - connect - timeout - - bt - max - open - files - - no - netrc - - no - netrc = false - - force - sequential - - force - sequential = false - - metalink - base - uri - - private - key - - ftp - passwd - - allow - overwrite - - allow - overwrite = false - - rpc - allow - origin - all - - rpc - allow - origin - all = false - - bt - detach - seed - only - - bt - detach - seed - only = false - - dht - entry - point6 - - summary - interval - - lowest - speed - limit - - bt - tracker - interval - - proxy - method - - metalink - preferred - protocol - - enable - http - keep - alive - - enable - http - keep - alive = false - - metalink - version - - stderr - - stderr = false - - bt - lpd - interface - - force - save - - force - save = false - - rpc - secure - - rpc - secure = false - - listen - port - - rpc - private - key - - server - stat - of - - server - stat - timeout - - https - proxy - user - - piece - length - - dry - run - - dry - run = false - - truncate - console - readout - - truncate - console - readout = false - - save - not - found - - save - not - found = false - - async - dns - server - - bt - max - peers - - max - overall - upload - limit - - rpc - user - - optimize - concurrent - downloads - - optimize - concurrent - downloads = true - - optimize - concurrent - downloads = false - - optimize - concurrent - downloads = A : B - - dir - - split - - on - download - pause - - auto - file - renaming - - auto - file - renaming = false - - http - proxy - - save - session - interval - - daemon - - daemon = false - - https - proxy - - min - tls - version - - save - cookies - - out - - rlimit - nofile - - max - file - not - found - - on - download - stop - - certificate - - bt - min - crypto - level - - remove - control - file - - remove - control - file = false - - enable - dht - - enable - dht = false - - file - allocation - - follow - metalink - - on - bt - download - complete - - ftp - proxy - - show - files - - show - files = false - - timeout - - bt - hash - check - seed - - bt - hash - check - seed = false - - ftp - pasv - - ftp - pasv = false - - check - certificate - - check - certificate = false - - always - resume - - always - resume = false - - load - cookies - - bt - remove - unselected - file - - bt - remove - unselected - file = false - - bt - stop - timeout - - version - - max - concurrent - downloads - - quiet - - quiet = false - - max - download - result - - max - resume - failure - tries - - header - - rpc - listen - all - - rpc - listen - all = false - - all - proxy - user - - server - stat - if - - dht - file - path6 - - save - session - - bt - external - ip - - max - tries - - conditional - get - - conditional - get = false - - ftp - reuse - connection - - ftp - reuse - connection = false - - gid - - dscp - - max - download - limit - - bt - prioritize - piece - - check - integrity - - check - integrity = false - - log - level - - remote - time - - remote - time = false - - uri 
- selector - - rpc - listen - port - - index - out - - bt - tracker - - referer - - ssh - host - key - md - - console - log - level - - connect - timeout - - stream - piece - selector - - dht - message - timeout - - select - file - - download - result - - disable - ipv6 - - disable - ipv6 = false - - rpc - max - request - size - - rpc - passwd - - stop - with - process - - https - proxy - passwd - - continue - - continue = false - - no - file - allocation - limit - - netrc - path - - ftp - proxy - user - - enable - color - - enable - color = false - - metalink - location - - allow - piece - length - change - - allow - piece - length - change = false - - max - connection - per - server - - no - conf - - no - conf = false - - rpc - certificate - - metalink - os - - enable - http - pipelining - - enable - http - pipelining = false - - http - passwd - - user - agent - - enable - dht6 - - enable - dht6 = false - - dht - file - path - - http - auth - challenge - - http - auth - challenge = false - - bt - enable - hook - after - hash - check - - bt - enable - hook - after - hash - check = false - - peer - id - prefix - - max - mmap - limit - - enable - mmap - - enable - mmap = false - - use - head - - use - head = false - - bt - require - crypto - - bt - require - crypto = false - - show - console - readout - - show - console - readout = false - - conf - path - - log - - no - proxy - - dht - entry - point - - dht - listen - port - - http - user - - retry - wait - - on - download - complete - - help - - help = # basic - - help = # advanced - - help = # http - - help = # https - - help = # ftp - - help = # metalink - - help = # bittorrent - - help = # cookie - - help = # hook - - help = # file - - help = # rpc - - help = # checksum - - help = # experimental - - help = # deprecated - - help = # help - - help = # all - - max - overall - download - limit - - event - poll - - http - accept - gzip - - http - accept - gzip = false - - metalink - file - - all - proxy - - disk - cache - - hash - check - only - - hash - check - only = false - - dht - listen - addr6 - - human - readable - - human - readable = false - - ftp - user - - all - proxy - passwd - - bt - exclude - tracker - - pause - metadata - - pause - metadata = false - - http - proxy - user - - deferred - input - - deferred - input = false - - metalink - enable - unique - protocol - - metalink - enable - unique - protocol = false - - stop - - max - upload - limit - - multiple - interface - - realtime - chunk - checksum - - realtime - chunk - checksum = false - - http - no - cache - - http - no - cache = false - - ca - certificate - - bt - force - encryption - - bt - force - encryption = false - - bt - save - metadata - - bt - save - metadata = false - - seed - ratio - - follow - torrent - - pause - - pause = false - - checksum - - auto - save - interval - - async - dns - - async - dns = false - - bt - enable - lpd - - bt - enable - lpd = false - - parameterized - uri - - parameterized - uri = false - - ftp - proxy - passwd - - enable - rpc - - enable - rpc = false - - min - split - size - - bt - seed - unverified - - bt - seed - unverified = false - - input - file - - interface - - enable - async - dns6 - - enable - async - dns6 = false - - reuse - uri - - reuse - uri = false - - socket - recv - buffer - size - - bt - request - peer - speed - limit - - on - download - error - - bt - metadata - only - - bt - metadata - only = false ' - - " $ cur " ) ) <nl> + COMPREPLY = ( $ ( compgen - W ' - - rpc - save - upload - metadata - - rpc - save - 
upload - metadata = false - - on - download - start - - metalink - language - - rpc - secret - - torrent - file - - enable - peer - exchange - - enable - peer - exchange = false - - http - proxy - passwd - - bt - tracker - timeout - - ftp - type - - seed - time - - keep - unfinished - download - result - - keep - unfinished - download - result = false - - bt - tracker - connect - timeout - - bt - max - open - files - - no - netrc - - no - netrc = false - - force - sequential - - force - sequential = false - - metalink - base - uri - - private - key - - ftp - passwd - - allow - overwrite - - allow - overwrite = false - - rpc - allow - origin - all - - rpc - allow - origin - all = false - - bt - detach - seed - only - - bt - detach - seed - only = false - - dht - entry - point6 - - summary - interval - - lowest - speed - limit - - bt - tracker - interval - - proxy - method - - metalink - preferred - protocol - - enable - http - keep - alive - - enable - http - keep - alive = false - - metalink - version - - stderr - - stderr = false - - bt - lpd - interface - - force - save - - force - save = false - - rpc - secure - - rpc - secure = false - - listen - port - - rpc - private - key - - server - stat - of - - server - stat - timeout - - https - proxy - user - - piece - length - - dry - run - - dry - run = false - - truncate - console - readout - - truncate - console - readout = false - - save - not - found - - save - not - found = false - - async - dns - server - - bt - max - peers - - max - overall - upload - limit - - rpc - user - - optimize - concurrent - downloads - - optimize - concurrent - downloads = true - - optimize - concurrent - downloads = false - - optimize - concurrent - downloads = A : B - - dir - - split - - on - download - pause - - auto - file - renaming - - auto - file - renaming = false - - http - proxy - - save - session - interval - - daemon - - daemon = false - - https - proxy - - min - tls - version - - save - cookies - - out - - rlimit - nofile - - max - file - not - found - - on - download - stop - - certificate - - bt - min - crypto - level - - remove - control - file - - remove - control - file = false - - enable - dht - - enable - dht = false - - file - allocation - - follow - metalink - - on - bt - download - complete - - ftp - proxy - - show - files - - show - files = false - - timeout - - bt - hash - check - seed - - bt - hash - check - seed = false - - ftp - pasv - - ftp - pasv = false - - check - certificate - - check - certificate = false - - always - resume - - always - resume = false - - load - cookies - - bt - remove - unselected - file - - bt - remove - unselected - file = false - - bt - stop - timeout - - version - - max - concurrent - downloads - - quiet - - quiet = false - - max - download - result - - content - disposition - default - utf8 - - content - disposition - default - utf8 = false - - max - resume - failure - tries - - header - - rpc - listen - all - - rpc - listen - all = false - - all - proxy - user - - server - stat - if - - dht - file - path6 - - save - session - - bt - external - ip - - max - tries - - conditional - get - - conditional - get = false - - ftp - reuse - connection - - ftp - reuse - connection = false - - gid - - dscp - - max - download - limit - - bt - prioritize - piece - - check - integrity - - check - integrity = false - - log - level - - remote - time - - remote - time = false - - uri - selector - - rpc - listen - port - - index - out - - bt - tracker - - referer - - ssh - host - key - md - - console - log - level - - 
connect - timeout - - stream - piece - selector - - dht - message - timeout - - select - file - - download - result - - disable - ipv6 - - disable - ipv6 = false - - rpc - max - request - size - - rpc - passwd - - stop - with - process - - https - proxy - passwd - - continue - - continue = false - - no - file - allocation - limit - - netrc - path - - ftp - proxy - user - - enable - color - - enable - color = false - - metalink - location - - allow - piece - length - change - - allow - piece - length - change = false - - max - connection - per - server - - no - conf - - no - conf = false - - rpc - certificate - - metalink - os - - enable - http - pipelining - - enable - http - pipelining = false - - http - passwd - - user - agent - - enable - dht6 - - enable - dht6 = false - - dht - file - path - - http - auth - challenge - - http - auth - challenge = false - - bt - enable - hook - after - hash - check - - bt - enable - hook - after - hash - check = false - - peer - id - prefix - - max - mmap - limit - - enable - mmap - - enable - mmap = false - - use - head - - use - head = false - - bt - require - crypto - - bt - require - crypto = false - - show - console - readout - - show - console - readout = false - - conf - path - - log - - no - proxy - - dht - entry - point - - dht - listen - port - - http - user - - retry - wait - - on - download - complete - - help - - help = # basic - - help = # advanced - - help = # http - - help = # https - - help = # ftp - - help = # metalink - - help = # bittorrent - - help = # cookie - - help = # hook - - help = # file - - help = # rpc - - help = # checksum - - help = # experimental - - help = # deprecated - - help = # help - - help = # all - - max - overall - download - limit - - event - poll - - http - accept - gzip - - http - accept - gzip = false - - metalink - file - - all - proxy - - disk - cache - - hash - check - only - - hash - check - only = false - - dht - listen - addr6 - - human - readable - - human - readable = false - - ftp - user - - all - proxy - passwd - - bt - exclude - tracker - - pause - metadata - - pause - metadata = false - - http - proxy - user - - deferred - input - - deferred - input = false - - metalink - enable - unique - protocol - - metalink - enable - unique - protocol = false - - stop - - max - upload - limit - - multiple - interface - - realtime - chunk - checksum - - realtime - chunk - checksum = false - - http - no - cache - - http - no - cache = false - - ca - certificate - - bt - force - encryption - - bt - force - encryption = false - - bt - save - metadata - - bt - save - metadata = false - - seed - ratio - - follow - torrent - - pause - - pause = false - - checksum - - auto - save - interval - - async - dns - - async - dns = false - - bt - enable - lpd - - bt - enable - lpd = false - - parameterized - uri - - parameterized - uri = false - - ftp - proxy - passwd - - enable - rpc - - enable - rpc = false - - min - split - size - - bt - seed - unverified - - bt - seed - unverified = false - - input - file - - interface - - enable - async - dns6 - - enable - async - dns6 = false - - reuse - uri - - reuse - uri = false - - socket - recv - buffer - size - - bt - request - peer - speed - limit - - on - download - error - - bt - metadata - only - - bt - metadata - only = false ' - - " $ cur " ) ) <nl> ; ; <nl> * ) <nl> _filedir ' @ ( torrent | meta4 | metalink | text | txt | list | lst ) ' <nl>
Update bash_completion
aria2/aria2
f55fced91f6205294843a5f8afcaafc41e92f30f
2017-01-16T13:11:12Z
mmm a / include / swift / AST / Decl . h <nl> ppp b / include / swift / AST / Decl . h <nl> class alignas ( 1 < < DeclAlignInBits ) Decl { <nl> HasSingleExpressionBody : 1 <nl> ) ; <nl> <nl> - SWIFT_INLINE_BITFIELD ( FuncDecl , AbstractFunctionDecl , 1 + 2 + 1 + 1 + 2 , <nl> + SWIFT_INLINE_BITFIELD ( FuncDecl , AbstractFunctionDecl , 1 + 2 + 1 + 1 + 1 + 2 , <nl> / / / Whether this function is a ' static ' method . <nl> IsStatic : 1 , <nl> <nl> class alignas ( 1 < < DeclAlignInBits ) Decl { <nl> / / / Whether this function has a dynamic Self return type . <nl> HasDynamicSelf : 1 , <nl> <nl> + / / / Whether we ' ve computed the ' self ' access kind yet . <nl> + SelfAccessComputed : 1 , <nl> + <nl> / / / Backing bits for ' self ' access kind . <nl> SelfAccess : 2 <nl> ) ; <nl> llvm : : raw_ostream & operator < < ( llvm : : raw_ostream & OS , SelfAccessKind SAK ) ; <nl> / / / FuncDecl - ' func ' declaration . <nl> class FuncDecl : public AbstractFunctionDecl { <nl> friend class AbstractFunctionDecl ; <nl> + friend class SelfAccessKindRequest ; <nl> <nl> SourceLoc StaticLoc ; / / Location of the ' static ' token or invalid . <nl> SourceLoc FuncLoc ; / / Location of the ' func ' token . <nl> class FuncDecl : public AbstractFunctionDecl { <nl> <nl> Bits . FuncDecl . HasDynamicSelf = false ; <nl> Bits . FuncDecl . ForcedStaticDispatch = false ; <nl> - Bits . FuncDecl . SelfAccess = static_cast < unsigned > ( SelfAccessKind : : NonMutating ) ; <nl> + Bits . FuncDecl . SelfAccess = <nl> + static_cast < unsigned > ( SelfAccessKind : : NonMutating ) ; <nl> + Bits . FuncDecl . SelfAccessComputed = false ; <nl> } <nl> <nl> private : <nl> class FuncDecl : public AbstractFunctionDecl { <nl> DeclContext * Parent , <nl> ClangNode ClangN ) ; <nl> <nl> + Optional < SelfAccessKind > getCachedSelfAccessKind ( ) const { <nl> + if ( Bits . FuncDecl . SelfAccessComputed ) <nl> + return static_cast < SelfAccessKind > ( Bits . FuncDecl . SelfAccess ) ; <nl> + <nl> + return None ; <nl> + } <nl> + <nl> public : <nl> / / / Factory function only for use by deserialization . <nl> static FuncDecl * createDeserialized ( ASTContext & Context , SourceLoc StaticLoc , <nl> class FuncDecl : public AbstractFunctionDecl { <nl> void setStatic ( bool IsStatic = true ) { <nl> Bits . FuncDecl . IsStatic = IsStatic ; <nl> } <nl> - <nl> + <nl> bool isMutating ( ) const { <nl> return getSelfAccessKind ( ) = = SelfAccessKind : : Mutating ; <nl> } <nl> class FuncDecl : public AbstractFunctionDecl { <nl> return getSelfAccessKind ( ) = = SelfAccessKind : : __Consuming ; <nl> } <nl> <nl> - SelfAccessKind getSelfAccessKind ( ) const { <nl> - return static_cast < SelfAccessKind > ( Bits . FuncDecl . SelfAccess ) ; <nl> - } <nl> + SelfAccessKind getSelfAccessKind ( ) const ; <nl> + <nl> void setSelfAccessKind ( SelfAccessKind mod ) { <nl> Bits . FuncDecl . SelfAccess = static_cast < unsigned > ( mod ) ; <nl> + Bits . FuncDecl . SelfAccessComputed = true ; <nl> } <nl> <nl> SourceLoc getStaticLoc ( ) const { return StaticLoc ; } <nl> mmm a / include / swift / AST / TypeCheckRequests . h <nl> ppp b / include / swift / AST / TypeCheckRequests . h <nl> class SpecializeAttr ; <nl> class TypeAliasDecl ; <nl> struct TypeLoc ; <nl> class ValueDecl ; <nl> + class AbstractStorageDecl ; <nl> <nl> / / / Display a nominal type or extension thereof . <nl> void simple_display ( <nl> class FunctionBuilderTypeRequest : <nl> void noteCycleStep ( DiagnosticEngine & diags ) const ; <nl> } ; <nl> <nl> + / / / Request a function ' s self access kind . 
<nl> + class SelfAccessKindRequest : <nl> + public SimpleRequest < SelfAccessKindRequest , <nl> + CacheKind : : SeparatelyCached , <nl> + SelfAccessKind , <nl> + FuncDecl * > { <nl> + public : <nl> + using SimpleRequest : : SimpleRequest ; <nl> + <nl> + private : <nl> + friend SimpleRequest ; <nl> + <nl> + / / Evaluation . <nl> + llvm : : Expected < SelfAccessKind > <nl> + evaluate ( Evaluator & evaluator , FuncDecl * func ) const ; <nl> + <nl> + public : <nl> + / / Cycle handling <nl> + void diagnoseCycle ( DiagnosticEngine & diags ) const ; <nl> + void noteCycleStep ( DiagnosticEngine & diags ) const ; <nl> + <nl> + / / Separate caching . <nl> + bool isCached ( ) const { return true ; } <nl> + Optional < SelfAccessKind > getCachedResult ( ) const ; <nl> + void cacheResult ( SelfAccessKind value ) const ; <nl> + } ; <nl> + <nl> / / Allow AnyValue to compare two Type values , even though Type doesn ' t <nl> / / support = = . <nl> template < > <nl> mmm a / include / swift / AST / TypeCheckerTypeIDZone . def <nl> ppp b / include / swift / AST / TypeCheckerTypeIDZone . def <nl> SWIFT_TYPEID ( PropertyWrapperBackingPropertyTypeRequest ) <nl> SWIFT_TYPEID ( PropertyWrapperBackingPropertyInfoRequest ) <nl> SWIFT_TYPEID ( AttachedFunctionBuilderRequest ) <nl> SWIFT_TYPEID ( FunctionBuilderTypeRequest ) <nl> + SWIFT_TYPEID ( SelfAccessKindRequest ) <nl> mmm a / lib / AST / Decl . cpp <nl> ppp b / lib / AST / Decl . cpp <nl> bool FuncDecl : : isBinaryOperator ( ) const { <nl> ! params - > get ( 1 ) - > isVariadic ( ) ; <nl> } <nl> <nl> + SelfAccessKind FuncDecl : : getSelfAccessKind ( ) const { <nl> + auto & ctx = getASTContext ( ) ; <nl> + return evaluateOrDefault ( ctx . evaluator , <nl> + SelfAccessKindRequest { const_cast < FuncDecl * > ( this ) } , <nl> + SelfAccessKind : : NonMutating ) ; <nl> + } <nl> + <nl> ConstructorDecl : : ConstructorDecl ( DeclName Name , SourceLoc ConstructorLoc , <nl> OptionalTypeKind Failability , <nl> SourceLoc FailabilityLoc , <nl> mmm a / lib / AST / TypeCheckRequests . cpp <nl> ppp b / lib / AST / TypeCheckRequests . cpp <nl> void FunctionBuilderTypeRequest : : diagnoseCycle ( DiagnosticEngine & diags ) const { <nl> void FunctionBuilderTypeRequest : : noteCycleStep ( DiagnosticEngine & diags ) const { <nl> std : : get < 0 > ( getStorage ( ) ) - > diagnose ( diag : : circular_reference_through ) ; <nl> } <nl> + <nl> + / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - / / <nl> + / / SelfAccessKindRequest computation . <nl> + / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - / / <nl> + <nl> + void SelfAccessKindRequest : : diagnoseCycle ( DiagnosticEngine & diags ) const { <nl> + auto decl = std : : get < 0 > ( getStorage ( ) ) ; <nl> + diags . diagnose ( decl , diag : : circular_reference ) ; <nl> + } <nl> + <nl> + void SelfAccessKindRequest : : noteCycleStep ( DiagnosticEngine & diags ) const { <nl> + auto decl = std : : get < 0 > ( getStorage ( ) ) ; <nl> + diags . 
diagnose ( decl , diag : : circular_reference_through ) ; <nl> + } <nl> + <nl> + Optional < SelfAccessKind > SelfAccessKindRequest : : getCachedResult ( ) const { <nl> + auto * funcDecl = std : : get < 0 > ( getStorage ( ) ) ; <nl> + return funcDecl - > getCachedSelfAccessKind ( ) ; <nl> + } <nl> + <nl> + void SelfAccessKindRequest : : cacheResult ( SelfAccessKind value ) const { <nl> + auto * funcDecl = std : : get < 0 > ( getStorage ( ) ) ; <nl> + funcDecl - > setSelfAccessKind ( value ) ; <nl> + } <nl> mmm a / lib / ClangImporter / ImportDecl . cpp <nl> ppp b / lib / ClangImporter / ImportDecl . cpp <nl> Decl * SwiftDeclConverter : : importGlobalAsMethod ( <nl> result - > setAccess ( AccessLevel : : Public ) ; <nl> if ( selfIsInOut ) <nl> result - > setSelfAccessKind ( SelfAccessKind : : Mutating ) ; <nl> + else <nl> + result - > setSelfAccessKind ( SelfAccessKind : : NonMutating ) ; <nl> if ( selfIdx ) { <nl> result - > setSelfIndex ( selfIdx . getValue ( ) ) ; <nl> } else { <nl> mmm a / lib / Parse / ParseDecl . cpp <nl> ppp b / lib / Parse / ParseDecl . cpp <nl> static AccessorDecl * createAccessorFunc ( SourceLoc DeclLoc , <nl> ValueArg , ReturnType , <nl> P - > CurDeclContext ) ; <nl> <nl> - / / Non - static set / willSet / didSet / mutableAddress default to mutating . <nl> - / / get / address default to non - mutating . <nl> - switch ( Kind ) { <nl> - case AccessorKind : : Address : <nl> - case AccessorKind : : Get : <nl> - case AccessorKind : : Read : <nl> - break ; <nl> - <nl> - case AccessorKind : : MutableAddress : <nl> - case AccessorKind : : Set : <nl> - case AccessorKind : : WillSet : <nl> - case AccessorKind : : DidSet : <nl> - case AccessorKind : : Modify : <nl> - if ( D - > isInstanceMember ( ) ) <nl> - D - > setSelfAccessKind ( SelfAccessKind : : Mutating ) ; <nl> - break ; <nl> - } <nl> - <nl> return D ; <nl> } <nl> <nl> mmm a / lib / Sema / CodeSynthesis . cpp <nl> ppp b / lib / Sema / CodeSynthesis . cpp <nl> static AccessorDecl * createGetterPrototype ( AbstractStorageDecl * storage , <nl> <nl> if ( storage - > isGetterMutating ( ) ) <nl> getter - > setSelfAccessKind ( SelfAccessKind : : Mutating ) ; <nl> + else <nl> + getter - > setSelfAccessKind ( SelfAccessKind : : NonMutating ) ; <nl> <nl> if ( storage - > isStatic ( ) ) <nl> getter - > setStatic ( ) ; <nl> static AccessorDecl * createSetterPrototype ( AbstractStorageDecl * storage , <nl> <nl> if ( isMutating ) <nl> setter - > setSelfAccessKind ( SelfAccessKind : : Mutating ) ; <nl> + else <nl> + setter - > setSelfAccessKind ( SelfAccessKind : : NonMutating ) ; <nl> <nl> if ( isStatic ) <nl> setter - > setStatic ( ) ; <nl> createCoroutineAccessorPrototype ( AbstractStorageDecl * storage , <nl> <nl> if ( isMutating ) <nl> accessor - > setSelfAccessKind ( SelfAccessKind : : Mutating ) ; <nl> + else <nl> + accessor - > setSelfAccessKind ( SelfAccessKind : : NonMutating ) ; <nl> <nl> if ( isStatic ) <nl> accessor - > setStatic ( ) ; <nl> mmm a / lib / Sema / TypeCheckDecl . cpp <nl> ppp b / lib / Sema / TypeCheckDecl . cpp <nl> static bool doesContextHaveValueSemantics ( DeclContext * dc ) { <nl> return false ; <nl> } <nl> <nl> - static void validateSelfAccessKind ( FuncDecl * FD ) { <nl> - / / Validate the mutating attribute if present , and install it into the bit <nl> - / / on funcdecl ( instead of just being a DeclAttribute ) . <nl> - if ( FD - > getAttrs ( ) . hasAttribute < MutatingAttr > ( ) ) <nl> - FD - > setSelfAccessKind ( SelfAccessKind : : Mutating ) ; <nl> - else if ( FD - > getAttrs ( ) . 
hasAttribute < NonMutatingAttr > ( ) ) <nl> - FD - > setSelfAccessKind ( SelfAccessKind : : NonMutating ) ; <nl> - else if ( FD - > getAttrs ( ) . hasAttribute < ConsumingAttr > ( ) ) <nl> - FD - > setSelfAccessKind ( SelfAccessKind : : __Consuming ) ; <nl> - else if ( auto accessor = dyn_cast < AccessorDecl > ( FD ) ) { <nl> - if ( accessor - > getAccessorKind ( ) = = AccessorKind : : Get | | <nl> - accessor - > getAccessorKind ( ) = = AccessorKind : : Set | | <nl> - accessor - > getAccessorKind ( ) = = AccessorKind : : DidSet | | <nl> - accessor - > getAccessorKind ( ) = = AccessorKind : : WillSet ) { <nl> - auto storage = accessor - > getStorage ( ) ; <nl> - if ( auto * resolver = FD - > getASTContext ( ) . getLazyResolver ( ) ) <nl> + llvm : : Expected < SelfAccessKind > <nl> + SelfAccessKindRequest : : evaluate ( Evaluator & evaluator , FuncDecl * FD ) const { <nl> + if ( FD - > getAttrs ( ) . getAttribute < MutatingAttr > ( true ) ) { <nl> + if ( ! FD - > isInstanceMember ( ) | | <nl> + ! doesContextHaveValueSemantics ( FD - > getDeclContext ( ) ) ) { <nl> + return SelfAccessKind : : NonMutating ; <nl> + } <nl> + return SelfAccessKind : : Mutating ; <nl> + } else if ( FD - > getAttrs ( ) . hasAttribute < NonMutatingAttr > ( ) ) { <nl> + return SelfAccessKind : : NonMutating ; <nl> + } else if ( FD - > getAttrs ( ) . hasAttribute < ConsumingAttr > ( ) ) { <nl> + return SelfAccessKind : : __Consuming ; <nl> + } <nl> + <nl> + if ( auto * AD = dyn_cast < AccessorDecl > ( FD ) ) { <nl> + / / Non - static set / willSet / didSet / mutableAddress default to mutating . <nl> + / / get / address default to non - mutating . <nl> + switch ( AD - > getAccessorKind ( ) ) { <nl> + case AccessorKind : : Address : <nl> + case AccessorKind : : Get : <nl> + case AccessorKind : : Read : <nl> + break ; <nl> + <nl> + case AccessorKind : : MutableAddress : <nl> + case AccessorKind : : Set : <nl> + case AccessorKind : : Modify : <nl> + if ( AD - > isInstanceMember ( ) & & <nl> + doesContextHaveValueSemantics ( AD - > getDeclContext ( ) ) ) <nl> + return SelfAccessKind : : Mutating ; <nl> + break ; <nl> + <nl> + case AccessorKind : : WillSet : <nl> + case AccessorKind : : DidSet : { <nl> + auto * storage = AD - > getStorage ( ) ; <nl> + <nl> + / / FIXME : Remove this once we request - ify isSetterMutating ( ) <nl> + auto * resolver = storage - > getASTContext ( ) . getLazyResolver ( ) ; <nl> + if ( resolver ) <nl> resolver - > resolveDeclSignature ( storage ) ; <nl> - if ( accessor - > getAccessorKind ( ) = = AccessorKind : : Get ) { <nl> - FD - > setSelfAccessKind ( storage - > isGetterMutating ( ) <nl> - ? SelfAccessKind : : Mutating <nl> - : SelfAccessKind : : NonMutating ) ; <nl> - } else { <nl> - FD - > setSelfAccessKind ( storage - > isSetterMutating ( ) <nl> - ? SelfAccessKind : : Mutating <nl> - : SelfAccessKind : : NonMutating ) ; <nl> - } <nl> - } <nl> - } <nl> + if ( storage - > isSetterMutating ( ) ) <nl> + return SelfAccessKind : : Mutating ; <nl> <nl> - if ( FD - > isMutating ( ) ) { <nl> - if ( ! FD - > isInstanceMember ( ) | | <nl> - ! doesContextHaveValueSemantics ( FD - > getDeclContext ( ) ) ) <nl> - FD - > setSelfAccessKind ( SelfAccessKind : : NonMutating ) ; <nl> + break ; <nl> + } <nl> + } <nl> } <nl> - } <nl> <nl> - static bool validateAccessorIsMutating ( FuncDecl * accessor ) { <nl> - assert ( accessor & & " accessor not present ! 
" ) ; <nl> - validateSelfAccessKind ( accessor ) ; <nl> - return accessor - > isMutating ( ) ; <nl> + return SelfAccessKind : : NonMutating ; <nl> } <nl> <nl> static bool computeIsGetterMutating ( AbstractStorageDecl * storage ) { <nl> static bool computeIsGetterMutating ( AbstractStorageDecl * storage ) { <nl> if ( ! storage - > getGetter ( ) ) <nl> return false ; <nl> <nl> - return validateAccessorIsMutating ( storage - > getGetter ( ) ) ; <nl> + return storage - > getGetter ( ) - > isMutating ( ) ; <nl> <nl> case ReadImplKind : : Address : <nl> - return validateAccessorIsMutating ( storage - > getAddressor ( ) ) ; <nl> + return storage - > getAddressor ( ) - > isMutating ( ) ; <nl> <nl> case ReadImplKind : : Read : <nl> - return validateAccessorIsMutating ( storage - > getReadCoroutine ( ) ) ; <nl> + return storage - > getReadCoroutine ( ) - > isMutating ( ) ; <nl> } <nl> <nl> llvm_unreachable ( " bad impl kind " ) ; <nl> static bool computeIsSetterMutating ( AbstractStorageDecl * storage ) { <nl> auto * setter = storage - > getSetter ( ) ; <nl> <nl> if ( setter ) <nl> - result = validateAccessorIsMutating ( setter ) ; <nl> + result = setter - > isMutating ( ) ; <nl> <nl> / / As a special extra check , if the user also gave us a modify <nl> / / coroutine , check that it has the same mutatingness as the setter . <nl> static bool computeIsSetterMutating ( AbstractStorageDecl * storage ) { <nl> / / it ' s the implied value . <nl> if ( impl . getReadWriteImpl ( ) = = ReadWriteImplKind : : Modify ) { <nl> auto modifyAccessor = storage - > getModifyCoroutine ( ) ; <nl> - auto modifyResult = validateAccessorIsMutating ( modifyAccessor ) ; <nl> + auto modifyResult = modifyAccessor - > isMutating ( ) ; <nl> if ( ( result | | storage - > isGetterMutating ( ) ) ! = modifyResult ) { <nl> modifyAccessor - > diagnose ( <nl> diag : : modify_mutatingness_differs_from_setter , <nl> static bool computeIsSetterMutating ( AbstractStorageDecl * storage ) { <nl> } <nl> <nl> case WriteImplKind : : MutableAddress : <nl> - return validateAccessorIsMutating ( storage - > getMutableAddressor ( ) ) ; <nl> + return storage - > getMutableAddressor ( ) - > isMutating ( ) ; <nl> <nl> case WriteImplKind : : Modify : <nl> - return validateAccessorIsMutating ( storage - > getModifyCoroutine ( ) ) ; <nl> + return storage - > getModifyCoroutine ( ) - > isMutating ( ) ; <nl> } <nl> llvm_unreachable ( " bad storage kind " ) ; <nl> } <nl> void TypeChecker : : validateDecl ( ValueDecl * D ) { <nl> } <nl> } <nl> <nl> - validateSelfAccessKind ( FD ) ; <nl> - <nl> / / Check whether the return type is dynamic ' Self ' . <nl> FD - > setDynamicSelf ( checkDynamicSelfReturn ( FD ) ) ; <nl> <nl>
Sema : Use a request to compute FuncDecl : : getSelfAccessKind ( )
apple/swift
7913d3023660f0d22dfc842d3fdcebe6f96d6455
2019-06-19T18:38:43Z
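The Swift commit above converts an eagerly-set mutating flag into a lazily evaluated, separately cached request. A minimal C++ sketch of that evaluate-or-default pattern follows; `FuncDecl`, its fields, and `getSelfAccessKind` here are simplified stand-ins for illustration, not the real `swift::Evaluator` API (the real enum also spells the third case `__Consuming`, which is a reserved identifier in C++ and is renamed below).

```cpp
#include <optional>

// Illustrative stand-ins only -- not the real swift::Evaluator API.
enum class SelfAccessKind { NonMutating, Mutating, Consuming };

struct FuncDecl {
  bool hasMutatingAttr = false;
  bool isInstanceMember = true;
  bool contextHasValueSemantics = true;
  // One cache slot per decl, mirroring CacheKind::SeparatelyCached.
  std::optional<SelfAccessKind> cachedSelfAccessKind;
};

// The shape of evaluateOrDefault: return the cached value if present,
// otherwise compute once, store it, and return it.
SelfAccessKind getSelfAccessKind(FuncDecl& fd) {
  if (fd.cachedSelfAccessKind)             // getCachedResult()
    return *fd.cachedSelfAccessKind;
  SelfAccessKind result = SelfAccessKind::NonMutating;
  if (fd.hasMutatingAttr && fd.isInstanceMember &&
      fd.contextHasValueSemantics)
    result = SelfAccessKind::Mutating;     // @mutating only sticks in value types
  fd.cachedSelfAccessKind = result;        // cacheResult()
  return result;
}
```

The cached slot is what lets later callers skip re-running attribute and context checks, which is the point of moving the computation behind a request.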
mmm a / core / bind / core_bind . cpp <nl> ppp b / core / bind / core_bind . cpp <nl> bool _OS : : is_window_maximized ( ) const { <nl> return OS : : get_singleton ( ) - > is_window_maximized ( ) ; <nl> } <nl> <nl> + void _OS : : set_borderless_window ( bool p_borderless ) { <nl> + OS : : get_singleton ( ) - > set_borderless_window ( p_borderless ) ; <nl> + } <nl> + <nl> + bool _OS : : get_borderless_window ( ) const { <nl> + return OS : : get_singleton ( ) - > get_borderless_window ( ) ; <nl> + } <nl> <nl> void _OS : : set_use_file_access_save_and_swap ( bool p_enable ) { <nl> <nl> void _OS : : _bind_methods ( ) { <nl> ObjectTypeDB : : bind_method ( _MD ( " set_window_maximized " , " enabled " ) , & _OS : : set_window_maximized ) ; <nl> ObjectTypeDB : : bind_method ( _MD ( " is_window_maximized " ) , & _OS : : is_window_maximized ) ; <nl> <nl> + ObjectTypeDB : : bind_method ( _MD ( " set_borderless_window " , " borderless " ) , & _OS : : set_borderless_window ) ; <nl> + ObjectTypeDB : : bind_method ( _MD ( " get_borderless_window " ) , & _OS : : get_borderless_window ) ; <nl> + ObjectTypeDB : : bind_method ( _MD ( " set_multisamples " , " multisamples " ) , & _OS : : set_multisamples ) ; <nl> + ObjectTypeDB : : bind_method ( _MD ( " get_multisamples " ) , & _OS : : get_multisamples ) ; <nl> + <nl> ObjectTypeDB : : bind_method ( _MD ( " set_screen_orientation " , " orientation " ) , & _OS : : set_screen_orientation ) ; <nl> ObjectTypeDB : : bind_method ( _MD ( " get_screen_orientation " ) , & _OS : : get_screen_orientation ) ; <nl> <nl> mmm a / core / bind / core_bind . h <nl> ppp b / core / bind / core_bind . h <nl> class _OS : public Object { <nl> virtual void set_window_maximized ( bool p_enabled ) ; <nl> virtual bool is_window_maximized ( ) const ; <nl> <nl> + virtual void set_borderless_window ( bool p_borderless ) ; <nl> + virtual bool get_borderless_window ( ) const ; <nl> <nl> Error native_video_play ( String p_path , float p_volume , String p_audio_track , String p_subtitle_track ) ; <nl> bool native_video_is_playing ( ) ; <nl> mmm a / core / os / os . h <nl> ppp b / core / os / os . h <nl> class OS { <nl> int width , height ; <nl> bool fullscreen ; <nl> bool resizable ; <nl> + bool borderless_window ; <nl> float get_aspect ( ) const { return ( float ) width / ( float ) height ; } <nl> - VideoMode ( int p_width = 1024 , int p_height = 600 , bool p_fullscreen = false , bool p_resizable = true ) { width = p_width ; height = p_height ; fullscreen = p_fullscreen ; resizable = p_resizable ; } <nl> + VideoMode ( int p_width = 1024 , int p_height = 600 , bool p_fullscreen = false , bool p_resizable = true , bool p_borderless_window = false ) { width = p_width ; height = p_height ; fullscreen = p_fullscreen ; resizable = p_resizable ; borderless_window = p_borderless_window ; } <nl> } ; <nl> protected : <nl> friend class Main ; <nl> friend class Main ; <nl> virtual void set_window_maximized ( bool p_enabled ) { } <nl> virtual bool is_window_maximized ( ) const { return true ; } <nl> <nl> + virtual void set_borderless_window ( int p_borderless ) { } <nl> + virtual bool get_borderless_window ( ) { return 0 ; } <nl> <nl> <nl> <nl> mmm a / main / main . cpp <nl> ppp b / main / main . cpp <nl> Error Main : : setup ( const char * execpath , int argc , char * argv [ ] , bool p_second_phas <nl> video_mode . fullscreen = globals - > get ( " display / fullscreen " ) ; <nl> if ( use_custom_res & & globals - > has ( " display / resizable " ) ) <nl> video_mode . 
resizable = globals - > get ( " display / resizable " ) ; <nl> + if ( use_custom_res & & globals - > has ( " display / borderless_window " ) ) <nl> + video_mode . borderless_window = globals - > get ( " display / borderless_window " ) ; <nl> <nl> if ( ! force_res & & use_custom_res & & globals - > has ( " display / test_width " ) & & globals - > has ( " display / test_height " ) ) { <nl> int tw = globals - > get ( " display / test_width " ) ; <nl> Error Main : : setup ( const char * execpath , int argc , char * argv [ ] , bool p_second_phas <nl> GLOBAL_DEF ( " display / height " , video_mode . height ) ; <nl> GLOBAL_DEF ( " display / fullscreen " , video_mode . fullscreen ) ; <nl> GLOBAL_DEF ( " display / resizable " , video_mode . resizable ) ; <nl> + GLOBAL_DEF ( " display / borderless_window " , video_mode . borderless_window ) ; <nl> GLOBAL_DEF ( " display / test_width " , 0 ) ; <nl> GLOBAL_DEF ( " display / test_height " , 0 ) ; <nl> OS : : get_singleton ( ) - > _pixel_snap = GLOBAL_DEF ( " display / use_2d_pixel_snap " , false ) ; <nl> mmm a / platform / windows / os_windows . cpp <nl> ppp b / platform / windows / os_windows . cpp <nl> void OS_Windows : : initialize_core ( ) { <nl> / / RedirectIOToConsole ( ) ; <nl> maximized = false ; <nl> minimized = false ; <nl> + borderless = false ; <nl> <nl> ThreadWindows : : make_default ( ) ; <nl> SemaphoreWindows : : make_default ( ) ; <nl> void OS_Windows : : initialize ( const VideoMode & p_desired , int p_video_driver , int p_ <nl> DWORD dwExStyle ; <nl> DWORD dwStyle ; <nl> <nl> - if ( video_mode . fullscreen ) { <nl> + if ( video_mode . fullscreen | | video_mode . borderless_window ) { <nl> <nl> dwExStyle = WS_EX_APPWINDOW ; <nl> dwStyle = WS_POPUP ; <nl> void OS_Windows : : initialize ( const VideoMode & p_desired , int p_video_driver , int p_ <nl> video_mode . fullscreen = false ; <nl> } else { <nl> <nl> - if ( ! ( hWnd = CreateWindowExW ( dwExStyle , L " Engine " , L " " , dwStyle | WS_CLIPSIBLINGS | WS_CLIPCHILDREN , 0 , 0 , WindowRect . right - WindowRect . left , WindowRect . bottom - WindowRect . top , NULL , NULL , hInstance , NULL ) ) ) { <nl> + if ( ! ( hWnd = CreateWindowExW ( dwExStyle , L " Engine " , L " " , dwStyle | WS_CLIPSIBLINGS | WS_CLIPCHILDREN , ( GetSystemMetrics ( SM_CXSCREEN ) - WindowRect . right ) / 2 , ( GetSystemMetrics ( SM_CYSCREEN ) - WindowRect . bottom ) / 2 , WindowRect . right - WindowRect . left , WindowRect . bottom - WindowRect . top , NULL , NULL , hInstance , NULL ) ) ) { <nl> MessageBoxW ( NULL , L " Window Creation Error . " , L " ERROR " , MB_OK | MB_ICONEXCLAMATION ) ; <nl> return ; / / Return FALSE <nl> } <nl> bool OS_Windows : : is_window_maximized ( ) const { <nl> } <nl> <nl> <nl> + void OS_Windows : : set_borderless_window ( int p_borderless ) { <nl> + video_mode . borderless_window = p_borderless ; <nl> + } <nl> + <nl> + bool OS_Windows : : get_borderless_window ( ) { <nl> + return video_mode . borderless_window ; <nl> + } <nl> + <nl> void OS_Windows : : print_error ( const char * p_function , const char * p_file , int p_line , const char * p_code , const char * p_rationale , ErrorType p_type ) { <nl> <nl> HANDLE hCon = GetStdHandle ( STD_OUTPUT_HANDLE ) ; <nl> mmm a / platform / windows / os_windows . h <nl> ppp b / platform / windows / os_windows . 
h <nl> class OS_Windows : public OS { <nl> Vector < MonitorInfo > monitor_info ; <nl> bool maximized ; <nl> bool minimized ; <nl> + bool borderless ; <nl> <nl> static BOOL CALLBACK MonitorEnumProc ( HMONITOR hMonitor , HDC hdcMonitor , LPRECT lprcMonitor , LPARAM dwData ) ; <nl> <nl> class OS_Windows : public OS { <nl> virtual void set_window_maximized ( bool p_enabled ) ; <nl> virtual bool is_window_maximized ( ) const ; <nl> <nl> + virtual void set_borderless_window ( int p_borderless ) ; <nl> + virtual bool get_borderless_window ( ) ; <nl> + <nl> virtual MainLoop * get_main_loop ( ) const ; <nl> <nl> virtual String get_name ( ) ; <nl>
Borderless window support for the Win32 build . Default window position is now also centred .
godotengine/godot
6eb4812317bc5207444ddbfe887d06969969b669
2016-03-12T16:38:12Z
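A hedged sketch of the window-creation change in the Godot commit above: borderless mode takes the `WS_POPUP` style path, and the window is centred using the primary screen metrics. The `L"Engine"` class name follows the diff; the function assumes that window class is already registered, and `AdjustWindowRect` sizing and error handling are omitted for brevity.

```cpp
#include <windows.h>

// Assumes a window class named L"Engine" has already been registered,
// as in the engine's startup code.
HWND createCentredWindow(HINSTANCE hInstance, int w, int h,
                         bool borderless) {
  DWORD dwExStyle = WS_EX_APPWINDOW;
  DWORD dwStyle = borderless ? WS_POPUP : WS_OVERLAPPEDWINDOW;
  // Centre the window on the primary screen, as the patch does.
  int x = (GetSystemMetrics(SM_CXSCREEN) - w) / 2;
  int y = (GetSystemMetrics(SM_CYSCREEN) - h) / 2;
  return CreateWindowExW(dwExStyle, L"Engine", L"",
                         dwStyle | WS_CLIPSIBLINGS | WS_CLIPCHILDREN,
                         x, y, w, h, NULL, NULL, hInstance, NULL);
}
```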
mmm a / src / objects - inl . h <nl> ppp b / src / objects - inl . h <nl> void Name : : set_hash_field ( uint32_t value ) { <nl> <nl> bool Name : : Equals ( Name * other ) { <nl> if ( other = = this ) return true ; <nl> - if ( this - > IsUniqueName ( ) & & other - > IsUniqueName ( ) ) return false ; <nl> + if ( this - > IsSymbol ( ) | | other - > IsSymbol ( ) | | <nl> + ( this - > IsInternalizedString ( ) & & other - > IsInternalizedString ( ) ) ) { <nl> + return false ; <nl> + } <nl> return String : : cast ( this ) - > SlowEquals ( String : : cast ( other ) ) ; <nl> } <nl> <nl> mmm a / src / objects . cc <nl> ppp b / src / objects . cc <nl> MaybeObject * Object : : ToObject ( ) { <nl> Isolate * isolate = HeapObject : : cast ( this ) - > GetIsolate ( ) ; <nl> Context * native_context = isolate - > context ( ) - > native_context ( ) ; <nl> return CreateJSValue ( native_context - > string_function ( ) , this ) ; <nl> + } else if ( IsSymbol ( ) ) { <nl> + Isolate * isolate = HeapObject : : cast ( this ) - > GetIsolate ( ) ; <nl> + Context * native_context = isolate - > context ( ) - > native_context ( ) ; <nl> + return CreateJSValue ( native_context - > symbol_function ( ) , this ) ; <nl> } <nl> <nl> / / Throw a type error . <nl> mmm a / src / runtime . js <nl> ppp b / src / runtime . js <nl> function EQUALS ( y ) { <nl> } else if ( IS_STRING ( x ) ) { <nl> while ( true ) { <nl> if ( IS_STRING ( y ) ) return % StringEquals ( x , y ) ; <nl> + if ( IS_SYMBOL ( y ) ) return 1 ; / / not equal <nl> if ( IS_NUMBER ( y ) ) return % NumberEquals ( % ToNumber ( x ) , y ) ; <nl> if ( IS_BOOLEAN ( y ) ) return % NumberEquals ( % ToNumber ( x ) , % ToNumber ( y ) ) ; <nl> if ( IS_NULL_OR_UNDEFINED ( y ) ) return 1 ; / / not equal <nl> y = % ToPrimitive ( y , NO_HINT ) ; <nl> } <nl> + } else if ( IS_SYMBOL ( x ) ) { <nl> + while ( true ) { <nl> + if ( IS_SYMBOL ( y ) ) return % _ObjectEquals ( x , y ) ? 0 : 1 ; <nl> + if ( ! IS_SPEC_OBJECT ( y ) ) return 1 ; / / not equal <nl> + y = % ToPrimitive ( y , NO_HINT ) ; <nl> + } <nl> } else if ( IS_BOOLEAN ( x ) ) { <nl> if ( IS_BOOLEAN ( y ) ) return % _ObjectEquals ( x , y ) ? 0 : 1 ; <nl> if ( IS_NULL_OR_UNDEFINED ( y ) ) return 1 ; <nl> if ( IS_NUMBER ( y ) ) return % NumberEquals ( % ToNumber ( x ) , y ) ; <nl> if ( IS_STRING ( y ) ) return % NumberEquals ( % ToNumber ( x ) , % ToNumber ( y ) ) ; <nl> + if ( IS_SYMBOL ( y ) ) return 1 ; / / not equal <nl> / / y is object . <nl> x = % ToNumber ( x ) ; <nl> y = % ToPrimitive ( y , NO_HINT ) ; <nl> function ToPrimitive ( x , hint ) { <nl> if ( IS_STRING ( x ) ) return x ; <nl> / / Normal behavior . <nl> if ( ! IS_SPEC_OBJECT ( x ) ) return x ; <nl> + if ( IS_SYMBOL_WRAPPER ( x ) ) return % _ValueOf ( x ) ; <nl> if ( hint = = NO_HINT ) hint = ( IS_DATE ( x ) ) ? STRING_HINT : NUMBER_HINT ; <nl> return ( hint = = NUMBER_HINT ) ? % DefaultNumber ( x ) : % DefaultString ( x ) ; <nl> } <nl> mmm a / test / mjsunit / harmony / symbols . js <nl> ppp b / test / mjsunit / harmony / symbols . js <nl> TestToNumber ( ) <nl> <nl> <nl> function TestEquality ( ) { <nl> - / / Every symbol should equal itself . <nl> + / / Every symbol should equal itself , and non - strictly equal its wrapper . <nl> for ( var i in symbols ) { <nl> assertSame ( symbols [ i ] , symbols [ i ] ) <nl> assertEquals ( symbols [ i ] , symbols [ i ] ) <nl> assertTrue ( Object . 
is ( symbols [ i ] , symbols [ i ] ) ) <nl> assertTrue ( symbols [ i ] = = = symbols [ i ] ) <nl> assertTrue ( symbols [ i ] = = symbols [ i ] ) <nl> + assertFalse ( symbols [ i ] = = = new Symbol ( symbols [ i ] ) ) <nl> + assertFalse ( new Symbol ( symbols [ i ] ) = = = symbols [ i ] ) <nl> + assertTrue ( symbols [ i ] = = new Symbol ( symbols [ i ] ) ) <nl> + assertTrue ( new Symbol ( symbols [ i ] ) = = symbols [ i ] ) <nl> } <nl> <nl> / / All symbols should be distinct . <nl> function TestEquality ( ) { <nl> assertFalse ( symbols [ i ] = = symbols [ j ] ) <nl> } <nl> } <nl> + <nl> + / / Symbols should not be equal to any other value ( and the test terminates ) . <nl> + var values = [ 347 , 1 . 275 , NaN , " string " , null , undefined , { } , function ( ) { } ] <nl> + for ( var i in symbols ) { <nl> + for ( var j in values ) { <nl> + assertFalse ( symbols [ i ] = = = values [ j ] ) <nl> + assertFalse ( values [ j ] = = = symbols [ i ] ) <nl> + assertFalse ( symbols [ i ] = = values [ j ] ) <nl> + assertFalse ( values [ j ] = = symbols [ i ] ) <nl> + } <nl> + } <nl> } <nl> TestEquality ( ) <nl> <nl>
ES6 symbols : fix corner cases of equality operators
v8/v8
b449691db2461437c5f409bfc273ca0f778b3c48
2013-04-03T17:06:22Z
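The `Name::Equals` fix in the V8 commit above narrows the unique-name fast path: a Symbol can only be equal by identity, two distinct internalized strings are already known to differ, and only the remaining string pairs need the slow character-wise comparison. A self-contained sketch of that logic, with simplified stand-ins for v8's `Name`/`String` classes:

```cpp
#include <string>

// Simplified stand-ins for v8's Name/String hierarchy; illustrative only.
struct Name {
  bool isSymbol = false;        // IsSymbol()
  bool isInternalized = false;  // IsInternalizedString()
  std::string chars;            // payload for plain strings
};

// Mirrors the fixed Name::Equals: identity first; a symbol is equal to
// nothing else; two distinct internalized strings are already known to
// differ; everything remaining falls back to SlowEquals.
bool nameEquals(const Name& a, const Name& b) {
  if (&a == &b) return true;
  if (a.isSymbol || b.isSymbol ||
      (a.isInternalized && b.isInternalized))
    return false;
  return a.chars == b.chars;  // SlowEquals stand-in
}
```

The bug being fixed is visible in the old guard `IsUniqueName() && other->IsUniqueName()`: it returned false for a symbol compared against a plain string only when *both* sides were unique, whereas a symbol should short-circuit to unequal regardless of the other operand.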
mmm a / xbmc / utils / CPUInfo . cpp <nl> ppp b / xbmc / utils / CPUInfo . cpp <nl> CCPUInfo : : CCPUInfo ( void ) <nl> } <nl> } <nl> } <nl> - <nl> - / * Set some default for empty string variables * / <nl> - if ( m_cpuBogoMips . empty ( ) ) <nl> - m_cpuBogoMips = " N / A " ; <nl> - if ( m_cpuHardware . empty ( ) ) <nl> - m_cpuHardware = " N / A " ; <nl> - if ( m_cpuRevision . empty ( ) ) <nl> - m_cpuRevision = " N / A " ; <nl> - if ( m_cpuSerial . empty ( ) ) <nl> - m_cpuSerial = " N / A " ; <nl> } <nl> else <nl> { <nl> CCPUInfo : : CCPUInfo ( void ) <nl> } <nl> <nl> # endif <nl> + / * Set some default for empty string variables * / <nl> + if ( m_cpuBogoMips . empty ( ) ) <nl> + m_cpuBogoMips = " N / A " ; <nl> + if ( m_cpuHardware . empty ( ) ) <nl> + m_cpuHardware = " N / A " ; <nl> + if ( m_cpuRevision . empty ( ) ) <nl> + m_cpuRevision = " N / A " ; <nl> + if ( m_cpuSerial . empty ( ) ) <nl> + m_cpuSerial = " N / A " ; <nl> + <nl> readProcStat ( m_userTicks , m_niceTicks , m_systemTicks , m_idleTicks , m_ioTicks ) ; <nl> m_nextUsedReadTime . Set ( MINIMUM_TIME_BETWEEN_READS ) ; <nl> <nl>
Allow setting the default " N / A " strings for all platforms .
xbmc/xbmc
690f567d4988ee12605eefce1e372204dc79e7b5
2012-09-16T23:15:02Z
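The CPUInfo change above simply hoists the empty-string defaults out of the `/proc/cpuinfo` branch so every platform gets them. If one wanted to avoid the four-fold repetition, a small hypothetical helper (not part of the patch; xbmc's actual fields are `CStdString`, with `std::string` standing in here) could express the same normalization:

```cpp
#include <string>

// Hypothetical helper, not part of the patch: normalize any field the
// platform probes left empty to the shared "N/A" default.
static void setDefaultIfEmpty(std::string& value,
                              const char* fallback = "N/A") {
  if (value.empty())
    value = fallback;
}

// Usage, mirroring the four fields in CCPUInfo's constructor:
//   setDefaultIfEmpty(m_cpuBogoMips);
//   setDefaultIfEmpty(m_cpuHardware);
//   setDefaultIfEmpty(m_cpuRevision);
//   setDefaultIfEmpty(m_cpuSerial);
```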
mmm a / 3rdParty / V8 / include / v8 . h <nl> ppp b / 3rdParty / V8 / include / v8 . h <nl> <nl> # define V8_H_ <nl> <nl> / / TODO ( svenpanne ) Remove me when the Chrome bindings are adapted . <nl> - # define V8_DISABLE_DEPRECATIONS 1 <nl> + # define V8_DISABLE_DEPRECATIONS 0 <nl> <nl> # include " v8stdint . h " <nl> <nl> mmm a / arangod / RestHandler / RestDocumentHandler . cpp <nl> ppp b / arangod / RestHandler / RestDocumentHandler . cpp <nl> bool RestDocumentHandler : : createDocument ( ) { <nl> return false ; <nl> } <nl> <nl> - <nl> / / find and load collection given by name or identifier <nl> SingleCollectionWriteTransaction < StandaloneTransaction < RestTransactionContext > , 1 > trx ( _vocbase , _resolver , collection ) ; <nl> <nl> bool RestDocumentHandler : : createDocument ( ) { <nl> / / . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . <nl> <nl> int res = trx . begin ( ) ; <nl> + <nl> if ( res ! = TRI_ERROR_NO_ERROR ) { <nl> generateTransactionError ( collection , res ) ; <nl> return false ; <nl> bool RestDocumentHandler : : modifyDocument ( bool isPatch ) { <nl> ResourceHolder holder ; <nl> <nl> TRI_json_t * json = parseJsonBody ( ) ; <nl> + <nl> if ( ! holder . registerJson ( TRI_UNKNOWN_MEM_ZONE , json ) ) { <nl> return false ; <nl> } <nl> bool RestDocumentHandler : : modifyDocument ( bool isPatch ) { <nl> / / . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . <nl> <nl> int res = trx . begin ( ) ; <nl> + <nl> if ( res ! = TRI_ERROR_NO_ERROR ) { <nl> generateTransactionError ( collection , res ) ; <nl> return false ; <nl> mmm a / arangod / RestHandler / RestVocbaseBaseHandler . cpp <nl> ppp b / arangod / RestHandler / RestVocbaseBaseHandler . cpp <nl> bool RestVocbaseBaseHandler : : checkCreateCollection ( const string & name , <nl> } <nl> <nl> TRI_vocbase_col_t * collection = TRI_FindCollectionByNameOrCreateVocBase ( _vocbase , name . c_str ( ) , type ) ; <nl> + <nl> if ( collection = = 0 ) { <nl> generateTransactionError ( name , TRI_errno ( ) ) ; <nl> return false ; <nl> mmm a / arangod / V8Server / v8 - actions . cpp <nl> ppp b / arangod / V8Server / v8 - actions . cpp <nl> void TRI_InitV8Actions ( v8 : : Handle < v8 : : Context > context , ApplicationV8 * applicat <nl> <nl> / / check the isolate <nl> v8 : : Isolate * isolate = v8 : : Isolate : : GetCurrent ( ) ; <nl> - TRI_v8_global_t * v8g = ( TRI_v8_global_t * ) isolate - > GetData ( ) ; <nl> - <nl> - assert ( v8g ! = 0 ) ; <nl> + TRI_v8_global_t * v8g = TRI_CreateV8Globals ( isolate ) ; <nl> <nl> GlobalV8Dealer = applicationV8 ; <nl> <nl> void TRI_InitV8Actions ( v8 : : Handle < v8 : : Context > context , ApplicationV8 * applicat <nl> <nl> TRI_AddGlobalFunctionVocbase ( context , " SYS_DEFINE_ACTION " , JS_DefineAction ) ; <nl> TRI_AddGlobalFunctionVocbase ( context , " SYS_EXECUTE_GLOBAL_CONTEXT_FUNCTION " , JS_ExecuteGlobalContextFunction ) ; <nl> - <nl> - / / . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . <nl> - / / keys <nl> - / / . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . 
<nl> - <nl> - v8g - > BodyKey = v8 : : Persistent < v8 : : String > : : New ( TRI_V8_SYMBOL ( " body " ) ) ; <nl> - v8g - > BodyFromFileKey = v8 : : Persistent < v8 : : String > : : New ( TRI_V8_SYMBOL ( " bodyFromFile " ) ) ; <nl> - v8g - > ContentTypeKey = v8 : : Persistent < v8 : : String > : : New ( TRI_V8_SYMBOL ( " contentType " ) ) ; <nl> - v8g - > HeadersKey = v8 : : Persistent < v8 : : String > : : New ( TRI_V8_SYMBOL ( " headers " ) ) ; <nl> - v8g - > ParametersKey = v8 : : Persistent < v8 : : String > : : New ( TRI_V8_SYMBOL ( " parameters " ) ) ; <nl> - v8g - > PathKey = v8 : : Persistent < v8 : : String > : : New ( TRI_V8_SYMBOL ( " path " ) ) ; <nl> - v8g - > PrefixKey = v8 : : Persistent < v8 : : String > : : New ( TRI_V8_SYMBOL ( " prefix " ) ) ; <nl> - v8g - > RequestBodyKey = v8 : : Persistent < v8 : : String > : : New ( TRI_V8_SYMBOL ( " requestBody " ) ) ; <nl> - v8g - > RequestTypeKey = v8 : : Persistent < v8 : : String > : : New ( TRI_V8_SYMBOL ( " requestType " ) ) ; <nl> - v8g - > ResponseCodeKey = v8 : : Persistent < v8 : : String > : : New ( TRI_V8_SYMBOL ( " responseCode " ) ) ; <nl> - v8g - > SuffixKey = v8 : : Persistent < v8 : : String > : : New ( TRI_V8_SYMBOL ( " suffix " ) ) ; <nl> - v8g - > TransformationsKey = v8 : : Persistent < v8 : : String > : : New ( TRI_V8_SYMBOL ( " transformations " ) ) ; <nl> - v8g - > UrlKey = v8 : : Persistent < v8 : : String > : : New ( TRI_V8_SYMBOL ( " url " ) ) ; <nl> - v8g - > UserKey = v8 : : Persistent < v8 : : String > : : New ( TRI_V8_SYMBOL ( " user " ) ) ; <nl> - <nl> - v8g - > DeleteConstant = v8 : : Persistent < v8 : : String > : : New ( TRI_V8_SYMBOL ( " DELETE " ) ) ; <nl> - v8g - > GetConstant = v8 : : Persistent < v8 : : String > : : New ( TRI_V8_SYMBOL ( " GET " ) ) ; <nl> - v8g - > HeadConstant = v8 : : Persistent < v8 : : String > : : New ( TRI_V8_SYMBOL ( " HEAD " ) ) ; <nl> - v8g - > OptionsConstant = v8 : : Persistent < v8 : : String > : : New ( TRI_V8_SYMBOL ( " OPTIONS " ) ) ; <nl> - v8g - > PatchConstant = v8 : : Persistent < v8 : : String > : : New ( TRI_V8_SYMBOL ( " PATCH " ) ) ; <nl> - v8g - > PostConstant = v8 : : Persistent < v8 : : String > : : New ( TRI_V8_SYMBOL ( " POST " ) ) ; <nl> - v8g - > PutConstant = v8 : : Persistent < v8 : : String > : : New ( TRI_V8_SYMBOL ( " PUT " ) ) ; <nl> } <nl> <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> mmm a / arangod / V8Server / v8 - vocbase . cpp <nl> ppp b / arangod / V8Server / v8 - vocbase . cpp <nl> static int ExtractDocumentKey ( v8 : : Handle < v8 : : Value > arg , <nl> if ( arg - > IsObject ( ) ) { <nl> v8 : : Handle < v8 : : Object > obj = arg - > ToObject ( ) ; <nl> <nl> - if ( obj - > Has ( v8g - > KeyKey ) ) { <nl> - v8 : : Handle < v8 : : Value > v = obj - > Get ( v8g - > KeyKey ) ; <nl> + if ( obj - > Has ( v8g - > _KeyKey ) ) { <nl> + v8 : : Handle < v8 : : Value > v = obj - > Get ( v8g - > _KeyKey ) ; <nl> <nl> if ( v - > IsString ( ) ) { <nl> / / string key <nl> static v8 : : Handle < v8 : : Value > ReplaceVocbaseCol ( const bool useCollection , <nl> TRI_v8_global_t * v8g = ( TRI_v8_global_t * ) v8 : : Isolate : : GetCurrent ( ) - > GetData ( ) ; <nl> <nl> v8 : : Handle < v8 : : Object > result = v8 : : Object : : New ( ) ; <nl> - result - > Set ( v8g - > DidKey , V8DocumentId ( resolver . getCollectionName ( col - > _cid ) , document . _key ) ) ; <nl> - result - > Set ( v8g - > RevKey , V8RevisionId ( document . 
_rid ) ) ; <nl> - result - > Set ( v8g - > OldRevKey , V8RevisionId ( actualRevision ) ) ; <nl> - result - > Set ( v8g - > KeyKey , v8 : : String : : New ( document . _key ) ) ; <nl> + result - > Set ( v8g - > _IdKey , V8DocumentId ( resolver . getCollectionName ( col - > _cid ) , document . _key ) ) ; <nl> + result - > Set ( v8g - > _RevKey , V8RevisionId ( document . _rid ) ) ; <nl> + result - > Set ( v8g - > _OldRevKey , V8RevisionId ( actualRevision ) ) ; <nl> + result - > Set ( v8g - > _KeyKey , v8 : : String : : New ( document . _key ) ) ; <nl> <nl> return scope . Close ( result ) ; <nl> } <nl> static v8 : : Handle < v8 : : Value > SaveVocbaseCol ( <nl> assert ( document . _key ! = 0 ) ; <nl> <nl> v8 : : Handle < v8 : : Object > result = v8 : : Object : : New ( ) ; <nl> - result - > Set ( v8g - > DidKey , V8DocumentId ( resolver . getCollectionName ( col - > _cid ) , document . _key ) ) ; <nl> - result - > Set ( v8g - > RevKey , V8RevisionId ( document . _rid ) ) ; <nl> - result - > Set ( v8g - > KeyKey , v8 : : String : : New ( document . _key ) ) ; <nl> + result - > Set ( v8g - > _IdKey , V8DocumentId ( resolver . getCollectionName ( col - > _cid ) , document . _key ) ) ; <nl> + result - > Set ( v8g - > _RevKey , V8RevisionId ( document . _rid ) ) ; <nl> + result - > Set ( v8g - > _KeyKey , v8 : : String : : New ( document . _key ) ) ; <nl> <nl> return scope . Close ( result ) ; <nl> } <nl> static v8 : : Handle < v8 : : Value > SaveEdgeCol ( <nl> assert ( document . _key ! = 0 ) ; <nl> <nl> v8 : : Handle < v8 : : Object > result = v8 : : Object : : New ( ) ; <nl> - result - > Set ( v8g - > DidKey , V8DocumentId ( resolver . getCollectionName ( col - > _cid ) , document . _key ) ) ; <nl> - result - > Set ( v8g - > RevKey , V8RevisionId ( document . _rid ) ) ; <nl> - result - > Set ( v8g - > KeyKey , v8 : : String : : New ( document . _key ) ) ; <nl> + result - > Set ( v8g - > _IdKey , V8DocumentId ( resolver . getCollectionName ( col - > _cid ) , document . _key ) ) ; <nl> + result - > Set ( v8g - > _RevKey , V8RevisionId ( document . _rid ) ) ; <nl> + result - > Set ( v8g - > _KeyKey , v8 : : String : : New ( document . _key ) ) ; <nl> <nl> return scope . Close ( result ) ; <nl> } <nl> static v8 : : Handle < v8 : : Value > UpdateVocbaseCol ( const bool useCollection , <nl> TRI_v8_global_t * v8g = ( TRI_v8_global_t * ) v8 : : Isolate : : GetCurrent ( ) - > GetData ( ) ; <nl> <nl> v8 : : Handle < v8 : : Object > result = v8 : : Object : : New ( ) ; <nl> - result - > Set ( v8g - > DidKey , V8DocumentId ( resolver . getCollectionName ( col - > _cid ) , document . _key ) ) ; <nl> - result - > Set ( v8g - > RevKey , V8RevisionId ( document . _rid ) ) ; <nl> - result - > Set ( v8g - > OldRevKey , V8RevisionId ( actualRevision ) ) ; <nl> - result - > Set ( v8g - > KeyKey , v8 : : String : : New ( document . _key ) ) ; <nl> + result - > Set ( v8g - > _IdKey , V8DocumentId ( resolver . getCollectionName ( col - > _cid ) , document . _key ) ) ; <nl> + result - > Set ( v8g - > _RevKey , V8RevisionId ( document . _rid ) ) ; <nl> + result - > Set ( v8g - > _OldRevKey , V8RevisionId ( actualRevision ) ) ; <nl> + result - > Set ( v8g - > _KeyKey , v8 : : String : : New ( document . _key ) ) ; <nl> <nl> return scope . 
Close ( result ) ; <nl> } <nl> static v8 : : Handle < v8 : : Value > JS_SaveOrReplaceVocbaseCol ( v8 : : Arguments const & ar <nl> TRI_v8_global_t * v8g = ( TRI_v8_global_t * ) v8 : : Isolate : : GetCurrent ( ) - > GetData ( ) ; <nl> <nl> v8 : : Handle < v8 : : Object > r = v8 : : Object : : New ( ) ; <nl> - r - > Set ( v8g - > DidKey , V8DocumentId ( resolver . getCollectionName ( col - > _cid ) , document . _key ) ) ; <nl> - r - > Set ( v8g - > RevKey , V8RevisionId ( document . _rid ) ) ; <nl> - r - > Set ( v8g - > OldRevKey , V8RevisionId ( actualRevision ) ) ; <nl> - r - > Set ( v8g - > KeyKey , v8 : : String : : New ( document . _key ) ) ; <nl> + r - > Set ( v8g - > _IdKey , V8DocumentId ( resolver . getCollectionName ( col - > _cid ) , document . _key ) ) ; <nl> + r - > Set ( v8g - > _RevKey , V8RevisionId ( document . _rid ) ) ; <nl> + r - > Set ( v8g - > _OldRevKey , V8RevisionId ( actualRevision ) ) ; <nl> + r - > Set ( v8g - > _KeyKey , v8 : : String : : New ( document . _key ) ) ; <nl> <nl> result = r ; <nl> } <nl> static v8 : : Handle < v8 : : Array > KeysOfShapedJson ( const v8 : : AccessorInfo & info ) { <nl> } <nl> } <nl> <nl> - result - > Set ( count + + , v8g - > DidKey ) ; <nl> - result - > Set ( count + + , v8g - > RevKey ) ; <nl> - result - > Set ( count + + , v8g - > KeyKey ) ; <nl> + result - > Set ( count + + , v8g - > _IdKey ) ; <nl> + result - > Set ( count + + , v8g - > _RevKey ) ; <nl> + result - > Set ( count + + , v8g - > _KeyKey ) ; <nl> <nl> return scope . Close ( result ) ; <nl> } <nl> bool ExtractDocumentHandle ( v8 : : Handle < v8 : : Value > val , <nl> TRI_v8_global_t * v8g = ( TRI_v8_global_t * ) v8 : : Isolate : : GetCurrent ( ) - > GetData ( ) ; <nl> <nl> v8 : : Handle < v8 : : Object > obj = val - > ToObject ( ) ; <nl> - v8 : : Handle < v8 : : Value > didVal = obj - > Get ( v8g - > DidKey ) ; <nl> + v8 : : Handle < v8 : : Value > didVal = obj - > Get ( v8g - > _IdKey ) ; <nl> <nl> if ( ! ParseDocumentHandle ( didVal , collectionName , key ) ) { <nl> return false ; <nl> } <nl> <nl> - rid = TRI_ObjectToUInt64 ( obj - > Get ( v8g - > RevKey ) , true ) ; <nl> + rid = TRI_ObjectToUInt64 ( obj - > Get ( v8g - > _RevKey ) , true ) ; <nl> <nl> if ( rid = = 0 ) { <nl> return false ; <nl> TRI_index_t * TRI_LookupIndexByHandle ( const CollectionNameResolver & resolver , <nl> TRI_v8_global_t * v8g = ( TRI_v8_global_t * ) v8 : : Isolate : : GetCurrent ( ) - > GetData ( ) ; <nl> <nl> v8 : : Handle < v8 : : Object > obj = val - > ToObject ( ) ; <nl> - v8 : : Handle < v8 : : Value > iidVal = obj - > Get ( v8g - > IidKey ) ; <nl> + v8 : : Handle < v8 : : Value > iidVal = obj - > Get ( v8g - > IdKey ) ; <nl> <nl> if ( ! IsIndexHandle ( iidVal , collectionName , iid ) ) { <nl> * err = TRI_CreateErrorObject ( TRI_ERROR_ARANGO_INDEX_HANDLE_BAD , <nl> v8 : : Handle < v8 : : Object > TRI_WrapCollection ( TRI_vocbase_col_t const * collection ) <nl> WRP_VOCBASE_COL_TYPE , <nl> const_cast < TRI_vocbase_col_t * > ( collection ) ) ; <nl> <nl> - result - > Set ( v8g - > DidKey , V8CollectionId ( collection - > _cid ) , v8 : : ReadOnly ) ; <nl> + result - > Set ( v8g - > _IdKey , V8CollectionId ( collection - > _cid ) , v8 : : ReadOnly ) ; <nl> <nl> return scope . Close ( result ) ; <nl> } <nl> v8 : : Handle < v8 : : Value > TRI_WrapShapedJson ( const CollectionNameResolver & resolver <nl> / / store the document reference <nl> TRI_voc_rid_t rid = document - > _rid ; <nl> <nl> - result - > Set ( v8g - > DidKey , V8DocumentId ( resolver . 
getCollectionName ( collection - > _cid ) , document - > _key ) , v8 : : ReadOnly ) ; <nl> - result - > Set ( v8g - > RevKey , V8RevisionId ( rid ) , v8 : : ReadOnly ) ; <nl> - result - > Set ( v8g - > KeyKey , v8 : : String : : New ( document - > _key ) , v8 : : ReadOnly ) ; <nl> + result - > Set ( v8g - > _IdKey , V8DocumentId ( resolver . getCollectionName ( collection - > _cid ) , document - > _key ) , v8 : : ReadOnly ) ; <nl> + result - > Set ( v8g - > _RevKey , V8RevisionId ( rid ) , v8 : : ReadOnly ) ; <nl> + result - > Set ( v8g - > _KeyKey , v8 : : String : : New ( document - > _key ) , v8 : : ReadOnly ) ; <nl> <nl> TRI_df_marker_type_t type = ( ( TRI_df_marker_t * ) document - > _data ) - > _type ; <nl> <nl> if ( type = = TRI_DOC_MARKER_KEY_EDGE ) { <nl> TRI_doc_edge_key_marker_t * marker = ( TRI_doc_edge_key_marker_t * ) document - > _data ; <nl> <nl> - result - > Set ( v8g - > FromKey , V8DocumentId ( resolver . getCollectionName ( marker - > _fromCid ) , ( ( char * ) marker ) + marker - > _offsetFromKey ) ) ; <nl> - result - > Set ( v8g - > ToKey , V8DocumentId ( resolver . getCollectionName ( marker - > _toCid ) , ( ( char * ) marker ) + marker - > _offsetToKey ) ) ; <nl> + result - > Set ( v8g - > _FromKey , V8DocumentId ( resolver . getCollectionName ( marker - > _fromCid ) , ( ( char * ) marker ) + marker - > _offsetFromKey ) ) ; <nl> + result - > Set ( v8g - > _ToKey , V8DocumentId ( resolver . getCollectionName ( marker - > _toCid ) , ( ( char * ) marker ) + marker - > _offsetToKey ) ) ; <nl> } <nl> <nl> / / and return <nl> TRI_v8_global_t * TRI_InitV8VocBridge ( v8 : : Handle < v8 : : Context > context , <nl> const size_t threadNumber ) { <nl> v8 : : HandleScope scope ; <nl> <nl> - v8 : : Handle < v8 : : ObjectTemplate > rt ; <nl> - v8 : : Handle < v8 : : FunctionTemplate > ft ; <nl> - v8 : : Handle < v8 : : Template > pt ; <nl> - <nl> / / check the isolate <nl> v8 : : Isolate * isolate = v8 : : Isolate : : GetCurrent ( ) ; <nl> - TRI_v8_global_t * v8g = ( TRI_v8_global_t * ) isolate - > GetData ( ) ; <nl> + TRI_v8_global_t * v8g = TRI_CreateV8Globals ( isolate ) ; <nl> <nl> - if ( v8g = = 0 ) { <nl> - v8g = new TRI_v8_global_t ; <nl> - isolate - > SetData ( v8g ) ; <nl> - } <nl> + / / set the default database <nl> v8g - > _vocbase = vocbase ; <nl> <nl> / / create the regular expressions <nl> TRI_v8_global_t * TRI_InitV8VocBridge ( v8 : : Handle < v8 : : Context > context , <nl> LOG_FATAL_AND_EXIT ( " cannot compile regular expression " ) ; <nl> } <nl> <nl> - <nl> - / / . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . <nl> - / / keys <nl> - / / . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . <nl> - <nl> - v8g - > IsSystemKey = v8 : : Persistent < v8 : : String > : : New ( TRI_V8_SYMBOL ( " isSystem " ) ) ; <nl> - v8g - > IsVolatileKey = v8 : : Persistent < v8 : : String > : : New ( TRI_V8_SYMBOL ( " isVolatile " ) ) ; <nl> - v8g - > JournalSizeKey = v8 : : Persistent < v8 : : String > : : New ( TRI_V8_SYMBOL ( " journalSize " ) ) ; <nl> - v8g - > KeyOptionsKey = v8 : : Persistent < v8 : : String > : : New ( TRI_V8_SYMBOL ( " keyOptions " ) ) ; <nl> - v8g - > WaitForSyncKey = v8 : : Persistent < v8 : : String > : : New ( TRI_V8_SYMBOL ( " waitForSync " ) ) ; <nl> - <nl> - if ( v8g - > DidKey . 
IsEmpty ( ) ) { <nl> - v8g - > DidKey = v8 : : Persistent < v8 : : String > : : New ( TRI_V8_SYMBOL ( " _id " ) ) ; <nl> - } <nl> - <nl> - if ( v8g - > KeyKey . IsEmpty ( ) ) { <nl> - v8g - > KeyKey = v8 : : Persistent < v8 : : String > : : New ( TRI_V8_SYMBOL ( " _key " ) ) ; <nl> - } <nl> - <nl> - if ( v8g - > FromKey . IsEmpty ( ) ) { <nl> - v8g - > FromKey = v8 : : Persistent < v8 : : String > : : New ( TRI_V8_SYMBOL ( " _from " ) ) ; <nl> - } <nl> - <nl> - if ( v8g - > IidKey . IsEmpty ( ) ) { <nl> - v8g - > IidKey = v8 : : Persistent < v8 : : String > : : New ( TRI_V8_SYMBOL ( " id " ) ) ; <nl> - } <nl> - <nl> - if ( v8g - > OldRevKey . IsEmpty ( ) ) { <nl> - v8g - > OldRevKey = v8 : : Persistent < v8 : : String > : : New ( TRI_V8_SYMBOL ( " _oldRev " ) ) ; <nl> - } <nl> - <nl> - if ( v8g - > RevKey . IsEmpty ( ) ) { <nl> - v8g - > RevKey = v8 : : Persistent < v8 : : String > : : New ( TRI_V8_SYMBOL ( " _rev " ) ) ; <nl> - } <nl> - <nl> - if ( v8g - > ToKey . IsEmpty ( ) ) { <nl> - v8g - > ToKey = v8 : : Persistent < v8 : : String > : : New ( TRI_V8_SYMBOL ( " _to " ) ) ; <nl> - } <nl> + v8 : : Handle < v8 : : ObjectTemplate > rt ; <nl> + v8 : : Handle < v8 : : FunctionTemplate > ft ; <nl> + v8 : : Handle < v8 : : Template > pt ; <nl> <nl> / / . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . <nl> / / generate the TRI_vocbase_t template <nl> TRI_v8_global_t * TRI_InitV8VocBridge ( v8 : : Handle < v8 : : Context > context , <nl> v8g - > VocbaseTempl = v8 : : Persistent < v8 : : ObjectTemplate > : : New ( rt ) ; <nl> TRI_AddGlobalFunctionVocbase ( context , " ArangoDatabase " , ft - > GetFunction ( ) ) ; <nl> <nl> - <nl> / / . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . <nl> / / generate the TRI_shaped_json_t template <nl> / / . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . <nl> mmm a / html / admin / js / bootstrap / module - internal . js <nl> ppp b / html / admin / js / bootstrap / module - internal . 
js <nl> <nl> / * jslint indent : 2 , nomen : true , maxlen : 120 , vars : true , white : true , plusplus : true , nonpropdel : true , proto : true * / <nl> - / * global require , module , Module , SYS_DOWNLOAD , <nl> - SYS_EXECUTE , SYS_LOAD , SYS_LOG_LEVEL , SYS_MD5 , SYS_OUTPUT , SYS_PROCESS_STAT , <nl> + / * global require , module , Module , ArangoError , SYS_DOWNLOAD , <nl> + SYS_EXECUTE , SYS_LOAD , SYS_LOG_LEVEL , SYS_MD5 , SYS_OUTPUT , SYS_PROCESS_STAT , <nl> SYS_RAND , SYS_SPRINTF , SYS_TIME , SYS_START_PAGER , SYS_STOP_PAGER , SYS_SHA256 , SYS_WAIT , <nl> SYS_PARSE , SYS_IMPORT_CSV_FILE , SYS_IMPORT_JSON_FILE , SYS_LOG , <nl> SYS_GEN_RANDOM_NUMBERS , SYS_GEN_RANDOM_ALPHA_NUMBERS , SYS_GEN_RANDOM_SALT , SYS_CREATE_NONCE , <nl> <nl> / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl> <nl> / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl> - / / - - SECTION - - public constants <nl> + / / - - SECTION - - public types <nl> / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl> <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - / / / @ addtogroup ArangoShell <nl> - / / / @ { <nl> + / / / @ brief ArangoError <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> <nl> + exports . ArangoError = ArangoError ; <nl> + delete ArangoError ; <nl> + <nl> + exports . ArangoError . prototype . _PRINT = function ( context ) { <nl> + context . output + = this . toString ( ) ; <nl> + } ; <nl> + <nl> + exports . ArangoError . prototype . toString = function ( ) { <nl> + var errorNum = this . errorNum ; <nl> + var errorMessage = this . 
errorMessage ; <nl> + <nl> + return " [ ArangoError " + errorNum + " : " + errorMessage + " ] " ; <nl> + } ; <nl> + <nl> + / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl> + / / - - SECTION - - public constants <nl> + / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl> + <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> / / / @ brief threadNumber <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> <nl> delete REQUEST_TIME_DISTRIBUTION ; <nl> } <nl> <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - / / / @ } <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - <nl> / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl> / / - - SECTION - - public functions <nl> / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl> <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - / / / @ addtogroup ArangoShell <nl> - / / / @ { <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> / / / @ brief download <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> <nl> exports . download = SYS_DOWNLOAD ; <nl> delete SYS_DOWNLOAD ; <nl> } <nl> - <nl> + <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> / / / @ brief executeScript <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> <nl> delete SYS_EXECUTE ; <nl> } <nl> <nl> + / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> + / / / @ brief extend <nl> + / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> + <nl> + exports . extend = function ( target , source ) { <nl> + ' use strict ' ; <nl> + <nl> + Object . getOwnPropertyNames ( source ) <nl> + . forEach ( function ( propName ) { <nl> + Object . defineProperty ( target , propName , <nl> + Object . 
getOwnPropertyDescriptor ( source , propName ) ) ; <nl> + } ) ; <nl> + <nl> + return target ; <nl> + } ; <nl> + <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> / / / @ brief load <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> <nl> exports . wait = SYS_WAIT ; <nl> delete SYS_WAIT ; <nl> } <nl> - <nl> + <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> / / / @ brief importCsvFile <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> <nl> delete SYS_REQUEST_STATISTICS ; <nl> } <nl> <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - / / / @ } <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - <nl> / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl> - / / - - SECTION - - public functions <nl> + / / - - SECTION - - private functions <nl> / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl> <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - / / / @ addtogroup ArangoShell <nl> - / / / @ { <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> / / / @ brief flushModuleCache <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> <nl> SYS_UNIT_TESTS_RESULT = value ; <nl> } ; <nl> <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - / / / @ brief extend <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - <nl> - exports . extend = function ( target , source ) { <nl> - ' use strict ' ; <nl> - <nl> - Object . getOwnPropertyNames ( source ) <nl> - . forEach ( function ( propName ) { <nl> - Object . defineProperty ( target , propName , <nl> - Object . 
getOwnPropertyDescriptor ( source , propName ) ) ; <nl> - } ) ; <nl> - <nl> - return target ; <nl> - } ; <nl> - <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - / / / @ } <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - <nl> } ( ) ) ; <nl> <nl> / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl> <nl> / / - - SECTION - - public printing variables <nl> / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl> <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - / / / @ addtogroup ArangoShell <nl> - / / / @ { <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> / / / @ brief COLORS <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> <nl> } <nl> else { <nl> [ ' COLOR_RED ' , ' COLOR_BOLD_RED ' , ' COLOR_GREEN ' , ' COLOR_BOLD_GREEN ' , <nl> - ' COLOR_BLUE ' , ' COLOR_BOLD_BLUE ' , ' COLOR_YELLOW ' , ' COLOR_BOLD_YELLOW ' , <nl> - ' COLOR_WHITE ' , ' COLOR_BOLD_WHITE ' , ' COLOR_CYAN ' , ' COLOR_BOLD_CYAN ' , <nl> - ' COLOR_MAGENTA ' , ' COLOR_BOLD_MAGENTA ' , ' COLOR_BLACK ' , ' COLOR_BOLD_BLACK ' , <nl> + ' COLOR_BLUE ' , ' COLOR_BOLD_BLUE ' , ' COLOR_YELLOW ' , ' COLOR_BOLD_YELLOW ' , <nl> + ' COLOR_WHITE ' , ' COLOR_BOLD_WHITE ' , ' COLOR_CYAN ' , ' COLOR_BOLD_CYAN ' , <nl> + ' COLOR_MAGENTA ' , ' COLOR_BOLD_MAGENTA ' , ' COLOR_BLACK ' , ' COLOR_BOLD_BLACK ' , <nl> ' COLOR_BLINK ' , ' COLOR_BRIGHT ' , ' COLOR_RESET ' ] . forEach ( function ( color ) { <nl> exports . COLORS [ color ] = ' ' ; <nl> } ) ; <nl> <nl> exports . COLORS . COLOR_NULL = exports . COLORS . COLOR_BRIGHT ; <nl> exports . COLORS . COLOR_UNDEFINED = exports . COLORS . 
COLOR_BRIGHT ; <nl> <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - / / / @ } <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - <nl> / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl> / / - - SECTION - - private printing variables <nl> / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl> <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - / / / @ addtogroup ArangoShell <nl> - / / / @ { <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> / / / @ brief quote cache <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> <nl> delete PRETTY_PRINT ; <nl> } <nl> <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - / / / @ } <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - <nl> / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl> / / - - SECTION - - private printing functions <nl> / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl> <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - / / / @ addtogroup ArangoShell <nl> - / / / @ { <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - <nl> var printRecursive ; <nl> <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> <nl> output ( " \ n " ) ; <nl> } <nl> <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - / / / @ } <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - <nl> / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl> / / - - SECTION - - public printing functions <nl> / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl> <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - / / / @ addtogroup ArangoShell <nl> - / / / @ { <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / 
/ / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> / / / @ brief inspect <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> <nl> useColor = false ; <nl> } ; <nl> <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - / / / @ } <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - <nl> } ( ) ) ; <nl> <nl> / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl> / / - - SECTION - - global printing functions <nl> / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl> <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - / / / @ addtogroup ArangoShell <nl> - / / / @ { <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> / / / @ brief print <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> function stop_color_print ( ) { <nl> require ( " internal " ) . stopColorPrint ( ) ; <nl> } <nl> <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - / / / @ } <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - <nl> / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl> / / - - SECTION - - END - OF - FILE <nl> / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl> mmm a / html / admin / js / modules / org / arangodb - common . js <nl> ppp b / html / admin / js / modules / org / arangodb - common . js <nl> module . define ( " org / arangodb - common " , function ( exports , module ) { <nl> / / / <nl> / / / DISCLAIMER <nl> / / / <nl> - / / / Copyright 2012 triagens GmbH , Cologne , Germany <nl> + / / / Copyright 2004 - 2013 triAGENS GmbH , Cologne , Germany <nl> / / / <nl> / / / Licensed under the Apache License , Version 2 . 0 ( the " License " ) ; <nl> / / / you may not use this file except in compliance with the License . <nl> module . define ( " org / arangodb - common " , function ( exports , module ) { <nl> / / / Copyright holder is triAGENS GmbH , Cologne , Germany <nl> / / / <nl> / / / @ author Dr . 
Frank Celler <nl> - / / / @ author Copyright 2012 , triAGENS GmbH , Cologne , Germany <nl> + / / / @ author Copyright 2012 - 2013 , triAGENS GmbH , Cologne , Germany <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> <nl> var internal = require ( " internal " ) ; <nl> var mimetypes = require ( " org / arangodb / mimetypes " ) . mimeTypes ; <nl> / / - - SECTION - - public constants <nl> / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl> <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - / / / @ addtogroup ArangoShell <nl> - / / / @ { <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> / / / @ brief errors <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> var mimetypes = require ( " org / arangodb / mimetypes " ) . mimeTypes ; <nl> <nl> exports . errors = internal . errors ; <nl> <nl> + / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl> + / / - - SECTION - - public types <nl> + / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl> + <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - / / / @ } <nl> + / / / @ brief ArangoError <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> <nl> + exports . ArangoError = internal . ArangoError ; <nl> + <nl> / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl> / / - - SECTION - - public functions <nl> / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl> <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - / / / @ addtogroup ArangoShell <nl> - / / / @ { <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> / / / @ brief defines a module <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> exports . guessContentType = function ( filename ) { <nl> <nl> if ( match ! = = null ) { <nl> var extension = match [ 1 ] ; <nl> - <nl> + <nl> if ( mimetypes . 
hasOwnProperty ( extension ) ) { <nl> var type = mimetypes [ extension ] ; <nl> - <nl> + <nl> if ( type [ 1 ] ) { <nl> / / append charset <nl> return type [ 0 ] + " ; charset = utf - 8 " ; <nl> } <nl> <nl> - return type [ 0 ] ; <nl> + return type [ 0 ] ; <nl> } <nl> / / fall - through intentional <nl> } <nl> exports . printTable = function ( list , columns , framed ) { <nl> } <nl> else { <nl> what = columns ; <nl> - } <nl> + } <nl> <nl> j = 0 ; <nl> descriptions = [ ] ; <nl> exports . printTable = function ( list , columns , framed ) { <nl> } <nl> } ) ; <nl> } ) ; <nl> - <nl> + <nl> var divider = function ( ) { <nl> var parts = [ ] ; <nl> descriptions . forEach ( function ( desc ) { <nl> exports . stringPadding = function ( str , len , pad , dir ) { <nl> return str ; <nl> } ; <nl> <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - / / / @ } <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - <nl> / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl> / / - - SECTION - - END - OF - FILE <nl> / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl> <nl> / / Local Variables : <nl> / / mode : outline - minor <nl> - / / outline - regexp : " / / / @ brief \ \ | / / / @ addtogroup \ \ | / / - - SECTION - - \ \ | / / / @ page \ \ | / / / @ } \ \ | / \ \ * jslint " <nl> + / / outline - regexp : " / / / @ brief \ \ | / / / @ addtogroup \ \ | / / / @ page \ \ | / / - - SECTION - - \ \ | / / / @ \ \ } \ \ | / \ \ * jslint " <nl> / / End : <nl> } ) ; <nl> mmm a / html / admin / js / modules / org / arangodb . js <nl> ppp b / html / admin / js / modules / org / arangodb . js <nl> exports . ArangoConnection = internal . ArangoConnection ; <nl> / / cannot yet not use arangodb <nl> exports . ArangoDatabase = require ( " org / arangodb / arango - database " ) . ArangoDatabase ; <nl> <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - / / / @ brief class " ArangoError " <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - <nl> - / / cannot yet not use arangodb <nl> - exports . ArangoError = require ( " org / arangodb / arango - error " ) . ArangoError ; <nl> - <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> / / / @ brief class " ArangoStatement " <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> mmm a / html / admin / js / modules / org / arangodb / arango - collection - common . js <nl> ppp b / html / admin / js / modules / org / arangodb / arango - collection - common . js <nl> module . define ( " org / arangodb / arango - collection - common " , function ( exports , module ) <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> <nl> var ArangoCollection = require ( " org / arangodb / arango - collection " ) . 
ArangoCollection ; <nl> - var ArangoError = require ( " org / arangodb / arango - error " ) . ArrangoError ; <nl> <nl> var arangodb = require ( " org / arangodb " ) ; <nl> <nl> + var ArangoError = arangodb . ArangoError ; <nl> var output = arangodb . output ; <nl> var sprintf = arangodb . sprintf ; <nl> var db = arangodb . db ; <nl> mmm a / html / admin / js / modules / org / arangodb / arango - collection . js <nl> ppp b / html / admin / js / modules / org / arangodb / arango - collection . js <nl> module . define ( " org / arangodb / arango - collection " , function ( exports , module ) { <nl> var internal = require ( " internal " ) ; <nl> var arangosh = require ( " org / arangodb / arangosh " ) ; <nl> <nl> - var ArangoError = require ( " org / arangodb / arango - error " ) . ArangoError ; <nl> - <nl> / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl> / / - - SECTION - - ArangoCollection <nl> / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl> <nl> exports . ArangoCollection = ArangoCollection ; <nl> / / must be called after exporting ArangoCollection <nl> require ( " org / arangodb / arango - collection - common " ) ; <nl> <nl> + var ArangoError = require ( " org / arangodb " ) . ArangoError ; <nl> + <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> / / / @ } <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> mmm a / html / admin / js / modules / org / arangodb / arango - database . js <nl> ppp b / html / admin / js / modules / org / arangodb / arango - database . js <nl> exports . ArangoDatabase = ArangoDatabase ; <nl> <nl> / / load after exporting ArangoDatabase <nl> ArangoCollection = require ( " org / arangodb / arango - collection " ) . ArangoCollection ; <nl> - var ArangoError = require ( " org / arangodb / arango - error " ) . ArangoError ; <nl> + var ArangoError = require ( " org / arangodb " ) . ArangoError ; <nl> var ArangoStatement = require ( " org / arangodb / arango - statement " ) . ArangoStatement ; <nl> <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> mmm a / html / admin / js / modules / org / arangodb / arangosh . js <nl> ppp b / html / admin / js / modules / org / arangodb / arangosh . js <nl> exports . createHelpHeadline = function ( text ) { <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> <nl> / / must come after the export of createHelpHeadline <nl> - var ArangoError = require ( " org / arangodb / arango - error " ) . ArangoError ; <nl> var arangodb = require ( " org / arangodb " ) ; <nl> + var ArangoError = arangodb . ArangoError ; <nl> <nl> exports . checkRequestResult = function ( requestResult ) { <nl> if ( requestResult = = = undefined ) { <nl> mmm a / html / admin / js / modules / org / arangodb / simple - query - common . js <nl> ppp b / html / admin / js / modules / org / arangodb / simple - query - common . js <nl> module .
define ( " org / arangodb / simple - query - common " , function ( exports , module ) { <nl> <nl> var arangodb = require ( " org / arangodb " ) ; <nl> <nl> - var ArangoError = require ( " org / arangodb / arango - error " ) . ArangoError ; <nl> + var ArangoError = arangodb . ArangoError ; <nl> <nl> / / forward declaration <nl> var SimpleQueryArray ; <nl> mmm a / js / Makefile . files <nl> ppp b / js / Makefile . files <nl> JAVASCRIPT_BROWSER = \ <nl> html / admin / js / modules / org / arangodb . js \ <nl> html / admin / js / modules / org / arangodb / arango - collection . js \ <nl> html / admin / js / modules / org / arangodb / arango - database . js \ <nl> - html / admin / js / modules / org / arangodb / arango - error . js \ <nl> html / admin / js / modules / org / arangodb / arango - query - cursor . js \ <nl> html / admin / js / modules / org / arangodb / arango - statement . js \ <nl> html / admin / js / modules / org / arangodb / arangosh . js \ <nl> JAVASCRIPT_BROWSER = \ <nl> \ <nl> html / admin / js / modules / org / arangodb - common . js \ <nl> html / admin / js / modules / org / arangodb / arango - collection - common . js \ <nl> - html / admin / js / modules / org / arangodb / arango - error - common . js \ <nl> html / admin / js / modules / org / arangodb / arango - statement - common . js \ <nl> html / admin / js / modules / org / arangodb / graph - common . js \ <nl> html / admin / js / modules / org / arangodb / graph / traversal . js \ <nl> mmm a / js / actions / api - cursor . js <nl> ppp b / js / actions / api - cursor . js <nl> <nl> var arangodb = require ( " org / arangodb " ) ; <nl> var actions = require ( " org / arangodb / actions " ) ; <nl> var internal = require ( " internal " ) ; <nl> - var ArangoError = require ( " org / arangodb / arango - error " ) . ArangoError ; <nl> + <nl> + var ArangoError = arangodb . ArangoError ; <nl> var QUERY = internal . AQL_QUERY ; <nl> <nl> / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl> mmm a / js / actions / api - explain . js <nl> ppp b / js / actions / api - explain . js <nl> <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> <nl> var actions = require ( " org / arangodb / actions " ) ; <nl> - var ArangoError = require ( " org / arangodb / arango - error " ) . ArangoError ; <nl> + <nl> + var ArangoError = require ( " org / arangodb " ) . ArangoError ; <nl> var ERRORS = require ( " internal " ) . errors ; <nl> var EXPLAIN = require ( " internal " ) . AQL_EXPLAIN ; <nl> <nl> mmm a / js / actions / api - query . js <nl> ppp b / js / actions / api - query . js <nl> <nl> <nl> var arangodb = require ( " org / arangodb " ) ; <nl> var actions = require ( " org / arangodb / actions " ) ; <nl> - var ArangoError = require ( " org / arangodb / arango - error " ) . ArangoError ; <nl> + <nl> + var ArangoError = arangodb . ArangoError ; <nl> var PARSE = require ( " internal " ) . AQL_PARSE ; <nl> <nl> / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl> mmm a / js / actions / api - user . js <nl> ppp b / js / actions / api - user . js <nl> <nl> var arangodb = require ( " org / arangodb " ) ; <nl> var actions = require ( " org / arangodb / actions " ) ; <nl> var users = require ( " org / arangodb / users " ) ; <nl> - var ArangoError = require ( " org / arangodb / arango - error " ) . ArangoError ; <nl> + <nl> + var ArangoError = arangodb . 
ArangoError ; <nl> <nl> / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl> / / - - SECTION - - private functions <nl> mmm a / js / apps / aardvark / index . html <nl> ppp b / js / apps / aardvark / index . html <nl> <nl> < script src = " js / modules / org / arangodb / arango - collection - common . js " > < / script > <nl> < script src = " js / modules / org / arangodb / arango - collection . js " > < / script > <nl> < script src = " js / modules / org / arangodb / arango - database . js " > < / script > <nl> - < script src = " js / modules / org / arangodb / arango - error - common . js " > < / script > <nl> - < script src = " js / modules / org / arangodb / arango - error . js " > < / script > <nl> < script src = " js / modules / org / arangodb / arango - query - cursor . js " > < / script > <nl> < script src = " js / modules / org / arangodb / arango - statement - common . js " > < / script > <nl> < script src = " js / modules / org / arangodb / arango - statement . js " > < / script > <nl> mmm a / js / client / modules / org / arangodb . js <nl> ppp b / js / client / modules / org / arangodb . js <nl> exports . ArangoConnection = internal . ArangoConnection ; <nl> / / cannot yet not use arangodb <nl> exports . ArangoDatabase = require ( " org / arangodb / arango - database " ) . ArangoDatabase ; <nl> <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - / / / @ brief class " ArangoError " <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - <nl> - / / cannot yet not use arangodb <nl> - exports . ArangoError = require ( " org / arangodb / arango - error " ) . ArangoError ; <nl> - <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> / / / @ brief class " ArangoStatement " <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> mmm a / js / client / modules / org / arangodb / aal . js <nl> ppp b / js / client / modules / org / arangodb / aal . js <nl> var fs = require ( " fs " ) ; <nl> <nl> var arangodb = require ( " org / arangodb " ) ; <nl> var arangosh = require ( " org / arangodb / arangosh " ) ; <nl> - var ArangoError = require ( " org / arangodb / arango - error " ) . ArangoError ; <nl> <nl> + var ArangoError = arangodb . ArangoError ; <nl> var arango = internal . arango ; <nl> var db = arangodb . db ; <nl> <nl> mmm a / js / client / modules / org / arangodb / arango - collection . js <nl> ppp b / js / client / modules / org / arangodb / arango - collection . js <nl> <nl> var internal = require ( " internal " ) ; <nl> var arangosh = require ( " org / arangodb / arangosh " ) ; <nl> <nl> - var ArangoError = require ( " org / arangodb / arango - error " ) . ArangoError ; <nl> - <nl> / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl> / / - - SECTION - - ArangoCollection <nl> / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl> exports . 
ArangoCollection = ArangoCollection ; <nl> / / must be called after exporting ArangoCollection <nl> require ( " org / arangodb / arango - collection - common " ) ; <nl> <nl> + var ArangoError = require ( " org / arangodb " ) . ArangoError ; <nl> + <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> / / / @ } <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> mmm a / js / client / modules / org / arangodb / arango - database . js <nl> ppp b / js / client / modules / org / arangodb / arango - database . js <nl> exports . ArangoDatabase = ArangoDatabase ; <nl> <nl> / / load after exporting ArangoDatabase <nl> ArangoCollection = require ( " org / arangodb / arango - collection " ) . ArangoCollection ; <nl> - var ArangoError = require ( " org / arangodb / arango - error " ) . ArangoError ; <nl> + var ArangoError = require ( " org / arangodb " ) . ArangoError ; <nl> var ArangoStatement = require ( " org / arangodb / arango - statement " ) . ArangoStatement ; <nl> <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> deleted file mode 100644 <nl> index 5a5d399f765 . . 00000000000 <nl> mmm a / js / client / modules / org / arangodb / arango - error . js <nl> ppp / dev / null <nl> <nl> - / * jslint indent : 2 , nomen : true , maxlen : 100 , sloppy : true , vars : true , white : true , plusplus : true * / <nl> - / * global require , exports * / <nl> - <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - / / / @ brief ArangoError <nl> - / / / <nl> - / / / @ file <nl> - / / / <nl> - / / / DISCLAIMER <nl> - / / / <nl> - / / / Copyright 2013 triagens GmbH , Cologne , Germany <nl> - / / / <nl> - / / / Licensed under the Apache License , Version 2 . 0 ( the " License " ) ; <nl> - / / / you may not use this file except in compliance with the License . <nl> - / / / You may obtain a copy of the License at <nl> - / / / <nl> - / / / http : / / www . apache . org / licenses / LICENSE - 2 . 0 <nl> - / / / <nl> - / / / Unless required by applicable law or agreed to in writing , software <nl> - / / / distributed under the License is distributed on an " AS IS " BASIS , <nl> - / / / WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND , either express or implied . <nl> - / / / See the License for the specific language governing permissions and <nl> - / / / limitations under the License . <nl> - / / / <nl> - / / / Copyright holder is triAGENS GmbH , Cologne , Germany <nl> - / / / <nl> - / / / @ author Achim Brandt <nl> - / / / @ author Dr . 
Frank Celler <nl> - / / / @ author Copyright 2012 - 2013 , triAGENS GmbH , Cologne , Germany <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - <nl> - var internal = require ( " internal " ) ; <nl> - <nl> - / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl> - / / - - SECTION - - ArangoError <nl> - / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl> - <nl> - / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl> - / / - - SECTION - - constructors and destructors <nl> - / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl> - <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - / / / @ addtogroup ArangoShell <nl> - / / / @ { <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - / / / @ brief constructor <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - <nl> - function ArangoError ( error ) { <nl> - if ( error ! = = undefined ) { <nl> - this . error = error . error ; <nl> - this . code = error . code ; <nl> - this . errorNum = error . errorNum ; <nl> - this . errorMessage = error . errorMessage ; <nl> - } <nl> - } <nl> - <nl> - exports . ArangoError = ArangoError ; <nl> - <nl> - / / must be called after exporting ArangoError <nl> - require ( " org / arangodb / arango - error - common " ) ; <nl> - <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - / / / @ } <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - <nl> - / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl> - / / - - SECTION - - END - OF - FILE <nl> - / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl> - <nl> - / / Local Variables : <nl> - / / mode : outline - minor <nl> - / / outline - regexp : " / / / @ brief \ \ | / / / @ addtogroup \ \ | / / - - SECTION - - \ \ | / / / @ } \ \ | / \ \ * jslint " <nl> - / / End : <nl> mmm a / js / client / modules / org / arangodb / arangosh . js <nl> ppp b / js / client / modules / org / arangodb / arangosh . js <nl> exports . createHelpHeadline = function ( text ) { <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> <nl> / / must come after the export of createHelpHeadline <nl> - var ArangoError = require ( " org / arangodb / arango - error " ) . ArangoError ; <nl> var arangodb = require ( " org / arangodb " ) ; <nl> + var ArangoError = arangodb . ArangoError ; <nl> <nl> exports .
checkRequestResult = function ( requestResult ) { <nl> if ( requestResult = = = undefined ) { <nl> mmm a / js / common / bootstrap / module - internal . js <nl> ppp b / js / common / bootstrap / module - internal . js <nl> <nl> / * jslint indent : 2 , nomen : true , maxlen : 120 , vars : true , white : true , plusplus : true , nonpropdel : true , proto : true * / <nl> - / * global require , module , Module , SYS_DOWNLOAD , <nl> - SYS_EXECUTE , SYS_LOAD , SYS_LOG_LEVEL , SYS_MD5 , SYS_OUTPUT , SYS_PROCESS_STAT , <nl> + / * global require , module , Module , ArangoError , SYS_DOWNLOAD , <nl> + SYS_EXECUTE , SYS_LOAD , SYS_LOG_LEVEL , SYS_MD5 , SYS_OUTPUT , SYS_PROCESS_STAT , <nl> SYS_RAND , SYS_SPRINTF , SYS_TIME , SYS_START_PAGER , SYS_STOP_PAGER , SYS_SHA256 , SYS_WAIT , <nl> SYS_PARSE , SYS_IMPORT_CSV_FILE , SYS_IMPORT_JSON_FILE , SYS_LOG , <nl> SYS_GEN_RANDOM_NUMBERS , SYS_GEN_RANDOM_ALPHA_NUMBERS , SYS_GEN_RANDOM_SALT , SYS_CREATE_NONCE , <nl> <nl> / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl> <nl> / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl> - / / - - SECTION - - public constants <nl> + / / - - SECTION - - public types <nl> / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl> <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - / / / @ addtogroup ArangoShell <nl> - / / / @ { <nl> + / / / @ brief ArangoError <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> <nl> + exports . ArangoError = ArangoError ; <nl> + delete ArangoError ; <nl> + <nl> + exports . ArangoError . prototype . _PRINT = function ( context ) { <nl> + context . output + = this . toString ( ) ; <nl> + } ; <nl> + <nl> + exports . ArangoError . prototype . toString = function ( ) { <nl> + var errorNum = this . errorNum ; <nl> + var errorMessage = this . 
errorMessage ; <nl> + <nl> + return " [ ArangoError " + errorNum + " : " + errorMessage + " ] " ; <nl> + } ; <nl> + <nl> + / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl> + / / - - SECTION - - public constants <nl> + / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl> + <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> / / / @ brief threadNumber <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> <nl> delete REQUEST_TIME_DISTRIBUTION ; <nl> } <nl> <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - / / / @ } <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - <nl> / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl> / / - - SECTION - - public functions <nl> / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl> <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - / / / @ addtogroup ArangoShell <nl> - / / / @ { <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> / / / @ brief download <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> <nl> exports . download = SYS_DOWNLOAD ; <nl> delete SYS_DOWNLOAD ; <nl> } <nl> - <nl> + <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> / / / @ brief executeScript <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> <nl> delete SYS_EXECUTE ; <nl> } <nl> <nl> + / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> + / / / @ brief extend <nl> + / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> + <nl> + exports . extend = function ( target , source ) { <nl> + ' use strict ' ; <nl> + <nl> + Object . getOwnPropertyNames ( source ) <nl> + . forEach ( function ( propName ) { <nl> + Object . defineProperty ( target , propName , <nl> + Object . 
getOwnPropertyDescriptor ( source , propName ) ) ; <nl> + } ) ; <nl> + <nl> + return target ; <nl> + } ; <nl> + <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> / / / @ brief load <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> <nl> exports . wait = SYS_WAIT ; <nl> delete SYS_WAIT ; <nl> } <nl> - <nl> + <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> / / / @ brief importCsvFile <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> <nl> delete SYS_REQUEST_STATISTICS ; <nl> } <nl> <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - / / / @ } <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - <nl> / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl> - / / - - SECTION - - public functions <nl> + / / - - SECTION - - private functions <nl> / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl> <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - / / / @ addtogroup ArangoShell <nl> - / / / @ { <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> / / / @ brief flushModuleCache <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> <nl> SYS_UNIT_TESTS_RESULT = value ; <nl> } ; <nl> <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - / / / @ brief extend <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - <nl> - exports . extend = function ( target , source ) { <nl> - ' use strict ' ; <nl> - <nl> - Object . getOwnPropertyNames ( source ) <nl> - . forEach ( function ( propName ) { <nl> - Object . defineProperty ( target , propName , <nl> - Object . 
getOwnPropertyDescriptor ( source , propName ) ) ; <nl> - } ) ; <nl> - <nl> - return target ; <nl> - } ; <nl> - <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - / / / @ } <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - <nl> } ( ) ) ; <nl> <nl> / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl> <nl> / / - - SECTION - - public printing variables <nl> / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl> <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - / / / @ addtogroup ArangoShell <nl> - / / / @ { <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> / / / @ brief COLORS <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> <nl> } <nl> else { <nl> [ ' COLOR_RED ' , ' COLOR_BOLD_RED ' , ' COLOR_GREEN ' , ' COLOR_BOLD_GREEN ' , <nl> - ' COLOR_BLUE ' , ' COLOR_BOLD_BLUE ' , ' COLOR_YELLOW ' , ' COLOR_BOLD_YELLOW ' , <nl> - ' COLOR_WHITE ' , ' COLOR_BOLD_WHITE ' , ' COLOR_CYAN ' , ' COLOR_BOLD_CYAN ' , <nl> - ' COLOR_MAGENTA ' , ' COLOR_BOLD_MAGENTA ' , ' COLOR_BLACK ' , ' COLOR_BOLD_BLACK ' , <nl> + ' COLOR_BLUE ' , ' COLOR_BOLD_BLUE ' , ' COLOR_YELLOW ' , ' COLOR_BOLD_YELLOW ' , <nl> + ' COLOR_WHITE ' , ' COLOR_BOLD_WHITE ' , ' COLOR_CYAN ' , ' COLOR_BOLD_CYAN ' , <nl> + ' COLOR_MAGENTA ' , ' COLOR_BOLD_MAGENTA ' , ' COLOR_BLACK ' , ' COLOR_BOLD_BLACK ' , <nl> ' COLOR_BLINK ' , ' COLOR_BRIGHT ' , ' COLOR_RESET ' ] . forEach ( function ( color ) { <nl> exports . COLORS [ color ] = ' ' ; <nl> } ) ; <nl> <nl> exports . COLORS . COLOR_NULL = exports . COLORS . COLOR_BRIGHT ; <nl> exports . COLORS . COLOR_UNDEFINED = exports . COLORS . 
COLOR_BRIGHT ; <nl> <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - / / / @ } <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - <nl> / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl> / / - - SECTION - - private printing variables <nl> / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl> <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - / / / @ addtogroup ArangoShell <nl> - / / / @ { <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> / / / @ brief quote cache <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> <nl> delete PRETTY_PRINT ; <nl> } <nl> <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - / / / @ } <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - <nl> / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl> / / - - SECTION - - private printing functions <nl> / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl> <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - / / / @ addtogroup ArangoShell <nl> - / / / @ { <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - <nl> var printRecursive ; <nl> <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> <nl> output ( " \ n " ) ; <nl> } <nl> <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - / / / @ } <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - <nl> / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl> / / - - SECTION - - public printing functions <nl> / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl> <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - / / / @ addtogroup ArangoShell <nl> - / / / @ { <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / 
/ / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> / / / @ brief inspect <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> <nl> useColor = false ; <nl> } ; <nl> <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - / / / @ } <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - <nl> } ( ) ) ; <nl> <nl> / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl> / / - - SECTION - - global printing functions <nl> / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl> <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - / / / @ addtogroup ArangoShell <nl> - / / / @ { <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> / / / @ brief print <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> function stop_color_print ( ) { <nl> require ( " internal " ) . stopColorPrint ( ) ; <nl> } <nl> <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - / / / @ } <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - <nl> / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl> / / - - SECTION - - END - OF - FILE <nl> / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl> mmm a / js / common / bootstrap / modules . js <nl> ppp b / js / common / bootstrap / modules . js <nl> function require ( path ) { <nl> | | norm = = = " / org / arangodb / actions " <nl> | | norm = = = " / org / arangodb / arango - collection " <nl> | | norm = = = " / org / arangodb / arango - database " <nl> - | | norm = = = " / org / arangodb / arango - error " <nl> | | norm = = = " / org / arangodb / arango - statement " <nl> | | norm = = = " / org / arangodb / shaped - json " ) { <nl> return ; <nl> mmm a / js / common / modules / org / arangodb - common . js <nl> ppp b / js / common / modules / org / arangodb - common . js <nl> <nl> / / / <nl> / / / DISCLAIMER <nl> / / / <nl> - / / / Copyright 2012 triagens GmbH , Cologne , Germany <nl> + / / / Copyright 2004 - 2013 triAGENS GmbH , Cologne , Germany <nl> / / / <nl> / / / Licensed under the Apache License , Version 2 . 0 ( the " License " ) ; <nl> / / / you may not use this file except in compliance with the License . 
<nl> <nl> / / / Copyright holder is triAGENS GmbH , Cologne , Germany <nl> / / / <nl> / / / @ author Dr . Frank Celler <nl> - / / / @ author Copyright 2012 , triAGENS GmbH , Cologne , Germany <nl> + / / / @ author Copyright 2012 - 2013 , triAGENS GmbH , Cologne , Germany <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> <nl> var internal = require ( " internal " ) ; <nl> var mimetypes = require ( " org / arangodb / mimetypes " ) . mimeTypes ; <nl> / / - - SECTION - - public constants <nl> / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl> <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - / / / @ addtogroup ArangoShell <nl> - / / / @ { <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> / / / @ brief errors <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> var mimetypes = require ( " org / arangodb / mimetypes " ) . mimeTypes ; <nl> <nl> exports . errors = internal . errors ; <nl> <nl> + / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl> + / / - - SECTION - - public types <nl> + / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl> + <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - / / / @ } <nl> + / / / @ brief ArangoError <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> <nl> + exports . ArangoError = internal . ArangoError ; <nl> + <nl> / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl> / / - - SECTION - - public functions <nl> / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl> <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - / / / @ addtogroup ArangoShell <nl> - / / / @ { <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> / / / @ brief defines a module <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> exports . guessContentType = function ( filename ) { <nl> <nl> if ( match ! = = null ) { <nl> var extension = match [ 1 ] ; <nl> - <nl> + <nl> if ( mimetypes . 
hasOwnProperty ( extension ) ) { <nl> var type = mimetypes [ extension ] ; <nl> - <nl> + <nl> if ( type [ 1 ] ) { <nl> / / append charset <nl> return type [ 0 ] + " ; charset = utf - 8 " ; <nl> } <nl> <nl> - return type [ 0 ] ; <nl> + return type [ 0 ] ; <nl> } <nl> / / fall - through intentional <nl> } <nl> exports . printTable = function ( list , columns , framed ) { <nl> } <nl> else { <nl> what = columns ; <nl> - } <nl> + } <nl> <nl> j = 0 ; <nl> descriptions = [ ] ; <nl> exports . printTable = function ( list , columns , framed ) { <nl> } <nl> } ) ; <nl> } ) ; <nl> - <nl> + <nl> var divider = function ( ) { <nl> var parts = [ ] ; <nl> descriptions . forEach ( function ( desc ) { <nl> exports . stringPadding = function ( str , len , pad , dir ) { <nl> return str ; <nl> } ; <nl> <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - / / / @ } <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - <nl> / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl> / / - - SECTION - - END - OF - FILE <nl> / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl> <nl> / / Local Variables : <nl> / / mode : outline - minor <nl> - / / outline - regexp : " / / / @ brief \ \ | / / / @ addtogroup \ \ | / / - - SECTION - - \ \ | / / / @ page \ \ | / / / @ } \ \ | / \ \ * jslint " <nl> + / / outline - regexp : " / / / @ brief \ \ | / / / @ addtogroup \ \ | / / / @ page \ \ | / / - - SECTION - - \ \ | / / / @ \ \ } \ \ | / \ \ * jslint " <nl> / / End : <nl> mmm a / js / common / modules / org / arangodb / aql / functions . js <nl> ppp b / js / common / modules / org / arangodb / aql / functions . js <nl> <nl> var internal = require ( " internal " ) ; <nl> var arangodb = require ( " org / arangodb " ) ; <nl> + <nl> var db = arangodb . db ; <nl> - var ArangoError = require ( " org / arangodb / arango - error " ) . ArangoError ; <nl> + var ArangoError = arangodb . ArangoError ; <nl> <nl> / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl> / / - - SECTION - - module " org / arangodb / aql / functions " <nl> mmm a / js / common / modules / org / arangodb / arango - collection - common . js <nl> ppp b / js / common / modules / org / arangodb / arango - collection - common . js <nl> <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> <nl> var ArangoCollection = require ( " org / arangodb / arango - collection " ) . ArangoCollection ; <nl> - var ArangoError = require ( " org / arangodb / arango - error " ) . ArrangoError ; <nl> <nl> var arangodb = require ( " org / arangodb " ) ; <nl> <nl> + var ArangoError = arangodb . ArangoError ; <nl> var output = arangodb . output ; <nl> var sprintf = arangodb . sprintf ; <nl> var db = arangodb . db ; <nl> deleted file mode 100644 <nl> index c69a01fda44 . . 00000000000 <nl> mmm a / js / common / modules / org / arangodb / arango - error - common .
js <nl> ppp / dev / null <nl> <nl> - / * jslint indent : 2 , nomen : true , maxlen : 100 , sloppy : true , vars : true , white : true , plusplus : true * / <nl> - / * global require * / <nl> - <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - / / / @ brief ArangoError <nl> - / / / <nl> - / / / @ file <nl> - / / / <nl> - / / / DISCLAIMER <nl> - / / / <nl> - / / / Copyright 2013 triagens GmbH , Cologne , Germany <nl> - / / / <nl> - / / / Licensed under the Apache License , Version 2 . 0 ( the " License " ) ; <nl> - / / / you may not use this file except in compliance with the License . <nl> - / / / You may obtain a copy of the License at <nl> - / / / <nl> - / / / http : / / www . apache . org / licenses / LICENSE - 2 . 0 <nl> - / / / <nl> - / / / Unless required by applicable law or agreed to in writing , software <nl> - / / / distributed under the License is distributed on an " AS IS " BASIS , <nl> - / / / WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND , either express or implied . <nl> - / / / See the License for the specific language governing permissions and <nl> - / / / limitations under the License . <nl> - / / / <nl> - / / / Copyright holder is triAGENS GmbH , Cologne , Germany <nl> - / / / <nl> - / / / @ author Achim Brandt <nl> - / / / @ author Dr . Frank Celler <nl> - / / / @ author Copyright 2012 - 2013 , triAGENS GmbH , Cologne , Germany <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - <nl> - var arangodb = require ( " org / arangodb " ) ; <nl> - <nl> - var ArangoError = require ( " org / arangodb / arango - error " ) . ArangoError ; <nl> - <nl> - / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl> - / / - - SECTION - - ArangoError <nl> - / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl> - <nl> - / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl> - / / - - SECTION - - private methods <nl> - / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl> - <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - / / / @ addtogroup ArangoShell <nl> - / / / @ { <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - / / / @ brief prints the object <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - <nl> - ArangoError . prototype . _PRINT = function ( context ) { <nl> - context . output + = this . 
toString ( ) ; <nl> - } ; <nl> - <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - / / / @ brief converts into a string <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - <nl> - ArangoError . prototype . toString = function ( ) { <nl> - var errorNum = this . errorNum ; <nl> - var errorMessage = this . errorMessage ; <nl> - <nl> - return " [ ArangoError " + errorNum + " : " + errorMessage + " ] " ; <nl> - } ; <nl> - <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - / / / @ } <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - <nl> - / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl> - / / - - SECTION - - END - OF - FILE <nl> - / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl> - <nl> - / / Local Variables : <nl> - / / mode : outline - minor <nl> - / / outline - regexp : " / / / @ brief \ \ | / / / @ addtogroup \ \ | / / - - SECTION - - \ \ | / / / @ } \ \ | / \ \ * jslint " <nl> - / / End : <nl> mmm a / js / common / modules / org / arangodb / simple - query - common . js <nl> ppp b / js / common / modules / org / arangodb / simple - query - common . js <nl> <nl> <nl> var arangodb = require ( " org / arangodb " ) ; <nl> <nl> - var ArangoError = require ( " org / arangodb / arango - error " ) . ArangoError ; <nl> + var ArangoError = arangodb . ArangoError ; <nl> <nl> / / forward declaration <nl> var SimpleQueryArray ; <nl> mmm a / js / common / modules / org / arangodb / users - common . js <nl> ppp b / js / common / modules / org / arangodb / users - common . js <nl> <nl> var internal = require ( " internal " ) ; / / OK : time <nl> var arangodb = require ( " org / arangodb " ) ; <nl> var crypto = require ( " org / arangodb / crypto " ) ; <nl> + <nl> var db = arangodb . db ; <nl> - var ArangoError = require ( " org / arangodb / arango - error " ) . ArangoError ; <nl> + var ArangoError = arangodb . ArangoError ; <nl> <nl> / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl> / / - - SECTION - - module " org / arangodb / users " <nl> mmm a / js / server / bootstrap / module - internal . js <nl> ppp b / js / server / bootstrap / module - internal . js <nl> <nl> / * jslint indent : 2 , nomen : true , maxlen : 120 , sloppy : true , vars : true , white : true , plusplus : true , nonpropdel : true * / <nl> - / * global require , db , ArangoCollection , ArangoDatabase , ArangoError , ArangoCursor , <nl> + / * global require , db , ArangoCollection , ArangoDatabase , ArangoCursor , <nl> ShapedJson , RELOAD_AUTH , SYS_DEFINE_ACTION , SYS_EXECUTE_GLOBAL_CONTEXT_FUNCTION , <nl> AHUACATL_RUN , AHUACATL_PARSE , AHUACATL_EXPLAIN * / <nl> <nl> <nl> internal . ArangoDatabase = ArangoDatabase ; <nl> delete ArangoDatabase ; <nl> <nl> - internal . ArangoError = ArangoError ; <nl> - delete ArangoError ; <nl> - <nl> internal . ArangoCursor = ArangoCursor ; <nl> delete ArangoCursor ; <nl> <nl> mmm a / js / server / modules / org / arangodb . js <nl> ppp b / js / server / modules / org / arangodb . js <nl> exports . 
ArangoCursor = internal . ArangoCursor ; <nl> / / cannot yet not use arangodb <nl> exports . ArangoDatabase = require ( " org / arangodb / arango - database " ) . ArangoDatabase ; <nl> <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - / / / @ brief class " ArangoError " <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - <nl> - / / cannot yet not use arangodb <nl> - exports . ArangoError = require ( " org / arangodb / arango - error " ) . ArangoError ; <nl> - <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> / / / @ brief class " ArangoStatement " <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> mmm a / js / server / modules / org / arangodb / ahuacatl . js <nl> ppp b / js / server / modules / org / arangodb / ahuacatl . js <nl> <nl> var INTERNAL = require ( " internal " ) ; <nl> var TRAVERSAL = require ( " org / arangodb / graph / traversal " ) ; <nl> - var ArangoError = require ( " org / arangodb / arango - error " ) . ArangoError ; <nl> + var ArangoError = require ( " org / arangodb " ) . ArangoError ; <nl> <nl> / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl> / / - - SECTION - - private variables <nl> mmm a / js / server / modules / org / arangodb / arango - collection . js <nl> ppp b / js / server / modules / org / arangodb / arango - collection . js <nl> <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> <nl> var internal = require ( " internal " ) ; <nl> - var ArangoError = require ( " org / arangodb / arango - error " ) . ArangoError ; <nl> <nl> / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl> / / - - SECTION - - ArangoCollection <nl> exports . ArangoCollection = ArangoCollection ; <nl> require ( " org / arangodb / arango - collection - common " ) ; <nl> <nl> var simple = require ( " org / arangodb / simple - query " ) ; <nl> + var ArangoError = require ( " org / arangodb " ) . ArangoError ; <nl> var ArangoDatabase = require ( " org / arangodb / arango - database " ) . ArangoDatabase ; <nl> <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> mmm a / js / server / modules / org / arangodb / arango - database . js <nl> ppp b / js / server / modules / org / arangodb / arango - database . js <nl> var ArangoDatabase = exports . ArangoDatabase ; <nl> <nl> / / must be called after export <nl> var ArangoCollection = require ( " org / arangodb / arango - collection " ) . ArangoCollection ; <nl> - var ArangoError = require ( " org / arangodb / arango - error " ) . ArangoError ; <nl> + var ArangoError = require ( " org / arangodb " ) . ArangoError ; <nl> var ArangoStatement = require ( " org / arangodb / arango - statement " ) .
ArangoStatement ; <nl> <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> deleted file mode 100644 <nl> index 5af2c7e4da4 . . 00000000000 <nl> mmm a / js / server / modules / org / arangodb / arango - error . js <nl> ppp / dev / null <nl> <nl> - / * jslint indent : 2 , nomen : true , maxlen : 100 , sloppy : true , vars : true , white : true , plusplus : true * / <nl> - / * global require , exports * / <nl> - <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - / / / @ brief ArangoError <nl> - / / / <nl> - / / / @ file <nl> - / / / <nl> - / / / DISCLAIMER <nl> - / / / <nl> - / / / Copyright 2013 triagens GmbH , Cologne , Germany <nl> - / / / <nl> - / / / Licensed under the Apache License , Version 2 . 0 ( the " License " ) ; <nl> - / / / you may not use this file except in compliance with the License . <nl> - / / / You may obtain a copy of the License at <nl> - / / / <nl> - / / / http : / / www . apache . org / licenses / LICENSE - 2 . 0 <nl> - / / / <nl> - / / / Unless required by applicable law or agreed to in writing , software <nl> - / / / distributed under the License is distributed on an " AS IS " BASIS , <nl> - / / / WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND , either express or implied . <nl> - / / / See the License for the specific language governing permissions and <nl> - / / / limitations under the License . <nl> - / / / <nl> - / / / Copyright holder is triAGENS GmbH , Cologne , Germany <nl> - / / / <nl> - / / / @ author Achim Brandt <nl> - / / / @ author Dr . Frank Celler <nl> - / / / @ author Copyright 2012 - 2013 , triAGENS GmbH , Cologne , Germany <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - <nl> - var internal = require ( " internal " ) ; <nl> - <nl> - / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl> - / / - - SECTION - - ArangoError <nl> - / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl> - <nl> - / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl> - / / - - SECTION - - constructors and destructors <nl> - / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl> - <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - / / / @ addtogroup ArangoShell <nl> - / / / @ { <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - / / / @ brief constructor <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - <nl> - exports . ArangoError = internal . 
ArangoError ; <nl> - <nl> - / / must be called after exporting ArangoError <nl> - require ( " org / arangodb / arango - error - common " ) ; <nl> - <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - / / / @ } <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - <nl> - / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl> - / / - - SECTION - - END - OF - FILE <nl> - / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl> - <nl> - / / Local Variables : <nl> - / / mode : outline - minor <nl> - / / outline - regexp : " / / / @ brief \ \ | / / / @ addtogroup \ \ | / / - - SECTION - - \ \ | / / / @ } \ \ | / \ \ * jslint " <nl> - / / End : <nl> mmm a / js / server / modules / org / arangodb / simple - query . js <nl> ppp b / js / server / modules / org / arangodb / simple - query . js <nl> <nl> var internal = require ( " internal " ) ; <nl> var console = require ( " console " ) ; <nl> <nl> - var ArangoError = require ( " org / arangodb / arango - error " ) ; <nl> + var ArangoError = require ( " org / arangodb " ) . ArangoError ; <nl> <nl> var sq = require ( " org / arangodb / simple - query - common " ) ; <nl> <nl> mmm a / js / server / tests / ahuacatl - arithmetic . js <nl> ppp b / js / server / tests / ahuacatl - arithmetic . js <nl> <nl> <nl> var internal = require ( " internal " ) ; <nl> var jsunity = require ( " jsunity " ) ; <nl> - var ArangoError = require ( " org / arangodb / arango - error " ) . ArangoError ; <nl> + var ArangoError = require ( " org / arangodb " ) . ArangoError ; <nl> var QUERY = internal . AQL_QUERY ; <nl> <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> mmm a / js / server / tests / ahuacatl - cross . js <nl> ppp b / js / server / tests / ahuacatl - cross . js <nl> <nl> <nl> var jsunity = require ( " jsunity " ) ; <nl> var db = require ( " org / arangodb " ) . db ; <nl> - var ArangoError = require ( " org / arangodb / arango - error " ) . ArangoError ; <nl> + var ArangoError = require ( " org / arangodb " ) . ArangoError ; <nl> var QUERY = require ( " internal " ) . AQL_QUERY ; <nl> <nl> <nl> mmm a / js / server / tests / ahuacatl - edges . js <nl> ppp b / js / server / tests / ahuacatl - edges . js <nl> <nl> <nl> var jsunity = require ( " jsunity " ) ; <nl> var internal = require ( " internal " ) ; <nl> - var ArangoError = require ( " org / arangodb / arango - error " ) . ArangoError ; <nl> + var ArangoError = require ( " org / arangodb " ) . ArangoError ; <nl> var EXPLAIN = internal . AQL_EXPLAIN ; <nl> var QUERY = internal . AQL_QUERY ; <nl> <nl> mmm a / js / server / tests / ahuacatl - escaping . js <nl> ppp b / js / server / tests / ahuacatl - escaping . js <nl> <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> <nl> var jsunity = require ( " jsunity " ) ; <nl> - var ArangoError = require ( " org / arangodb / arango - error " ) . ArangoError ; <nl> + var ArangoError = require ( " org / arangodb " ) . ArangoError ; <nl> var QUERY = require ( " internal " ) . 
AQL_QUERY ; <nl> <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> mmm a / js / server / tests / ahuacatl - functions . js <nl> ppp b / js / server / tests / ahuacatl - functions . js <nl> <nl> <nl> var internal = require ( " internal " ) ; <nl> var jsunity = require ( " jsunity " ) ; <nl> - var ArangoError = require ( " org / arangodb / arango - error " ) . ArangoError ; <nl> + var ArangoError = require ( " org / arangodb " ) . ArangoError ; <nl> var QUERY = internal . AQL_QUERY ; <nl> <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> mmm a / js / server / tests / ahuacatl - graph . js <nl> ppp b / js / server / tests / ahuacatl - graph . js <nl> <nl> <nl> var jsunity = require ( " jsunity " ) ; <nl> var db = require ( " org / arangodb " ) . db ; <nl> - var ArangoError = require ( " org / arangodb / arango - error " ) . ArangoError ; <nl> + var ArangoError = require ( " org / arangodb " ) . ArangoError ; <nl> var QUERY = require ( " internal " ) . AQL_QUERY ; <nl> var errors = require ( " internal " ) . errors ; <nl> <nl> mmm a / js / server / tests / ahuacatl - queries - collection . js <nl> ppp b / js / server / tests / ahuacatl - queries - collection . js <nl> <nl> <nl> var jsunity = require ( " jsunity " ) ; <nl> var internal = require ( " internal " ) ; <nl> - var ArangoError = require ( " org / arangodb / arango - error " ) . ArangoError ; <nl> + var ArangoError = require ( " org / arangodb " ) . ArangoError ; <nl> var QUERY = internal . AQL_QUERY ; <nl> <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> mmm a / js / server / tests / ahuacatl - queries - fulltext . js <nl> ppp b / js / server / tests / ahuacatl - queries - fulltext . js <nl> <nl> <nl> var db = require ( " org / arangodb " ) . db ; <nl> var jsunity = require ( " jsunity " ) ; <nl> - var ArangoError = require ( " org / arangodb / arango - error " ) . ArangoError ; <nl> + var ArangoError = require ( " org / arangodb " ) . ArangoError ; <nl> var ERRORS = require ( " org / arangodb " ) . errors ; <nl> var QUERY = require ( " internal " ) . AQL_QUERY ; <nl> <nl> mmm a / js / server / tests / ahuacatl - queries - geo . js <nl> ppp b / js / server / tests / ahuacatl - queries - geo . js <nl> <nl> <nl> var jsunity = require ( " jsunity " ) ; <nl> var db = require ( " org / arangodb " ) . db ; <nl> - var ArangoError = require ( " org / arangodb / arango - error " ) . ArangoError ; <nl> + var ArangoError = require ( " org / arangodb " ) . ArangoError ; <nl> var ERRORS = require ( " org / arangodb " ) . errors ; <nl> var QUERY = require ( " internal " ) . AQL_QUERY ; <nl> <nl> mmm a / js / server / tests / ahuacatl - queries - noncollection . js <nl> ppp b / js / server / tests / ahuacatl - queries - noncollection . js <nl> <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> <nl> var jsunity = require ( " jsunity " ) ; <nl> - var ArangoError = require ( " org / arangodb / arango - error " ) . ArangoError ; <nl> + var ArangoError = require ( " org / arangodb " ) . ArangoError ; <nl> var QUERY = require ( " internal " ) . 
AQL_QUERY ; <nl> <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> mmm a / js / server / tests / ahuacatl - queries - optimiser - limit . js <nl> ppp b / js / server / tests / ahuacatl - queries - optimiser - limit . js <nl> <nl> <nl> var jsunity = require ( " jsunity " ) ; <nl> var internal = require ( " internal " ) ; <nl> - var ArangoError = require ( " org / arangodb / arango - error " ) . ArangoError ; <nl> + var ArangoError = require ( " org / arangodb " ) . ArangoError ; <nl> var EXPLAIN = internal . AQL_EXPLAIN ; <nl> var QUERY = internal . AQL_QUERY ; <nl> <nl> mmm a / js / server / tests / ahuacatl - queries - optimiser - ref . js <nl> ppp b / js / server / tests / ahuacatl - queries - optimiser - ref . js <nl> <nl> <nl> var jsunity = require ( " jsunity " ) ; <nl> var internal = require ( " internal " ) ; <nl> - var ArangoError = require ( " org / arangodb / arango - error " ) . ArangoError ; <nl> + var ArangoError = require ( " org / arangodb " ) . ArangoError ; <nl> var QUERY = internal . AQL_QUERY ; <nl> <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> mmm a / js / server / tests / ahuacatl - queries - optimiser - sort . js <nl> ppp b / js / server / tests / ahuacatl - queries - optimiser - sort . js <nl> <nl> <nl> var jsunity = require ( " jsunity " ) ; <nl> var internal = require ( " internal " ) ; <nl> - var ArangoError = require ( " org / arangodb / arango - error " ) . ArangoError ; <nl> + var ArangoError = require ( " org / arangodb " ) . ArangoError ; <nl> var EXPLAIN = internal . AQL_EXPLAIN ; <nl> var QUERY = internal . AQL_QUERY ; <nl> <nl> mmm a / js / server / tests / ahuacatl - queries - simple . js <nl> ppp b / js / server / tests / ahuacatl - queries - simple . js <nl> <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> <nl> var jsunity = require ( " jsunity " ) ; <nl> - var ArangoError = require ( " org / arangodb / arango - error " ) . ArangoError ; <nl> + var ArangoError = require ( " org / arangodb " ) . ArangoError ; <nl> var QUERY = require ( " internal " ) . AQL_QUERY ; <nl> var errors = require ( " internal " ) . errors ; <nl> <nl> mmm a / js / server / tests / ahuacatl - queries - variables . js <nl> ppp b / js / server / tests / ahuacatl - queries - variables . js <nl> <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> <nl> var jsunity = require ( " jsunity " ) ; <nl> - var ArangoError = require ( " org / arangodb / arango - error " ) . ArangoError ; <nl> + var ArangoError = require ( " org / arangodb " ) . ArangoError ; <nl> var QUERY = require ( " internal " ) . AQL_QUERY ; <nl> <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> mmm a / js / server / tests / ahuacatl - relational . js <nl> ppp b / js / server / tests / ahuacatl - relational . js <nl> <nl> <nl> var internal = require ( " internal " ) ; <nl> var jsunity = require ( " jsunity " ) ; <nl> - var ArangoError = require ( " org / arangodb / arango - error " ) . 
ArangoError ; <nl> + var ArangoError = require ( " org / arangodb " ) . ArangoError ; <nl> var QUERY = internal . AQL_QUERY ; <nl> <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> mmm a / js / server / tests / ahuacatl - ternary . js <nl> ppp b / js / server / tests / ahuacatl - ternary . js <nl> <nl> <nl> var internal = require ( " internal " ) ; <nl> var jsunity = require ( " jsunity " ) ; <nl> - var ArangoError = require ( " org / arangodb / arango - error " ) . ArangoError ; <nl> + var ArangoError = require ( " org / arangodb " ) . ArangoError ; <nl> var QUERY = internal . AQL_QUERY ; <nl> <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> mmm a / js / server / tests / import . js <nl> ppp b / js / server / tests / import . js <nl> <nl> <nl> var internal = require ( " internal " ) ; <nl> var jsunity = require ( " jsunity " ) ; <nl> - var ArangoError = require ( " org / arangodb / arango - error " ) . ArangoError ; <nl> + var ArangoError = require ( " org / arangodb " ) . ArangoError ; <nl> <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> / / / @ brief test suite <nl> mmm a / lib / V8 / v8 - conv . cpp <nl> ppp b / lib / V8 / v8 - conv . cpp <nl> static v8 : : Handle < v8 : : Value > JsonShapeData ( TRI_shaper_t * shaper , <nl> / / - - SECTION - - private functions <nl> / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl> <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - / / / @ addtogroup V8Conversions <nl> - / / / @ { <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> / / / @ brief converts a null into TRI_shape_value_t <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> static v8 : : Handle < v8 : : Value > ObjectJsonList ( TRI_json_t const * json ) { <nl> return object ; <nl> } <nl> <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - / / / @ } <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - <nl> / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl> / / - - SECTION - - public functions <nl> / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl> <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - / / / @ addtogroup V8Conversions <nl> - / / / @ { <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / 
/ / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> / / / @ brief pushes the names of an associative char * array into a V8 array <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> v8 : : Handle < v8 : : Array > TRI_ArrayAssociativePointer ( const TRI_associative_pointer <nl> <nl> uint32_t j = 0 ; <nl> uint32_t n = ( uint32_t ) array - > _nrAlloc ; <nl> + <nl> for ( uint32_t i = 0 ; i < n ; + + i ) { <nl> char * value = ( char * ) array - > _table [ i ] ; <nl> <nl> TRI_shaped_json_t * TRI_ShapedJsonV8Object ( v8 : : Handle < v8 : : Value > object , TRI_sha <nl> } <nl> <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - / / / @ brief converts a V8 object to a TRI_shaped_json_t <nl> + / / / @ brief converts a V8 object to a TRI_shaped_json_t in place <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> <nl> int TRI_FillShapedJsonV8Object ( v8 : : Handle < v8 : : Value > object , <nl> double TRI_ObjectToDouble ( v8 : : Handle < v8 : : Value > value ) { <nl> } <nl> <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - / / / @ brief converts an V8 object to a double <nl> + / / / @ brief converts an V8 object to a double with error handling <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> <nl> double TRI_ObjectToDouble ( v8 : : Handle < v8 : : Value > value , bool & error ) { <nl> bool TRI_ObjectToBoolean ( v8 : : Handle < v8 : : Value > value ) { <nl> return false ; <nl> } <nl> <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - / / / @ } <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - <nl> / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl> / / - - SECTION - - public functions <nl> / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl> <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - / / / @ addtogroup V8Conversions <nl> - / / / @ { <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> / / / @ brief initialises the V8 conversion module <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / 
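// A note on the hunks above: they document the two-argument TRI_ObjectToDouble(value, error)
// as the conversion "with error handling". The following stand-alone sketch only illustrates
// that out-parameter pattern; ObjectToDoubleSketch is a hypothetical helper name, not the
// actual ArangoDB implementation.
#include <v8.h>

static double ObjectToDoubleSketch (v8::Handle<v8::Value> value, bool& error) {
  error = false;

  if (value->IsNumber()) {
    return value->ToNumber()->Value();                                // plain JS number
  }

  if (value->IsNumberObject()) {
    return v8::Handle<v8::NumberObject>::Cast(value)->NumberValue();  // boxed Number object
  }

  error = true;                                                       // anything else fails
  return 0.0;
}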
<nl> <nl> void TRI_InitV8Conversions ( v8 : : Handle < v8 : : Context > context ) { <nl> - v8 : : HandleScope scope ; <nl> - <nl> - / / check the isolate <nl> - v8 : : Isolate * isolate = v8 : : Isolate : : GetCurrent ( ) ; <nl> - TRI_v8_global_t * v8g = ( TRI_v8_global_t * ) isolate - > GetData ( ) ; <nl> - <nl> - assert ( v8g ! = 0 ) ; <nl> - <nl> / / nothing special to do here <nl> } <nl> <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - / / / @ } <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - <nl> / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl> / / - - SECTION - - END - OF - FILE <nl> / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl> mmm a / lib / V8 / v8 - conv . h <nl> ppp b / lib / V8 / v8 - conv . h <nl> <nl> / / - - SECTION - - public functions <nl> / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl> <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - / / / @ addtogroup V8Conversions <nl> - / / / @ { <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> / / / @ brief pushes the names of an associative char * array into a V8 array <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> v8 : : Handle < v8 : : Value > TRI_JsonShapeData ( TRI_shaper_t * , <nl> TRI_shaped_json_t * TRI_ShapedJsonV8Object ( v8 : : Handle < v8 : : Value > , TRI_shaper_t * ) ; <nl> <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - / / / @ brief converts a V8 object to a TRI_shaped_json_t <nl> + / / / @ brief converts a V8 object to a TRI_shaped_json_t in place <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> <nl> int TRI_FillShapedJsonV8Object ( v8 : : Handle < v8 : : Value > object , <nl> double TRI_ObjectToDouble ( v8 : : Handle < v8 : : Value > , bool & error ) ; <nl> <nl> bool TRI_ObjectToBoolean ( v8 : : Handle < v8 : : Value > ) ; <nl> <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - / / / @ } <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - <nl> / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl> / / - - SECTION - - GENERAL <nl> / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl> bool TRI_ObjectToBoolean ( v8 : : Handle < v8 : : Value > ) ; <nl> / / - - SECTION - - 
public functions <nl> / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl> <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - / / / @ addtogroup V8Conversions <nl> - / / / @ { <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> / / / @ brief initialises the V8 conversion module <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> <nl> void TRI_InitV8Conversions ( v8 : : Handle < v8 : : Context > ) ; <nl> <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - / / / @ } <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - <nl> # endif <nl> <nl> + / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl> + / / - - SECTION - - END - OF - FILE <nl> + / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl> + <nl> / / Local Variables : <nl> / / mode : outline - minor <nl> / / outline - regexp : " / / / @ brief \ \ | / / / { @ inheritDoc } \ \ | / / / @ addtogroup \ \ | / / / @ page \ \ | / / - - SECTION - - \ \ | / / / @ \ \ } " <nl> mmm a / lib / V8 / v8 - execution . cpp <nl> ppp b / lib / V8 / v8 - execution . 
cpp <nl> using namespace std ; <nl> / / - - SECTION - - public types <nl> / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl> <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - / / / @ addtogroup V8Utils <nl> - / / / @ { <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> / / / @ brief execution context <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> <nl> typedef struct js_exec_context_s { <nl> + v8 : : Isolate * _isolate ; <nl> v8 : : Persistent < v8 : : Function > _func ; <nl> v8 : : Persistent < v8 : : Object > _arguments ; <nl> } <nl> js_exec_context_t ; <nl> <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - / / / @ } <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - <nl> / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl> / / - - SECTION - - constructors and destructors <nl> / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl> <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - / / / @ addtogroup V8Utils <nl> - / / / @ { <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> / / / @ brief creates a new execution context <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> <nl> TRI_js_exec_context_t TRI_CreateExecutionContext ( char const * script ) { <nl> + v8 : : Isolate * isolate = v8 : : Isolate : : GetCurrent ( ) ; <nl> js_exec_context_t * ctx ; <nl> <nl> / / execute script inside the context <nl> v8 : : Handle < v8 : : Script > compiled = v8 : : Script : : Compile ( v8 : : String : : New ( script ) , <nl> v8 : : String : : New ( " - - script - - " ) ) ; <nl> <nl> - / / compilation failed , print errors that happened during compilation <nl> + / / compilation failed , return <nl> if ( compiled . 
IsEmpty ( ) ) { <nl> return 0 ; <nl> } <nl> TRI_js_exec_context_t TRI_CreateExecutionContext ( char const * script ) { <nl> <nl> ctx = new js_exec_context_t ; <nl> <nl> - ctx - > _func = v8 : : Persistent < v8 : : Function > : : New ( v8 : : Handle < v8 : : Function > : : Cast ( val ) ) ; <nl> - ctx - > _arguments = v8 : : Persistent < v8 : : Object > : : New ( v8 : : Object : : New ( ) ) ; <nl> + ctx - > _func = v8 : : Persistent < v8 : : Function > : : New ( isolate , v8 : : Handle < v8 : : Function > : : Cast ( val ) ) ; <nl> + ctx - > _arguments = v8 : : Persistent < v8 : : Object > : : New ( isolate , v8 : : Object : : New ( ) ) ; <nl> + ctx - > _isolate = isolate ; <nl> <nl> / / return the handle <nl> return ( TRI_js_exec_context_t ) ctx ; <nl> void TRI_FreeExecutionContext ( TRI_js_exec_context_t context ) { <nl> <nl> ctx = ( js_exec_context_t * ) context ; <nl> <nl> - ctx - > _func . Dispose ( ) ; <nl> + ctx - > _func . Dispose ( ctx - > _isolate ) ; <nl> ctx - > _func . Clear ( ) ; <nl> <nl> - ctx - > _arguments . Dispose ( ) ; <nl> + ctx - > _arguments . Dispose ( ctx - > _isolate ) ; <nl> ctx - > _arguments . Clear ( ) ; <nl> <nl> delete ctx ; <nl> } <nl> <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - / / / @ } <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - <nl> / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl> / / - - SECTION - - public functions <nl> / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl> <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - / / / @ addtogroup V8Utils <nl> - / / / @ { <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> / / / @ brief executes a result context <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> TRI_json_t * TRI_ExecuteResultContext ( TRI_js_exec_context_t context ) { <nl> return TRI_ObjectToJson ( result ) ; <nl> } <nl> <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - / / / @ } <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> + / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl> + / / - - SECTION - - END - OF - FILE <nl> + / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl> <nl> / / Local Variables : <nl> / / mode : outline - minor <nl> mmm a / lib / V8 / v8 - execution . h <nl> ppp b / lib / V8 / v8 - execution . h <nl> <nl> <nl> # include " BasicsC / common . h " <nl> <nl> - # include " BasicsC / json . 
h " <nl> - <nl> # ifdef __cplusplus <nl> extern " C " { <nl> # endif <nl> <nl> / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl> - / / - - SECTION - - EXECUTION CONTEXT <nl> + / / - - SECTION - - forward declarations <nl> / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl> <nl> + struct TRI_json_s ; <nl> + <nl> / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl> / / - - SECTION - - public types <nl> / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl> <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - / / / @ addtogroup V8Utils <nl> - / / / @ { <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> / / / @ brief execution context <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> <nl> typedef void * TRI_js_exec_context_t ; <nl> <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - / / / @ } <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - <nl> / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl> / / - - SECTION - - constructors and destructors <nl> / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl> <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - / / / @ addtogroup V8Utils <nl> - / / / @ { <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> / / / @ brief creates a new execution context <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> <nl> - TRI_js_exec_context_t TRI_CreateExecutionContext ( char const * script ) ; <nl> + TRI_js_exec_context_t TRI_CreateExecutionContext ( const char * script ) ; <nl> <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> / / / @ brief frees a new execution context <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> <nl> - void TRI_FreeExecutionContext ( TRI_js_exec_context_t context ) ; <nl> - <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / 
/ / / / / / / / / / / / / / / / / / / <nl> - / / / @ } <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> + void TRI_FreeExecutionContext ( TRI_js_exec_context_t ) ; <nl> <nl> / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl> / / - - SECTION - - public functions <nl> / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl> <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - / / / @ addtogroup V8Utils <nl> - / / / @ { <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> / / / @ brief executes a result context <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> <nl> - TRI_json_t * TRI_ExecuteResultContext ( TRI_js_exec_context_t context ) ; <nl> - <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - / / / @ } <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> + struct TRI_json_s * TRI_ExecuteResultContext ( TRI_js_exec_context_t context ) ; <nl> <nl> # ifdef __cplusplus <nl> } <nl> TRI_json_t * TRI_ExecuteResultContext ( TRI_js_exec_context_t context ) ; <nl> <nl> # endif <nl> <nl> + / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl> + / / - - SECTION - - END - OF - FILE <nl> + / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl> + <nl> / / Local Variables : <nl> / / mode : outline - minor <nl> / / outline - regexp : " / / / @ brief \ \ | / / / { @ inheritDoc } \ \ | / / / @ addtogroup \ \ | / / / @ page \ \ | / / - - SECTION - - \ \ | / / / @ \ \ } " <nl> mmm a / lib / V8 / v8 - globals . cpp <nl> ppp b / lib / V8 / v8 - globals . cpp <nl> <nl> <nl> # include " v8 - globals . 
h " <nl> <nl> + / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl> + / / - - SECTION - - public types <nl> + / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl> + <nl> + / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> + / / / @ brief constructor <nl> + / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> + <nl> + TRI_v8_global_s : : TRI_v8_global_s ( v8 : : Isolate * isolate ) <nl> + : JSBarriers ( ) , <nl> + <nl> + ErrorTempl ( ) , <nl> + GeneralCursorTempl ( ) , <nl> + ShapedJsonTempl ( ) , <nl> + TransactionTempl ( ) , <nl> + VocbaseColTempl ( ) , <nl> + VocbaseTempl ( ) , <nl> + <nl> + DeleteConstant ( ) , <nl> + GetConstant ( ) , <nl> + HeadConstant ( ) , <nl> + OptionsConstant ( ) , <nl> + PatchConstant ( ) , <nl> + PostConstant ( ) , <nl> + PutConstant ( ) , <nl> + <nl> + BodyFromFileKey ( ) , <nl> + BodyKey ( ) , <nl> + CodeKey ( ) , <nl> + ContentTypeKey ( ) , <nl> + ErrorKey ( ) , <nl> + ErrorMessageKey ( ) , <nl> + ErrorNumKey ( ) , <nl> + HeadersKey ( ) , <nl> + IdKey ( ) , <nl> + IsSystemKey ( ) , <nl> + IsVolatileKey ( ) , <nl> + JournalSizeKey ( ) , <nl> + KeyOptionsKey ( ) , <nl> + ParametersKey ( ) , <nl> + PathKey ( ) , <nl> + PrefixKey ( ) , <nl> + RequestBodyKey ( ) , <nl> + RequestTypeKey ( ) , <nl> + ResponseCodeKey ( ) , <nl> + SuffixKey ( ) , <nl> + TransformationsKey ( ) , <nl> + UrlKey ( ) , <nl> + UserKey ( ) , <nl> + WaitForSyncKey ( ) , <nl> + <nl> + _FromKey ( ) , <nl> + _IdKey ( ) , <nl> + _KeyKey ( ) , <nl> + _OldRevKey ( ) , <nl> + _RevKey ( ) , <nl> + _ToKey ( ) , <nl> + <nl> + DocumentIdRegex ( ) , <nl> + DocumentKeyRegex ( ) , <nl> + IdRegex ( ) , <nl> + IndexIdRegex ( ) , <nl> + <nl> + _currentTransaction ( 0 ) , <nl> + _vocbase ( 0 ) { <nl> + v8 : : HandleScope scope ; <nl> + <nl> + DeleteConstant = v8 : : Persistent < v8 : : String > : : New ( isolate , TRI_V8_SYMBOL ( " DELETE " ) ) ; <nl> + GetConstant = v8 : : Persistent < v8 : : String > : : New ( isolate , TRI_V8_SYMBOL ( " GET " ) ) ; <nl> + HeadConstant = v8 : : Persistent < v8 : : String > : : New ( isolate , TRI_V8_SYMBOL ( " HEAD " ) ) ; <nl> + OptionsConstant = v8 : : Persistent < v8 : : String > : : New ( isolate , TRI_V8_SYMBOL ( " OPTIONS " ) ) ; <nl> + PatchConstant = v8 : : Persistent < v8 : : String > : : New ( isolate , TRI_V8_SYMBOL ( " PATCH " ) ) ; <nl> + PostConstant = v8 : : Persistent < v8 : : String > : : New ( isolate , TRI_V8_SYMBOL ( " POST " ) ) ; <nl> + PutConstant = v8 : : Persistent < v8 : : String > : : New ( isolate , TRI_V8_SYMBOL ( " PUT " ) ) ; <nl> + <nl> + BodyFromFileKey = v8 : : Persistent < v8 : : String > : : New ( isolate , TRI_V8_SYMBOL ( " bodyFromFile " ) ) ; <nl> + BodyKey = v8 : : Persistent < v8 : : String > : : New ( isolate , TRI_V8_SYMBOL ( " body " ) ) ; <nl> + CodeKey = v8 : : Persistent < v8 : : String > : : New ( isolate , TRI_V8_SYMBOL ( " code " ) ) ; <nl> + ContentTypeKey = v8 : : Persistent < v8 : : String > : : New ( isolate , TRI_V8_SYMBOL ( " contentType " ) ) ; <nl> + ErrorKey = v8 : : Persistent < v8 : : String > : : New ( isolate , TRI_V8_SYMBOL ( " error " ) ) ; <nl> + ErrorMessageKey = v8 : : Persistent < v8 : : String > : : New ( isolate , TRI_V8_SYMBOL ( " errorMessage " ) ) ; <nl> + ErrorNumKey = v8 : : Persistent < v8 : : String > 
: : New ( isolate , TRI_V8_SYMBOL ( " errorNum " ) ) ; <nl> + HeadersKey = v8 : : Persistent < v8 : : String > : : New ( isolate , TRI_V8_SYMBOL ( " headers " ) ) ; <nl> + IdKey = v8 : : Persistent < v8 : : String > : : New ( isolate , TRI_V8_SYMBOL ( " id " ) ) ; <nl> + IsSystemKey = v8 : : Persistent < v8 : : String > : : New ( isolate , TRI_V8_SYMBOL ( " isSystem " ) ) ; <nl> + IsVolatileKey = v8 : : Persistent < v8 : : String > : : New ( isolate , TRI_V8_SYMBOL ( " isVolatile " ) ) ; <nl> + JournalSizeKey = v8 : : Persistent < v8 : : String > : : New ( isolate , TRI_V8_SYMBOL ( " journalSize " ) ) ; <nl> + KeyOptionsKey = v8 : : Persistent < v8 : : String > : : New ( isolate , TRI_V8_SYMBOL ( " keyOptions " ) ) ; <nl> + ParametersKey = v8 : : Persistent < v8 : : String > : : New ( isolate , TRI_V8_SYMBOL ( " parameters " ) ) ; <nl> + PathKey = v8 : : Persistent < v8 : : String > : : New ( isolate , TRI_V8_SYMBOL ( " path " ) ) ; <nl> + PrefixKey = v8 : : Persistent < v8 : : String > : : New ( isolate , TRI_V8_SYMBOL ( " prefix " ) ) ; <nl> + RequestBodyKey = v8 : : Persistent < v8 : : String > : : New ( isolate , TRI_V8_SYMBOL ( " requestBody " ) ) ; <nl> + RequestTypeKey = v8 : : Persistent < v8 : : String > : : New ( isolate , TRI_V8_SYMBOL ( " requestType " ) ) ; <nl> + ResponseCodeKey = v8 : : Persistent < v8 : : String > : : New ( isolate , TRI_V8_SYMBOL ( " responseCode " ) ) ; <nl> + SuffixKey = v8 : : Persistent < v8 : : String > : : New ( isolate , TRI_V8_SYMBOL ( " suffix " ) ) ; <nl> + TransformationsKey = v8 : : Persistent < v8 : : String > : : New ( isolate , TRI_V8_SYMBOL ( " transformations " ) ) ; <nl> + UrlKey = v8 : : Persistent < v8 : : String > : : New ( isolate , TRI_V8_SYMBOL ( " url " ) ) ; <nl> + UserKey = v8 : : Persistent < v8 : : String > : : New ( isolate , TRI_V8_SYMBOL ( " user " ) ) ; <nl> + WaitForSyncKey = v8 : : Persistent < v8 : : String > : : New ( isolate , TRI_V8_SYMBOL ( " waitForSync " ) ) ; <nl> + <nl> + _FromKey = v8 : : Persistent < v8 : : String > : : New ( isolate , TRI_V8_SYMBOL ( " _from " ) ) ; <nl> + _IdKey = v8 : : Persistent < v8 : : String > : : New ( isolate , TRI_V8_SYMBOL ( " _id " ) ) ; <nl> + _KeyKey = v8 : : Persistent < v8 : : String > : : New ( isolate , TRI_V8_SYMBOL ( " _key " ) ) ; <nl> + _OldRevKey = v8 : : Persistent < v8 : : String > : : New ( isolate , TRI_V8_SYMBOL ( " _oldRev " ) ) ; <nl> + _RevKey = v8 : : Persistent < v8 : : String > : : New ( isolate , TRI_V8_SYMBOL ( " _rev " ) ) ; <nl> + _ToKey = v8 : : Persistent < v8 : : String > : : New ( isolate , TRI_V8_SYMBOL ( " _to " ) ) ; <nl> + } <nl> + <nl> + / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> + / / / @ brief destructor <nl> + / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> + <nl> + TRI_v8_global_s : : ~ TRI_v8_global_s ( ) { <nl> + regfree ( & DocumentIdRegex ) ; <nl> + regfree ( & DocumentKeyRegex ) ; <nl> + regfree ( & IndexIdRegex ) ; <nl> + regfree ( & IdRegex ) ; <nl> + } <nl> + <nl> / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl> / / - - SECTION - - GLOBAL FUNCTIONS <nl> / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl> <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / 
/ / / / / / / / / / / / / / / / / / / / / <nl> - / / / @ addtogroup V8Globals <nl> - / / / @ { <nl> + / / / @ brief creates a global context <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> <nl> + TRI_v8_global_t * TRI_CreateV8Globals ( v8 : : Isolate * isolate ) { <nl> + TRI_v8_global_t * v8g = ( TRI_v8_global_t * ) isolate - > GetData ( ) ; <nl> + <nl> + if ( v8g = = 0 ) { <nl> + v8g = new TRI_v8_global_t ( isolate ) ; <nl> + isolate - > SetData ( v8g ) ; <nl> + } <nl> + <nl> + return v8g ; <nl> + } <nl> + <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - / / / @ brief add a method to a prototype object <nl> + / / / @ brief adds a method to a prototype object <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> <nl> void TRI_AddProtoMethodVocbase ( v8 : : Handle < v8 : : Template > tpl , <nl> void TRI_AddProtoMethodVocbase ( v8 : : Handle < v8 : : Template > tpl , <nl> } <nl> <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - / / / @ brief add a method to an object <nl> + / / / @ brief adds a method to an object <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> <nl> void TRI_AddMethodVocbase ( v8 : : Handle < v8 : : ObjectTemplate > tpl , <nl> void TRI_AddMethodVocbase ( v8 : : Handle < v8 : : ObjectTemplate > tpl , <nl> } <nl> <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - / / / @ brief add a global function to the given context <nl> + / / / @ brief adds a global function to the given context <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> <nl> void TRI_AddGlobalFunctionVocbase ( v8 : : Handle < v8 : : Context > context , <nl> void TRI_AddGlobalFunctionVocbase ( v8 : : Handle < v8 : : Context > context , <nl> } <nl> <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - / / / @ brief add a global function to the given context <nl> + / / / @ brief adds a global function to the given context <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> <nl> void TRI_AddGlobalFunctionVocbase ( v8 : : Handle < v8 : : Context > context , <nl> void TRI_AddGlobalVariableVocbase ( v8 : : Handle < v8 : : Context > context , <nl> context - > Global ( ) - > Set ( TRI_V8_SYMBOL ( name ) , value , v8 : : ReadOnly ) ; <nl> } <nl> <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - / / / @ } <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / 
/ / / / / / / / / / / / / <nl> + / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl> + / / - - SECTION - - END - OF - FILE <nl> + / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl> <nl> / / Local Variables : <nl> / / mode : outline - minor <nl> mmm a / lib / V8 / v8 - globals . h <nl> ppp b / lib / V8 / v8 - globals . h <nl> <nl> / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl> <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - / / / @ addtogroup V8VocBase <nl> - / / / @ { <nl> + / / / @ brief create a v8 symbol for the specified string <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> <nl> + # define TRI_V8_SYMBOL ( name ) \ <nl> + v8 : : String : : NewSymbol ( name , strlen ( name ) ) <nl> + <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - / / / @ brief create a v8 symbol for the specified string <nl> + / / / @ brief create a v8 string for the specified string <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> <nl> - # define TRI_V8_SYMBOL ( name ) v8 : : String : : NewSymbol ( name , strlen ( name ) ) <nl> + # define TRI_V8_STRING ( name ) \ <nl> + v8 : : String : : New ( name ) <nl> <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> / / / @ brief shortcut for throwing an exception with an error code <nl> <nl> # define TRI_V8_SYNTAX_ERROR ( scope , message ) \ <nl> return scope . 
Close ( v8 : : ThrowException ( v8 : : Exception : : SyntaxError ( v8 : : String : : New ( message ) ) ) ) <nl> <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - / / / @ } <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - <nl> / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl> / / - - SECTION - - public types <nl> / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl> <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - / / / @ addtogroup V8Globals <nl> - / / / @ { <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> / / / @ brief globals stored in the isolate <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> typedef struct TRI_v8_global_s { <nl> / / / @ brief constructor <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> <nl> - TRI_v8_global_s ( ) <nl> - : JSBarriers ( ) , <nl> - ErrorTempl ( ) , <nl> - GeneralCursorTempl ( ) , <nl> - TransactionTempl ( ) , <nl> - VocbaseColTempl ( ) , <nl> - VocbaseTempl ( ) , <nl> - DictionaryTempl ( ) , <nl> - DidKey ( ) , <nl> - FromKey ( ) , <nl> - IidKey ( ) , <nl> - OldRevKey ( ) , <nl> - RevKey ( ) , <nl> - ToKey ( ) , <nl> - BodyKey ( ) , <nl> - BodyFromFileKey ( ) , <nl> - ContentTypeKey ( ) , <nl> - IsSystemKey ( ) , <nl> - IsVolatileKey ( ) , <nl> - JournalSizeKey ( ) , <nl> - KeyOptionsKey ( ) , <nl> - ParametersKey ( ) , <nl> - PathKey ( ) , <nl> - PrefixKey ( ) , <nl> - ResponseCodeKey ( ) , <nl> - SuffixKey ( ) , <nl> - UrlKey ( ) , <nl> - UserKey ( ) , <nl> - WaitForSyncKey ( ) , <nl> - DocumentIdRegex ( ) , <nl> - DocumentKeyRegex ( ) , <nl> - IndexIdRegex ( ) , <nl> - IdRegex ( ) , <nl> - _currentTransaction ( ) , <nl> - _vocbase ( 0 ) { <nl> - } <nl> - <nl> - ~ TRI_v8_global_s ( ) { <nl> - regfree ( & DocumentIdRegex ) ; <nl> - regfree ( & DocumentKeyRegex ) ; <nl> - regfree ( & IndexIdRegex ) ; <nl> - regfree ( & IdRegex ) ; <nl> - } <nl> + TRI_v8_global_s ( v8 : : Isolate * ) ; <nl> <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - / / / @ brief barrier mapping for weak pointers <nl> + / / / @ brief destructor <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> <nl> - std : : map < void * , v8 : : Persistent < v8 : : Value > > JSBarriers ; <nl> + ~ TRI_v8_global_s ( ) ; <nl> + <nl> + / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl> + / / - - SECTION - - HELPERS <nl> + / / 
mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl> <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - / / / @ } <nl> + / / / @ brief barrier mapping for weak pointers <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> <nl> + std : : map < void * , v8 : : Persistent < v8 : : Value > > JSBarriers ; <nl> + <nl> / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl> / / - - SECTION - - JAVASCRIPT OBJECT TEMPLATES <nl> / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl> <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - / / / @ addtogroup V8Globals <nl> - / / / @ { <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> / / / @ brief error template <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> typedef struct TRI_v8_global_s { <nl> <nl> v8 : : Persistent < v8 : : ObjectTemplate > GeneralCursorTempl ; <nl> <nl> + / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> + / / / @ brief TRI_shaped_json_t template <nl> + / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> + <nl> + v8 : : Persistent < v8 : : ObjectTemplate > ShapedJsonTempl ; <nl> + <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> / / / @ brief transaction template <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> typedef struct TRI_v8_global_s { <nl> <nl> v8 : : Persistent < v8 : : ObjectTemplate > VocbaseTempl ; <nl> <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - / / / @ brief TRI_shaped_json_t template <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - <nl> - v8 : : Persistent < v8 : : ObjectTemplate > ShapedJsonTempl ; <nl> - <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - / / / @ brief dictionary template <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - <nl> - v8 : : Persistent < v8 : : ObjectTemplate > DictionaryTempl ; 
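// A note on the struct refactor above: member initialisation of TRI_v8_global_s moves into
// a constructor taking v8::Isolate*, matching the isolate-aware handle API the patch uses
// elsewhere (v8::Persistent<T>::New(isolate, ...) and Dispose(isolate)). A minimal sketch
// of that lifecycle pattern, using a hypothetical MyGlobals struct:
#include <v8.h>

struct MyGlobals {
  explicit MyGlobals (v8::Isolate* isolate)
    : _isolate(isolate) {
    v8::HandleScope scope;
    // persistent handles are now created against an explicit isolate
    _keyKey = v8::Persistent<v8::String>::New(isolate, v8::String::NewSymbol("_key"));
  }

  ~MyGlobals () {
    _keyKey.Dispose(_isolate);   // isolate-aware disposal, as in the patch
    _keyKey.Clear();
  }

  v8::Isolate* _isolate;         // owning isolate, kept for disposal
  v8::Persistent<v8::String> _keyKey;
};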
<nl> - <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - / / / @ } <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - <nl> / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl> / / - - SECTION - - JAVASCRIPT CONSTANTS <nl> / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl> <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - / / / @ addtogroup V8Globals <nl> - / / / @ { <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> / / / @ brief " DELETE " function name <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> typedef struct TRI_v8_global_s { <nl> <nl> v8 : : Persistent < v8 : : String > PutConstant ; <nl> <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - / / / @ } <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - <nl> / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl> / / - - SECTION - - JAVASCRIPT KEY NAMES <nl> / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl> <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - / / / @ addtogroup V8Globals <nl> - / / / @ { <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - / / / @ brief " _id " key name <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - <nl> - v8 : : Persistent < v8 : : String > DidKey ; <nl> - <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - / / / @ brief " _key " key name <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - <nl> - v8 : : Persistent < v8 : : String > KeyKey ; <nl> - <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - / / / @ brief " _from " key name <nl> + / / / @ brief " 
bodyFromFile " key name <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> <nl> - v8 : : Persistent < v8 : : String > FromKey ; <nl> + v8 : : Persistent < v8 : : String > BodyFromFileKey ; <nl> <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - / / / @ brief " id " key name <nl> + / / / @ brief " body " key name <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> <nl> - v8 : : Persistent < v8 : : String > IidKey ; <nl> + v8 : : Persistent < v8 : : String > BodyKey ; <nl> <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - / / / @ brief " _oldRev " key name <nl> + / / / @ brief " code " key name <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> <nl> - v8 : : Persistent < v8 : : String > OldRevKey ; <nl> + v8 : : Persistent < v8 : : String > CodeKey ; <nl> <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - / / / @ brief " _rev " key name <nl> + / / / @ brief " contentType " key name <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> <nl> - v8 : : Persistent < v8 : : String > RevKey ; <nl> + v8 : : Persistent < v8 : : String > ContentTypeKey ; <nl> <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - / / / @ brief " _to " key name <nl> + / / / @ brief " error " key name <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> <nl> - v8 : : Persistent < v8 : : String > ToKey ; <nl> + v8 : : Persistent < v8 : : String > ErrorKey ; <nl> <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - / / / @ brief " body " key name <nl> + / / / @ brief " errorMessage " key name <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> <nl> - v8 : : Persistent < v8 : : String > BodyKey ; <nl> + v8 : : Persistent < v8 : : String > ErrorMessageKey ; <nl> <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - / / / @ brief " bodyFromFile " key name <nl> + / / / @ brief " errorNum " key name <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> <nl> - v8 : : Persistent < v8 : : String > BodyFromFileKey ; <nl> + v8 : : Persistent < v8 : : String > ErrorNumKey ; <nl> <nl> / / / / / / / / / / / / / / / / / / / 
/ / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - / / / @ brief " contentType " key name <nl> + / / / @ brief " headers " key name <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> <nl> - v8 : : Persistent < v8 : : String > ContentTypeKey ; <nl> + v8 : : Persistent < v8 : : String > HeadersKey ; <nl> <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - / / / @ brief " headers " key name <nl> + / / / @ brief " id " key name <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> <nl> - v8 : : Persistent < v8 : : String > HeadersKey ; <nl> + v8 : : Persistent < v8 : : String > IdKey ; <nl> <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> / / / @ brief " isSystem " key name <nl> typedef struct TRI_v8_global_s { <nl> v8 : : Persistent < v8 : : String > WaitForSyncKey ; <nl> <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - / / / @ } <nl> + / / / @ brief " _from " key name <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> <nl> - / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl> - / / - - SECTION - - REGULAR EXPRESSIONS <nl> - / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl> + v8 : : Persistent < v8 : : String > _FromKey ; <nl> + <nl> + / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> + / / / @ brief " _id " key name <nl> + / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> + <nl> + v8 : : Persistent < v8 : : String > _IdKey ; <nl> + <nl> + / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> + / / / @ brief " _key " key name <nl> + / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> + <nl> + v8 : : Persistent < v8 : : String > _KeyKey ; <nl> + <nl> + / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> + / / / @ brief " _oldRev " key name <nl> + / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> + <nl> + v8 : : Persistent < v8 : : String > _OldRevKey ; <nl> + <nl> + / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> + / / / @ brief " _rev " key name <nl> + / 
/ / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> + <nl> + v8 : : Persistent < v8 : : String > _RevKey ; <nl> <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - / / / @ addtogroup V8Globals <nl> - / / / @ { <nl> + / / / @ brief " _to " key name <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> <nl> + v8 : : Persistent < v8 : : String > _ToKey ; <nl> + <nl> + / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl> + / / - - SECTION - - REGULAR EXPRESSIONS <nl> + / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl> + <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> / / / @ brief document identifier as collection name / key <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> typedef struct TRI_v8_global_s { <nl> regex_t DocumentKeyRegex ; <nl> <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - / / / @ brief index identifier as collection - name : index - id <nl> + / / / @ brief numeric id , used for index ids <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> <nl> - regex_t IndexIdRegex ; <nl> + regex_t IdRegex ; <nl> <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - / / / @ brief numeric id , used for index ids <nl> + / / / @ brief index identifier as collection - name : index - id <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> <nl> - regex_t IdRegex ; <nl> + regex_t IndexIdRegex ; <nl> + <nl> + / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl> + / / - - SECTION - - DATABASE <nl> + / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl> <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> / / / @ brief currently running transaction <nl> typedef struct TRI_v8_global_s { <nl> } <nl> TRI_v8_global_t ; <nl> <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - / / / @ } <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - <nl> / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl> / / - - SECTION - - GLOBAL FUNCTIONS <nl> / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl> <nl> / 
/ / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - / / / @ addtogroup V8Globals <nl> - / / / @ { <nl> + / / / @ brief creates a global context <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> <nl> + TRI_v8_global_t * TRI_CreateV8Globals ( v8 : : Isolate * ) ; <nl> + <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> / / / @ brief adds a method to a prototype object <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> void TRI_AddGlobalVariableVocbase ( v8 : : Handle < v8 : : Context > context , <nl> const char * const name , <nl> v8 : : Handle < v8 : : Value > value ) ; <nl> <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - / / / @ } <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - <nl> # endif <nl> <nl> / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl> mmm a / lib / V8 / v8 - json . cpp <nl> ppp b / lib / V8 / v8 - json . cpp <nl> static v8 : : Handle < v8 : : Value > ParseObject ( yyscan_t scanner , int c ) ; <nl> / / - - SECTION - - private functions <nl> / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl> <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - / / / @ addtogroup Json <nl> - / / / @ { <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> / / / @ brief parses a list <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> static v8 : : Handle < v8 : : Value > ParseObject ( yyscan_t scanner , int c ) { <nl> return scope . Close ( v8 : : Undefined ( ) ) ; <nl> } <nl> <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - / / / @ } <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - <nl> / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl> / / - - SECTION - - public functions <nl> / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl> v8 : : Handle < v8 : : Value > TRI_FromJsonString ( char const * text , char * * error ) { <nl> return scope . 
Close ( object ) ; <nl> } <nl> <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - / / / @ } <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> + / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl> + / / - - SECTION - - END - OF - FILE <nl> + / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl> <nl> / / Local Variables : <nl> / / mode : C <nl> mmm a / lib / V8 / v8 - json . ll <nl> ppp b / lib / V8 / v8 - json . ll <nl> static v8 : : Handle < v8 : : Value > ParseObject ( yyscan_t scanner , int c ) ; <nl> / / - - SECTION - - private functions <nl> / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl> <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - / / / @ addtogroup Json <nl> - / / / @ { <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> / / / @ brief parses a list <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> static v8 : : Handle < v8 : : Value > ParseObject ( yyscan_t scanner , int c ) { <nl> return scope . Close ( v8 : : Undefined ( ) ) ; <nl> } <nl> <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - / / / @ } <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - <nl> / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl> / / - - SECTION - - public functions <nl> / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl> v8 : : Handle < v8 : : Value > TRI_FromJsonString ( char const * text , char * * error ) { <nl> return scope . Close ( object ) ; <nl> } <nl> <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - / / / @ } <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> + / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl> + / / - - SECTION - - END - OF - FILE <nl> + / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl> <nl> / / Local Variables : <nl> / / mode : C <nl> mmm a / lib / V8 / v8 - shell . cpp <nl> ppp b / lib / V8 / v8 - shell . 
cpp <nl> using namespace std ; <nl> / / - - SECTION - - private functions <nl> / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl> <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - / / / @ addtogroup V8Shell <nl> - / / / @ { <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> / / / @ brief begins a new CSV line <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> static void ProcessCsvBegin ( TRI_csv_parser_t * parser , size_t row ) { <nl> / / / @ brief adds a new CSV field <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> <nl> - static void ProcessCsvAdd ( TRI_csv_parser_t * parser , char const * field , size_t row , size_t column , bool escaped ) { <nl> + static void ProcessCsvAdd ( TRI_csv_parser_t * parser , const char * field , size_t row , size_t column , bool escaped ) { <nl> v8 : : Handle < v8 : : Array > * array = reinterpret_cast < v8 : : Handle < v8 : : Array > * > ( parser - > _dataBegin ) ; <nl> <nl> ( * array ) - > Set ( column , v8 : : String : : New ( field ) ) ; <nl> static void ProcessCsvAdd ( TRI_csv_parser_t * parser , char const * field , size_t r <nl> / / / @ brief ends a CSV line <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> <nl> - static void ProcessCsvEnd ( TRI_csv_parser_t * parser , char const * field , size_t row , size_t column , bool escaped ) { <nl> + static void ProcessCsvEnd ( TRI_csv_parser_t * parser , const char * field , size_t row , size_t column , bool escaped ) { <nl> v8 : : Handle < v8 : : Array > * array = reinterpret_cast < v8 : : Handle < v8 : : Array > * > ( parser - > _dataBegin ) ; <nl> <nl> ( * array ) - > Set ( column , v8 : : String : : New ( field ) ) ; <nl> static void ProcessCsvEnd ( TRI_csv_parser_t * parser , char const * field , size_t r <nl> ( * cb ) - > Call ( * cb , 2 , args ) ; <nl> } <nl> <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - / / / @ } <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - <nl> / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl> / / - - SECTION - - JS functions <nl> / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl> <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - / / / @ addtogroup V8Shell <nl> - / / / @ { <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / 
/ <nl> - <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> / / / @ brief processes a CSV file <nl> / / / <nl> static v8 : : Handle < v8 : : Value > JS_ProcessJsonFile ( v8 : : Arguments const & argv ) { <nl> <nl> getline ( file , line ) ; <nl> <nl> - char const * ptr = line . c_str ( ) ; <nl> - char const * end = ptr + line . length ( ) ; <nl> + const char * ptr = line . c_str ( ) ; <nl> + const char * end = ptr + line . length ( ) ; <nl> <nl> while ( ptr < end & & ( * ptr = = ' ' | | * ptr = = ' \ t ' | | * ptr = = ' \ r ' ) ) { <nl> + + ptr ; <nl> static v8 : : Handle < v8 : : Value > JS_ProcessJsonFile ( v8 : : Arguments const & argv ) { <nl> return scope . Close ( v8 : : Undefined ( ) ) ; <nl> } <nl> <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - / / / @ } <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - <nl> / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl> / / - - SECTION - - GENERAL <nl> / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl> static v8 : : Handle < v8 : : Value > JS_ProcessJsonFile ( v8 : : Arguments const & argv ) { <nl> / / - - SECTION - - public functions <nl> / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl> <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - / / / @ addtogroup V8Shell <nl> - / / / @ { <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> / / / @ brief stores the V8 shell functions inside the global variable <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> static v8 : : Handle < v8 : : Value > JS_ProcessJsonFile ( v8 : : Arguments const & argv ) { <nl> void TRI_InitV8Shell ( v8 : : Handle < v8 : : Context > context ) { <nl> v8 : : HandleScope scope ; <nl> <nl> + v8 : : Isolate * isolate = v8 : : Isolate : : GetCurrent ( ) ; <nl> + <nl> / / . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . <nl> / / create the global functions <nl> / / . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . <nl> void TRI_InitV8Shell ( v8 : : Handle < v8 : : Context > context ) { <nl> TRI_AddGlobalFunctionVocbase ( context , " SYS_PROCESS_CSV_FILE " , JS_ProcessCsvFile ) ; <nl> TRI_AddGlobalFunctionVocbase ( context , " SYS_PROCESS_JSON_FILE " , JS_ProcessJsonFile ) ; <nl> <nl> + / / . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . 
<nl> + / / create the global variables <nl> + / / . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . <nl> + <nl> v8 : : Handle < v8 : : Object > colors = v8 : : Object : : New ( ) ; <nl> <nl> - colors - > Set ( v8 : : String : : New ( " COLOR_RED " ) , <nl> - v8 : : String : : New ( TRI_SHELL_COLOR_RED ) , <nl> + colors - > Set ( TRI_V8_STRING ( " COLOR_RED " ) , <nl> + TRI_V8_STRING ( TRI_SHELL_COLOR_RED ) , <nl> v8 : : ReadOnly ) ; <nl> <nl> - colors - > Set ( v8 : : String : : New ( " COLOR_BOLD_RED " ) , <nl> - v8 : : String : : New ( TRI_SHELL_COLOR_BOLD_RED ) , <nl> + colors - > Set ( TRI_V8_STRING ( " COLOR_BOLD_RED " ) , <nl> + TRI_V8_STRING ( TRI_SHELL_COLOR_BOLD_RED ) , <nl> v8 : : ReadOnly ) ; <nl> <nl> - colors - > Set ( v8 : : String : : New ( " COLOR_GREEN " ) , <nl> - v8 : : String : : New ( TRI_SHELL_COLOR_GREEN ) , <nl> + colors - > Set ( TRI_V8_STRING ( " COLOR_GREEN " ) , <nl> + TRI_V8_STRING ( TRI_SHELL_COLOR_GREEN ) , <nl> v8 : : ReadOnly ) ; <nl> <nl> - colors - > Set ( v8 : : String : : New ( " COLOR_BOLD_GREEN " ) , <nl> - v8 : : String : : New ( TRI_SHELL_COLOR_BOLD_GREEN ) , <nl> + colors - > Set ( TRI_V8_STRING ( " COLOR_BOLD_GREEN " ) , <nl> + TRI_V8_STRING ( TRI_SHELL_COLOR_BOLD_GREEN ) , <nl> v8 : : ReadOnly ) ; <nl> <nl> - colors - > Set ( v8 : : String : : New ( " COLOR_BLUE " ) , <nl> - v8 : : String : : New ( TRI_SHELL_COLOR_BLUE ) , <nl> + colors - > Set ( TRI_V8_STRING ( " COLOR_BLUE " ) , <nl> + TRI_V8_STRING ( TRI_SHELL_COLOR_BLUE ) , <nl> v8 : : ReadOnly ) ; <nl> <nl> - colors - > Set ( v8 : : String : : New ( " COLOR_BOLD_BLUE " ) , <nl> - v8 : : String : : New ( TRI_SHELL_COLOR_BOLD_BLUE ) , <nl> + colors - > Set ( TRI_V8_STRING ( " COLOR_BOLD_BLUE " ) , <nl> + TRI_V8_STRING ( TRI_SHELL_COLOR_BOLD_BLUE ) , <nl> v8 : : ReadOnly ) ; <nl> <nl> - colors - > Set ( v8 : : String : : New ( " COLOR_YELLOW " ) , <nl> - v8 : : String : : New ( TRI_SHELL_COLOR_YELLOW ) , <nl> + colors - > Set ( TRI_V8_STRING ( " COLOR_YELLOW " ) , <nl> + TRI_V8_STRING ( TRI_SHELL_COLOR_YELLOW ) , <nl> v8 : : ReadOnly ) ; <nl> <nl> - colors - > Set ( v8 : : String : : New ( " COLOR_BOLD_YELLOW " ) , <nl> - v8 : : String : : New ( TRI_SHELL_COLOR_BOLD_YELLOW ) , <nl> + colors - > Set ( TRI_V8_STRING ( " COLOR_BOLD_YELLOW " ) , <nl> + TRI_V8_STRING ( TRI_SHELL_COLOR_BOLD_YELLOW ) , <nl> v8 : : ReadOnly ) ; <nl> <nl> - colors - > Set ( v8 : : String : : New ( " COLOR_WHITE " ) , <nl> - v8 : : String : : New ( TRI_SHELL_COLOR_WHITE ) , <nl> + colors - > Set ( TRI_V8_STRING ( " COLOR_WHITE " ) , <nl> + TRI_V8_STRING ( TRI_SHELL_COLOR_WHITE ) , <nl> v8 : : ReadOnly ) ; <nl> <nl> - colors - > Set ( v8 : : String : : New ( " COLOR_BOLD_WHITE " ) , <nl> - v8 : : String : : New ( TRI_SHELL_COLOR_BOLD_WHITE ) , <nl> + colors - > Set ( TRI_V8_STRING ( " COLOR_BOLD_WHITE " ) , <nl> + TRI_V8_STRING ( TRI_SHELL_COLOR_BOLD_WHITE ) , <nl> v8 : : ReadOnly ) ; <nl> <nl> - colors - > Set ( v8 : : String : : New ( " COLOR_CYAN " ) , <nl> - v8 : : String : : New ( TRI_SHELL_COLOR_CYAN ) , <nl> + colors - > Set ( TRI_V8_STRING ( " COLOR_CYAN " ) , <nl> + TRI_V8_STRING ( TRI_SHELL_COLOR_CYAN ) , <nl> v8 : : ReadOnly ) ; <nl> <nl> - colors - > Set ( v8 : : String : : New ( " COLOR_BOLD_CYAN " ) , <nl> - v8 : : String : : New ( TRI_SHELL_COLOR_BOLD_CYAN ) , <nl> + colors - > Set ( TRI_V8_STRING ( " COLOR_BOLD_CYAN " ) , <nl> + TRI_V8_STRING ( TRI_SHELL_COLOR_BOLD_CYAN ) , <nl> v8 : : ReadOnly ) ; <nl> 
<nl> - colors - > Set ( v8 : : String : : New ( " COLOR_MAGENTA " ) , <nl> - v8 : : String : : New ( TRI_SHELL_COLOR_MAGENTA ) , <nl> + colors - > Set ( TRI_V8_STRING ( " COLOR_MAGENTA " ) , <nl> + TRI_V8_STRING ( TRI_SHELL_COLOR_MAGENTA ) , <nl> v8 : : ReadOnly ) ; <nl> <nl> - colors - > Set ( v8 : : String : : New ( " COLOR_BOLD_MAGENTA " ) , <nl> - v8 : : String : : New ( TRI_SHELL_COLOR_BOLD_MAGENTA ) , <nl> + colors - > Set ( TRI_V8_STRING ( " COLOR_BOLD_MAGENTA " ) , <nl> + TRI_V8_STRING ( TRI_SHELL_COLOR_BOLD_MAGENTA ) , <nl> v8 : : ReadOnly ) ; <nl> <nl> - colors - > Set ( v8 : : String : : New ( " COLOR_BLACK " ) , <nl> - v8 : : String : : New ( TRI_SHELL_COLOR_BLACK ) , <nl> + colors - > Set ( TRI_V8_STRING ( " COLOR_BLACK " ) , <nl> + TRI_V8_STRING ( TRI_SHELL_COLOR_BLACK ) , <nl> v8 : : ReadOnly ) ; <nl> <nl> - colors - > Set ( v8 : : String : : New ( " COLOR_BOLD_BLACK " ) , <nl> - v8 : : String : : New ( TRI_SHELL_COLOR_BOLD_BLACK ) , <nl> + colors - > Set ( TRI_V8_STRING ( " COLOR_BOLD_BLACK " ) , <nl> + TRI_V8_STRING ( TRI_SHELL_COLOR_BOLD_BLACK ) , <nl> v8 : : ReadOnly ) ; <nl> <nl> - colors - > Set ( v8 : : String : : New ( " COLOR_BLINK " ) , <nl> - v8 : : String : : New ( TRI_SHELL_COLOR_BLINK ) , <nl> + colors - > Set ( TRI_V8_STRING ( " COLOR_BLINK " ) , <nl> + TRI_V8_STRING ( TRI_SHELL_COLOR_BLINK ) , <nl> v8 : : ReadOnly ) ; <nl> <nl> - colors - > Set ( v8 : : String : : New ( " COLOR_BRIGHT " ) , <nl> - v8 : : String : : New ( TRI_SHELL_COLOR_BRIGHT ) , <nl> + colors - > Set ( TRI_V8_STRING ( " COLOR_BRIGHT " ) , <nl> + TRI_V8_STRING ( TRI_SHELL_COLOR_BRIGHT ) , <nl> v8 : : ReadOnly ) ; <nl> <nl> - colors - > Set ( v8 : : String : : New ( " COLOR_RESET " ) , <nl> - v8 : : String : : New ( TRI_SHELL_COLOR_RESET ) , <nl> + colors - > Set ( TRI_V8_STRING ( " COLOR_RESET " ) , <nl> + TRI_V8_STRING ( TRI_SHELL_COLOR_RESET ) , <nl> v8 : : ReadOnly ) ; <nl> <nl> - context - > Global ( ) - > Set ( v8 : : String : : New ( " COLORS " ) , colors , v8 : : ReadOnly ) ; <nl> + TRI_AddGlobalVariableVocbase ( context , " COLORS " , colors ) ; <nl> } <nl> <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - / / / @ } <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> + / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl> + / / - - SECTION - - END - OF - FILE <nl> + / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl> <nl> / / Local Variables : <nl> / / mode : outline - minor <nl> mmm a / lib / V8 / v8 - utils . cpp <nl> ppp b / lib / V8 / v8 - utils . cpp <nl> static v8 : : Handle < v8 : : Value > JS_RequestStatistics ( v8 : : Arguments const & argv ) { <nl> return scope . 
Close ( result ) ; <nl> } <nl> <nl> + / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> + / / / @ brief ArangoError <nl> + / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> + <nl> + static v8 : : Handle < v8 : : Value > JS_ArangoError ( const v8 : : Arguments & args ) { <nl> + v8 : : HandleScope scope ; <nl> + <nl> + TRI_v8_global_t * v8g = ( TRI_v8_global_t * ) v8 : : Isolate : : GetCurrent ( ) - > GetData ( ) ; <nl> + <nl> + v8 : : Handle < v8 : : Object > self = args . Holder ( ) - > ToObject ( ) ; <nl> + <nl> + self - > Set ( v8g - > ErrorKey , v8 : : True ( ) ) ; <nl> + self - > Set ( v8g - > ErrorNumKey , v8 : : Integer : : New ( TRI_ERROR_FAILED ) ) ; <nl> + <nl> + if ( 0 < args . Length ( ) & & args [ 0 ] - > IsObject ( ) ) { <nl> + v8 : : Handle < v8 : : Object > data = args [ 0 ] - > ToObject ( ) ; <nl> + <nl> + if ( data - > Has ( v8g - > ErrorKey ) ) { <nl> + self - > Set ( v8g - > ErrorKey , data - > Get ( v8g - > ErrorKey ) ) ; <nl> + } <nl> + <nl> + if ( data - > Has ( v8g - > CodeKey ) ) { <nl> + self - > Set ( v8g - > CodeKey , data - > Get ( v8g - > CodeKey ) ) ; <nl> + } <nl> + <nl> + if ( data - > Has ( v8g - > ErrorNumKey ) ) { <nl> + self - > Set ( v8g - > ErrorNumKey , data - > Get ( v8g - > ErrorNumKey ) ) ; <nl> + } <nl> + <nl> + if ( data - > Has ( v8g - > ErrorMessageKey ) ) { <nl> + self - > Set ( v8g - > ErrorMessageKey , data - > Get ( v8g - > ErrorMessageKey ) ) ; <nl> + } <nl> + } <nl> + <nl> + return scope . Close ( self ) ; <nl> + } <nl> + <nl> / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl> / / - - SECTION - - public functions <nl> / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl> void TRI_InitV8Utils ( v8 : : Handle < v8 : : Context > context , <nl> string const & tempPath ) { <nl> v8 : : HandleScope scope ; <nl> <nl> - v8 : : Handle < v8 : : FunctionTemplate > ft ; <nl> - v8 : : Handle < v8 : : ObjectTemplate > rt ; <nl> - <nl> / / check the isolate <nl> v8 : : Isolate * isolate = v8 : : Isolate : : GetCurrent ( ) ; <nl> - TRI_v8_global_t * v8g = ( TRI_v8_global_t * ) isolate - > GetData ( ) ; <nl> + TRI_v8_global_t * v8g = TRI_CreateV8Globals ( isolate ) ; <nl> <nl> - if ( v8g = = 0 ) { <nl> - / / this check is necessary because when building arangosh , we do not include v8 - vocbase and <nl> - / / this init function is the first one we call <nl> - v8g = new TRI_v8_global_t ; <nl> - isolate - > SetData ( v8g ) ; <nl> - } <nl> + v8 : : Handle < v8 : : FunctionTemplate > ft ; <nl> + v8 : : Handle < v8 : : ObjectTemplate > rt ; <nl> <nl> TempPath = tempPath ; <nl> <nl> void TRI_InitV8Utils ( v8 : : Handle < v8 : : Context > context , <nl> <nl> ft = v8 : : FunctionTemplate : : New ( ) ; <nl> ft - > SetClassName ( TRI_V8_SYMBOL ( " ArangoError " ) ) ; <nl> + ft - > SetCallHandler ( JS_ArangoError ) ; <nl> + <nl> + / / ArangoError is a " sub - class " of Error <nl> + v8 : : Handle < v8 : : Function > ArangoErrorFunc = ft - > GetFunction ( ) ; <nl> + v8 : : Handle < v8 : : Value > ErrorObject = context - > Global ( ) - > Get ( TRI_V8_STRING ( " Error " ) ) ; <nl> + v8 : : Handle < v8 : : Value > ErrorPrototype = ErrorObject - > ToObject ( ) - > Get ( TRI_V8_STRING ( " prototype " ) ) ; <nl> + <nl> + ArangoErrorFunc - > Get ( TRI_V8_SYMBOL ( " prototype " 
) ) - > ToObject ( ) - > SetPrototype ( ErrorPrototype ) ; <nl> + <nl> + TRI_AddGlobalFunctionVocbase ( context , " ArangoError " , ArangoErrorFunc ) ; <nl> <nl> rt = ft - > InstanceTemplate ( ) ; <nl> - v8g - > ErrorTempl = v8 : : Persistent < v8 : : ObjectTemplate > : : New ( rt ) ; <nl> - TRI_AddGlobalFunctionVocbase ( context , " ArangoError " , ft - > GetFunction ( ) ) ; <nl> + v8g - > ErrorTempl = v8 : : Persistent < v8 : : ObjectTemplate > : : New ( isolate , rt ) ; <nl> <nl> / / . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . <nl> / / create the global functions <nl> mmm a / lib / V8 / v8 - utils . h <nl> ppp b / lib / V8 / v8 - utils . h <nl> <nl> / / - - SECTION - - public classes <nl> / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl> <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - / / / @ addtogroup V8Utils <nl> - / / / @ { <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> / / / @ brief Converts an object to a UTF - 8 - encoded and normalized character array . <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> class TRI_Utf8ValueNFC { <nl> void operator = ( const TRI_Utf8ValueNFC & ) ; <nl> } ; <nl> <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - / / / @ } <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - <nl> / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl> / / - - SECTION - - public constants <nl> / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl> <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - / / / @ addtogroup V8Utils <nl> - / / / @ { <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> / / / @ brief temporary path <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> static int const SLOT_CLASS_TYPE = 0 ; <nl> <nl> static int const SLOT_CLASS = 1 ; <nl> <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - / / / @ } <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / 
/ / / / / / / / / / / / / / / / / / / <nl> - <nl> / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl> / / - - SECTION - - public functions <nl> / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl> <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - / / / @ addtogroup V8Utils <nl> - / / / @ { <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> / / / @ brief unwraps a C + + class given a v8 : : Object <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> void TRI_InitV8Utils ( v8 : : Handle < v8 : : Context > , <nl> std : : string const & nodes , <nl> std : : string const & tempPath ) ; <nl> <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - / / / @ } <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - <nl> # endif <nl> <nl> + / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl> + / / - - SECTION - - END - OF - FILE <nl> + / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl> + <nl> / / Local Variables : <nl> / / mode : outline - minor <nl> / / outline - regexp : " / / / @ brief \ \ | / / / { @ inheritDoc } \ \ | / / / @ addtogroup \ \ | / / / @ page \ \ | / / - - SECTION - - \ \ | / / / @ \ \ } " <nl> mmm a / utils / cleanupCFiles <nl> ppp b / utils / cleanupCFiles <nl> sub formatFile ( $ $ $ ) { <nl> if ( $ line = ~ / ^ \ / \ / \ / \ @ addtogroup / ) { <nl> $ lastLine = undef ; <nl> $ skipGroup = 1 ; <nl> - print STDERR " skip group 1 \ n " ; <nl> next ; <nl> } <nl> <nl> if ( $ line = ~ / ^ \ / \ / \ / @ } / ) { <nl> $ lastLine = undef ; <nl> $ skipGroup = 1 ; <nl> - print STDERR " skip group 1a \ n " ; <nl> next ; <nl> } <nl> <nl> if ( $ skipGroup = = 1 ) { <nl> if ( $ line = ~ / ^ \ / \ / \ / \ / \ / \ / / ) { <nl> $ skipGroup = 2 ; <nl> - print STDERR " skip group 2 \ n " ; <nl> - print STDERR " $ line \ n " ; <nl> } <nl> <nl> next ; <nl> sub formatFile ( $ $ $ ) { <nl> if ( $ skipGroup = = 2 ) { <nl> if ( $ line ! ~ / ^ [ \ t ] * $ / ) { <nl> $ skipGroup = 0 ; <nl> - print STDERR " skip group 0 \ n " ; <nl> - print STDERR " $ line \ n " ; <nl> } <nl> else { <nl> next ; <nl>
fixed isolates , made ArangoError internal
arangodb/arangodb
828b9390538b9dfceaa90b7d06fc0fede64b707d
2013-04-10T19:22:09Z
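The central change in the record above is the prototype wiring that turns the native ArangoError constructor into a subclass of the built-in JavaScript Error. A minimal sketch of that pattern, assuming the old-style V8 3.x handle API used in the diff, an already-entered v8::Handle<v8::Context> named context, and the JS_ArangoError call handler defined in the diff:

    // Build the native constructor template.
    v8::Handle<v8::FunctionTemplate> ft = v8::FunctionTemplate::New();
    ft->SetClassName(v8::String::NewSymbol("ArangoError"));
    ft->SetCallHandler(JS_ArangoError);   // fills the error / errorNum / errorMessage keys

    // Fetch Error.prototype from the global object.
    v8::Handle<v8::Function> ctor = ft->GetFunction();
    v8::Handle<v8::Object> errorCtor =
        context->Global()->Get(v8::String::New("Error"))->ToObject();
    v8::Handle<v8::Value> errorProto =
        errorCtor->Get(v8::String::New("prototype"));

    // ArangoError.prototype.__proto__ = Error.prototype, so that
    // (new ArangoError()) instanceof Error holds in JavaScript.
    ctor->Get(v8::String::NewSymbol("prototype"))->ToObject()
        ->SetPrototype(errorProto);

With that chain in place, script code can catch ArangoError with an ordinary instanceof Error check, while the C++ call handler keeps populating the error keys on each construction.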
mmm a / include / rocksdb / utilities / lua / rocks_lua_compaction_filter . h <nl> ppp b / include / rocksdb / utilities / lua / rocks_lua_compaction_filter . h <nl> class RocksLuaCompactionFilter : public rocksdb : : CompactionFilter { <nl> explicit RocksLuaCompactionFilter ( const RocksLuaCompactionFilterOptions & opt ) <nl> : options_ ( opt ) , <nl> lua_state_wrapper_ ( opt . lua_script , opt . libraries ) , <nl> - error_count_ ( 0 ) { } <nl> + error_count_ ( 0 ) , <nl> + name_ ( " " ) { } <nl> <nl> virtual bool Filter ( int level , const Slice & key , const Slice & existing_value , <nl> std : : string * new_value , <nl> class RocksLuaCompactionFilter : public rocksdb : : CompactionFilter { <nl> RocksLuaCompactionFilterOptions options_ ; <nl> LuaStateWrapper lua_state_wrapper_ ; <nl> mutable int error_count_ ; <nl> + mutable std : : string name_ ; <nl> } ; <nl> <nl> } / / namespace lua <nl> mmm a / utilities / lua / rocks_lua_compaction_filter . cc <nl> ppp b / utilities / lua / rocks_lua_compaction_filter . cc <nl> bool RocksLuaCompactionFilter : : Filter ( int level , const Slice & key , <nl> } <nl> <nl> const char * RocksLuaCompactionFilter : : Name ( ) const { <nl> - std : : string name = " " ; <nl> + if ( name_ ! = " " ) { <nl> + return name_ . c_str ( ) ; <nl> + } <nl> auto * lua_state = lua_state_wrapper_ . GetLuaState ( ) ; <nl> / / push the right function into the lua stack <nl> lua_getglobal ( lua_state , kNameFunctionName . c_str ( ) ) ; <nl> const char * RocksLuaCompactionFilter : : Name ( ) const { <nl> lua_tostring ( lua_state , - 1 ) ) ; <nl> / / pops out the lua error from stack <nl> lua_pop ( lua_state , 1 ) ; <nl> - return name . c_str ( ) ; <nl> + return name_ . c_str ( ) ; <nl> } <nl> <nl> / / check the return value <nl> const char * RocksLuaCompactionFilter : : Name ( ) const { <nl> const size_t name_size = lua_strlen ( lua_state , - 1 ) ; <nl> assert ( name_buf [ name_size ] = = ' \ 0 ' ) ; <nl> assert ( strlen ( name_buf ) < = name_size ) ; <nl> - name = name_buf ; <nl> + name_ = name_buf ; <nl> } <nl> lua_pop ( lua_state , 1 ) ; <nl> - return name . c_str ( ) ; <nl> + return name_ . c_str ( ) ; <nl> } <nl> <nl> / * Not yet supported <nl>
Fix heap use after free ASAN / Valgrind
facebook/rocksdb
f39452e81fe77649b8fd1f0898d42418ac267bdd
2016-11-17T20:24:12Z
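The one-line message above hides a classic lifetime bug: the old Name() built the filter name in a function-local std::string and returned name.c_str(), a pointer into storage destroyed on return, which is what ASan and Valgrind flagged. A self-contained sketch of the bug class and of the caching fix applied in the diff (the names here are illustrative, not the RocksDB API):

    #include <string>

    struct Filter {
      // BROKEN: returns a pointer into a local that is destroyed on return,
      // so the caller reads freed heap memory.
      const char* NameBroken() const {
        std::string name = computeName();
        return name.c_str();             // dangling as soon as we return
      }

      // FIXED: cache the computed name in a member whose lifetime matches
      // the object, and hand out a pointer into that.
      const char* Name() const {
        if (name_.empty()) name_ = computeName();
        return name_.c_str();
      }

     private:
      std::string computeName() const { return "RocksLuaCompactionFilter"; }
      mutable std::string name_;         // mutable: Name() stays const
    };

Making the cache member mutable keeps Name() const, matching the CompactionFilter interface, and as a side effect the Lua name function only needs to run once instead of on every call.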
mmm a / dbms / programs / server / CMakeLists . txt <nl> ppp b / dbms / programs / server / CMakeLists . txt <nl> <nl> set ( CLICKHOUSE_SERVER_SOURCES <nl> $ { CMAKE_CURRENT_SOURCE_DIR } / HTTPHandler . cpp <nl> + $ { CMAKE_CURRENT_SOURCE_DIR } / HTTPHandlerFactory . cpp <nl> $ { CMAKE_CURRENT_SOURCE_DIR } / InterserverIOHTTPHandler . cpp <nl> $ { CMAKE_CURRENT_SOURCE_DIR } / MetricsTransmitter . cpp <nl> $ { CMAKE_CURRENT_SOURCE_DIR } / NotFoundHandler . cpp <nl> $ { CMAKE_CURRENT_SOURCE_DIR } / PingRequestHandler . cpp <nl> + $ { CMAKE_CURRENT_SOURCE_DIR } / PrometheusMetricsWriter . cpp <nl> + $ { CMAKE_CURRENT_SOURCE_DIR } / PrometheusRequestHandler . cpp <nl> $ { CMAKE_CURRENT_SOURCE_DIR } / ReplicasStatusHandler . cpp <nl> $ { CMAKE_CURRENT_SOURCE_DIR } / RootRequestHandler . cpp <nl> $ { CMAKE_CURRENT_SOURCE_DIR } / Server . cpp <nl> new file mode 100644 <nl> index 00000000000 . . ab8fb4f7336 <nl> mmm / dev / null <nl> ppp b / dbms / programs / server / HTTPHandlerFactory . cpp <nl> <nl> + # include " HTTPHandlerFactory . h " <nl> + <nl> + <nl> + namespace DB <nl> + { <nl> + <nl> + HTTPRequestHandlerFactoryMain : : HTTPRequestHandlerFactoryMain ( IServer & server_ , const std : : string & name_ ) <nl> + : server ( server_ ) , log ( & Logger : : get ( name_ ) ) , name ( name_ ) <nl> + { <nl> + } <nl> + <nl> + Poco : : Net : : HTTPRequestHandler * HTTPRequestHandlerFactoryMain : : createRequestHandler ( <nl> + const Poco : : Net : : HTTPServerRequest & request ) / / override <nl> + { <nl> + LOG_TRACE ( log , " HTTP Request for " < < name < < " . " <nl> + < < " Method : " <nl> + < < request . getMethod ( ) <nl> + < < " , Address : " <nl> + < < request . clientAddress ( ) . toString ( ) <nl> + < < " , User - Agent : " <nl> + < < ( request . has ( " User - Agent " ) ? request . get ( " User - Agent " ) : " none " ) <nl> + < < ( request . hasContentLength ( ) ? ( " , Length : " + std : : to_string ( request . getContentLength ( ) ) ) : ( " " ) ) <nl> + < < " , Content Type : " < < request . getContentType ( ) <nl> + < < " , Transfer Encoding : " < < request . getTransferEncoding ( ) ) ; <nl> + <nl> + for ( auto & handlerFactory : child_handler_factories ) <nl> + { <nl> + auto handler = handlerFactory - > createRequestHandler ( request ) ; <nl> + if ( handler ! = nullptr ) <nl> + return handler ; <nl> + } <nl> + <nl> + if ( request . getMethod ( ) = = Poco : : Net : : HTTPRequest : : HTTP_GET <nl> + | | request . getMethod ( ) = = Poco : : Net : : HTTPRequest : : HTTP_HEAD <nl> + | | request . getMethod ( ) = = Poco : : Net : : HTTPRequest : : HTTP_POST ) <nl> + { <nl> + return new NotFoundHandler ; <nl> + } <nl> + <nl> + return nullptr ; <nl> + } <nl> + <nl> + } <nl> mmm a / dbms / programs / server / HTTPHandlerFactory . h <nl> ppp b / dbms / programs / server / HTTPHandlerFactory . h <nl> <nl> # include " InterserverIOHTTPHandler . h " <nl> # include " NotFoundHandler . h " <nl> # include " PingRequestHandler . h " <nl> + # include " PrometheusRequestHandler . h " <nl> # include " ReplicasStatusHandler . h " <nl> # include " RootRequestHandler . 
h " <nl> <nl> <nl> namespace DB <nl> { <nl> <nl> - template < typename HandlerType > <nl> - class HTTPRequestHandlerFactory : public Poco : : Net : : HTTPRequestHandlerFactory <nl> + / / / Handle request using child handlers <nl> + class HTTPRequestHandlerFactoryMain : public Poco : : Net : : HTTPRequestHandlerFactory <nl> { <nl> private : <nl> + using TThis = HTTPRequestHandlerFactoryMain ; <nl> + <nl> IServer & server ; <nl> Logger * log ; <nl> std : : string name ; <nl> <nl> + std : : vector < std : : unique_ptr < Poco : : Net : : HTTPRequestHandlerFactory > > child_handler_factories ; <nl> + <nl> public : <nl> - HTTPRequestHandlerFactory ( IServer & server_ , const std : : string & name_ ) : server ( server_ ) , log ( & Logger : : get ( name_ ) ) , name ( name_ ) <nl> + HTTPRequestHandlerFactoryMain ( IServer & server_ , const std : : string & name_ ) ; <nl> + <nl> + Poco : : Net : : HTTPRequestHandler * createRequestHandler ( const Poco : : Net : : HTTPServerRequest & request ) override ; <nl> + <nl> + template < typename T , typename . . . TArgs > <nl> + TThis * addHandler ( TArgs & & . . . args ) <nl> { <nl> + child_handler_factories . emplace_back ( std : : make_unique < T > ( server , std : : forward < TArgs > ( args ) . . . ) ) ; <nl> + return this ; <nl> } <nl> + } ; <nl> + <nl> + <nl> + / / / Handle POST or GET with params <nl> + template < typename HandleType > <nl> + class HTTPQueryRequestHandlerFactory : public Poco : : Net : : HTTPRequestHandlerFactory <nl> + { <nl> + private : <nl> + IServer & server ; <nl> + <nl> + public : <nl> + HTTPQueryRequestHandlerFactory ( IServer & server_ ) : server ( server_ ) { } <nl> <nl> Poco : : Net : : HTTPRequestHandler * createRequestHandler ( const Poco : : Net : : HTTPServerRequest & request ) override <nl> { <nl> - LOG_TRACE ( log , " HTTP Request for " < < name < < " . " <nl> - < < " Method : " <nl> - < < request . getMethod ( ) <nl> - < < " , Address : " <nl> - < < request . clientAddress ( ) . toString ( ) <nl> - < < " , User - Agent : " <nl> - < < ( request . has ( " User - Agent " ) ? request . get ( " User - Agent " ) : " none " ) <nl> - < < ( request . hasContentLength ( ) ? ( " , Length : " + std : : to_string ( request . getContentLength ( ) ) ) : ( " " ) ) <nl> - < < " , Content Type : " < < request . getContentType ( ) <nl> - < < " , Transfer Encoding : " < < request . getTransferEncoding ( ) ) ; <nl> - <nl> - const auto & uri = request . getURI ( ) ; <nl> - <nl> - if ( request . getMethod ( ) = = Poco : : Net : : HTTPRequest : : HTTP_GET | | request . getMethod ( ) = = Poco : : Net : : HTTPRequest : : HTTP_HEAD ) <nl> - { <nl> - if ( uri = = " / " ) <nl> - return new RootRequestHandler ( server ) ; <nl> - if ( uri = = " / ping " ) <nl> - return new PingRequestHandler ( server ) ; <nl> - else if ( startsWith ( uri , " / replicas_status " ) ) <nl> - return new ReplicasStatusHandler ( server . context ( ) ) ; <nl> - } <nl> - <nl> - if ( uri . find ( ' ? ' ) ! = std : : string : : npos | | request . getMethod ( ) = = Poco : : Net : : HTTPRequest : : HTTP_POST ) <nl> - { <nl> - return new HandlerType ( server ) ; <nl> - } <nl> - <nl> - if ( request . getMethod ( ) = = Poco : : Net : : HTTPRequest : : HTTP_GET | | request . getMethod ( ) = = Poco : : Net : : HTTPRequest : : HTTP_HEAD <nl> - | | request . getMethod ( ) = = Poco : : Net : : HTTPRequest : : HTTP_POST ) <nl> - { <nl> - return new NotFoundHandler ; <nl> - } <nl> + if ( request . getURI ( ) . find ( ' ? ' ) ! = std : : string : : npos | | request . 
getMethod ( ) = = Poco : : Net : : HTTPRequest : : HTTP_POST ) <nl> + return new HandleType ( server ) ; <nl> + return nullptr ; <nl> + } <nl> + } ; <nl> + <nl> + <nl> + / / / Handle GET or HEAD endpoint on specified path <nl> + template < typename TGetEndpoint > <nl> + class HTTPGetRequestHandlerFactory : public Poco : : Net : : HTTPRequestHandlerFactory <nl> + { <nl> + private : <nl> + IServer & server ; <nl> + public : <nl> + HTTPGetRequestHandlerFactory ( IServer & server_ ) : server ( server_ ) { } <nl> + <nl> + Poco : : Net : : HTTPRequestHandler * createRequestHandler ( const Poco : : Net : : HTTPServerRequest & request ) override <nl> + { <nl> + auto & method = request . getMethod ( ) ; <nl> + if ( ! ( method = = Poco : : Net : : HTTPRequest : : HTTP_GET | | method = = Poco : : Net : : HTTPRequest : : HTTP_HEAD ) ) <nl> + return nullptr ; <nl> + <nl> + auto & uri = request . getURI ( ) ; <nl> + bool uri_match = TGetEndpoint : : strict_path ? uri = = TGetEndpoint : : path : startsWith ( uri , TGetEndpoint : : path ) ; <nl> + if ( uri_match ) <nl> + return new typename TGetEndpoint : : HandleType ( server ) ; <nl> <nl> return nullptr ; <nl> } <nl> } ; <nl> <nl> - using HTTPHandlerFactory = HTTPRequestHandlerFactory < HTTPHandler > ; <nl> - using InterserverIOHTTPHandlerFactory = HTTPRequestHandlerFactory < InterserverIOHTTPHandler > ; <nl> + <nl> + struct RootEndpoint <nl> + { <nl> + static constexpr auto path = " / " ; <nl> + static constexpr auto strict_path = true ; <nl> + using HandleType = RootRequestHandler ; <nl> + } ; <nl> + <nl> + struct PingEndpoint <nl> + { <nl> + static constexpr auto path = " / ping " ; <nl> + static constexpr auto strict_path = true ; <nl> + using HandleType = PingRequestHandler ; <nl> + } ; <nl> + <nl> + struct ReplicasStatusEndpoint <nl> + { <nl> + static constexpr auto path = " / replicas_status " ; <nl> + static constexpr auto strict_path = false ; <nl> + using HandleType = ReplicasStatusHandler ; <nl> + } ; <nl> + <nl> + using HTTPRootRequestHandlerFactory = HTTPGetRequestHandlerFactory < RootEndpoint > ; <nl> + using HTTPPingRequestHandlerFactory = HTTPGetRequestHandlerFactory < PingEndpoint > ; <nl> + using HTTPReplicasStatusRequestHandlerFactory = HTTPGetRequestHandlerFactory < ReplicasStatusEndpoint > ; <nl> + <nl> + template < typename HandleType > <nl> + HTTPRequestHandlerFactoryMain * createDefaultHandlerFatory ( IServer & server , const std : : string & name ) <nl> + { <nl> + auto handlerFactory = new HTTPRequestHandlerFactoryMain ( server , name ) ; <nl> + handlerFactory - > addHandler < HTTPRootRequestHandlerFactory > ( ) <nl> + - > addHandler < HTTPPingRequestHandlerFactory > ( ) <nl> + - > addHandler < HTTPReplicasStatusRequestHandlerFactory > ( ) <nl> + - > addHandler < HTTPQueryRequestHandlerFactory < HandleType > > ( ) ; <nl> + return handlerFactory ; <nl> + } <nl> + <nl> <nl> } <nl> new file mode 100644 <nl> index 00000000000 . . b45e66f7056 <nl> mmm / dev / null <nl> ppp b / dbms / programs / server / PrometheusMetricsWriter . cpp <nl> <nl> + # include " PrometheusMetricsWriter . h " <nl> + <nl> + # include < IO / WriteHelpers . h > <nl> + <nl> + namespace <nl> + { <nl> + <nl> + template < typename T > <nl> + void writeOutLine ( DB : : WriteBuffer & wb , T & & val ) <nl> + { <nl> + DB : : writeText ( std : : forward < T > ( val ) , wb ) ; <nl> + DB : : writeChar ( ' \ n ' , wb ) ; <nl> + } <nl> + <nl> + template < typename T , typename . . . TArgs > <nl> + void writeOutLine ( DB : : WriteBuffer & wb , T & & val , TArgs & & . . . 
args ) <nl> + { <nl> + DB : : writeText ( std : : forward < T > ( val ) , wb ) ; <nl> + DB : : writeChar ( ' ' , wb ) ; <nl> + writeOutLine ( wb , std : : forward < TArgs > ( args ) . . . ) ; <nl> + } <nl> + <nl> + } <nl> + <nl> + <nl> + namespace DB <nl> + { <nl> + <nl> + PrometheusMetricsWriter : : PrometheusMetricsWriter ( <nl> + const Poco : : Util : : AbstractConfiguration & config , const std : : string & config_name , <nl> + const AsynchronousMetrics & async_metrics_ ) <nl> + : async_metrics ( async_metrics_ ) <nl> + , send_events ( config . getBool ( config_name + " . events " , true ) ) <nl> + , send_metrics ( config . getBool ( config_name + " . metrics " , true ) ) <nl> + , send_asynchronous_metrics ( config . getBool ( config_name + " . asynchronous_metrics " , true ) ) <nl> + { <nl> + } <nl> + <nl> + void PrometheusMetricsWriter : : write ( WriteBuffer & wb ) const <nl> + { <nl> + if ( send_events ) <nl> + { <nl> + for ( size_t i = 0 , end = ProfileEvents : : end ( ) ; i < end ; + + i ) <nl> + { <nl> + const auto counter = ProfileEvents : : global_counters [ i ] . load ( std : : memory_order_relaxed ) ; <nl> + <nl> + std : : string metric_name { ProfileEvents : : getName ( static_cast < ProfileEvents : : Event > ( i ) ) } ; <nl> + std : : string metric_doc { ProfileEvents : : getDocumentation ( static_cast < ProfileEvents : : Event > ( i ) ) } ; <nl> + <nl> + std : : string key { profile_events_prefix + metric_name } ; <nl> + <nl> + writeOutLine ( wb , " # HELP " , key , metric_doc ) ; <nl> + writeOutLine ( wb , " # TYPE " , key , " counter " ) ; <nl> + writeOutLine ( wb , key , counter ) ; <nl> + } <nl> + } <nl> + <nl> + if ( send_metrics ) <nl> + { <nl> + for ( size_t i = 0 , end = CurrentMetrics : : end ( ) ; i < end ; + + i ) <nl> + { <nl> + const auto value = CurrentMetrics : : values [ i ] . load ( std : : memory_order_relaxed ) ; <nl> + <nl> + std : : string metric_name { CurrentMetrics : : getName ( static_cast < CurrentMetrics : : Metric > ( i ) ) } ; <nl> + std : : string metric_doc { CurrentMetrics : : getDocumentation ( static_cast < CurrentMetrics : : Metric > ( i ) ) } ; <nl> + <nl> + std : : string key { current_metrics_prefix + metric_name } ; <nl> + <nl> + writeOutLine ( wb , " # HELP " , key , metric_doc ) ; <nl> + writeOutLine ( wb , " # TYPE " , key , " gauge " ) ; <nl> + writeOutLine ( wb , key , value ) ; <nl> + } <nl> + } <nl> + <nl> + if ( send_asynchronous_metrics ) <nl> + { <nl> + auto async_metrics_values = async_metrics . getValues ( ) ; <nl> + for ( const auto & name_value : async_metrics_values ) <nl> + { <nl> + std : : string key { asynchronous_metrics_prefix + name_value . first } ; <nl> + auto value = name_value . second ; <nl> + <nl> + / / TODO : add HELP section ? asynchronous_metrics contains only key and value <nl> + writeOutLine ( wb , " # TYPE " , key , " gauge " ) ; <nl> + writeOutLine ( wb , key , value ) ; <nl> + } <nl> + } <nl> + } <nl> + <nl> + } <nl> new file mode 100644 <nl> index 00000000000 . . 1b253b2b954 <nl> mmm / dev / null <nl> ppp b / dbms / programs / server / PrometheusMetricsWriter . h <nl> <nl> + # pragma once <nl> + <nl> + # include < string > <nl> + <nl> + # include < Interpreters / AsynchronousMetrics . h > <nl> + <nl> + # include < IO / WriteBuffer . h > <nl> + <nl> + # include < Poco / Util / AbstractConfiguration . 
h > <nl> + <nl> + namespace DB <nl> + { <nl> + <nl> + / / / Write metrics in Prometheus format <nl> + class PrometheusMetricsWriter <nl> + { <nl> + public : <nl> + PrometheusMetricsWriter ( <nl> + const Poco : : Util : : AbstractConfiguration & config , const std : : string & config_name , <nl> + const AsynchronousMetrics & async_metrics_ ) ; <nl> + <nl> + void write ( WriteBuffer & wb ) const ; <nl> + <nl> + private : <nl> + const AsynchronousMetrics & async_metrics ; <nl> + <nl> + const bool send_events ; <nl> + const bool send_metrics ; <nl> + const bool send_asynchronous_metrics ; <nl> + <nl> + static inline constexpr auto profile_events_prefix = " ClickHouseProfileEvents " ; <nl> + static inline constexpr auto current_metrics_prefix = " ClickHouseMetrics " ; <nl> + static inline constexpr auto asynchronous_metrics_prefix = " ClickHouseAsyncMetrics " ; <nl> + } ; <nl> + <nl> + } <nl> new file mode 100644 <nl> index 00000000000 . . b5a48d13b64 <nl> mmm / dev / null <nl> ppp b / dbms / programs / server / PrometheusRequestHandler . cpp <nl> <nl> + # include " PrometheusRequestHandler . h " <nl> + <nl> + # include < IO / HTTPCommon . h > <nl> + <nl> + # include < Common / Exception . h > <nl> + <nl> + # include < Poco / Net / HTTPServerRequest . h > <nl> + # include < Poco / Net / HTTPServerResponse . h > <nl> + <nl> + # include < Common / ProfileEvents . h > <nl> + # include < Common / CurrentMetrics . h > <nl> + <nl> + # include < IO / WriteBufferFromHTTPServerResponse . h > <nl> + <nl> + <nl> + namespace DB <nl> + { <nl> + <nl> + void PrometheusRequestHandler : : handleRequest ( <nl> + Poco : : Net : : HTTPServerRequest & request , <nl> + Poco : : Net : : HTTPServerResponse & response ) <nl> + { <nl> + try <nl> + { <nl> + const auto & config = server . config ( ) ; <nl> + unsigned keep_alive_timeout = config . getUInt ( " keep_alive_timeout " , 10 ) ; <nl> + <nl> + setResponseDefaultHeaders ( response , keep_alive_timeout ) ; <nl> + <nl> + response . setContentType ( " text / plain ; version = 0 . 0 . 4 ; charset = UTF - 8 " ) ; <nl> + <nl> + auto wb = WriteBufferFromHTTPServerResponse ( request , response , keep_alive_timeout ) ; <nl> + metrics_writer . write ( wb ) ; <nl> + wb . finalize ( ) ; <nl> + } <nl> + catch ( . . . ) <nl> + { <nl> + tryLogCurrentException ( " PrometheusRequestHandler " ) ; <nl> + } <nl> + } <nl> + <nl> + } <nl> new file mode 100644 <nl> index 00000000000 . . 439a01c7d6f <nl> mmm / dev / null <nl> ppp b / dbms / programs / server / PrometheusRequestHandler . h <nl> <nl> + # pragma once <nl> + <nl> + # include " IServer . h " <nl> + # include " PrometheusMetricsWriter . h " <nl> + <nl> + # include < Poco / Net / HTTPServerRequest . h > <nl> + # include < Poco / Net / HTTPServerResponse . h > <nl> + # include < Poco / Net / HTTPRequestHandler . h > <nl> + # include < Poco / Net / HTTPRequestHandlerFactory . 
h > <nl> + <nl> + namespace DB <nl> + { <nl> + <nl> + class PrometheusRequestHandler : public Poco : : Net : : HTTPRequestHandler <nl> + { <nl> + private : <nl> + IServer & server ; <nl> + const PrometheusMetricsWriter & metrics_writer ; <nl> + <nl> + public : <nl> + explicit PrometheusRequestHandler ( IServer & server_ , PrometheusMetricsWriter & metrics_writer_ ) <nl> + : server ( server_ ) <nl> + , metrics_writer ( metrics_writer_ ) <nl> + { <nl> + } <nl> + <nl> + void handleRequest ( <nl> + Poco : : Net : : HTTPServerRequest & request , <nl> + Poco : : Net : : HTTPServerResponse & response ) override ; <nl> + } ; <nl> + <nl> + <nl> + template < typename HandlerType > <nl> + class PrometeusRequestHandlerFactory : public Poco : : Net : : HTTPRequestHandlerFactory <nl> + { <nl> + private : <nl> + IServer & server ; <nl> + std : : string endpoint_path ; <nl> + PrometheusMetricsWriter metrics_writer ; <nl> + <nl> + public : <nl> + PrometeusRequestHandlerFactory ( IServer & server_ , const AsynchronousMetrics & async_metrics_ ) <nl> + : server ( server_ ) <nl> + , endpoint_path ( server_ . config ( ) . getString ( " prometheus . endpoint " , " / metrics " ) ) <nl> + , metrics_writer ( server_ . config ( ) , " prometheus " , async_metrics_ ) <nl> + { <nl> + } <nl> + <nl> + Poco : : Net : : HTTPRequestHandler * createRequestHandler ( const Poco : : Net : : HTTPServerRequest & request ) override <nl> + { <nl> + if ( request . getMethod ( ) = = Poco : : Net : : HTTPRequest : : HTTP_GET <nl> + & & request . getURI ( ) = = endpoint_path ) <nl> + return new HandlerType ( server , metrics_writer ) ; <nl> + <nl> + return nullptr ; <nl> + } <nl> + } ; <nl> + <nl> + using PrometeusHandlerFactory = PrometeusRequestHandlerFactory < PrometheusRequestHandler > ; <nl> + <nl> + } <nl> mmm a / dbms / programs / server / ReplicasStatusHandler . cpp <nl> ppp b / dbms / programs / server / ReplicasStatusHandler . cpp <nl> namespace DB <nl> { <nl> <nl> <nl> - ReplicasStatusHandler : : ReplicasStatusHandler ( Context & context_ ) <nl> - : context ( context_ ) <nl> + ReplicasStatusHandler : : ReplicasStatusHandler ( IServer & server ) <nl> + : context ( server . context ( ) ) <nl> { <nl> } <nl> <nl> mmm a / dbms / programs / server / ReplicasStatusHandler . h <nl> ppp b / dbms / programs / server / ReplicasStatusHandler . h <nl> <nl> # pragma once <nl> <nl> + # include " IServer . h " <nl> + <nl> # include < Poco / Net / HTTPRequestHandler . h > <nl> <nl> <nl> class ReplicasStatusHandler : public Poco : : Net : : HTTPRequestHandler <nl> Context & context ; <nl> <nl> public : <nl> - explicit ReplicasStatusHandler ( Context & context_ ) ; <nl> + explicit ReplicasStatusHandler ( IServer & server ) ; <nl> <nl> void handleRequest ( Poco : : Net : : HTTPServerRequest & request , Poco : : Net : : HTTPServerResponse & response ) override ; <nl> } ; <nl> mmm a / dbms / programs / server / Server . cpp <nl> ppp b / dbms / programs / server / Server . cpp <nl> int Server : : main ( const std : : vector < std : : string > & / * args * / ) <nl> return address ; <nl> } ; <nl> <nl> + / / / This object will periodically calculate some metrics . 
<nl> + AsynchronousMetrics async_metrics ( * global_context ) ; <nl> + attachSystemTablesAsync ( * global_context - > getDatabase ( " system " ) , async_metrics ) ; <nl> + <nl> for ( const auto & listen_host : listen_hosts ) <nl> { <nl> auto create_server = [ & ] ( const char * port_name , auto & & func ) <nl> int Server : : main ( const std : : vector < std : : string > & / * args * / ) <nl> auto address = socket_bind_listen ( socket , listen_host , port ) ; <nl> socket . setReceiveTimeout ( settings . http_receive_timeout ) ; <nl> socket . setSendTimeout ( settings . http_send_timeout ) ; <nl> + auto handler_factory = createDefaultHandlerFatory < HTTPHandler > ( * this , " HTTPHandler - factory " ) ; <nl> + if ( config ( ) . has ( " prometheus " ) & & config ( ) . getInt ( " prometheus . port " , 0 ) = = 0 ) <nl> + handler_factory - > addHandler < PrometeusHandlerFactory > ( async_metrics ) ; <nl> + <nl> servers . emplace_back ( std : : make_unique < Poco : : Net : : HTTPServer > ( <nl> - new HTTPHandlerFactory ( * this , " HTTPHandler - factory " ) , <nl> + handler_factory , <nl> server_pool , <nl> socket , <nl> http_params ) ) ; <nl> int Server : : main ( const std : : vector < std : : string > & / * args * / ) <nl> socket . setReceiveTimeout ( settings . http_receive_timeout ) ; <nl> socket . setSendTimeout ( settings . http_send_timeout ) ; <nl> servers . emplace_back ( std : : make_unique < Poco : : Net : : HTTPServer > ( <nl> - new HTTPHandlerFactory ( * this , " HTTPSHandler - factory " ) , <nl> + createDefaultHandlerFatory < HTTPHandler > ( * this , " HTTPSHandler - factory " ) , <nl> server_pool , <nl> socket , <nl> http_params ) ) ; <nl> int Server : : main ( const std : : vector < std : : string > & / * args * / ) <nl> socket . setReceiveTimeout ( settings . http_receive_timeout ) ; <nl> socket . setSendTimeout ( settings . http_send_timeout ) ; <nl> servers . emplace_back ( std : : make_unique < Poco : : Net : : HTTPServer > ( <nl> - new InterserverIOHTTPHandlerFactory ( * this , " InterserverIOHTTPHandler - factory " ) , <nl> + createDefaultHandlerFatory < InterserverIOHTTPHandler > ( * this , " InterserverIOHTTPHandler - factory " ) , <nl> server_pool , <nl> socket , <nl> http_params ) ) ; <nl> int Server : : main ( const std : : vector < std : : string > & / * args * / ) <nl> socket . setReceiveTimeout ( settings . http_receive_timeout ) ; <nl> socket . setSendTimeout ( settings . http_send_timeout ) ; <nl> servers . emplace_back ( std : : make_unique < Poco : : Net : : HTTPServer > ( <nl> - new InterserverIOHTTPHandlerFactory ( * this , " InterserverIOHTTPHandler - factory " ) , <nl> + createDefaultHandlerFatory < InterserverIOHTTPHandler > ( * this , " InterserverIOHTTPHandler - factory " ) , <nl> server_pool , <nl> socket , <nl> http_params ) ) ; <nl> int Server : : main ( const std : : vector < std : : string > & / * args * / ) <nl> <nl> LOG_INFO ( log , " Listening for MySQL compatibility protocol : " + address . toString ( ) ) ; <nl> } ) ; <nl> + <nl> + / / / Prometheus ( if defined and not setup yet with http_port ) <nl> + create_server ( " prometheus . port " , [ & ] ( UInt16 port ) <nl> + { <nl> + Poco : : Net : : ServerSocket socket ; <nl> + auto address = socket_bind_listen ( socket , listen_host , port ) ; <nl> + socket . setReceiveTimeout ( settings . http_receive_timeout ) ; <nl> + socket . setSendTimeout ( settings . 
http_send_timeout ) ; <nl> + auto handler_factory = new HTTPRequestHandlerFactoryMain ( * this , " PrometheusHandler - factory " ) ; <nl> + handler_factory - > addHandler < PrometeusHandlerFactory > ( async_metrics ) ; <nl> + servers . emplace_back ( std : : make_unique < Poco : : Net : : HTTPServer > ( <nl> + handler_factory , <nl> + server_pool , <nl> + socket , <nl> + http_params ) ) ; <nl> + <nl> + LOG_INFO ( log , " Listening http : / / " + address . toString ( ) ) ; <nl> + } ) ; <nl> } <nl> <nl> if ( servers . empty ( ) ) <nl> int Server : : main ( const std : : vector < std : : string > & / * args * / ) <nl> throw ; <nl> } <nl> <nl> - / / / This object will periodically calculate some metrics . <nl> - AsynchronousMetrics async_metrics ( * global_context ) ; <nl> - attachSystemTablesAsync ( * global_context - > getDatabase ( " system " ) , async_metrics ) ; <nl> - <nl> std : : vector < std : : unique_ptr < MetricsTransmitter > > metrics_transmitters ; <nl> for ( const auto & graphite_key : DB : : getMultipleKeysFromConfig ( config ( ) , " " , " graphite " ) ) <nl> { <nl> mmm a / dbms / programs / server / config . xml <nl> ppp b / dbms / programs / server / config . xml <nl> <nl> < / graphite > <nl> - - > <nl> <nl> + < ! - - Serve endpoint for Prometheus monitoring . - - > <nl> + < ! - - <nl> + endpoint - metrics path ( relative to root , starting with " / " ) <nl> + port - port to set up the server . If not defined or 0 then http_port is used <nl> + metrics - send data from table system . metrics <nl> + events - send data from table system . events <nl> + asynchronous_metrics - send data from table system . asynchronous_metrics <nl> + - - > <nl> + < ! - - <nl> + < prometheus > <nl> + < endpoint > / metrics < / endpoint > <nl> + < port > 8001 < / port > <nl> + <nl> + < metrics > true < / metrics > <nl> + < events > true < / events > <nl> + < asynchronous_metrics > true < / asynchronous_metrics > <nl> + < / prometheus > <nl> + - - > <nl> <nl> < ! - - Query log . Used only for queries with setting log_queries = 1 . - - > <nl> < query_log > <nl> new file mode 100644 <nl> index 00000000000 . . e69de29bb2d <nl> new file mode 100644 <nl> index 00000000000 . . 6e31324eac2 <nl> mmm / dev / null <nl> ppp b / dbms / tests / integration / test_prometheus_endpoint / configs / prom_conf . xml <nl> <nl> + < yandex > <nl> + < http_port > 8123 < / http_port > <nl> + < tcp_port > 9000 < / tcp_port > <nl> + <nl> + < prometheus > <nl> + < endpoint > / metrics < / endpoint > <nl> + < port > 8001 < / port > <nl> + <nl> + < metrics > true < / metrics > <nl> + < events > true < / events > <nl> + < asynchronous_metrics > true < / asynchronous_metrics > <nl> + < / prometheus > <nl> + < / yandex > <nl> new file mode 100644 <nl> index 00000000000 . . 10f49c23072 <nl> mmm / dev / null <nl> ppp b / dbms / tests / integration / test_prometheus_endpoint / test . py <nl> <nl> + from __future__ import print_function <nl> + import pytest <nl> + <nl> + import re <nl> + import requests <nl> + <nl> + from helpers . cluster import ClickHouseCluster <nl> + <nl> + cluster = ClickHouseCluster ( __file__ ) <nl> + node = cluster . add_instance ( ' node ' , main_configs = [ ' configs / prom_conf . xml ' ] ) <nl> + <nl> + @ pytest . fixture ( scope = " module " ) <nl> + def start_cluster ( ) : <nl> + try : <nl> + cluster . start ( ) <nl> + yield cluster <nl> + finally : <nl> + cluster . 
shutdown ( ) <nl> + <nl> + <nl> + def parse_response_line ( line ) : <nl> + allowed_prefixes = [ <nl> + " ClickHouse " , <nl> + " # HELP " , <nl> + " # TYPE " , <nl> + ] <nl> + assert any ( line . startswith ( prefix ) for prefix in allowed_prefixes ) , line <nl> + <nl> + if line . startswith ( " # " ) : <nl> + return { } <nl> + match = re . match ( ' ^ ( [ a - zA - Z_ : ] [ a - zA - Z0 - 9_ : ] + ) ( \ { . * \ } ) ? ( \ d + ) ' , line ) <nl> + assert match , line <nl> + name , _ , val = match . groups ( ) <nl> + return { name : int ( val ) } <nl> + <nl> + <nl> + def get_and_check_metrics ( ) : <nl> + response = requests . get ( " http : / / { host } : { port } / metrics " . format ( <nl> + host = node . ip_address , port = 8001 ) , allow_redirects = False ) <nl> + <nl> + if response . status_code ! = 200 : <nl> + response . raise_for_status ( ) <nl> + <nl> + assert response . headers [ ' content - type ' ] . startswith ( ' text / plain ' ) <nl> + <nl> + results = { } <nl> + for resp_line in response . text . split ( ' \ n ' ) : <nl> + resp_line = resp_line . rstrip ( ) <nl> + if not resp_line : <nl> + continue <nl> + res = parse_response_line ( resp_line ) <nl> + results . update ( res ) <nl> + return results <nl> + <nl> + <nl> + def test_prometheus_endpoint ( start_cluster ) : <nl> + <nl> + metrics_dict = get_and_check_metrics ( ) <nl> + assert metrics_dict [ ' ClickHouseProfileEventsQuery ' ] > = 0 <nl> + prev_query_count = metrics_dict [ ' ClickHouseProfileEventsQuery ' ] <nl> + <nl> + resp = node . query ( " SELECT 1 " ) <nl> + resp = node . query ( " SELECT 2 " ) <nl> + resp = node . query ( " SELECT 3 " ) <nl> + <nl> + metrics_dict = get_and_check_metrics ( ) <nl> + assert metrics_dict [ ' ClickHouseProfileEventsQuery ' ] > = prev_query_count + 3 <nl>
Merge pull request from Vdimir / prometheus - metrics - 7369
ClickHouse/ClickHouse
d498e14ff8f5412b021961c901e9a7b352d7c42f
2019-12-15T22:16:52Z
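The ClickHouse record above centers on PrometheusMetricsWriter, which emits the Prometheus text exposition format: a "# HELP" line, a "# TYPE" line, then a bare "name value" line per metric. What follows is a minimal standalone sketch of that format; writeGauge, the std::ostream parameter, and the sample metric names are invented stand-ins, not ClickHouse's WriteBuffer API.

#include <iostream>
#include <map>
#include <string>

// Emit one gauge in the Prometheus text exposition format, mirroring the
// HELP / TYPE / value triple written by PrometheusMetricsWriter above.
static void writeGauge(std::ostream & out, const std::string & key,
                       const std::string & doc, double value)
{
    out << "# HELP " << key << " " << doc << "\n";
    out << "# TYPE " << key << " gauge\n";
    out << key << " " << value << "\n";
}

int main()
{
    std::map<std::string, double> sample = {
        {"ClickHouseMetrics_Query", 3},            // hypothetical values
        {"ClickHouseMetrics_TCPConnection", 7},
    };
    for (const auto & kv : sample)
        writeGauge(std::cout, kv.first, "sampled gauge", kv.second);
    return 0;
}

ProfileEvents counters get "# TYPE ... counter" instead of "gauge", and the asynchronous metrics above skip the HELP line entirely; otherwise the shape is identical.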
mmm a / db / dbeval . cpp <nl> ppp b / db / dbeval . cpp <nl> namespace mongo { <nl> return false ; <nl> } <nl> <nl> - auto_ptr < Scope > s ( globalScriptEngine - > createScope ( ) ) ; <nl> - <nl> + auto_ptr < Scope > s = globalScriptEngine - > getPooledScope ( ns ) ; <nl> ScriptingFunction f = s - > createFunction ( code ) ; <nl> if ( f = = 0 ) { <nl> errmsg = ( string ) " compile failed : " + s - > getError ( ) ; <nl> mmm a / db / matcher . cpp <nl> ppp b / db / matcher . cpp <nl> namespace mongo { <nl> class Where { <nl> public : <nl> Where ( ) { <nl> - scope = 0 ; <nl> jsScope = 0 ; <nl> } <nl> ~ Where ( ) { <nl> <nl> - if ( scope ) <nl> - delete scope ; <nl> - <nl> - if ( jsScope ) <nl> + if ( jsScope ) { <nl> delete jsScope ; <nl> - scope = 0 ; <nl> + } <nl> func = 0 ; <nl> } <nl> <nl> - Scope * scope ; <nl> + auto_ptr < Scope > scope ; <nl> ScriptingFunction func ; <nl> BSONObj * jsScope ; <nl> <nl> void setFunc ( const char * code ) { <nl> - massert ( " scope has to be created first ! " , scope ) ; <nl> + massert ( " scope has to be created first ! " , scope . get ( ) ) ; <nl> func = scope - > createFunction ( code ) ; <nl> } <nl> <nl> namespace mongo { <nl> where = new Where ( ) ; <nl> uassert ( " $ where query , but no script engine " , globalScriptEngine ) ; <nl> <nl> - where - > scope = globalScriptEngine - > createScope ( ) ; <nl> + assert ( curNs ) ; <nl> + where - > scope = globalScriptEngine - > getPooledScope ( curNs ) ; <nl> where - > scope - > localConnect ( database - > name . c_str ( ) ) ; <nl> <nl> if ( e . type ( ) = = CodeWScope ) { <nl> mmm a / scripting / engine . cpp <nl> ppp b / scripting / engine . cpp <nl> namespace mongo { <nl> <nl> return exec ( data , filename , printResult , reportError , assertOnError , timeoutMs ) ; <nl> } <nl> + <nl> + typedef map < string , list < Scope * > > PoolToScopes ; <nl> + <nl> + class ScopeCache { <nl> + public : <nl> + <nl> + ~ ScopeCache ( ) { <nl> + for ( PoolToScopes : : iterator i = _pools . begin ( ) ; i ! = _pools . end ( ) ; i + + ) { <nl> + for ( list < Scope * > : : iterator j = i - > second . begin ( ) ; j ! = i - > second . end ( ) ; j + + ) <nl> + delete * j ; <nl> + } <nl> + } <nl> <nl> + void done ( const string & pool , Scope * s ) { <nl> + boostlock lk ( _mutex ) ; <nl> + list < Scope * > & l = _pools [ pool ] ; <nl> + if ( l . size ( ) > 10 ) { <nl> + delete s ; <nl> + } <nl> + else { <nl> + l . push_back ( s ) ; <nl> + s - > reset ( ) ; <nl> + } <nl> + } <nl> + <nl> + Scope * get ( const string & pool ) { <nl> + boostlock lk ( _mutex ) ; <nl> + list < Scope * > & l = _pools [ pool ] ; <nl> + if ( l . size ( ) = = 0 ) <nl> + return 0 ; <nl> + <nl> + Scope * s = l . back ( ) ; <nl> + l . 
pop_back ( ) ; <nl> + s - > reset ( ) ; <nl> + return s ; <nl> + } <nl> <nl> + private : <nl> + PoolToScopes _pools ; <nl> + mutex _mutex ; <nl> + } ; <nl> <nl> + thread_specific_ptr < ScopeCache > scopeCache ; <nl> + <nl> + class PooledScope : public Scope { <nl> + public : <nl> + PooledScope ( const string pool , Scope * real ) : _pool ( pool ) , _real ( real ) { } ; <nl> + virtual ~ PooledScope ( ) { <nl> + scopeCache - > done ( _pool , _real ) ; <nl> + _real = 0 ; <nl> + } <nl> + <nl> + void reset ( ) { <nl> + _real - > reset ( ) ; <nl> + } <nl> + void init ( BSONObj * data ) { <nl> + _real - > init ( data ) ; <nl> + } <nl> + <nl> + void localConnect ( const char * dbName ) { <nl> + _real - > localConnect ( dbName ) ; <nl> + } <nl> + void externalSetup ( ) { <nl> + _real - > externalSetup ( ) ; <nl> + } <nl> + <nl> + double getNumber ( const char * field ) { <nl> + return _real - > getNumber ( field ) ; <nl> + } <nl> + string getString ( const char * field ) { <nl> + return _real - > getString ( field ) ; <nl> + } <nl> + bool getBoolean ( const char * field ) { <nl> + return _real - > getBoolean ( field ) ; <nl> + } <nl> + BSONObj getObject ( const char * field ) { <nl> + return _real - > getObject ( field ) ; <nl> + } <nl> + <nl> + int type ( const char * field ) { <nl> + return _real - > type ( field ) ; <nl> + } <nl> + <nl> + void setNumber ( const char * field , double val ) { <nl> + _real - > setNumber ( field , val ) ; <nl> + } <nl> + void setString ( const char * field , const char * val ) { <nl> + _real - > setString ( field , val ) ; <nl> + } <nl> + void setObject ( const char * field , const BSONObj & obj , bool readOnly = true ) { <nl> + _real - > setObject ( field , obj , readOnly ) ; <nl> + } <nl> + void setBoolean ( const char * field , bool val ) { <nl> + _real - > setBoolean ( field , val ) ; <nl> + } <nl> + void setThis ( const BSONObj * obj ) { <nl> + _real - > setThis ( obj ) ; <nl> + } <nl> + <nl> + ScriptingFunction createFunction ( const char * code ) { <nl> + return _real - > createFunction ( code ) ; <nl> + } <nl> + <nl> + / * * <nl> + * @ return 0 on success <nl> + * / <nl> + int invoke ( ScriptingFunction func , const BSONObj & args , int timeoutMs = 0 ) { <nl> + return _real - > invoke ( func , args , timeoutMs ) ; <nl> + } <nl> + <nl> + string getError ( ) { <nl> + return _real - > getError ( ) ; <nl> + } <nl> + <nl> + bool exec ( const string & code , const string & name , bool printResult , bool reportError , bool assertOnError , int timeoutMs = 0 ) { <nl> + return _real - > exec ( code , name , printResult , reportError , assertOnError , timeoutMs ) ; <nl> + } <nl> + bool execFile ( const string & filename , bool printResult , bool reportError , bool assertOnError , int timeoutMs = 0 ) { <nl> + return _real - > execFile ( filename , printResult , reportError , assertOnError , timeoutMs ) ; <nl> + } <nl> + <nl> + void injectNative ( const char * field , NativeFunction func ) { <nl> + _real - > injectNative ( field , func ) ; <nl> + } <nl> + <nl> + void gc ( ) { <nl> + _real - > gc ( ) ; <nl> + } <nl> + <nl> + private : <nl> + string _pool ; <nl> + Scope * _real ; <nl> + } ; <nl> + <nl> + auto_ptr < Scope > ScriptEngine : : getPooledScope ( const string & pool ) { <nl> + if ( ! scopeCache . get ( ) ) { <nl> + scopeCache . reset ( new ScopeCache ( ) ) ; <nl> + } <nl> + <nl> + Scope * s = scopeCache - > get ( pool ) ; <nl> + if ( ! s ) { <nl> + s = createScope ( ) ; <nl> + } <nl> + <nl> + auto_ptr < Scope > p ; <nl> + p . 
reset ( new PooledScope ( pool , s ) ) ; <nl> + return p ; <nl> + } <nl> + <nl> ScriptEngine * globalScriptEngine ; <nl> } <nl> mmm a / scripting / engine . h <nl> ppp b / scripting / engine . h <nl> namespace mongo { <nl> virtual bool utf8Ok ( ) const = 0 ; <nl> <nl> static void setup ( ) ; <nl> + <nl> + auto_ptr < Scope > getPooledScope ( const string & pool ) ; <nl> } ; <nl> <nl> extern ScriptEngine * globalScriptEngine ; <nl> mmm a / scripting / engine_spidermonkey . cpp <nl> ppp b / scripting / engine_spidermonkey . cpp <nl> namespace mongo { <nl> _modified = false ; <nl> _magic = 17 ; <nl> } <nl> + <nl> + ~ BSONHolder ( ) { <nl> + _magic = 18 ; <nl> + } <nl> <nl> void check ( ) { <nl> - uassert ( " holder magic value is wrong " , _magic = = 17 ) ; <nl> + uassert ( " holder magic value is wrong " , _magic = = 17 & & _obj . isValid ( ) ) ; <nl> } <nl> <nl> BSONFieldIterator * it ( ) ; <nl> namespace mongo { <nl> JS_SetCStringsAreUTF8 ( ) ; <nl> # endif <nl> <nl> + # ifdef JS_THREADSAFE <nl> _runtime = JS_NewRuntime ( 8L * 1024L * 1024L ) ; <nl> uassert ( " JS_NewRuntime failed " , _runtime ) ; <nl> + # else <nl> + # warning spider monkey compiled without THREADSAFE will be slower <nl> + cerr < < " * * * warning : spider monkey compiled without THREADSAFE will be slower " < < endl ; <nl> + # endif <nl> + <nl> <nl> if ( ! utf8Ok ( ) ) { <nl> cerr < < " * * * warning : spider monkey build without utf8 support . consider rebuilding with utf8 support " < < endl ; <nl> namespace mongo { <nl> } <nl> <nl> ~ SMEngine ( ) { <nl> + # ifdef JS_THREADSAFE <nl> JS_DestroyRuntime ( _runtime ) ; <nl> + # endif <nl> JS_ShutDown ( ) ; <nl> } <nl> <nl> namespace mongo { <nl> virtual bool utf8Ok ( ) const { return JS_CStringsAreUTF8 ( ) ; } <nl> <nl> private : <nl> + # ifdef JS_THREADSAFE <nl> JSRuntime * _runtime ; <nl> + # endif <nl> friend class SMScope ; <nl> } ; <nl> <nl> namespace mongo { <nl> return JS_FALSE ; <nl> } <nl> <nl> + JSBool yes_gc ( JSContext * cx , JSGCStatus status ) { <nl> + return JS_TRUE ; <nl> + } <nl> + <nl> class SMScope : public Scope { <nl> public : <nl> SMScope ( ) { <nl> + # ifdef JS_THREADSAFE <nl> _context = JS_NewContext ( globalSMEngine - > _runtime , 8192 ) ; <nl> + # else <nl> + _runtime = JS_NewRuntime ( 512L * 1024L ) ; <nl> + massert ( " JS_NewRuntime failed " , _runtime ) ; <nl> + _context = JS_NewContext ( _runtime , 8192 ) ; <nl> + # endif <nl> _convertor = new Convertor ( _context ) ; <nl> massert ( " JS_NewContext failed " , _context ) ; <nl> <nl> namespace mongo { <nl> " keySet " , object_keyset , 0 , JSPROP_READONLY ) ) ; <nl> <nl> _this = 0 ; <nl> + _externalSetup = false ; <nl> + _localConnect = false ; <nl> / / JS_SetGCCallback ( _context , no_gc ) ; / / this is useful for seeing if something is a gc problem <nl> } <nl> - <nl> + <nl> ~ SMScope ( ) { <nl> uassert ( " deleted SMScope twice ? " , _convertor ) ; <nl> <nl> for ( list < void * > : : iterator i = _roots . begin ( ) ; i ! = _roots . end ( ) ; i + + ) { <nl> JS_RemoveRoot ( _context , * i ) ; <nl> } <nl> - <nl> - if ( _this ) <nl> + _roots . 
clear ( ) ; <nl> + <nl> + if ( _this ) { <nl> JS_RemoveRoot ( _context , & _this ) ; <nl> + _this = 0 ; <nl> + } <nl> <nl> if ( _convertor ) { <nl> delete _convertor ; <nl> _convertor = 0 ; <nl> } <nl> - <nl> + <nl> if ( _context ) { <nl> JS_DestroyContext ( _context ) ; <nl> _context = 0 ; <nl> } <nl> - } <nl> <nl> + # ifndef JS_THREADSAFE <nl> + JS_DestroyRuntime ( _runtime ) ; <nl> + _runtime = 0 ; <nl> + # endif <nl> + } <nl> + <nl> void reset ( ) { <nl> - massert ( " SMScope : : reset ( ) not implemented yet " , 0 ) ; <nl> + assert ( _convertor ) ; <nl> + return ; <nl> + if ( _this ) { <nl> + JS_RemoveRoot ( _context , & _this ) ; <nl> + _this = 0 ; <nl> + } <nl> + currentScope . reset ( this ) ; <nl> + _error = " " ; <nl> } <nl> - <nl> + <nl> void addRoot ( void * root , const char * name ) { <nl> JS_AddNamedRoot ( _context , root , name ) ; <nl> _roots . push_back ( root ) ; <nl> namespace mongo { <nl> while ( i . more ( ) ) { <nl> BSONElement e = i . next ( ) ; <nl> _convertor - > setProperty ( _global , e . fieldName ( ) , _convertor - > toval ( e ) ) ; <nl> + _initFieldNames . insert ( e . fieldName ( ) ) ; <nl> } <nl> <nl> } <nl> <nl> void externalSetup ( ) { <nl> + uassert ( " already local connected " , ! _localConnect ) ; <nl> + if ( _externalSetup ) <nl> + return ; <nl> initMongoJS ( this , _context , _global , false ) ; <nl> + _externalSetup = true ; <nl> } <nl> <nl> void localConnect ( const char * dbName ) { <nl> + uassert ( " already setup for external db " , ! _externalSetup ) ; <nl> + if ( _localConnect ) { <nl> + uassert ( " connected to different db " , _dbName = = dbName ) ; <nl> + return ; <nl> + } <nl> + <nl> initMongoJS ( this , _context , _global , true ) ; <nl> - <nl> + <nl> exec ( " _mongo = new Mongo ( ) ; " ) ; <nl> exec ( ( ( string ) " db = _mongo . getDB ( \ " " + dbName + " \ " ) ; " ) . c_str ( ) ) ; <nl> + <nl> + _localConnect = true ; <nl> + _dbName = dbName ; <nl> } <nl> <nl> / / mmm - - getters mmmmmm <nl> namespace mongo { <nl> void setThis ( const BSONObj * obj ) { <nl> if ( _this ) <nl> JS_RemoveRoot ( _context , & _this ) ; <nl> - <nl> + <nl> _this = _convertor - > toJSObject ( obj ) ; <nl> <nl> JS_AddNamedRoot ( _context , & _this , " scope this " ) ; <nl> namespace mongo { <nl> JSContext * context ( ) const { return _context ; } <nl> <nl> private : <nl> + # ifndef JS_THREADSAFE <nl> + JSRuntime * _runtime ; <nl> + # endif <nl> JSContext * _context ; <nl> Convertor * _convertor ; <nl> <nl> namespace mongo { <nl> <nl> string _error ; <nl> list < void * > _roots ; <nl> + <nl> + bool _externalSetup ; <nl> + bool _localConnect ; <nl> + string _dbName ; <nl> + <nl> + set < string > _initFieldNames ; <nl> } ; <nl> <nl> void errorReporter ( JSContext * cx , const char * message , JSErrorReport * report ) { <nl>
caching js contexts in a pool ; also handling SpiderMonkey builds compiled without JS_THREADSAFE
mongodb/mongo
c9ad39db791c46076571bdc2fdaa7cc6066593ad
2009-08-05T17:14:06Z
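The MongoDB record above introduces a return-to-pool-on-destruction decorator: getPooledScope() hands out a cached Scope keyed by a pool name, and PooledScope's destructor gives it back through done(), which caps each pool at 10 entries. A generic sketch of that shape, with invented names and without the thread_specific_ptr indirection the real cache adds:

#include <list>
#include <map>
#include <string>

struct Resource                 // stand-in for a script Scope
{
    void reset() { /* clear per-use state */ }
};

class Pool                      // stand-in for ScopeCache
{
    std::map<std::string, std::list<Resource *>> pools;
public:
    Resource * get(const std::string & name)
    {
        std::list<Resource *> & l = pools[name];
        if (l.empty())
            return new Resource();
        Resource * r = l.back();
        l.pop_back();
        r->reset();
        return r;
    }
    void done(const std::string & name, Resource * r)
    {
        std::list<Resource *> & l = pools[name];
        if (l.size() > 10) { delete r; }           // cap the pool, like done() above
        else { r->reset(); l.push_back(r); }
    }
    ~Pool()
    {
        for (auto & p : pools)
            for (Resource * r : p.second)
                delete r;
    }
};

class Pooled                    // stand-in for PooledScope: RAII hand-back
{
    Pool & pool;
    std::string name;
    Resource * real;
public:
    Pooled(Pool & p, const std::string & n) : pool(p), name(n), real(p.get(n)) {}
    ~Pooled() { pool.done(name, real); }
    Resource * operator->() { return real; }
};

int main()
{
    Pool cache;
    { Pooled s(cache, "test.foo"); s->reset(); }   // handed back to the pool here
    { Pooled s(cache, "test.foo"); }               // reuses the cached Resource
    return 0;
}

The forwarding methods on PooledScope are pure delegation; the only behavior the wrapper adds is the hand-back in the destructor.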
mmm a / cocos / platform / linux / CCStdC - linux . cpp <nl> ppp b / cocos / platform / linux / CCStdC - linux . cpp <nl> THE SOFTWARE . <nl> * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * / <nl> <nl> # include " platform / CCPlatformConfig . h " <nl> - # if CC_TARGET_PLATFORM = = CC_PLATFORM_LINUX <nl> - <nl> # include " CCStdC - linux . h " <nl> <nl> - # if ( CC_TARGET_PLATFORM = = CC_PLATFORM_WIN32 ) <nl> - <nl> - int CC_DLL gettimeofday ( struct timeval * val , struct timezone * ) <nl> - { <nl> - if ( val ) <nl> - { <nl> - SYSTEMTIME wtm ; <nl> - GetLocalTime ( & wtm ) ; <nl> - <nl> - struct tm tTm ; <nl> - tTm . tm_year = wtm . wYear - 1900 ; <nl> - tTm . tm_mon = wtm . wMonth - 1 ; <nl> - tTm . tm_mday = wtm . wDay ; <nl> - tTm . tm_hour = wtm . wHour ; <nl> - tTm . tm_min = wtm . wMinute ; <nl> - tTm . tm_sec = wtm . wSecond ; <nl> - tTm . tm_isdst = - 1 ; <nl> - <nl> - val - > tv_sec = ( long ) mktime ( & tTm ) ; / / time_t is 64 - bit on win32 <nl> - val - > tv_usec = wtm . wMilliseconds * 1000 ; <nl> - } <nl> - return 0 ; <nl> - } <nl> - # elif ( CC_TARGET_PLATFORM = = CC_PLATFORM_BADA ) <nl> - <nl> - using namespace Osp : : System ; <nl> - <nl> - int CC_DLL gettimeofday ( struct timeval * val , struct timezone * ) <nl> - { <nl> - if ( val ) <nl> - { <nl> - long long curTick = 0 ; <nl> - SystemTime : : GetTicks ( curTick ) ; <nl> - unsigned int ms = curTick ; <nl> - val - > tv_sec = ms / 1000 ; <nl> - val - > tv_usec = ( ms % 1000 ) * 1000 ; <nl> - } <nl> - return 0 ; <nl> - } <nl> + # if CC_TARGET_PLATFORM = = CC_PLATFORM_LINUX <nl> <nl> - # endif / / CC_PLATFORM_WIN32 <nl> <nl> # endif / / CC_TARGET_PLATFORM = = CC_PLATFORM_LINUX <nl>
: Removes unused code in CCStdC - linux . cpp
cocos2d/cocos2d-x
cb6d81599b21399496b3d68b32789bed10db9501
2014-11-24T07:01:57Z
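The deleted gettimeofday implementations in the cocos2d-x record above were unreachable: the whole file body was guarded by CC_TARGET_PLATFORM == CC_PLATFORM_LINUX, so the nested CC_PLATFORM_WIN32 and CC_PLATFORM_BADA branches could never be compiled. The situation in miniature, with shortened macro names for illustration:

// Inside a region compiled only when PLATFORM == LINUX, a nested WIN32
// check can never hold, so its body is dead code.
#define PLATFORM_LINUX 1
#define PLATFORM_WIN32 2
#define PLATFORM PLATFORM_LINUX

#if PLATFORM == PLATFORM_LINUX
#if PLATFORM == PLATFORM_WIN32
#error unreachable - PLATFORM cannot equal two different values at once
#endif
#endif

int main() { return 0; }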
mmm a / src / core / ext / transport / chttp2 / transport / hpack_encoder . c <nl> ppp b / src / core / ext / transport / chttp2 / transport / hpack_encoder . c <nl> void grpc_chttp2_hpack_compressor_set_max_table_size ( <nl> <nl> void grpc_chttp2_encode_header ( grpc_exec_ctx * exec_ctx , <nl> grpc_chttp2_hpack_compressor * c , <nl> - uint32_t stream_id , <nl> - grpc_metadata_batch * metadata , int is_eof , <nl> - size_t max_frame_size , <nl> - grpc_transport_one_way_stats * stats , <nl> + grpc_metadata_batch * metadata , <nl> + const grpc_encode_header_options * options , <nl> grpc_slice_buffer * outbuf ) { <nl> framer_state st ; <nl> grpc_linked_mdelem * l ; <nl> gpr_timespec deadline ; <nl> <nl> - GPR_ASSERT ( stream_id ! = 0 ) ; <nl> + GPR_ASSERT ( options - > stream_id ! = 0 ) ; <nl> <nl> st . seen_regular_header = 0 ; <nl> - st . stream_id = stream_id ; <nl> + st . stream_id = options - > stream_id ; <nl> st . output = outbuf ; <nl> st . is_first_frame = 1 ; <nl> - st . stats = stats ; <nl> - st . max_frame_size = max_frame_size ; <nl> + st . stats = options - > stats ; <nl> + st . max_frame_size = options - > max_frame_size ; <nl> <nl> / * Encode a metadata batch ; store the returned values , representing <nl> a metadata element that needs to be unreffed back into the metadata <nl> void grpc_chttp2_encode_header ( grpc_exec_ctx * exec_ctx , <nl> deadline_enc ( exec_ctx , c , deadline , & st ) ; <nl> } <nl> <nl> - finish_frame ( & st , 1 , is_eof ) ; <nl> + finish_frame ( & st , 1 , options - > is_eof ) ; <nl> } <nl> mmm a / src / core / ext / transport / chttp2 / transport / hpack_encoder . h <nl> ppp b / src / core / ext / transport / chttp2 / transport / hpack_encoder . h <nl> void grpc_chttp2_hpack_compressor_set_max_table_size ( <nl> void grpc_chttp2_hpack_compressor_set_max_usable_size ( <nl> grpc_chttp2_hpack_compressor * c , uint32_t max_table_size ) ; <nl> <nl> + typedef struct { <nl> + uint32_t stream_id ; <nl> + bool is_eof ; <nl> + bool use_true_binary_metadata ; <nl> + size_t max_frame_size ; <nl> + grpc_transport_one_way_stats * stats ; <nl> + } grpc_encode_header_options ; <nl> + <nl> void grpc_chttp2_encode_header ( grpc_exec_ctx * exec_ctx , <nl> - grpc_chttp2_hpack_compressor * c , uint32_t id , <nl> - grpc_metadata_batch * metadata , int is_eof , <nl> - size_t max_frame_size , <nl> - grpc_transport_one_way_stats * stats , <nl> + grpc_chttp2_hpack_compressor * c , <nl> + grpc_metadata_batch * metadata , <nl> + const grpc_encode_header_options * options , <nl> grpc_slice_buffer * outbuf ) ; <nl> <nl> # endif / * GRPC_CORE_EXT_TRANSPORT_CHTTP2_TRANSPORT_HPACK_ENCODER_H * / <nl> mmm a / src / core / ext / transport / chttp2 / transport / writing . c <nl> ppp b / src / core / ext / transport / chttp2 / transport / writing . c <nl> bool grpc_chttp2_begin_write ( grpc_exec_ctx * exec_ctx , <nl> <nl> / * send initial metadata if it ' s available * / <nl> if ( ! sent_initial_metadata & & s - > send_initial_metadata ) { <nl> - grpc_chttp2_encode_header ( <nl> - exec_ctx , & t - > hpack_compressor , s - > id , s - > send_initial_metadata , 0 , <nl> - t - > settings [ GRPC_ACKED_SETTINGS ] [ GRPC_CHTTP2_SETTINGS_MAX_FRAME_SIZE ] , <nl> - & s - > stats . outgoing , & t - > outbuf ) ; <nl> + grpc_encode_header_options hopt = { <nl> + . stream_id = s - > id , <nl> + . is_eof = false , <nl> + . use_true_binary_metadata = <nl> + t - > settings <nl> + [ GRPC_ACKED_SETTINGS ] <nl> + [ GRPC_CHTTP2_SETTINGS_GRPC_ALLOW_TRUE_BINARY_METADATA ] ! = 0 , <nl> + . 
max_frame_size = t - > settings [ GRPC_ACKED_SETTINGS ] <nl> + [ GRPC_CHTTP2_SETTINGS_MAX_FRAME_SIZE ] , <nl> + . stats = & s - > stats . outgoing } ; <nl> + grpc_chttp2_encode_header ( exec_ctx , & t - > hpack_compressor , <nl> + s - > send_initial_metadata , & hopt , & t - > outbuf ) ; <nl> s - > send_initial_metadata = NULL ; <nl> s - > sent_initial_metadata = true ; <nl> sent_initial_metadata = true ; <nl> bool grpc_chttp2_begin_write ( grpc_exec_ctx * exec_ctx , <nl> grpc_chttp2_encode_data ( s - > id , & s - > flow_controlled_buffer , 0 , true , <nl> & s - > stats . outgoing , & t - > outbuf ) ; <nl> } else { <nl> - grpc_chttp2_encode_header ( <nl> - exec_ctx , & t - > hpack_compressor , s - > id , s - > send_trailing_metadata , <nl> - true , t - > settings [ GRPC_ACKED_SETTINGS ] <nl> - [ GRPC_CHTTP2_SETTINGS_MAX_FRAME_SIZE ] , <nl> - & s - > stats . outgoing , & t - > outbuf ) ; <nl> + grpc_encode_header_options hopt = { <nl> + . stream_id = s - > id , <nl> + . is_eof = true , <nl> + . use_true_binary_metadata = <nl> + t - > settings <nl> + [ GRPC_ACKED_SETTINGS ] <nl> + [ GRPC_CHTTP2_SETTINGS_GRPC_ALLOW_TRUE_BINARY_METADATA ] ! = <nl> + 0 , <nl> + . max_frame_size = <nl> + t - > settings [ GRPC_ACKED_SETTINGS ] <nl> + [ GRPC_CHTTP2_SETTINGS_MAX_FRAME_SIZE ] , <nl> + . stats = & s - > stats . outgoing } ; <nl> + grpc_chttp2_encode_header ( exec_ctx , & t - > hpack_compressor , <nl> + s - > send_trailing_metadata , & hopt , <nl> + & t - > outbuf ) ; <nl> } <nl> s - > send_trailing_metadata = NULL ; <nl> s - > sent_trailing_metadata = true ; <nl>
Starting the encode path
grpc/grpc
83f7b9559c52d3c7f857a8ac80f5c21dcb9d2490
2017-04-04T14:58:18Z
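The gRPC record above swaps a growing positional parameter list (stream_id, is_eof, max_frame_size, stats) for a single grpc_encode_header_options struct, so the new use_true_binary_metadata flag slots in without disturbing every caller's argument order. The pattern in isolation, with invented names and C++ member defaults standing in for the C99 designated initializers the call sites use:

#include <cstddef>
#include <cstdint>
#include <cstdio>

// Options-struct pattern from the hpack change above: bundle per-call flags
// so adding a field does not break the function signature.
struct EncodeOptions
{
    uint32_t stream_id = 0;
    bool is_eof = false;
    bool use_true_binary_metadata = false;  // the field the commit adds
    size_t max_frame_size = 16384;
};

static void encode_header(const EncodeOptions & opts)
{
    std::printf("stream %u eof=%d true_binary=%d max_frame=%zu\n",
                (unsigned) opts.stream_id, (int) opts.is_eof,
                (int) opts.use_true_binary_metadata, opts.max_frame_size);
}

int main()
{
    EncodeOptions opts;      // defaults cover the fields a caller ignores
    opts.stream_id = 1;
    opts.is_eof = true;
    encode_header(opts);
    return 0;
}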
mmm a / test / DebugInfo / attributes . swift <nl> ppp b / test / DebugInfo / attributes . swift <nl> <nl> @ objc class ObjCClass { <nl> @ IBAction func click ( _ : AnyObject ? ) - > ( ) { } <nl> } <nl> - / / DW_LANG_Swift = 0xa000 [ FIXME : this number will change ! ] <nl> + <nl> / / CHECK - DAG : ! [ [ TY1 : [ 0 - 9 ] + ] ] = ! DICompositeType ( tag : DW_TAG_structure_type , name : " SwiftClass " , { { . * } } line : [ [ @ LINE + 1 ] ] , { { . * } } runtimeLang : DW_LANG_Swift <nl> class SwiftClass { <nl> @ objc func objcmethod ( ) - > ( ) { } <nl> class SwiftClass { <nl> func f ( someBlock : @ convention ( block ) ( Int ) - > Int ) { <nl> } <nl> } <nl> - <nl> + x <nl> / / FIXME : This is currently elided , but should reappear eventually as <nl> / / an artificial variable . <nl> / / DISABLED : [ DW_TAG_variable ] [ OBJC_METACLASS_ $ __TtC10attributes9ObjCClass ] <nl>
Remove a stale comment from a debug info testcase .
apple/swift
9fdecb42a496dc8c59f36f9dc688d2336f5581a4
2015-05-07T16:11:51Z
mmm a / src / clustering / reactor / reactor_be_secondary . tcc <nl> ppp b / src / clustering / reactor / reactor_be_secondary . tcc <nl> void reactor_t < protocol_t > : : be_secondary ( typename protocol_t : : region_t region , s <nl> / * We lost the replier which means we should retry , just <nl> * going back to the top of the while loop accomplishes this . <nl> * * / <nl> + } catch ( typename listener_t < protocol_t > : : broadcaster_lost_exc_t ) { <nl> + / * We didn ' t find the broadcaster which means we should retry , <nl> + * same deal as above . * / <nl> } <nl> } <nl> } catch ( interrupted_exc_t ) { <nl>
Catches the exception thrown by the listener_t constructor .
rethinkdb/rethinkdb
abd467517645d3ca5f27966978a51f9c79a15b78
2012-02-23T22:18:11Z
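The RethinkDB record above adds one more exception type to a retry loop: both the replier-lost and the broadcaster-lost cases now fall through to the top of the while loop and try again. The shape of that loop, reduced to a standalone sketch with local stand-in exception types:

#include <iostream>
#include <stdexcept>

// Local stand-ins for the replier-lost / broadcaster-lost conditions.
struct replier_lost_exc_t : std::runtime_error
{
    replier_lost_exc_t() : std::runtime_error("replier lost") {}
};
struct broadcaster_lost_exc_t : std::runtime_error
{
    broadcaster_lost_exc_t() : std::runtime_error("broadcaster lost") {}
};

static int attempts = 0;

static void be_secondary_once()
{
    if (++attempts == 1)
        throw broadcaster_lost_exc_t();  // simulate one transient failure
    std::cout << "secondary up after " << attempts << " attempts\n";
}

int main()
{
    bool done = false;
    while (!done) {
        try {
            be_secondary_once();
            done = true;
        } catch (const replier_lost_exc_t &) {
            // lost the replier: fall through and retry
        } catch (const broadcaster_lost_exc_t &) {
            // the newly handled case: broadcaster gone, also retry
        }
    }
    return 0;
}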
mmm a / src / base / utils / misc . cpp <nl> ppp b / src / base / utils / misc . cpp <nl> namespace <nl> QT_TRANSLATE_NOOP3 ( " misc " , " PiB " , " pebibytes ( 1024 tebibytes ) " ) , <nl> QT_TRANSLATE_NOOP3 ( " misc " , " EiB " , " exbibytes ( 1024 pebibytes ) " ) <nl> } ; <nl> + <nl> + / / return best userfriendly storage unit ( B , KiB , MiB , GiB , TiB , . . . ) <nl> + / / use Binary prefix standards from IEC 60027 - 2 <nl> + / / see http : / / en . wikipedia . org / wiki / Kilobyte <nl> + / / value must be given in bytes <nl> + / / to send numbers instead of strings with suffixes <nl> + bool splitToFriendlyUnit ( const qint64 sizeInBytes , qreal & val , Utils : : Misc : : SizeUnit & unit ) <nl> + { <nl> + if ( sizeInBytes < 0 ) return false ; <nl> + <nl> + int i = 0 ; <nl> + qreal rawVal = static_cast < qreal > ( sizeInBytes ) ; <nl> + <nl> + while ( ( rawVal > = 1024 . ) & & ( i < = static_cast < int > ( Utils : : Misc : : SizeUnit : : ExbiByte ) ) ) { <nl> + rawVal / = 1024 . ; <nl> + + + i ; <nl> + } <nl> + val = rawVal ; <nl> + unit = static_cast < Utils : : Misc : : SizeUnit > ( i ) ; <nl> + return true ; <nl> + } <nl> } <nl> <nl> void Utils : : Misc : : shutdownComputer ( const ShutdownDialogAction & action ) <nl> QPoint Utils : : Misc : : screenCenter ( const QWidget * w ) <nl> } <nl> # endif <nl> <nl> - QString Utils : : Misc : : unitString ( Utils : : Misc : : SizeUnit unit ) <nl> - { <nl> - return QCoreApplication : : translate ( " misc " , <nl> - units [ static_cast < int > ( unit ) ] . source , units [ static_cast < int > ( unit ) ] . comment ) ; <nl> - } <nl> - <nl> - / / return best userfriendly storage unit ( B , KiB , MiB , GiB , TiB , . . . ) <nl> - / / use Binary prefix standards from IEC 60027 - 2 <nl> - / / see http : / / en . wikipedia . org / wiki / Kilobyte <nl> - / / value must be given in bytes <nl> - / / to send numbers instead of strings with suffixes <nl> - bool Utils : : Misc : : friendlyUnit ( qint64 sizeInBytes , qreal & val , Utils : : Misc : : SizeUnit & unit ) <nl> + QString Utils : : Misc : : unitString ( const SizeUnit unit , const bool isSpeed ) <nl> { <nl> - if ( sizeInBytes < 0 ) return false ; <nl> - <nl> - int i = 0 ; <nl> - qreal rawVal = static_cast < qreal > ( sizeInBytes ) ; <nl> - <nl> - while ( ( rawVal > = 1024 . ) & & ( i < = static_cast < int > ( SizeUnit : : ExbiByte ) ) ) { <nl> - rawVal / = 1024 . ; <nl> - + + i ; <nl> - } <nl> - val = rawVal ; <nl> - unit = static_cast < SizeUnit > ( i ) ; <nl> - return true ; <nl> + const auto & unitString = units [ static_cast < int > ( unit ) ] ; <nl> + QString ret = QCoreApplication : : translate ( " misc " , unitString . source , unitString . comment ) ; <nl> + if ( isSpeed ) <nl> + ret + = QCoreApplication : : translate ( " misc " , " / s " , " per second " ) ; <nl> + return ret ; <nl> } <nl> <nl> QString Utils : : Misc : : friendlyUnit ( qint64 bytesValue , bool isSpeed ) <nl> { <nl> SizeUnit unit ; <nl> qreal friendlyVal ; <nl> - if ( ! friendlyUnit ( bytesValue , friendlyVal , unit ) ) <nl> + if ( ! 
splitToFriendlyUnit ( bytesValue , friendlyVal , unit ) ) <nl> return QCoreApplication : : translate ( " misc " , " Unknown " , " Unknown ( size ) " ) ; <nl> - QString ret ; <nl> - if ( unit = = SizeUnit : : Byte ) <nl> - ret = QString : : number ( bytesValue ) + QString : : fromUtf8 ( C_NON_BREAKING_SPACE ) + unitString ( unit ) ; <nl> - else <nl> - ret = Utils : : String : : fromDouble ( friendlyVal , friendlyUnitPrecision ( unit ) ) + QString : : fromUtf8 ( C_NON_BREAKING_SPACE ) + unitString ( unit ) ; <nl> - if ( isSpeed ) <nl> - ret + = QCoreApplication : : translate ( " misc " , " / s " , " per second " ) ; <nl> - return ret ; <nl> + return Utils : : String : : fromDouble ( friendlyVal , friendlyUnitPrecision ( unit ) ) <nl> + + QString : : fromUtf8 ( C_NON_BREAKING_SPACE ) <nl> + + unitString ( unit , isSpeed ) ; <nl> } <nl> <nl> int Utils : : Misc : : friendlyUnitPrecision ( SizeUnit unit ) <nl> { <nl> / / friendlyUnit ' s number of digits after the decimal point <nl> + if ( unit = = SizeUnit : : Byte ) return 0 ; <nl> if ( unit < = SizeUnit : : MebiByte ) return 1 ; <nl> else if ( unit = = SizeUnit : : GibiByte ) return 2 ; <nl> else return 3 ; <nl> mmm a / src / base / utils / misc . h <nl> ppp b / src / base / utils / misc . h <nl> namespace Utils <nl> QString boostVersionString ( ) ; <nl> QString libtorrentVersionString ( ) ; <nl> <nl> - QString unitString ( SizeUnit unit ) ; <nl> + QString unitString ( SizeUnit unit , bool isSpeed = false ) ; <nl> <nl> / / return the best user friendly storage unit ( B , KiB , MiB , GiB , TiB ) <nl> / / value must be given in bytes <nl> - bool friendlyUnit ( qint64 sizeInBytes , qreal & val , SizeUnit & unit ) ; <nl> QString friendlyUnit ( qint64 bytesValue , bool isSpeed = false ) ; <nl> int friendlyUnitPrecision ( SizeUnit unit ) ; <nl> qint64 sizeInBytes ( qreal size , SizeUnit unit ) ; <nl> mmm a / src / gui / properties / speedplotview . cpp <nl> ppp b / src / gui / properties / speedplotview . cpp <nl> <nl> <nl> # include " speedplotview . h " <nl> <nl> + # include < QLocale > <nl> # include < QPainter > <nl> # include < QPen > <nl> # include " base / global . h " <nl> + # include " base / unicodestrings . h " <nl> # include " base / utils / misc . h " <nl> <nl> namespace <nl> namespace <nl> const int HOUR6_BUF_SIZE = 5 * 60 ; <nl> const int DIVIDER_30MIN = MIN30_SEC / MIN30_BUF_SIZE ; <nl> const int DIVIDER_6HOUR = HOUR6_SEC / HOUR6_BUF_SIZE ; <nl> + <nl> + <nl> + / / table of supposed nice steps for grid marks to get nice looking quarters of scale <nl> + const double roundingTable [ ] = { 1 . 2 , 1 . 6 , 2 , 2 . 4 , 2 . 8 , 3 . 2 , 4 , 6 , 8 } ; <nl> + <nl> + struct SplittedValue <nl> + { <nl> + double arg ; <nl> + Utils : : Misc : : SizeUnit unit ; <nl> + qint64 sizeInBytes ( ) const <nl> + { <nl> + return Utils : : Misc : : sizeInBytes ( arg , unit ) ; <nl> + } <nl> + } ; <nl> + <nl> + SplittedValue getRoundedYScale ( double value ) <nl> + { <nl> + using Utils : : Misc : : SizeUnit ; <nl> + <nl> + if ( value = = 0 . 0 ) return { 0 , SizeUnit : : Byte } ; <nl> + if ( value < = 12 . 0 ) return { 12 , SizeUnit : : Byte } ; <nl> + <nl> + SizeUnit calculatedUnit = SizeUnit : : Byte ; <nl> + while ( value > 1024 ) { <nl> + value / = 1024 ; <nl> + calculatedUnit = static_cast < SizeUnit > ( static_cast < int > ( calculatedUnit ) + 1 ) ; <nl> + } <nl> + <nl> + if ( value > 100 . 
0 ) { <nl> + int roundedValue = static_cast < int > ( value / 40 ) * 40 ; <nl> + while ( roundedValue < value ) <nl> + roundedValue + = 40 ; <nl> + return { static_cast < double > ( roundedValue ) , calculatedUnit } ; <nl> + } <nl> + <nl> + if ( value > 10 . 0 ) { <nl> + int roundedValue = static_cast < int > ( value / 4 ) * 4 ; <nl> + while ( roundedValue < value ) <nl> + roundedValue + = 4 ; <nl> + return { static_cast < double > ( roundedValue ) , calculatedUnit } ; <nl> + } <nl> + <nl> + for ( const auto & roundedValue : roundingTable ) { <nl> + if ( value < = roundedValue ) <nl> + return { roundedValue , calculatedUnit } ; <nl> + } <nl> + return { 10 . 0 , calculatedUnit } ; <nl> + } <nl> + <nl> + QString formatLabel ( const double argValue , const Utils : : Misc : : SizeUnit unit ) <nl> + { <nl> + / / check is there need for digits after decimal separator <nl> + const int precision = ( argValue < 10 ) ? friendlyUnitPrecision ( unit ) : 0 ; <nl> + return QLocale : : system ( ) . toString ( argValue , ' f ' , precision ) <nl> + + QString : : fromUtf8 ( C_NON_BREAKING_SPACE ) <nl> + + unitString ( unit , true ) ; <nl> + } <nl> } <nl> <nl> SpeedPlotView : : Averager : : Averager ( int divider , boost : : circular_buffer < PointData > & sink ) <nl> void SpeedPlotView : : paintEvent ( QPaintEvent * ) <nl> QFontMetrics fontMetrics = painter . fontMetrics ( ) ; <nl> <nl> rect . adjust ( 4 , 4 , 0 , - 4 ) ; / / Add padding <nl> - <nl> - quint64 maxY = maxYValue ( ) ; <nl> - <nl> + const SplittedValue niceScale = getRoundedYScale ( maxYValue ( ) ) ; <nl> rect . adjust ( 0 , fontMetrics . height ( ) , 0 , 0 ) ; / / Add top padding for top speed text <nl> <nl> / / draw Y axis speed labels <nl> QVector < QString > speedLabels = { <nl> - Utils : : Misc : : friendlyUnit ( maxY , true ) , <nl> - Utils : : Misc : : friendlyUnit ( 0 . 75 * maxY , true ) , <nl> - Utils : : Misc : : friendlyUnit ( 0 . 5 * maxY , true ) , <nl> - Utils : : Misc : : friendlyUnit ( 0 . 25 * maxY , true ) , <nl> - Utils : : Misc : : friendlyUnit ( 0 , true ) <nl> + formatLabel ( niceScale . arg , niceScale . unit ) , <nl> + formatLabel ( ( 0 . 75 * niceScale . arg ) , niceScale . unit ) , <nl> + formatLabel ( ( 0 . 50 * niceScale . arg ) , niceScale . unit ) , <nl> + formatLabel ( ( 0 . 25 * niceScale . arg ) , niceScale . unit ) , <nl> + formatLabel ( 0 . 0 , niceScale . unit ) , <nl> } ; <nl> <nl> int yAxisWidth = 0 ; <nl> void SpeedPlotView : : paintEvent ( QPaintEvent * ) <nl> / / draw graphs <nl> rect . adjust ( 3 , 0 , 0 , 0 ) ; / / Need , else graphs cross left gridline <nl> <nl> - double yMultiplier = ( maxY = = 0 ) ? 0 . 0 : static_cast < double > ( rect . height ( ) ) / maxY ; <nl> - double xTickSize = static_cast < double > ( rect . width ( ) ) / m_viewablePointsCount ; <nl> + const double yMultiplier = ( niceScale . arg = = 0 . 0 ) ? 0 . 0 : ( rect . height ( ) / niceScale . sizeInBytes ( ) ) ; <nl> + const double xTickSize = static_cast < double > ( rect . width ( ) ) / m_viewablePointsCount ; <nl> <nl> boost : : circular_buffer < PointData > & queue = getCurrentData ( ) ; <nl> <nl> mmm a / src / gui / search / searchjobwidget . cpp <nl> ppp b / src / gui / search / searchjobwidget . cpp <nl> void SearchJobWidget : : updateFilter ( ) <nl> void SearchJobWidget : : fillFilterComboBoxes ( ) <nl> { <nl> using Utils : : Misc : : SizeUnit ; <nl> + using Utils : : Misc : : unitString ; <nl> + <nl> QStringList unitStrings ; <nl> unitStrings . append ( unitString ( SizeUnit : : Byte ) ) ; <nl> unitStrings . 
append ( unitString ( SizeUnit : : KibiByte ) ) ; <nl>
Improve scaling of speed graphs
qbittorrent/qBittorrent
fabd7e34f5c2a810b2d259d5c84955e609937dff
2018-10-21T09:45:32Z
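splitToFriendlyUnit in the qBittorrent record above picks an IEC 60027-2 binary prefix by dividing by 1024 until the value fits under the next prefix. The same loop standalone, with an index into a unit table in place of the SizeUnit enum:

#include <cstdint>
#include <cstdio>

// The divide-by-1024 loop behind splitToFriendlyUnit above; the table
// mirrors the IEC 60027-2 binary prefixes (B, KiB, ..., EiB).
static const char * const kUnits[] = {"B", "KiB", "MiB", "GiB", "TiB", "PiB", "EiB"};

static bool splitToFriendlyUnit(int64_t sizeInBytes, double & val, int & unitIndex)
{
    if (sizeInBytes < 0)
        return false;
    double raw = static_cast<double>(sizeInBytes);
    int i = 0;
    while ((raw >= 1024.0) && (i < 6)) {  // 6 == index of EiB, the last entry
        raw /= 1024.0;
        ++i;
    }
    val = raw;
    unitIndex = i;
    return true;
}

int main()
{
    double value = 0.0;
    int unit = 0;
    if (splitToFriendlyUnit(3LL * 1024 * 1024, value, unit))
        std::printf("%.1f %s\n", value, kUnits[unit]);  // prints "3.0 MiB"
    return 0;
}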
new file mode 100644 <nl> index 0000000000 . . 6fa6d3cb46 <nl> mmm / dev / null <nl> ppp b / code / data_structures / Tree / print_postorder_traversal_from_given_inorder_and_preorder_traversals <nl> <nl> + # include < stdio . h > <nl> + # include < stdlib . h > <nl> + <nl> + <nl> + int search ( int arr [ ] , int x , int n ) <nl> + { <nl> + for ( int i = 0 ; i < n ; i + + ) <nl> + if ( arr [ i ] = = x ) <nl> + return i ; <nl> + return - 1 ; <nl> + } <nl> + <nl> + / / Prints postorder traversal from given inorder and preorder traversals <nl> + void printPostOrder ( int in [ ] , int pre [ ] , int n ) <nl> + { <nl> + / / The first element in pre [ ] is always root , search it <nl> + / / in in [ ] to find left and right subtrees <nl> + int root = search ( in , pre [ 0 ] , n ) ; <nl> + <nl> + / / If left subtree is not empty , print left subtree <nl> + if ( root ! = 0 ) <nl> + printPostOrder ( in , pre + 1 , root ) ; <nl> + <nl> + / / If right subtree is not empty , print right subtree <nl> + if ( root ! = n - 1 ) <nl> + printPostOrder ( in + root + 1 , pre + root + 1 , n - root - 1 ) ; <nl> + <nl> + <nl> + printf ( " % d " , pre [ 0 ] ) ; <nl> + } <nl> + <nl> + <nl> + int main ( ) <nl> + { <nl> + int n ; <nl> + printf ( " Enter the no . of elements in the tree " ) ; <nl> + printf ( " \ n " ) ; <nl> + scanf ( " % d " , & n ) ; <nl> + int in [ 10000 ] ; <nl> + int pre [ 10000 ] ; <nl> + printf ( " Enter elements of inorder traversal separated by a single space " ) ; <nl> + printf ( " \ n " ) ; <nl> + for ( int x = 0 ; x < n ; x + + ) { <nl> + scanf ( " % d " , & in [ x ] ) ; <nl> + } <nl> + printf ( " Enter elements of preorder traversal separated by a single space " ) ; <nl> + printf ( " \ n " ) ; <nl> + for ( int x = 0 ; x < n ; x + + ) { <nl> + scanf ( " % d " , & pre [ x ] ) ; <nl> + } <nl> + printf ( " Postorder traversal " ) ; <nl> + printf ( " \ n " ) ; <nl> + printPostOrder ( in , pre , n ) ; <nl> + printf ( " \ n " ) ; <nl> + return 0 ; <nl> + } <nl>
Create print_postorder_traversal_from_given_inorder_and_preorder_traversals
OpenGenus/cosmos
3dba7d5c34a27ad4867b91c3034b57faba61e403
2017-10-05T17:52:21Z
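A worked example for the program above: given inorder {4, 2, 5, 1, 3} and preorder {1, 2, 4, 5, 3}, pre[0] = 1 is the root and sits at index 3 of the inorder array, splitting it into a left subtree {4, 2, 5} (preorder {2, 4, 5}) and a right subtree {3}; recursing the same way and printing each root last yields the postorder 4 5 2 3 1. The same recursion as a self-contained driver (written in C++ here for consistency with the other sketches; the logic is unchanged):

#include <cstdio>

int search(const int arr[], int x, int n)
{
    for (int i = 0; i < n; ++i)
        if (arr[i] == x) return i;
    return -1;
}

void printPostOrder(const int in[], const int pre[], int n)
{
    int root = search(in, pre[0], n);        // pre[0] is always the root
    if (root != 0)                           // left subtree: in[0 .. root-1]
        printPostOrder(in, pre + 1, root);
    if (root != n - 1)                       // right subtree: in[root+1 ..]
        printPostOrder(in + root + 1, pre + root + 1, n - root - 1);
    std::printf("%d ", pre[0]);              // root printed last: post-order
}

int main()
{
    int in[]  = {4, 2, 5, 1, 3};
    int pre[] = {1, 2, 4, 5, 3};
    printPostOrder(in, pre, 5);              // prints: 4 5 2 3 1
    std::printf("\n");
    return 0;
}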
mmm a / modules / imgproc / src / filter . cpp <nl> ppp b / modules / imgproc / src / filter . cpp <nl> int FilterEngine : : start ( Size _wholeSize , Rect _roi , int _maxBufRows ) <nl> constBorderRow . resize ( getElemSize ( bufType ) * ( maxWidth + ksize . width - 1 + VEC_ALIGN ) ) ; <nl> uchar * dst = alignPtr ( & constBorderRow [ 0 ] , VEC_ALIGN ) , * tdst ; <nl> int n = ( int ) constBorderValue . size ( ) , N ; <nl> - if ( isSeparable ( ) ) <nl> - { <nl> - tdst = & srcRow [ 0 ] ; <nl> - N = ( maxWidth + ksize . width - 1 ) * esz ; <nl> - } <nl> - else <nl> - { <nl> - tdst = dst ; <nl> - N = maxWidth * esz ; <nl> - } <nl> + N = ( maxWidth + ksize . width - 1 ) * esz ; <nl> + tdst = isSeparable ( ) ? & srcRow [ 0 ] : dst ; <nl> <nl> for ( i = 0 ; i < N ; i + = n ) <nl> { <nl>
fixed top - right and bottom - right corner filtering in the case of a constant border ( ticket )
opencv/opencv
d40320090b52a827e255b6550861e321d0d04296
2011-05-19T12:19:48Z
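The OpenCV record above matters because the constant-border row has to span the full padded width, maxWidth + ksize.width - 1 elements, in both the separable and non-separable paths; before the change the non-separable branch filled only maxWidth elements, which is presumably where the corner artifacts in the ticket came from. A sketch of the consolidated arithmetic, with illustrative sizes:

#include <cstdio>

int main()
{
    // Illustrative sizes: an 8-pixel row filtered with a 5-wide kernel.
    int maxWidth = 8;
    int ksizeW = 5;
    int esz = 1;                  // element size in bytes
    bool separable = false;

    // After the fix, both paths fill the padded length; only the
    // destination buffer differs.
    int N = (maxWidth + ksizeW - 1) * esz;
    const char * tdst = separable ? "srcRow" : "constBorderRow";
    std::printf("fill %d bytes into %s\n", N, tdst);

    // The old non-separable path used maxWidth * esz == 8, leaving the
    // last ksizeW - 1 == 4 border bytes stale at the row's right edge.
    return 0;
}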
mmm a / BUILD <nl> ppp b / BUILD <nl> grpc_cc_library ( <nl> ] , <nl> hdrs = [ <nl> " third_party / objective_c / Cronet / bidirectional_stream_c . h " , <nl> + " src / core / ext / transport / cronet / transport / cronet_transport . h " , <nl> ] , <nl> language = " c " , <nl> public_hdrs = [ <nl> mmm a / build . yaml <nl> ppp b / build . yaml <nl> filegroups : <nl> - include / grpc / grpc_security . h <nl> - include / grpc / grpc_security_constants . h <nl> headers : <nl> + - src / core / ext / transport / cronet / transport / cronet_transport . h <nl> - third_party / objective_c / Cronet / bidirectional_stream_c . h <nl> src : <nl> - src / core / ext / transport / cronet / client / secure / cronet_channel_create . c <nl> mmm a / gRPC - Core . podspec <nl> ppp b / gRPC - Core . podspec <nl> Pod : : Spec . new do | s | <nl> <nl> s . subspec ' Cronet - Interface ' do | ss | <nl> ss . header_mappings_dir = ' include / grpc ' <nl> - ss . source_files = ' include / grpc / grpc_cronet . h ' <nl> + ss . source_files = ' include / grpc / grpc_cronet . h ' , <nl> + ' src / core / ext / transport / cronet / transport / cronet_transport . h ' <nl> end <nl> <nl> s . subspec ' Cronet - Implementation ' do | ss | <nl> mmm a / include / grpc / impl / codegen / grpc_types . h <nl> ppp b / include / grpc / impl / codegen / grpc_types . h <nl> typedef struct { <nl> # define GRPC_ARG_LB_POLICY_NAME " grpc . lb_policy_name " <nl> / * * The grpc_socket_mutator instance that set the socket options . A pointer . * / <nl> # define GRPC_ARG_SOCKET_MUTATOR " grpc . socket_mutator " <nl> + / * * If non - zero , Cronet transport will coalesce packets to fewer frames when <nl> + * possible . * / <nl> + # define GRPC_ARG_USE_CRONET_PACKET_COALESCING \ <nl> + " grpc . use_cronet_packet_coalescing " <nl> / * * \ } * / <nl> <nl> / * * Result of a grpc call . If the caller satisfies the prerequisites of a <nl> mmm a / src / core / ext / transport / cronet / client / secure / cronet_channel_create . c <nl> ppp b / src / core / ext / transport / cronet / client / secure / cronet_channel_create . c <nl> <nl> # include < grpc / support / alloc . h > <nl> # include < grpc / support / log . h > <nl> <nl> + # include " src / core / ext / transport / cronet / transport / cronet_transport . h " <nl> # include " src / core / lib / surface / channel . h " <nl> # include " src / core / lib / transport / transport_impl . h " <nl> <nl> extern grpc_transport_vtable grpc_cronet_vtable ; <nl> GRPCAPI grpc_channel * grpc_cronet_secure_channel_create ( <nl> void * engine , const char * target , const grpc_channel_args * args , <nl> void * reserved ) { <nl> - cronet_transport * ct = gpr_malloc ( sizeof ( cronet_transport ) ) ; <nl> - ct - > base . vtable = & grpc_cronet_vtable ; <nl> - ct - > engine = engine ; <nl> - ct - > host = gpr_malloc ( strlen ( target ) + 1 ) ; <nl> - strcpy ( ct - > host , target ) ; <nl> gpr_log ( GPR_DEBUG , <nl> " grpc_create_cronet_transport : stream_engine = % p , target = % s " , engine , <nl> - ct - > host ) ; <nl> + target ) ; <nl> + <nl> + grpc_transport * ct = <nl> + grpc_create_cronet_transport ( engine , target , args , reserved ) ; <nl> <nl> grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT ; <nl> return grpc_channel_create ( & exec_ctx , target , args , <nl> - GRPC_CLIENT_DIRECT_CHANNEL , ( grpc_transport * ) ct ) ; <nl> + GRPC_CLIENT_DIRECT_CHANNEL , ct ) ; <nl> } <nl> mmm a / src / core / ext / transport / cronet / transport / cronet_api_dummy . 
c <nl> ppp b / src / core / ext / transport / cronet / transport / cronet_api_dummy . c <nl> void bidirectional_stream_cancel ( bidirectional_stream * stream ) { <nl> GPR_ASSERT ( 0 ) ; <nl> } <nl> <nl> + void bidirectional_stream_disable_auto_flush ( bidirectional_stream * stream , <nl> + bool disable_auto_flush ) { <nl> + GPR_ASSERT ( 0 ) ; <nl> + } <nl> + <nl> + void bidirectional_stream_delay_request_headers_until_flush ( <nl> + bidirectional_stream * stream , bool delay_headers_until_flush ) { <nl> + GPR_ASSERT ( 0 ) ; <nl> + } <nl> + <nl> + void bidirectional_stream_flush ( bidirectional_stream * stream ) { GPR_ASSERT ( 0 ) ; } <nl> + <nl> # endif / * GRPC_COMPILE_WITH_CRONET * / <nl> mmm a / src / core / ext / transport / cronet / transport / cronet_transport . c <nl> ppp b / src / core / ext / transport / cronet / transport / cronet_transport . c <nl> enum e_op_id { <nl> <nl> / * Cronet callbacks . See cronet_c_for_grpc . h for documentation for each . * / <nl> <nl> - static void on_request_headers_sent ( bidirectional_stream * ) ; <nl> + static void on_stream_ready ( bidirectional_stream * ) ; <nl> static void on_response_headers_received ( <nl> bidirectional_stream * , const bidirectional_stream_header_array * , <nl> const char * ) ; <nl> static void on_succeeded ( bidirectional_stream * ) ; <nl> static void on_failed ( bidirectional_stream * , int ) ; <nl> static void on_canceled ( bidirectional_stream * ) ; <nl> static bidirectional_stream_callback cronet_callbacks = { <nl> - on_request_headers_sent , <nl> + on_stream_ready , <nl> on_response_headers_received , <nl> on_read_completed , <nl> on_write_completed , <nl> struct grpc_cronet_transport { <nl> grpc_transport base ; / * must be first element in this structure * / <nl> stream_engine * engine ; <nl> char * host ; <nl> + bool use_packet_coalescing ; <nl> } ; <nl> typedef struct grpc_cronet_transport grpc_cronet_transport ; <nl> <nl> struct op_state { <nl> bool state_callback_received [ OP_NUM_OPS ] ; <nl> bool fail_state ; <nl> bool flush_read ; <nl> + bool flush_cronet_when_ready ; <nl> + bool pending_write_for_trailer ; <nl> + bool unprocessed_send_message ; <nl> grpc_error * cancel_error ; <nl> / * data structure for storing data coming from server * / <nl> struct read_state rs ; <nl> struct op_storage { <nl> struct stream_obj { <nl> struct op_and_state * oas ; <nl> grpc_transport_stream_op * curr_op ; <nl> - grpc_cronet_transport curr_ct ; <nl> + grpc_cronet_transport * curr_ct ; <nl> grpc_stream * curr_gs ; <nl> bidirectional_stream * cbs ; <nl> bidirectional_stream_header_array header_array ; <nl> static void add_to_storage ( struct stream_obj * s , grpc_transport_stream_op * op ) { <nl> new_op - > next = storage - > head ; <nl> storage - > head = new_op ; <nl> storage - > num_pending_ops + + ; <nl> + if ( op - > send_message ) { <nl> + s - > state . unprocessed_send_message = true ; <nl> + } <nl> CRONET_LOG ( GPR_DEBUG , " adding new op % p . % d in the queue . 
" , new_op , <nl> storage - > num_pending_ops ) ; <nl> gpr_mu_unlock ( & s - > mu ) ; <nl> static void on_succeeded ( bidirectional_stream * stream ) { <nl> / * <nl> Cronet callback <nl> * / <nl> - static void on_request_headers_sent ( bidirectional_stream * stream ) { <nl> - CRONET_LOG ( GPR_DEBUG , " W : on_request_headers_sent ( % p ) " , stream ) ; <nl> + static void on_stream_ready ( bidirectional_stream * stream ) { <nl> + CRONET_LOG ( GPR_DEBUG , " W : on_stream_ready ( % p ) " , stream ) ; <nl> stream_obj * s = ( stream_obj * ) stream - > annotation ; <nl> + grpc_cronet_transport * t = ( grpc_cronet_transport * ) s - > curr_ct ; <nl> gpr_mu_lock ( & s - > mu ) ; <nl> s - > state . state_op_done [ OP_SEND_INITIAL_METADATA ] = true ; <nl> s - > state . state_callback_received [ OP_SEND_INITIAL_METADATA ] = true ; <nl> static void on_request_headers_sent ( bidirectional_stream * stream ) { <nl> gpr_free ( s - > header_array . headers ) ; <nl> s - > header_array . headers = NULL ; <nl> } <nl> + / * Send the initial metadata on wire if there is no SEND_MESSAGE or <nl> + * SEND_TRAILING_METADATA ops pending * / <nl> + if ( t - > use_packet_coalescing ) { <nl> + if ( s - > state . flush_cronet_when_ready ) { <nl> + CRONET_LOG ( GPR_DEBUG , " cronet_bidirectional_stream_flush ( % p ) " , s - > cbs ) ; <nl> + bidirectional_stream_flush ( stream ) ; <nl> + } <nl> + } <nl> gpr_mu_unlock ( & s - > mu ) ; <nl> execute_from_storage ( s ) ; <nl> } <nl> static void on_response_trailers_received ( <nl> CRONET_LOG ( GPR_DEBUG , " R : on_response_trailers_received ( % p , % p ) " , stream , <nl> trailers ) ; <nl> stream_obj * s = ( stream_obj * ) stream - > annotation ; <nl> + grpc_cronet_transport * t = ( grpc_cronet_transport * ) s - > curr_ct ; <nl> gpr_mu_lock ( & s - > mu ) ; <nl> memset ( & s - > state . rs . trailing_metadata , 0 , <nl> sizeof ( s - > state . rs . trailing_metadata ) ) ; <nl> static void on_response_trailers_received ( <nl> CRONET_LOG ( GPR_DEBUG , " bidirectional_stream_write ( % p , 0 ) " , s - > cbs ) ; <nl> s - > state . state_callback_received [ OP_SEND_MESSAGE ] = false ; <nl> bidirectional_stream_write ( s - > cbs , " " , 0 , true ) ; <nl> + if ( t - > use_packet_coalescing ) { <nl> + CRONET_LOG ( GPR_DEBUG , " bidirectional_stream_flush ( % p ) " , s - > cbs ) ; <nl> + bidirectional_stream_flush ( s - > cbs ) ; <nl> + } <nl> s - > state . state_op_done [ OP_SEND_TRAILING_METADATA ] = true ; <nl> <nl> gpr_mu_unlock ( & s - > mu ) ; <nl> static void convert_metadata_to_cronet_headers ( <nl> curr = curr - > next ; <nl> num_headers_available + + ; <nl> } <nl> - / * Allocate enough memory . It is freed in the on_request_headers_sent callback <nl> + / * Allocate enough memory . It is freed in the on_stream_ready callback <nl> * / <nl> bidirectional_stream_header * headers = <nl> ( bidirectional_stream_header * ) gpr_malloc ( <nl> static bool header_has_authority ( grpc_linked_mdelem * head ) { <nl> executed . This is the heart of the state machine . 
<nl> * / <nl> static bool op_can_be_run ( grpc_transport_stream_op * curr_op , <nl> - struct op_state * stream_state , <nl> - struct op_state * op_state , enum e_op_id op_id ) { <nl> + struct stream_obj * s , struct op_state * op_state , <nl> + enum e_op_id op_id ) { <nl> + struct op_state * stream_state = & s - > state ; <nl> + grpc_cronet_transport * t = s - > curr_ct ; <nl> bool result = true ; <nl> / * When call is canceled , every op can be run , except under following <nl> conditions <nl> static bool op_can_be_run ( grpc_transport_stream_op * curr_op , <nl> else if ( ! stream_state - > state_callback_received [ OP_SEND_INITIAL_METADATA ] ) <nl> result = false ; <nl> / * we haven ' t sent message yet * / <nl> - else if ( curr_op - > send_message & & <nl> + else if ( stream_state - > unprocessed_send_message & & <nl> ! stream_state - > state_op_done [ OP_SEND_MESSAGE ] ) <nl> result = false ; <nl> / * we haven ' t got on_write_completed for the send yet * / <nl> else if ( stream_state - > state_op_done [ OP_SEND_MESSAGE ] & & <nl> - ! stream_state - > state_callback_received [ OP_SEND_MESSAGE ] ) <nl> + ! stream_state - > state_callback_received [ OP_SEND_MESSAGE ] & & <nl> + ! ( t - > use_packet_coalescing & & <nl> + stream_state - > pending_write_for_trailer ) ) <nl> result = false ; <nl> } else if ( op_id = = OP_CANCEL_ERROR ) { <nl> / * already executed * / <nl> static enum e_op_result execute_stream_op ( grpc_exec_ctx * exec_ctx , <nl> struct op_and_state * oas ) { <nl> grpc_transport_stream_op * stream_op = & oas - > op ; <nl> struct stream_obj * s = oas - > s ; <nl> + grpc_cronet_transport * t = ( grpc_cronet_transport * ) s - > curr_ct ; <nl> struct op_state * stream_state = & s - > state ; <nl> enum e_op_result result = NO_ACTION_POSSIBLE ; <nl> if ( stream_op - > send_initial_metadata & & <nl> - op_can_be_run ( stream_op , stream_state , & oas - > state , <nl> - OP_SEND_INITIAL_METADATA ) ) { <nl> + op_can_be_run ( stream_op , s , & oas - > state , OP_SEND_INITIAL_METADATA ) ) { <nl> CRONET_LOG ( GPR_DEBUG , " running : % p OP_SEND_INITIAL_METADATA " , oas ) ; <nl> / * Start new cronet stream . It is destroyed in on_succeeded , on_canceled , <nl> * on_failed * / <nl> GPR_ASSERT ( s - > cbs = = NULL ) ; <nl> GPR_ASSERT ( ! stream_state - > state_op_done [ OP_SEND_INITIAL_METADATA ] ) ; <nl> - s - > cbs = bidirectional_stream_create ( s - > curr_ct . engine , s - > curr_gs , <nl> - & cronet_callbacks ) ; <nl> + s - > cbs = <nl> + bidirectional_stream_create ( t - > engine , s - > curr_gs , & cronet_callbacks ) ; <nl> CRONET_LOG ( GPR_DEBUG , " % p = bidirectional_stream_create ( ) " , s - > cbs ) ; <nl> + if ( t - > use_packet_coalescing ) { <nl> + bidirectional_stream_disable_auto_flush ( s - > cbs , true ) ; <nl> + bidirectional_stream_delay_request_headers_until_flush ( s - > cbs , true ) ; <nl> + } <nl> char * url = NULL ; <nl> const char * method = " POST " ; <nl> s - > header_array . headers = NULL ; <nl> convert_metadata_to_cronet_headers ( <nl> - stream_op - > send_initial_metadata - > list . head , s - > curr_ct . host , & url , <nl> + stream_op - > send_initial_metadata - > list . head , t - > host , & url , <nl> & s - > header_array . headers , & s - > header_array . count , & method ) ; <nl> s - > header_array . capacity = s - > header_array . 
count ; <nl> CRONET_LOG ( GPR_DEBUG , " bidirectional_stream_start ( % p , % s ) " , s - > cbs , url ) ; <nl> static enum e_op_result execute_stream_op ( grpc_exec_ctx * exec_ctx , <nl> gpr_free ( ( void * ) s - > header_array . headers [ header_index ] . value ) ; <nl> } <nl> stream_state - > state_op_done [ OP_SEND_INITIAL_METADATA ] = true ; <nl> - result = ACTION_TAKEN_WITH_CALLBACK ; <nl> - } else if ( stream_op - > recv_initial_metadata & & <nl> - op_can_be_run ( stream_op , stream_state , & oas - > state , <nl> - OP_RECV_INITIAL_METADATA ) ) { <nl> - CRONET_LOG ( GPR_DEBUG , " running : % p OP_RECV_INITIAL_METADATA " , oas ) ; <nl> - if ( stream_state - > state_op_done [ OP_CANCEL_ERROR ] ) { <nl> - grpc_closure_sched ( exec_ctx , stream_op - > recv_initial_metadata_ready , <nl> - GRPC_ERROR_NONE ) ; <nl> - } else if ( stream_state - > state_callback_received [ OP_FAILED ] ) { <nl> - grpc_closure_sched ( exec_ctx , stream_op - > recv_initial_metadata_ready , <nl> - GRPC_ERROR_NONE ) ; <nl> - } else { <nl> - grpc_chttp2_incoming_metadata_buffer_publish ( <nl> - exec_ctx , & oas - > s - > state . rs . initial_metadata , <nl> - stream_op - > recv_initial_metadata ) ; <nl> - grpc_closure_sched ( exec_ctx , stream_op - > recv_initial_metadata_ready , <nl> - GRPC_ERROR_NONE ) ; <nl> + if ( t - > use_packet_coalescing ) { <nl> + if ( ! stream_op - > send_message & & ! stream_op - > send_trailing_metadata ) { <nl> + s - > state . flush_cronet_when_ready = true ; <nl> + } <nl> } <nl> - stream_state - > state_op_done [ OP_RECV_INITIAL_METADATA ] = true ; <nl> - result = ACTION_TAKEN_NO_CALLBACK ; <nl> + result = ACTION_TAKEN_WITH_CALLBACK ; <nl> } else if ( stream_op - > send_message & & <nl> - op_can_be_run ( stream_op , stream_state , & oas - > state , <nl> - OP_SEND_MESSAGE ) ) { <nl> + op_can_be_run ( stream_op , s , & oas - > state , OP_SEND_MESSAGE ) ) { <nl> CRONET_LOG ( GPR_DEBUG , " running : % p OP_SEND_MESSAGE " , oas ) ; <nl> + stream_state - > unprocessed_send_message = false ; <nl> if ( stream_state - > state_callback_received [ OP_FAILED ] ) { <nl> result = NO_ACTION_POSSIBLE ; <nl> CRONET_LOG ( GPR_DEBUG , " Stream is either cancelled or failed . " ) ; <nl> static enum e_op_result execute_stream_op ( grpc_exec_ctx * exec_ctx , <nl> stream_state - > state_callback_received [ OP_SEND_MESSAGE ] = false ; <nl> bidirectional_stream_write ( s - > cbs , stream_state - > ws . write_buffer , <nl> ( int ) write_buffer_size , false ) ; <nl> - result = ACTION_TAKEN_WITH_CALLBACK ; <nl> + if ( t - > use_packet_coalescing ) { <nl> + if ( ! stream_op - > send_trailing_metadata ) { <nl> + CRONET_LOG ( GPR_DEBUG , " bidirectional_stream_flush ( % p ) " , s - > cbs ) ; <nl> + bidirectional_stream_flush ( s - > cbs ) ; <nl> + result = ACTION_TAKEN_WITH_CALLBACK ; <nl> + } else { <nl> + stream_state - > pending_write_for_trailer = true ; <nl> + result = ACTION_TAKEN_NO_CALLBACK ; <nl> + } <nl> + } else { <nl> + result = ACTION_TAKEN_WITH_CALLBACK ; <nl> + } <nl> } else { <nl> result = NO_ACTION_POSSIBLE ; <nl> } <nl> } <nl> stream_state - > state_op_done [ OP_SEND_MESSAGE ] = true ; <nl> oas - > state . 
state_op_done [ OP_SEND_MESSAGE ] = true ; <nl> + } else if ( stream_op - > send_trailing_metadata & & <nl> + op_can_be_run ( stream_op , s , & oas - > state , <nl> + OP_SEND_TRAILING_METADATA ) ) { <nl> + CRONET_LOG ( GPR_DEBUG , " running : % p OP_SEND_TRAILING_METADATA " , oas ) ; <nl> + if ( stream_state - > state_callback_received [ OP_FAILED ] ) { <nl> + result = NO_ACTION_POSSIBLE ; <nl> + CRONET_LOG ( GPR_DEBUG , " Stream is either cancelled or failed . " ) ; <nl> + } else { <nl> + CRONET_LOG ( GPR_DEBUG , " bidirectional_stream_write ( % p , 0 ) " , s - > cbs ) ; <nl> + stream_state - > state_callback_received [ OP_SEND_MESSAGE ] = false ; <nl> + bidirectional_stream_write ( s - > cbs , " " , 0 , true ) ; <nl> + if ( t - > use_packet_coalescing ) { <nl> + CRONET_LOG ( GPR_DEBUG , " bidirectional_stream_flush ( % p ) " , s - > cbs ) ; <nl> + bidirectional_stream_flush ( s - > cbs ) ; <nl> + } <nl> + result = ACTION_TAKEN_WITH_CALLBACK ; <nl> + } <nl> + stream_state - > state_op_done [ OP_SEND_TRAILING_METADATA ] = true ; <nl> + } else if ( stream_op - > recv_initial_metadata & & <nl> + op_can_be_run ( stream_op , s , & oas - > state , <nl> + OP_RECV_INITIAL_METADATA ) ) { <nl> + CRONET_LOG ( GPR_DEBUG , " running : % p OP_RECV_INITIAL_METADATA " , oas ) ; <nl> + if ( stream_state - > state_op_done [ OP_CANCEL_ERROR ] ) { <nl> + grpc_closure_sched ( exec_ctx , stream_op - > recv_initial_metadata_ready , <nl> + GRPC_ERROR_NONE ) ; <nl> + } else if ( stream_state - > state_callback_received [ OP_FAILED ] ) { <nl> + grpc_closure_sched ( exec_ctx , stream_op - > recv_initial_metadata_ready , <nl> + GRPC_ERROR_NONE ) ; <nl> + } else { <nl> + grpc_chttp2_incoming_metadata_buffer_publish ( <nl> + exec_ctx , & oas - > s - > state . rs . initial_metadata , <nl> + stream_op - > recv_initial_metadata ) ; <nl> + grpc_closure_sched ( exec_ctx , stream_op - > recv_initial_metadata_ready , <nl> + GRPC_ERROR_NONE ) ; <nl> + } <nl> + stream_state - > state_op_done [ OP_RECV_INITIAL_METADATA ] = true ; <nl> + result = ACTION_TAKEN_NO_CALLBACK ; <nl> } else if ( stream_op - > recv_message & & <nl> - op_can_be_run ( stream_op , stream_state , & oas - > state , <nl> - OP_RECV_MESSAGE ) ) { <nl> + op_can_be_run ( stream_op , s , & oas - > state , OP_RECV_MESSAGE ) ) { <nl> CRONET_LOG ( GPR_DEBUG , " running : % p OP_RECV_MESSAGE " , oas ) ; <nl> if ( stream_state - > state_op_done [ OP_CANCEL_ERROR ] ) { <nl> CRONET_LOG ( GPR_DEBUG , " Stream is cancelled . " ) ; <nl> static enum e_op_result execute_stream_op ( grpc_exec_ctx * exec_ctx , <nl> GRPC_ERROR_NONE ) ; <nl> stream_state - > state_op_done [ OP_RECV_MESSAGE ] = true ; <nl> oas - > state . state_op_done [ OP_RECV_MESSAGE ] = true ; <nl> + <nl> + / * Extra read to trigger on_succeed * / <nl> + stream_state - > rs . read_buffer = stream_state - > rs . grpc_header_bytes ; <nl> + stream_state - > rs . remaining_bytes = GRPC_HEADER_SIZE_IN_BYTES ; <nl> + stream_state - > rs . received_bytes = 0 ; <nl> + CRONET_LOG ( GPR_DEBUG , " bidirectional_stream_read ( % p ) " , s - > cbs ) ; <nl> + stream_state - > state_op_done [ OP_READ_REQ_MADE ] = <nl> + true ; / * Indicates that at least one read request has been made * / <nl> + bidirectional_stream_read ( s - > cbs , stream_state - > rs . read_buffer , <nl> + stream_state - > rs . remaining_bytes ) ; <nl> result = ACTION_TAKEN_NO_CALLBACK ; <nl> } <nl> } else if ( stream_state - > rs . 
remaining_bytes = = 0 ) { <nl> static enum e_op_result execute_stream_op ( grpc_exec_ctx * exec_ctx , <nl> result = ACTION_TAKEN_NO_CALLBACK ; <nl> } <nl> } else if ( stream_op - > recv_trailing_metadata & & <nl> - op_can_be_run ( stream_op , stream_state , & oas - > state , <nl> + op_can_be_run ( stream_op , s , & oas - > state , <nl> OP_RECV_TRAILING_METADATA ) ) { <nl> CRONET_LOG ( GPR_DEBUG , " running : % p OP_RECV_TRAILING_METADATA " , oas ) ; <nl> if ( oas - > s - > state . rs . trailing_metadata_valid ) { <nl> static enum e_op_result execute_stream_op ( grpc_exec_ctx * exec_ctx , <nl> } <nl> stream_state - > state_op_done [ OP_RECV_TRAILING_METADATA ] = true ; <nl> result = ACTION_TAKEN_NO_CALLBACK ; <nl> - } else if ( stream_op - > send_trailing_metadata & & <nl> - op_can_be_run ( stream_op , stream_state , & oas - > state , <nl> - OP_SEND_TRAILING_METADATA ) ) { <nl> - CRONET_LOG ( GPR_DEBUG , " running : % p OP_SEND_TRAILING_METADATA " , oas ) ; <nl> - if ( stream_state - > state_callback_received [ OP_FAILED ] ) { <nl> - result = NO_ACTION_POSSIBLE ; <nl> - CRONET_LOG ( GPR_DEBUG , " Stream is either cancelled or failed . " ) ; <nl> - } else { <nl> - CRONET_LOG ( GPR_DEBUG , " bidirectional_stream_write ( % p , 0 ) " , s - > cbs ) ; <nl> - stream_state - > state_callback_received [ OP_SEND_MESSAGE ] = false ; <nl> - bidirectional_stream_write ( s - > cbs , " " , 0 , true ) ; <nl> - result = ACTION_TAKEN_WITH_CALLBACK ; <nl> - } <nl> - stream_state - > state_op_done [ OP_SEND_TRAILING_METADATA ] = true ; <nl> } else if ( stream_op - > cancel_error & & <nl> - op_can_be_run ( stream_op , stream_state , & oas - > state , <nl> - OP_CANCEL_ERROR ) ) { <nl> + op_can_be_run ( stream_op , s , & oas - > state , OP_CANCEL_ERROR ) ) { <nl> CRONET_LOG ( GPR_DEBUG , " running : % p OP_CANCEL_ERROR " , oas ) ; <nl> CRONET_LOG ( GPR_DEBUG , " W : bidirectional_stream_cancel ( % p ) " , s - > cbs ) ; <nl> if ( s - > cbs ) { <nl> static enum e_op_result execute_stream_op ( grpc_exec_ctx * exec_ctx , <nl> stream_state - > cancel_error = GRPC_ERROR_REF ( stream_op - > cancel_error ) ; <nl> } <nl> } else if ( stream_op - > on_complete & & <nl> - op_can_be_run ( stream_op , stream_state , & oas - > state , <nl> - OP_ON_COMPLETE ) ) { <nl> + op_can_be_run ( stream_op , s , & oas - > state , OP_ON_COMPLETE ) ) { <nl> CRONET_LOG ( GPR_DEBUG , " running : % p OP_ON_COMPLETE " , oas ) ; <nl> if ( stream_state - > state_op_done [ OP_CANCEL_ERROR ] ) { <nl> grpc_closure_sched ( exec_ctx , stream_op - > on_complete , <nl> static int init_stream ( grpc_exec_ctx * exec_ctx , grpc_transport * gt , <nl> sizeof ( s - > state . state_callback_received ) ) ; <nl> s - > state . fail_state = s - > state . flush_read = false ; <nl> s - > state . cancel_error = NULL ; <nl> + s - > state . flush_cronet_when_ready = s - > state . pending_write_for_trailer = false ; <nl> + s - > state . 
unprocessed_send_message = false ; <nl> + <nl> + s - > curr_gs = gs ; <nl> + s - > curr_ct = ( grpc_cronet_transport * ) gt ; <nl> + <nl> gpr_mu_init ( & s - > mu ) ; <nl> return 0 ; <nl> } <nl> static void perform_stream_op ( grpc_exec_ctx * exec_ctx , grpc_transport * gt , <nl> grpc_stream * gs , grpc_transport_stream_op * op ) { <nl> CRONET_LOG ( GPR_DEBUG , " perform_stream_op " ) ; <nl> stream_obj * s = ( stream_obj * ) gs ; <nl> - s - > curr_gs = gs ; <nl> - memcpy ( & s - > curr_ct , gt , sizeof ( grpc_cronet_transport ) ) ; <nl> add_to_storage ( s , op ) ; <nl> if ( op - > send_initial_metadata & & <nl> header_has_authority ( op - > send_initial_metadata - > list . head ) ) { <nl> static grpc_endpoint * get_endpoint ( grpc_exec_ctx * exec_ctx , <nl> static void perform_op ( grpc_exec_ctx * exec_ctx , grpc_transport * gt , <nl> grpc_transport_op * op ) { } <nl> <nl> - const grpc_transport_vtable grpc_cronet_vtable = { sizeof ( stream_obj ) , <nl> - " cronet_http " , <nl> - init_stream , <nl> - set_pollset_do_nothing , <nl> - set_pollset_set_do_nothing , <nl> - perform_stream_op , <nl> - perform_op , <nl> - destroy_stream , <nl> - destroy_transport , <nl> - get_peer , <nl> - get_endpoint } ; <nl> + static const grpc_transport_vtable grpc_cronet_vtable = { <nl> + sizeof ( stream_obj ) , <nl> + " cronet_http " , <nl> + init_stream , <nl> + set_pollset_do_nothing , <nl> + set_pollset_set_do_nothing , <nl> + perform_stream_op , <nl> + perform_op , <nl> + destroy_stream , <nl> + destroy_transport , <nl> + get_peer , <nl> + get_endpoint } ; <nl> + <nl> + grpc_transport * grpc_create_cronet_transport ( void * engine , const char * target , <nl> + const grpc_channel_args * args , <nl> + void * reserved ) { <nl> + grpc_cronet_transport * ct = gpr_malloc ( sizeof ( grpc_cronet_transport ) ) ; <nl> + if ( ! ct ) { <nl> + goto error ; <nl> + } <nl> + ct - > base . vtable = & grpc_cronet_vtable ; <nl> + ct - > engine = engine ; <nl> + ct - > host = gpr_malloc ( strlen ( target ) + 1 ) ; <nl> + if ( ! ct - > host ) { <nl> + goto error ; <nl> + } <nl> + strcpy ( ct - > host , target ) ; <nl> + <nl> + ct - > use_packet_coalescing = true ; <nl> + if ( args ) { <nl> + for ( size_t i = 0 ; i < args - > num_args ; i + + ) { <nl> + if ( 0 = = <nl> + strcmp ( args - > args [ i ] . key , GRPC_ARG_USE_CRONET_PACKET_COALESCING ) ) { <nl> + if ( args - > args [ i ] . type ! = GRPC_ARG_INTEGER ) { <nl> + gpr_log ( GPR_ERROR , " % s ignored : it must be an integer " , <nl> + GRPC_ARG_USE_CRONET_PACKET_COALESCING ) ; <nl> + } else { <nl> + ct - > use_packet_coalescing = ( args - > args [ i ] . value . integer ! = 0 ) ; <nl> + } <nl> + } <nl> + } <nl> + } <nl> + <nl> + return & ct - > base ; <nl> + <nl> + error : <nl> + if ( ct ) { <nl> + if ( ct - > host ) { <nl> + gpr_free ( ct - > host ) ; <nl> + } <nl> + gpr_free ( ct ) ; <nl> + } <nl> + <nl> + return NULL ; <nl> + } <nl> new file mode 100644 <nl> index 00000000000 . . 169ce31fd7d <nl> mmm / dev / null <nl> ppp b / src / core / ext / transport / cronet / transport / cronet_transport . h <nl> <nl> + / * <nl> + * <nl> + * Copyright 2016 , Google Inc . <nl> + * All rights reserved . <nl> + * <nl> + * Redistribution and use in source and binary forms , with or without <nl> + * modification , are permitted provided that the following conditions are <nl> + * met : <nl> + * <nl> + * * Redistributions of source code must retain the above copyright <nl> + * notice , this list of conditions and the following disclaimer . 
<nl> + * * Redistributions in binary form must reproduce the above <nl> + * copyright notice , this list of conditions and the following disclaimer <nl> + * in the documentation and / or other materials provided with the <nl> + * distribution . <nl> + * * Neither the name of Google Inc . nor the names of its <nl> + * contributors may be used to endorse or promote products derived from <nl> + * this software without specific prior written permission . <nl> + * <nl> + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS <nl> + * " AS IS " AND ANY EXPRESS OR IMPLIED WARRANTIES , INCLUDING , BUT NOT <nl> + * LIMITED TO , THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR <nl> + * A PARTICULAR PURPOSE ARE DISCLAIMED . IN NO EVENT SHALL THE COPYRIGHT <nl> + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT , INDIRECT , INCIDENTAL , <nl> + * SPECIAL , EXEMPLARY , OR CONSEQUENTIAL DAMAGES ( INCLUDING , BUT NOT <nl> + * LIMITED TO , PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES ; LOSS OF USE , <nl> + * DATA , OR PROFITS ; OR BUSINESS INTERRUPTION ) HOWEVER CAUSED AND ON ANY <nl> + * THEORY OF LIABILITY , WHETHER IN CONTRACT , STRICT LIABILITY , OR TORT <nl> + * ( INCLUDING NEGLIGENCE OR OTHERWISE ) ARISING IN ANY WAY OUT OF THE USE <nl> + * OF THIS SOFTWARE , EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE . <nl> + * <nl> + * / <nl> + <nl> + # ifndef GRPC_CORE_EXT_TRANSPORT_CRONET_TRANSPORT_CRONET_TRANSPORT_H <nl> + # define GRPC_CORE_EXT_TRANSPORT_CRONET_TRANSPORT_CRONET_TRANSPORT_H <nl> + <nl> + # include " src / core / lib / transport / transport . h " <nl> + <nl> + grpc_transport * grpc_create_cronet_transport ( void * engine , const char * target , <nl> + const grpc_channel_args * args , <nl> + void * reserved ) ; <nl> + <nl> + # endif / * GRPC_CORE_EXT_TRANSPORT_CRONET_TRANSPORT_CRONET_TRANSPORT_H * / <nl> mmm a / src / objective - c / tests / CronetUnitTests / CronetUnitTests . m <nl> ppp b / src / objective - c / tests / CronetUnitTests / CronetUnitTests . m <nl> <nl> * / <nl> <nl> # import < XCTest / XCTest . h > <nl> - # import < sys / socket . h > <nl> # import < netinet / in . h > <nl> + # import < sys / socket . h > <nl> <nl> # import < Cronet / Cronet . h > <nl> - # import < grpc / support / host_port . h > <nl> - # import < grpc / grpc_cronet . h > <nl> # import < grpc / grpc . h > <nl> + # import < grpc / grpc_cronet . h > <nl> + # import < grpc / support / host_port . h > <nl> # import " test / core / end2end / cq_verifier . h " <nl> # import " test / core / util / port . h " <nl> <nl> <nl> # import " src / core / lib / support / env . h " <nl> # import " src / core / lib / support / string . h " <nl> # import " src / core / lib / support / tmpfile . h " <nl> + # import " test / core / end2end / data / ssl_test_data . h " <nl> # import " test / core / util / test_config . h " <nl> <nl> + # import < BoringSSL / openssl / ssl . h > <nl> + <nl> static void drain_cq ( grpc_completion_queue * cq ) { <nl> grpc_event ev ; <nl> do { <nl> - ev = grpc_completion_queue_next ( cq , grpc_timeout_seconds_to_deadline ( 5 ) , NULL ) ; <nl> + ev = grpc_completion_queue_next ( cq , grpc_timeout_seconds_to_deadline ( 5 ) , <nl> + NULL ) ; <nl> } while ( ev . type ! 
= GRPC_QUEUE_SHUTDOWN ) ; <nl> } <nl> <nl> - <nl> @ interface CronetUnitTests : XCTestCase <nl> <nl> @ end <nl> @ implementation CronetUnitTests <nl> + ( void ) setUp { <nl> [ super setUp ] ; <nl> <nl> - / * * * FILE * roots_file ; <nl> - size_t roots_size = strlen ( test_root_cert ) ; * / <nl> - <nl> char * argv [ ] = { " CoreCronetEnd2EndTests " } ; <nl> grpc_test_init ( 1 , argv ) ; <nl> <nl> grpc_init ( ) ; <nl> <nl> [ Cronet setHttp2Enabled : YES ] ; <nl> + [ Cronet setSslKeyLogFileName : @ " Documents / key " ] ; <nl> + [ Cronet enableTestCertVerifierForTesting ] ; <nl> NSURL * url = [ [ [ NSFileManager defaultManager ] <nl> - URLsForDirectory : NSDocumentDirectory <nl> - inDomains : NSUserDomainMask ] lastObject ] ; <nl> + URLsForDirectory : NSDocumentDirectory <nl> + inDomains : NSUserDomainMask ] lastObject ] ; <nl> NSLog ( @ " Documents directory : % @ " , url ) ; <nl> [ Cronet start ] ; <nl> [ Cronet startNetLogToFile : @ " Documents / cronet_netlog . json " logBytes : YES ] ; <nl> + <nl> + init_ssl ( ) ; <nl> } <nl> <nl> + ( void ) tearDown { <nl> grpc_shutdown ( ) ; <nl> + cleanup_ssl ( ) ; <nl> <nl> [ super tearDown ] ; <nl> } <nl> <nl> + void init_ssl ( void ) { <nl> + SSL_load_error_strings ( ) ; <nl> + OpenSSL_add_ssl_algorithms ( ) ; <nl> + } <nl> + <nl> + void cleanup_ssl ( void ) { EVP_cleanup ( ) ; } <nl> + <nl> + int alpn_cb ( SSL * ssl , const unsigned char * * out , unsigned char * outlen , <nl> + const unsigned char * in , unsigned int inlen , void * arg ) { <nl> + / / Always select " h2 " as the ALPN protocol to be used <nl> + * out = ( const unsigned char * ) " h2 " ; <nl> + * outlen = 2 ; <nl> + return SSL_TLSEXT_ERR_OK ; <nl> + } <nl> + <nl> + void init_ctx ( SSL_CTX * ctx ) { <nl> + / / Install server certificate <nl> + BIO * pem = BIO_new_mem_buf ( ( void * ) test_server1_cert , <nl> + ( int ) strlen ( test_server1_cert ) ) ; <nl> + X509 * cert = PEM_read_bio_X509_AUX ( pem , NULL , NULL , " " ) ; <nl> + SSL_CTX_use_certificate ( ctx , cert ) ; <nl> + X509_free ( cert ) ; <nl> + BIO_free ( pem ) ; <nl> + <nl> + / / Install server private key <nl> + pem = <nl> + BIO_new_mem_buf ( ( void * ) test_server1_key , ( int ) strlen ( test_server1_key ) ) ; <nl> + EVP_PKEY * key = PEM_read_bio_PrivateKey ( pem , NULL , NULL , " " ) ; <nl> + SSL_CTX_use_PrivateKey ( ctx , key ) ; <nl> + EVP_PKEY_free ( key ) ; <nl> + BIO_free ( pem ) ; <nl> + <nl> + / / Select cipher suite <nl> + SSL_CTX_set_cipher_list ( ctx , " ECDHE - RSA - AES128 - GCM - SHA256 " ) ; <nl> + <nl> + / / Select ALPN protocol <nl> + SSL_CTX_set_alpn_select_cb ( ctx , alpn_cb , NULL ) ; <nl> + } <nl> + <nl> + unsigned int parse_h2_length ( const char * field ) { <nl> + return ( ( unsigned int ) ( unsigned char ) ( field [ 0 ] ) ) * 65536 + <nl> + ( ( unsigned int ) ( unsigned char ) ( field [ 1 ] ) ) * 256 + <nl> + ( ( unsigned int ) ( unsigned char ) ( field [ 2 ] ) ) ; <nl> + } <nl> + <nl> - ( void ) testInternalError { <nl> grpc_call * c ; <nl> grpc_slice request_payload_slice = <nl> - grpc_slice_from_copied_string ( " hello world " ) ; <nl> + grpc_slice_from_copied_string ( " hello world " ) ; <nl> grpc_byte_buffer * request_payload = <nl> - grpc_raw_byte_buffer_create ( & request_payload_slice , 1 ) ; <nl> + grpc_raw_byte_buffer_create ( & request_payload_slice , 1 ) ; <nl> gpr_timespec deadline = grpc_timeout_seconds_to_deadline ( 5 ) ; <nl> - grpc_metadata meta_c [ 2 ] = { <nl> - { " key1 " , " val1 " , 4 , 0 , { { NULL , NULL , NULL , NULL } } } , <nl> - { " key2 " , " val2 " , 4 , 0 , { { NULL , 
NULL , NULL , NULL } } } } ; <nl> + grpc_metadata meta_c [ 2 ] = { { grpc_slice_from_static_string ( " key1 " ) , <nl> + grpc_slice_from_static_string ( " val1 " ) , <nl> + 0 , <nl> + { { NULL , NULL , NULL , NULL } } } , <nl> + { grpc_slice_from_static_string ( " key2 " ) , <nl> + grpc_slice_from_static_string ( " val2 " ) , <nl> + 0 , <nl> + { { NULL , NULL , NULL , NULL } } } } ; <nl> <nl> int port = grpc_pick_unused_port_or_die ( ) ; <nl> char * addr ; <nl> gpr_join_host_port ( & addr , " 127 . 0 . 0 . 1 " , port ) ; <nl> grpc_completion_queue * cq = grpc_completion_queue_create ( NULL ) ; <nl> - cronet_engine * cronetEngine = [ Cronet getGlobalEngine ] ; <nl> - grpc_channel * client = grpc_cronet_secure_channel_create ( cronetEngine , addr , <nl> - NULL , NULL ) ; <nl> + stream_engine * cronetEngine = [ Cronet getGlobalEngine ] ; <nl> + grpc_channel * client = <nl> + grpc_cronet_secure_channel_create ( cronetEngine , addr , NULL , NULL ) ; <nl> <nl> cq_verifier * cqv = cq_verifier_create ( cq ) ; <nl> grpc_op ops [ 6 ] ; <nl> - ( void ) testInternalError { <nl> grpc_call_details call_details ; <nl> grpc_status_code status ; <nl> grpc_call_error error ; <nl> - char * details = NULL ; <nl> - size_t details_capacity = 0 ; <nl> + grpc_slice details ; <nl> <nl> - c = grpc_channel_create_call ( <nl> - client , NULL , GRPC_PROPAGATE_DEFAULTS , cq , " / foo " , <nl> - NULL , deadline , NULL ) ; <nl> + c = grpc_channel_create_call ( client , NULL , GRPC_PROPAGATE_DEFAULTS , cq , <nl> + grpc_slice_from_static_string ( " / foo " ) , NULL , <nl> + deadline , NULL ) ; <nl> GPR_ASSERT ( c ) ; <nl> <nl> grpc_metadata_array_init ( & initial_metadata_recv ) ; <nl> - ( void ) testInternalError { <nl> op - > data . recv_status_on_client . trailing_metadata = & trailing_metadata_recv ; <nl> op - > data . recv_status_on_client . status = & status ; <nl> op - > data . recv_status_on_client . status_details = & details ; <nl> - op - > data . recv_status_on_client . status_details_capacity = & details_capacity ; <nl> op - > flags = 0 ; <nl> op - > reserved = NULL ; <nl> op + + ; <nl> - error = grpc_call_start_batch ( c , ops , ( size_t ) ( op - ops ) , ( void * ) 1 , NULL ) ; <nl> + error = grpc_call_start_batch ( c , ops , ( size_t ) ( op - ops ) , ( void * ) 1 , NULL ) ; <nl> GPR_ASSERT ( GRPC_CALL_OK = = error ) ; <nl> <nl> - dispatch_async ( dispatch_get_global_queue ( DISPATCH_QUEUE_PRIORITY_DEFAULT , 0 ) , ^ { <nl> - int sl = socket ( AF_INET , SOCK_STREAM , 0 ) ; <nl> - GPR_ASSERT ( sl > = 0 ) ; <nl> - struct sockaddr_in s_addr ; <nl> - memset ( & s_addr , 0 , sizeof ( s_addr ) ) ; <nl> - s_addr . sin_family = AF_INET ; <nl> - s_addr . sin_addr . s_addr = htonl ( INADDR_ANY ) ; <nl> - s_addr . sin_port = htons ( port ) ; <nl> - bind ( sl , ( struct sockaddr * ) & s_addr , sizeof ( s_addr ) ) ; <nl> - listen ( sl , 5 ) ; <nl> - int s = accept ( sl , NULL , NULL ) ; <nl> - sleep ( 1 ) ; <nl> - close ( s ) ; <nl> - close ( sl ) ; <nl> - } ) ; <nl> - <nl> - CQ_EXPECT_COMPLETION ( cqv , ( void * ) 1 , 1 ) ; <nl> + dispatch_async ( <nl> + dispatch_get_global_queue ( DISPATCH_QUEUE_PRIORITY_DEFAULT , 0 ) , ^ { <nl> + int sl = socket ( AF_INET , SOCK_STREAM , 0 ) ; <nl> + GPR_ASSERT ( sl > = 0 ) ; <nl> + <nl> + / / Make a TCP endpoint to accept the connection <nl> + struct sockaddr_in s_addr ; <nl> + memset ( & s_addr , 0 , sizeof ( s_addr ) ) ; <nl> + s_addr . sin_family = AF_INET ; <nl> + s_addr . sin_addr . s_addr = htonl ( INADDR_ANY ) ; <nl> + s_addr .
sin_port = htons ( port ) ; <nl> + GPR_ASSERT ( 0 = = bind ( sl , ( struct sockaddr * ) & s_addr , sizeof ( s_addr ) ) ) ; <nl> + GPR_ASSERT ( 0 = = listen ( sl , 5 ) ) ; <nl> + int s = accept ( sl , NULL , NULL ) ; <nl> + GPR_ASSERT ( s > = 0 ) ; <nl> + <nl> + / / Close the connection after 1 second to trigger Cronet ' s on_failed ( ) <nl> + sleep ( 1 ) ; <nl> + close ( s ) ; <nl> + close ( sl ) ; <nl> + } ) ; <nl> + <nl> + CQ_EXPECT_COMPLETION ( cqv , ( void * ) 1 , 1 ) ; <nl> cq_verify ( cqv ) ; <nl> <nl> GPR_ASSERT ( status = = GRPC_STATUS_UNAVAILABLE ) ; <nl> <nl> - gpr_free ( details ) ; <nl> + grpc_slice_unref ( details ) ; <nl> grpc_metadata_array_destroy ( & initial_metadata_recv ) ; <nl> grpc_metadata_array_destroy ( & trailing_metadata_recv ) ; <nl> grpc_metadata_array_destroy ( & request_metadata_recv ) ; <nl> - ( void ) testInternalError { <nl> <nl> grpc_byte_buffer_destroy ( request_payload ) ; <nl> grpc_byte_buffer_destroy ( response_payload_recv ) ; <nl> - <nl> + <nl> + grpc_channel_destroy ( client ) ; <nl> + grpc_completion_queue_shutdown ( cq ) ; <nl> + drain_cq ( cq ) ; <nl> + grpc_completion_queue_destroy ( cq ) ; <nl> + } <nl> + <nl> + - ( void ) packetCoalescing : ( BOOL ) useCoalescing { <nl> + grpc_arg arg ; <nl> + arg . key = GRPC_ARG_USE_CRONET_PACKET_COALESCING ; <nl> + arg . type = GRPC_ARG_INTEGER ; <nl> + arg . value . integer = useCoalescing ? 1 : 0 ; <nl> + grpc_channel_args * args = grpc_channel_args_copy_and_add ( NULL , & arg , 1 ) ; <nl> + grpc_call * c ; <nl> + grpc_slice request_payload_slice = <nl> + grpc_slice_from_copied_string ( " hello world " ) ; <nl> + grpc_byte_buffer * request_payload = <nl> + grpc_raw_byte_buffer_create ( & request_payload_slice , 1 ) ; <nl> + gpr_timespec deadline = grpc_timeout_seconds_to_deadline ( 5 ) ; <nl> + grpc_metadata meta_c [ 2 ] = { { grpc_slice_from_static_string ( " key1 " ) , <nl> + grpc_slice_from_static_string ( " val1 " ) , <nl> + 0 , <nl> + { { NULL , NULL , NULL , NULL } } } , <nl> + { grpc_slice_from_static_string ( " key2 " ) , <nl> + grpc_slice_from_static_string ( " val2 " ) , <nl> + 0 , <nl> + { { NULL , NULL , NULL , NULL } } } } ; <nl> + <nl> + int port = grpc_pick_unused_port_or_die ( ) ; <nl> + char * addr ; <nl> + gpr_join_host_port ( & addr , " 127 . 0 . 0 . 
1 " , port ) ; <nl> + grpc_completion_queue * cq = grpc_completion_queue_create ( NULL ) ; <nl> + stream_engine * cronetEngine = [ Cronet getGlobalEngine ] ; <nl> + grpc_channel * client = <nl> + grpc_cronet_secure_channel_create ( cronetEngine , addr , args , NULL ) ; <nl> + <nl> + cq_verifier * cqv = cq_verifier_create ( cq ) ; <nl> + grpc_op ops [ 6 ] ; <nl> + grpc_op * op ; <nl> + grpc_metadata_array initial_metadata_recv ; <nl> + grpc_metadata_array trailing_metadata_recv ; <nl> + grpc_metadata_array request_metadata_recv ; <nl> + grpc_byte_buffer * response_payload_recv = NULL ; <nl> + grpc_call_details call_details ; <nl> + grpc_status_code status ; <nl> + grpc_call_error error ; <nl> + grpc_slice details ; <nl> + <nl> + c = grpc_channel_create_call ( client , NULL , GRPC_PROPAGATE_DEFAULTS , cq , <nl> + grpc_slice_from_static_string ( " / foo " ) , NULL , <nl> + deadline , NULL ) ; <nl> + GPR_ASSERT ( c ) ; <nl> + <nl> + grpc_metadata_array_init ( & initial_metadata_recv ) ; <nl> + grpc_metadata_array_init ( & trailing_metadata_recv ) ; <nl> + grpc_metadata_array_init ( & request_metadata_recv ) ; <nl> + grpc_call_details_init ( & call_details ) ; <nl> + <nl> + memset ( ops , 0 , sizeof ( ops ) ) ; <nl> + op = ops ; <nl> + op - > op = GRPC_OP_SEND_INITIAL_METADATA ; <nl> + op - > data . send_initial_metadata . count = 2 ; <nl> + op - > data . send_initial_metadata . metadata = meta_c ; <nl> + op - > flags = 0 ; <nl> + op - > reserved = NULL ; <nl> + op + + ; <nl> + op - > op = GRPC_OP_SEND_MESSAGE ; <nl> + op - > data . send_message . send_message = request_payload ; <nl> + op - > flags = 0 ; <nl> + op - > reserved = NULL ; <nl> + op + + ; <nl> + op - > op = GRPC_OP_SEND_CLOSE_FROM_CLIENT ; <nl> + op - > flags = 0 ; <nl> + op - > reserved = NULL ; <nl> + op + + ; <nl> + op - > op = GRPC_OP_RECV_INITIAL_METADATA ; <nl> + op - > data . recv_initial_metadata . recv_initial_metadata = & initial_metadata_recv ; <nl> + op - > flags = 0 ; <nl> + op - > reserved = NULL ; <nl> + op + + ; <nl> + op - > op = GRPC_OP_RECV_MESSAGE ; <nl> + op - > data . recv_message . recv_message = & response_payload_recv ; <nl> + op - > flags = 0 ; <nl> + op - > reserved = NULL ; <nl> + op + + ; <nl> + op - > op = GRPC_OP_RECV_STATUS_ON_CLIENT ; <nl> + op - > data . recv_status_on_client . trailing_metadata = & trailing_metadata_recv ; <nl> + op - > data . recv_status_on_client . status = & status ; <nl> + op - > data . recv_status_on_client . status_details = & details ; <nl> + op - > flags = 0 ; <nl> + op - > reserved = NULL ; <nl> + op + + ; <nl> + error = grpc_call_start_batch ( c , ops , ( size_t ) ( op - ops ) , ( void * ) 1 , NULL ) ; <nl> + GPR_ASSERT ( GRPC_CALL_OK = = error ) ; <nl> + <nl> + __weak XCTestExpectation * expectation = [ self expectationWithDescription : @ " Coalescing " ] ; <nl> + <nl> + dispatch_async ( <nl> + dispatch_get_global_queue ( DISPATCH_QUEUE_PRIORITY_DEFAULT , 0 ) , ^ { <nl> + int sl = socket ( AF_INET , SOCK_STREAM , 0 ) ; <nl> + GPR_ASSERT ( sl > = 0 ) ; <nl> + struct sockaddr_in s_addr ; <nl> + memset ( & s_addr , 0 , sizeof ( s_addr ) ) ; <nl> + s_addr . sin_family = AF_INET ; <nl> + s_addr . sin_addr . s_addr = htonl ( INADDR_ANY ) ; <nl> + s_addr . sin_port = htons ( port ) ; <nl> + GPR_ASSERT ( 0 = = bind ( sl , ( struct sockaddr * ) & s_addr , sizeof ( s_addr ) ) ) ; <nl> + GPR_ASSERT ( 0 = = listen ( sl , 5 ) ) ; <nl> + int s = accept ( sl , NULL , NULL ) ; <nl> + GPR_ASSERT ( s > = 0 ) ; <nl> + struct timeval tv ; <nl> + tv . tv_sec = 2 ; <nl> + tv . 
tv_usec = 0 ; <nl> + setsockopt ( s , SOL_SOCKET , SO_RCVTIMEO , & tv , sizeof ( tv ) ) ; <nl> + <nl> + / / Make a TLS endpoint to receive Cronet ' s transmission <nl> + SSL_CTX * ctx = SSL_CTX_new ( TLSv1_2_server_method ( ) ) ; <nl> + init_ctx ( ctx ) ; <nl> + SSL * ssl = SSL_new ( ctx ) ; <nl> + SSL_set_fd ( ssl , s ) ; <nl> + SSL_accept ( ssl ) ; <nl> + <nl> + const char magic [ ] = " PRI * HTTP / 2 . 0 \ r \ n \ r \ nSM \ r \ n \ r \ n " ; <nl> + <nl> + char buf [ 4096 ] ; <nl> + long len ; <nl> + BOOL coalesced = NO ; <nl> + while ( ( len = SSL_read ( ssl , buf , sizeof ( buf ) ) ) > 0 ) { <nl> + gpr_log ( GPR_DEBUG , " Read len : % ld " , len ) ; <nl> + <nl> + / / Analyze the HTTP / 2 frames in the same TLS PDU to identify if <nl> + / / coalescing is successful <nl> + unsigned int p = 0 ; <nl> + while ( p < len ) { <nl> + if ( len - p > = 24 & & 0 = = memcmp ( & buf [ p ] , magic , 24 ) ) { <nl> + p + = 24 ; <nl> + continue ; <nl> + } <nl> + <nl> + if ( buf [ p + 3 ] = = 0 & & / / Type is DATA <nl> + parse_h2_length ( & buf [ p ] ) = = 0x10 & & / / Length is correct <nl> + ( buf [ p + 4 ] & 1 ) ! = 0 & & / / EOS bit is set <nl> + 0 = = memcmp ( " hello world " , & buf [ p + 14 ] , <nl> + 11 ) ) { / / Message is correct <nl> + coalesced = YES ; <nl> + break ; <nl> + } <nl> + p + = ( parse_h2_length ( & buf [ p ] ) + 9 ) ; <nl> + } <nl> + if ( coalesced ) { <nl> + break ; <nl> + } <nl> + } <nl> + <nl> + XCTAssert ( coalesced = = useCoalescing ) ; <nl> + SSL_free ( ssl ) ; <nl> + SSL_CTX_free ( ctx ) ; <nl> + close ( s ) ; <nl> + close ( sl ) ; <nl> + [ expectation fulfill ] ; <nl> + } ) ; <nl> + <nl> + CQ_EXPECT_COMPLETION ( cqv , ( void * ) 1 , 1 ) ; <nl> + cq_verify ( cqv ) ; <nl> + <nl> + grpc_slice_unref ( details ) ; <nl> + grpc_metadata_array_destroy ( & initial_metadata_recv ) ; <nl> + grpc_metadata_array_destroy ( & trailing_metadata_recv ) ; <nl> + grpc_metadata_array_destroy ( & request_metadata_recv ) ; <nl> + grpc_call_details_destroy ( & call_details ) ; <nl> + <nl> + grpc_call_destroy ( c ) ; <nl> + <nl> + cq_verifier_destroy ( cqv ) ; <nl> + <nl> + grpc_byte_buffer_destroy ( request_payload ) ; <nl> + grpc_byte_buffer_destroy ( response_payload_recv ) ; <nl> + <nl> grpc_channel_destroy ( client ) ; <nl> grpc_completion_queue_shutdown ( cq ) ; <nl> drain_cq ( cq ) ; <nl> grpc_completion_queue_destroy ( cq ) ; <nl> + <nl> + [ self waitForExpectationsWithTimeout : 4 handler : nil ] ; <nl> + } <nl> + <nl> + - ( void ) testPacketCoalescing { <nl> + [ self packetCoalescing : YES ] ; <nl> + [ self packetCoalescing : NO ] ; <nl> + } <nl> <nl> @ end <nl> mmm a / src / objective - c / tests / Tests . xcodeproj / project . pbxproj <nl> ppp b / src / objective - c / tests / Tests . xcodeproj / project . pbxproj <nl> <nl> " $ ( inherited ) " , <nl> " GPB_USE_PROTOBUF_FRAMEWORK_IMPORTS = 1 " , <nl> " GRPC_COMPILE_WITH_CRONET = 1 " , <nl> + " GRPC_CRONET_WITH_PACKET_COALESCING = 1 " , <nl> ) ; <nl> INFOPLIST_FILE = InteropTestsRemoteWithCronet / Info . plist ; <nl> IPHONEOS_DEPLOYMENT_TARGET = 9 . 3 ; <nl> mmm a / src / objective - c / tests / Tests . xcodeproj / xcshareddata / xcschemes / AllTests . xcscheme <nl> ppp b / src / objective - c / tests / Tests . xcodeproj / xcshareddata / xcschemes / AllTests . xcscheme <nl> <nl> ReferencedContainer = " container : Tests .
xcodeproj " > <nl> < / BuildableReference > <nl> < / TestableReference > <nl> + < TestableReference <nl> + skipped = " NO " > <nl> + < BuildableReference <nl> + BuildableIdentifier = " primary " <nl> + BlueprintIdentifier = " 5EAD6D231E27047400002378 " <nl> + BuildableName = " CronetUnitTests . xctest " <nl> + BlueprintName = " CronetUnitTests " <nl> + ReferencedContainer = " container : Tests . xcodeproj " > <nl> + < / BuildableReference > <nl> + < / TestableReference > <nl> < / Testables > <nl> < MacroExpansion > <nl> < BuildableReference <nl> <nl> savedToolIdentifier = " " <nl> useCustomWorkingDirectory = " NO " <nl> debugDocumentVersioning = " YES " > <nl> + < MacroExpansion > <nl> + < BuildableReference <nl> + BuildableIdentifier = " primary " <nl> + BlueprintIdentifier = " 63423F431B150A5F006CF63C " <nl> + BuildableName = " AllTests . xctest " <nl> + BlueprintName = " AllTests " <nl> + ReferencedContainer = " container : Tests . xcodeproj " > <nl> + < / BuildableReference > <nl> + < / MacroExpansion > <nl> < / ProfileAction > <nl> < AnalyzeAction <nl> buildConfiguration = " Debug " > <nl> mmm a / templates / gRPC - Core . podspec . template <nl> ppp b / templates / gRPC - Core . podspec . template <nl> <nl> <nl> s . subspec ' Cronet - Interface ' do | ss | <nl> ss . header_mappings_dir = ' include / grpc ' <nl> - ss . source_files = ' include / grpc / grpc_cronet . h ' <nl> + ss . source_files = ' include / grpc / grpc_cronet . h ' , <nl> + ' src / core / ext / transport / cronet / transport / cronet_transport . h ' <nl> end <nl> <nl> s . subspec ' Cronet - Implementation ' do | ss | <nl> mmm a / test / core / end2end / gen_build_yaml . py <nl> ppp b / test / core / end2end / gen_build_yaml . py <nl> <nl> <nl> # maps test names to options <nl> END2END_TESTS = { <nl> + ' authority_not_supported ' : default_test_options , <nl> ' bad_hostname ' : default_test_options , <nl> ' binary_metadata ' : default_test_options , <nl> ' resource_quota_server ' : default_test_options . _replace ( large_writes = True , <nl> <nl> ' simple_request ' : default_test_options , <nl> ' streaming_error_response ' : default_test_options , <nl> ' trailing_metadata ' : default_test_options , <nl> - ' authority_not_supported ' : default_test_options , <nl> ' write_buffering ' : default_test_options , <nl> ' write_buffering_at_end ' : default_test_options , <nl> } <nl> mmm a / tools / codegen / core / gen_nano_proto . sh <nl> ppp b / tools / codegen / core / gen_nano_proto . sh <nl> popd <nl> <nl> # this should be the same version as the submodule we compile against <nl> # ideally we ' d update this as a template to ensure that <nl> - pip install protobuf = = 3 . 0 . 0 <nl> + pip install protobuf = = 3 . 2 . 0 <nl> <nl> pushd " $ ( dirname $ INPUT_PROTO ) " > / dev / null <nl> <nl> mmm a / tools / run_tests / generated / sources_and_headers . json <nl> ppp b / tools / run_tests / generated / sources_and_headers . json <nl> <nl> " include / grpc / grpc_cronet . h " , <nl> " include / grpc / grpc_security . h " , <nl> " include / grpc / grpc_security_constants . h " , <nl> + " src / core / ext / transport / cronet / transport / cronet_transport . h " , <nl> " third_party / objective_c / Cronet / bidirectional_stream_c . h " <nl> ] , <nl> " is_filegroup " : true , <nl> <nl> " include / grpc / grpc_security_constants . h " , <nl> " src / core / ext / transport / cronet / client / secure / cronet_channel_create . c " , <nl> " src / core / ext / transport / cronet / transport / cronet_api_dummy . 
c " , <nl> - " src / core / ext / transport / cronet / transport / cronet_transport . c " <nl> + " src / core / ext / transport / cronet / transport / cronet_transport . c " , <nl> + " src / core / ext / transport / cronet / transport / cronet_transport . h " <nl> ] , <nl> " third_party " : false , <nl> " type " : " filegroup " <nl> mmm a / tools / run_tests / run_microbenchmark . py <nl> ppp b / tools / run_tests / run_microbenchmark . py <nl> def collect_perf ( bm_name , args ) : <nl> ' - g ' , ' - c ' , ' 1000 ' , <nl> ' bins / mutrace / % s ' % bm_name , <nl> ' - - benchmark_filter = ^ % s $ ' % line , <nl> - ' - - benchmark_min_time = 20 ' ] ) <nl> + ' - - benchmark_min_time = 10 ' ] ) <nl> env = os . environ . copy ( ) <nl> env . update ( { <nl> ' PERF_BASE_NAME ' : fnize ( line ) , <nl> def collect_perf ( bm_name , args ) : <nl> } ) <nl> subprocess . check_call ( [ ' tools / run_tests / performance / process_local_perf_flamegraphs . sh ' ] , <nl> env = env ) <nl> + subprocess . check_call ( [ ' rm ' , ' % s - perf . data ' % fnize ( line ) ] ) <nl> + subprocess . check_call ( [ ' rm ' , ' % s - out . perf ' % fnize ( line ) ] ) <nl> <nl> def collect_summary ( bm_name , args ) : <nl> heading ( ' Summary : % s ' % bm_name ) <nl>
Merge github . com : grpc / grpc into bm_cq
grpc/grpc
ad3b200e690f703cfa4fe9fea6603f708420ed0f
2017-02-15T16:33:30Z
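The grpc commit above implements opt-in packet coalescing for the Cronet transport: when GRPC_ARG_USE_CRONET_PACKET_COALESCING is set, the transport disables Cronet's auto-flush, delays the request headers, queues SEND_INITIAL_METADATA, SEND_MESSAGE and the trailing zero-length end-of-stream write back to back, and pushes them out with a single bidirectional_stream_flush, so a unary request fits in one TLS record. The new testPacketCoalescing verifies this from the server side: it terminates TLS with BoringSSL, reads one record, and walks the HTTP/2 frames inside it using parse_h2_length, which decodes the 3-byte big-endian length field at the start of a frame header. The sketch below restates that frame walk as a self-contained function; parse_h2_length and the offsets (9-byte frame header, type byte at offset 3, flags byte at offset 4, 5-byte gRPC message header) are taken from the diff, while has_coalesced_data_frame and its parameters are hypothetical names added here for illustration.

    #include <string.h>

    /* Decode the 3-byte big-endian length field of an HTTP/2 frame header
       (same logic as parse_h2_length in the diff). */
    static unsigned int parse_h2_length(const char *field) {
      return ((unsigned int)(unsigned char)(field[0])) * 65536 +
             ((unsigned int)(unsigned char)(field[1])) * 256 +
             ((unsigned int)(unsigned char)(field[2]));
    }

    /* Walk the HTTP/2 frames inside one TLS record and report whether it
       already carries the final DATA frame for `msg`, i.e. whether headers,
       message and end-of-stream were coalesced into a single flush. */
    static int has_coalesced_data_frame(const char *buf, unsigned int len,
                                        const char *msg, unsigned int msg_len) {
      static const char preface[] = "PRI * HTTP/2.0\r\n\r\nSM\r\n\r\n";
      unsigned int p = 0;
      while (p + 9 <= len) {                    /* need a full frame header */
        if (len - p >= 24 && 0 == memcmp(&buf[p], preface, 24)) {
          p += 24;                              /* skip the connection preface */
          continue;
        }
        unsigned int flen = parse_h2_length(&buf[p]);
        if (buf[p + 3] == 0 &&                  /* frame type DATA */
            (buf[p + 4] & 1) != 0 &&            /* END_STREAM flag set */
            flen == 5 + msg_len &&              /* gRPC header + payload */
            p + 9 + flen <= len &&
            0 == memcmp(msg, &buf[p + 9 + 5], msg_len)) {
          return 1;
        }
        p += flen + 9;                          /* advance to the next frame */
      }
      return 0;
    }

For the test's "hello world" payload the expected frame length is 0x10 (the 5-byte gRPC message header plus 11 payload bytes), which is exactly the literal the diff checks; without coalescing, the DATA frame arrives in a later TLS record and the walk over the first record finds nothing.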
mmm a / hphp / runtime / ext / hsl / ext_hsl_os . cpp <nl> ppp b / hphp / runtime / ext / hsl / ext_hsl_os . cpp <nl> struct HSLFileDescriptor { <nl> } ; <nl> <nl> static Object newInstance ( int fd ) { <nl> + / / Callers should check for an invalid FD before trying to construct a <nl> + / / FileDescriptor object , but don ' t trust them . <nl> + if ( fd < 0 ) { <nl> + SystemLib : : throwErrorObject ( <nl> + " Asked to create a negative FileDescriptor instance ; this indicates " <nl> + " a bug in HHVM or a misbehaving CLI client . " <nl> + ) ; <nl> + } <nl> assertx ( s_FileDescriptorClass ) ; <nl> Object obj { s_FileDescriptorClass } ; <nl> <nl> T hsl_cli_unwrap ( CLISrvResult < T , int > res ) { <nl> throw_errno_exception ( res . error ( ) ) ; <nl> } <nl> <nl> + # define HSL_CLI_INVOKE ( . . . ) hsl_cli_unwrap ( INVOKE_ON_CLI_CLIENT ( __VA_ARGS__ ) ) <nl> + <nl> CLISrvResult < ReturnedFdData , int > <nl> CLI_CLIENT_HANDLER ( HSL_os_open , std : : string path , int64_t flags , int64_t mode ) { <nl> auto const fd = [ & ] { <nl> CLI_CLIENT_HANDLER ( HSL_os_open , std : : string path , int64_t flags , int64_t mode ) { <nl> } <nl> <nl> Object HHVM_FUNCTION ( HSL_os_open , const String & path , int64_t flags , int64_t mode ) { <nl> - int fd = hsl_cli_unwrap ( INVOKE_ON_CLI_CLIENT ( <nl> + int fd = HSL_CLI_INVOKE ( <nl> HSL_os_open , <nl> path . toCppString ( ) , <nl> flags , <nl> mode <nl> - ) ) . fd ; <nl> - assertx ( fd > = 0 ) ; <nl> + ) . fd ; <nl> return HSLFileDescriptor : : newInstance ( fd ) ; <nl> } <nl> <nl> Object HHVM_FUNCTION ( HSL_os_socket , int64_t domain , int64_t type , int64_t protoc <nl> / / - some operations are / used to be privileged ( e . g . raw sockets ) <nl> / / - linux allows setting some socket options via type , which may be privileged <nl> / / . . . so do the syscall on the client <nl> - int fd = hsl_cli_unwrap ( INVOKE_ON_CLI_CLIENT ( <nl> + int fd = HSL_CLI_INVOKE ( <nl> HSL_os_socket , <nl> domain , <nl> type , <nl> protocol <nl> - ) ) . fd ; <nl> - assertx ( fd > = 0 ) ; <nl> + ) . fd ; <nl> return HSLFileDescriptor : : newInstance ( fd ) ; <nl> } <nl> <nl> void HHVM_FUNCTION ( HSL_os_ # # fun , const Object & fd , const Object & hsl_sockaddr ) <nl> socklen_t ss_len ; \ <nl> native_sockaddr_from_hsl ( hsl_sockaddr , ss , ss_len ) ; \ <nl> \ <nl> - hsl_cli_unwrap ( INVOKE_ON_CLI_CLIENT ( \ <nl> + HSL_CLI_INVOKE ( \ <nl> HSL_os_ # # fun , \ <nl> LoanedFdData { HSLFileDescriptor : : fd ( fd ) } , \ <nl> ss , \ <nl> static_cast < int64_t > ( ss_len ) \ <nl> - ) ) ; \ <nl> + ) ; \ <nl> } <nl> <nl> IMPL ( connect ) ; <nl> CLISrvResult < int , int > CLI_CLIENT_HANDLER ( HSL_os_listen , <nl> } <nl> <nl> void HHVM_FUNCTION ( HSL_os_listen , const Object & fd , int64_t backlog ) { <nl> - hsl_cli_unwrap ( INVOKE_ON_CLI_CLIENT ( <nl> + HSL_CLI_INVOKE ( <nl> HSL_os_listen , <nl> LoanedFdData { HSLFileDescriptor : : fd ( fd ) } , <nl> backlog <nl> - ) ) ; <nl> + ) ; <nl> } <nl> <nl> <nl> Variant HHVM_FUNCTION ( HSL_os_fcntl , <nl> " Argument for specific fcntl operation must be an int " <nl> ) ; <nl> } <nl> - return hsl_cli_unwrap ( INVOKE_ON_CLI_CLIENT ( <nl> + return HSL_CLI_INVOKE ( <nl> HSL_os_fcntl_intarg , <nl> LoanedFdData { fd } , <nl> cmd , <nl> arg . toInt64 ( ) <nl> - ) ) ; <nl> + ) ; <nl> default : <nl> throw_errno_exception ( <nl> ENOTSUP , <nl> Object HHVM_FUNCTION ( HSL_os_poll_async , <nl> } <nl> } <nl> <nl> + # undef HSL_CLI_INVOKE <nl> + <nl> struct OSExtension final : Extension { <nl> <nl> OSExtension ( ) : Extension ( " hsl_os " , " 0 . 1 " ) { } <nl>
Reduce copypasta in ext_hsl_os
facebook/hhvm
fb69ddef434a3a46a32c411f24e53a7be5ab4242
2020-04-16T16:44:22Z
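The hhvm commit above is a small refactor plus a hardening fix: HSLFileDescriptor::newInstance now throws instead of constructing an object around a negative fd (callers should have checked, but a misbehaving CLI client must not be trusted), and the repeated hsl_cli_unwrap(INVOKE_ON_CLI_CLIENT(...)) pairs collapse into a file-local variadic macro, HSL_CLI_INVOKE, that is #define'd once and #undef'ed at the end of the file so the name cannot leak into other translation units. A minimal sketch of the same pattern, with hypothetical names (CliResult, cli_unwrap, invoke_open, CLI_INVOKE) standing in for HHVM's CLISrvResult machinery:

    #include <cerrno>
    #include <fcntl.h>
    #include <string>
    #include <system_error>

    // A result carrier in the spirit of CLISrvResult: a value or an errno.
    template <typename T>
    struct CliResult {
      bool ok;
      T value;
      int error;  // errno when !ok
    };

    // Like hsl_cli_unwrap: surface the error arm as an exception.
    template <typename T>
    T cli_unwrap(CliResult<T> res) {
      if (res.ok) return res.value;
      throw std::system_error(res.error, std::generic_category());
    }

    // Stand-in for a CLI-client handler; the real code proxies the call to
    // the CLI client process rather than running it directly like this.
    CliResult<int> invoke_open(const std::string& path, int flags) {
      int fd = ::open(path.c_str(), flags);
      if (fd < 0) return {false, -1, errno};
      return {true, fd, 0};
    }

    // The file-local macro: each call site shrinks to one readable line.
    #define CLI_INVOKE(fn, ...) cli_unwrap(invoke_##fn(__VA_ARGS__))

    int open_or_throw(const std::string& path) {
      return CLI_INVOKE(open, path, O_RDONLY);
    }

    #undef CLI_INVOKE  // mirrors the #undef at the end of ext_hsl_os.cpp

The #define/#undef bracketing is the design point: the macro reads like a function at every call site, but its scope is explicitly the stretch of file between the two directives, which is as close to a scoped macro as C and C++ offer.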
mmm a / src / yuzu / applets / profile_select . cpp <nl> ppp b / src / yuzu / applets / profile_select . cpp <nl> QtProfileSelectionDialog : : QtProfileSelectionDialog ( QWidget * parent ) <nl> QtProfileSelectionDialog : : ~ QtProfileSelectionDialog ( ) = default ; <nl> <nl> void QtProfileSelectionDialog : : accept ( ) { <nl> - ok = true ; <nl> QDialog : : accept ( ) ; <nl> } <nl> <nl> void QtProfileSelectionDialog : : reject ( ) { <nl> - ok = false ; <nl> user_index = 0 ; <nl> QDialog : : reject ( ) ; <nl> } <nl> <nl> - bool QtProfileSelectionDialog : : GetStatus ( ) const { <nl> - return ok ; <nl> - } <nl> - <nl> - u32 QtProfileSelectionDialog : : GetIndex ( ) const { <nl> + int QtProfileSelectionDialog : : GetIndex ( ) const { <nl> return user_index ; <nl> } <nl> <nl> mmm a / src / yuzu / applets / profile_select . h <nl> ppp b / src / yuzu / applets / profile_select . h <nl> class QtProfileSelectionDialog final : public QDialog { <nl> void accept ( ) override ; <nl> void reject ( ) override ; <nl> <nl> - bool GetStatus ( ) const ; <nl> - u32 GetIndex ( ) const ; <nl> + int GetIndex ( ) const ; <nl> <nl> private : <nl> - bool ok = false ; <nl> - u32 user_index = 0 ; <nl> - <nl> void SelectUser ( const QModelIndex & index ) ; <nl> <nl> + int user_index = 0 ; <nl> + <nl> QVBoxLayout * layout ; <nl> QTreeView * tree_view ; <nl> QStandardItemModel * item_model ; <nl> mmm a / src / yuzu / applets / software_keyboard . cpp <nl> ppp b / src / yuzu / applets / software_keyboard . cpp <nl> QtSoftwareKeyboardDialog : : QtSoftwareKeyboardDialog ( <nl> QtSoftwareKeyboardDialog : : ~ QtSoftwareKeyboardDialog ( ) = default ; <nl> <nl> void QtSoftwareKeyboardDialog : : accept ( ) { <nl> - ok = true ; <nl> text = line_edit - > text ( ) . toStdU16String ( ) ; <nl> QDialog : : accept ( ) ; <nl> } <nl> <nl> void QtSoftwareKeyboardDialog : : reject ( ) { <nl> - ok = false ; <nl> text . clear ( ) ; <nl> QDialog : : reject ( ) ; <nl> } <nl> std : : u16string QtSoftwareKeyboardDialog : : GetText ( ) const { <nl> return text ; <nl> } <nl> <nl> - bool QtSoftwareKeyboardDialog : : GetStatus ( ) const { <nl> - return ok ; <nl> - } <nl> - <nl> QtSoftwareKeyboard : : QtSoftwareKeyboard ( GMainWindow & main_window ) { <nl> connect ( this , & QtSoftwareKeyboard : : MainWindowGetText , & main_window , <nl> & GMainWindow : : SoftwareKeyboardGetText , Qt : : QueuedConnection ) ; <nl> mmm a / src / yuzu / applets / software_keyboard . h <nl> ppp b / src / yuzu / applets / software_keyboard . h <nl> class QtSoftwareKeyboardDialog final : public QDialog { <nl> void reject ( ) override ; <nl> <nl> std : : u16string GetText ( ) const ; <nl> - bool GetStatus ( ) const ; <nl> <nl> private : <nl> - bool ok = false ; <nl> std : : u16string text ; <nl> <nl> QDialogButtonBox * buttons ; <nl> mmm a / src / yuzu / main . cpp <nl> ppp b / src / yuzu / main . cpp <nl> void GMainWindow : : ProfileSelectorSelectProfile ( ) { <nl> dialog . setWindowFlags ( Qt : : Dialog | Qt : : CustomizeWindowHint | Qt : : WindowTitleHint | <nl> Qt : : WindowSystemMenuHint | Qt : : WindowCloseButtonHint ) ; <nl> dialog . setWindowModality ( Qt : : WindowModal ) ; <nl> - dialog . exec ( ) ; <nl> - <nl> - if ( ! dialog . GetStatus ( ) ) { <nl> + if ( dialog . exec ( ) = = QDialog : : Rejected ) { <nl> emit ProfileSelectorFinishedSelection ( std : : nullopt ) ; <nl> return ; <nl> } <nl> <nl> Service : : Account : : ProfileManager manager ; <nl> - const auto uuid = manager . GetUser ( dialog . GetIndex ( ) ) ; <nl> + const auto uuid = manager . 
GetUser ( static_cast < std : : size_t > ( dialog . GetIndex ( ) ) ) ; <nl> if ( ! uuid . has_value ( ) ) { <nl> emit ProfileSelectorFinishedSelection ( std : : nullopt ) ; <nl> return ; <nl> void GMainWindow : : SoftwareKeyboardGetText ( <nl> dialog . setWindowFlags ( Qt : : Dialog | Qt : : CustomizeWindowHint | Qt : : WindowTitleHint | <nl> Qt : : WindowSystemMenuHint | Qt : : WindowCloseButtonHint ) ; <nl> dialog . setWindowModality ( Qt : : WindowModal ) ; <nl> - dialog . exec ( ) ; <nl> <nl> - if ( ! dialog . GetStatus ( ) ) { <nl> + if ( dialog . exec ( ) = = QDialog : : Rejected ) { <nl> emit SoftwareKeyboardFinishedText ( std : : nullopt ) ; <nl> return ; <nl> } <nl> void GMainWindow : : SelectAndSetCurrentUser ( ) { <nl> dialog . setWindowFlags ( Qt : : Dialog | Qt : : CustomizeWindowHint | Qt : : WindowTitleHint | <nl> Qt : : WindowSystemMenuHint | Qt : : WindowCloseButtonHint ) ; <nl> dialog . setWindowModality ( Qt : : WindowModal ) ; <nl> - dialog . exec ( ) ; <nl> <nl> - if ( dialog . GetStatus ( ) ) { <nl> - Settings : : values . current_user = static_cast < s32 > ( dialog . GetIndex ( ) ) ; <nl> + if ( dialog . exec ( ) = = QDialog : : Rejected ) { <nl> + return ; <nl> } <nl> + <nl> + Settings : : values . current_user = dialog . GetIndex ( ) ; <nl> } <nl> <nl> void GMainWindow : : BootGame ( const QString & filename ) { <nl> void GMainWindow : : OnGameListOpenFolder ( u64 program_id , GameListOpenTarget target <nl> const std : : string nand_dir = FileUtil : : GetUserPath ( FileUtil : : UserPath : : NANDDir ) ; <nl> ASSERT ( program_id ! = 0 ) ; <nl> <nl> - const auto select_profile = [ this ] ( ) - > s32 { <nl> + const auto select_profile = [ this ] { <nl> QtProfileSelectionDialog dialog ( this ) ; <nl> dialog . setWindowFlags ( Qt : : Dialog | Qt : : CustomizeWindowHint | Qt : : WindowTitleHint | <nl> Qt : : WindowSystemMenuHint | Qt : : WindowCloseButtonHint ) ; <nl> dialog . setWindowModality ( Qt : : WindowModal ) ; <nl> - dialog . exec ( ) ; <nl> <nl> - if ( ! dialog . GetStatus ( ) ) { <nl> + if ( dialog . exec ( ) = = QDialog : : Rejected ) { <nl> return - 1 ; <nl> } <nl> <nl> void GMainWindow : : OnGameListOpenFolder ( u64 program_id , GameListOpenTarget target <nl> } ; <nl> <nl> const auto index = select_profile ( ) ; <nl> - if ( index = = - 1 ) <nl> + if ( index = = - 1 ) { <nl> return ; <nl> + } <nl> <nl> Service : : Account : : ProfileManager manager ; <nl> - const auto user_id = manager . GetUser ( index ) ; <nl> + const auto user_id = manager . GetUser ( static_cast < std : : size_t > ( index ) ) ; <nl> ASSERT ( user_id ) ; <nl> path = nand_dir + FileSys : : SaveDataFactory : : GetFullPath ( FileSys : : SaveDataSpaceId : : NandUser , <nl> FileSys : : SaveDataType : : SaveData , <nl>
Merge pull request from lioncash / index
yuzu-emu/yuzu
1eb979221f6664938456cefdbd5c79b7e59e2b8c
2019-06-05T19:30:51Z
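The yuzu commit above removes redundant state from the Qt applet dialogs: QDialog::exec() already blocks until accept() or reject() runs and returns QDialog::Accepted or QDialog::Rejected, so the hand-maintained ok flag and GetStatus() accessor duplicated information the framework provides. The index also becomes a plain int, so the -1 sentinel and the assignment to Settings::values.current_user need no casts. A minimal sketch of the resulting call pattern, assuming Qt 5 widgets; ProfileDialog and SelectProfile are hypothetical stand-ins for QtProfileSelectionDialog and its callers:

    #include <QDialog>
    #include <optional>

    // Hypothetical stand-in for QtProfileSelectionDialog: it only stores the
    // selection; "was it accepted?" lives in QDialog's result code.
    class ProfileDialog : public QDialog {
    public:
        using QDialog::QDialog;
        int GetIndex() const { return user_index; }

    private:
        int user_index = 0;  // updated by selection handlers in the real dialog
    };

    // exec() blocks until accept()/reject() finishes and returns the result
    // code, so no separate GetStatus() flag is needed.
    std::optional<int> SelectProfile(QWidget* parent) {
        ProfileDialog dialog(parent);
        dialog.setWindowModality(Qt::WindowModal);
        if (dialog.exec() == QDialog::Rejected) {
            return std::nullopt;  // user cancelled the dialog
        }
        return dialog.GetIndex();
    }

Dropping the flag also closes a subtle hole: a path that finishes the dialog through a direct done() call would bypass the overridden accept()/reject() and leave a hand-rolled flag stale, while exec()'s return code is set on every exit path.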