| diff | msg | repo | sha | time |
| --- | --- | --- | --- | --- |
| string, lengths 41 to 2.03M | string, lengths 1 to 1.5k, ⌀ (nullable) | string, lengths 5 to 40 | string, length 40 | string, length 20 |
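Each row below is a single line: the `diff` cell is one pre-tokenized string in which ` <nl> ` marks a line break, and the raw cells rewrite the unified-diff file headers `---`/`+++` as `mmm`/`ppp` and the hunk markers `@@` as `@ @` (the pytorch/pytorch row below is shown in this raw form). A minimal decoding sketch, assuming rows are plain dicts matching the schema above; restoring token spacing inside code lines is lossy, so only the structural markers are handled:

```python
def decode_diff(diff_field: str) -> str:
    """Rebuild unified-diff structure from one pre-tokenized `diff` cell."""
    decoded = []
    for line in diff_field.split(" <nl> "):
        # The corpus stores ---/+++ file headers as mmm/ppp; restore them.
        if line.startswith("mmm "):
            line = "--- " + line[4:]
        elif line.startswith("ppp "):
            line = "+++ " + line[4:]
        # Hunk markers are stored as "@ @ ... @ @".
        line = line.replace("@ @", "@@")
        decoded.append(line)
    return "\n".join(decoded)

# Toy row shaped like the schema above (values are illustrative).
row = {
    "diff": "mmm a / foo . py <nl> ppp b / foo . py <nl> - old ( ) <nl> + new ( )",
    "msg": "Replace old() with new()",
    "repo": "example/repo",
}
print(decode_diff(row["diff"]))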
--- a/samples/TestJavascript/cocos2d-html5-tests <nl> +++ b/samples/TestJavascript/cocos2d-html5-tests <nl> @@ -1 +1 @@ <nl> -Subproject commit 77f6d102a842cc83693d9f7c8f3e4cdcd79c0f81 <nl> +Subproject commit 67eb718deb8e9bab28033218ac65ebc7f768daae <nl> | Merge pull request from dumganhar/iss1549-js-test | cocos2d/cocos2d-x | 8d132699506d5d1b684f4063369eb67af760bf60 | 2012-11-05T11:00:30Z |
--- a/Manual/Manual_How_to_debug.ipynb <nl> +++ b/Manual/Manual_How_to_debug.ipynb <nl> <nl> "import cntk as C\n", <nl> "import numpy as np\n", <nl> "import scipy.sparse as sparse\n", <nl> -"import sys" <nl> +"import sys\n", <nl> +"import cntk.tests.test_utils\n", <nl> +"cntk.tests.test_utils.set_device_from_pytest_env() # (only needed for our build system)\n" <nl> ] <nl> }, <nl> { <nl> --- a/Tests/EndToEndTests/CNTKv2Python/Manual/Manual_How_to_debug_test.py <nl> +++ b/Tests/EndToEndTests/CNTKv2Python/Manual/Manual_How_to_debug_test.py <nl> <nl> abs_path = os.path.dirname(os.path.abspath(__file__)) <nl> <nl> notebook = os.path.join(abs_path, "..", "..", "..", "..", "Manual", "Manual_How_to_debug.ipynb") <nl> +notebook_deviceIdsToRun = [0] <nl> <nl> # Note: Given this is a manual for debugging, we check only for functional correctness of API. <nl> <nl> new file mode 100644 <nl> index 00000000000..284dfdff99b <nl> --- /dev/null <nl> +++ b/Tests/EndToEndTests/CNTKv2Python/Manual/baseline.txt <nl> @@ -0,0 +1 @@ <nl> +__COMPLETED__ <nl> new file mode 100644 <nl> index 00000000000..40ef8672996 <nl> --- /dev/null <nl> +++ b/Tests/EndToEndTests/CNTKv2Python/Manual/run-test <nl> <nl> +#!/bin/bash <nl> + <nl> +. $TEST_ROOT_DIR/run-test-common <nl> + <nl> +py.test --verbose --deviceid $TEST_DEVICE --is1bitsgd $TEST_1BIT_SGD <nl> + <nl> +if [ "$?" -eq "0" ]; then <nl> +echo "__COMPLETED__" <nl> +fi <nl> new file mode 100644 <nl> index 00000000000..86c3aa9c9e5 <nl> --- /dev/null <nl> +++ b/Tests/EndToEndTests/CNTKv2Python/Manual/testcases.yml <nl> <nl> +isPythonTest: True <nl> + <nl> +dataDir: . <nl> + <nl> +tags: <nl> +  # Python only in Release builds. <nl> +  - bvt-j (build_sku == '1bitsgd') and (flavor == 'release') <nl> +  - nightly-j (build_sku == '1bitsgd') and (flavor == 'release') <nl> +  - weekly-j (build_sku == '1bitsgd') and (flavor == 'release') <nl> + <nl> +testCases: <nl> +  Run must finish with error code 0 (outputs __COMPLETED__ in that case): <nl> +    patterns: <nl> +      - __COMPLETED__ <nl> --- a/Tests/EndToEndTests/Downloads/run-test <nl> +++ b/Tests/EndToEndTests/Downloads/run-test <nl> <nl> #!/bin/bash <nl> <nl> +python -c "import sys; sys.exit(not sys.version_info[0:2] in [(2, 7), (3, 4)])" || { <nl> +echo Running this test only for Python 2.7 or 3.4 to save time. <nl> +echo __COMPLETED__ <nl> +exit 0 <nl> +} <nl> + <nl> cd ../../.. <nl> <nl> downloaders=( <nl> | Integrate mahilleb/MissingTestCaseDefinition into master | microsoft/CNTK | be3c0feca6791cbb1497a751ea090d2e505fda67 | 2017-07-05T19:40:36Z |
--- a/android/sdk/src/main/java/com/taobao/weex/ui/component/list/BasicListComponent.java <nl> +++ b/android/sdk/src/main/java/com/taobao/weex/ui/component/list/BasicListComponent.java <nl> public void onLoadMore(int offScreenY) { <nl> if (TextUtils.isEmpty(offset)) { <nl> offset = "0"; <nl> } <nl> -float offsetParsed = WXViewUtils.getRealPxByWidth(Integer.parseInt(offset), getInstance().getInstanceViewPortWidth()); <nl> + <nl> + <nl> +float offsetParsed = WXViewUtils.getRealPxByWidth(WXUtils.getInt(offset), getInstance().getInstanceViewPortWidth()); <nl> <nl> if (offScreenY <= offsetParsed && getEvents().contains(Constants.Event.LOADMORE)) { <nl> if (mListCellCount != mChildren.size() <nl> | [WEEX] [Android] When the LoadMore offset is a float, an exception occurs and the LOADMORE function is lost () | apache/incubator-weex | 23079b14b36f9ef45536f624a5a3158a0da1164f | 2019-01-22T11:47:32Z |
--- a/atom/browser/native_window_views.cc <nl> +++ b/atom/browser/native_window_views.cc <nl> int kWindowsCreated = 0; <nl> bool ShouldUseGlobalMenuBar() { <nl> // Some DE would pretend to be Unity but don't have global application menu, <nl> // so we can not trust unity::IsRunning(). <nl> +// When Unity's GlobalMenu is running $UBUNTU_MENUPROXY should be set to <nl> +// something like "libappmenu.so". <nl> scoped_ptr<base::Environment> env(base::Environment::Create()); <nl> -return unity::IsRunning() && (base::nix::GetDesktopEnvironment(env.get()) == <nl> -base::nix::DESKTOP_ENVIRONMENT_UNITY); <nl> +std::string name; <nl> +return env && env->GetVar("UBUNTU_MENUPROXY", &name) && <nl> +!name.empty() && name != "0"; <nl> } <nl> #endif <nl> <nl> | linux: GlobalMenu: only if UBUNTU_MENUPROXY is set | electron/electron | 7cd4d35778ff484558ae649a96a3a945914d431d | 2014-10-12T19:16:32Z |
mmm a / aten / src / ATen / cpu / vec256 / vec256_int . h <nl> ppp b / aten / src / ATen / cpu / vec256 / vec256_int . h <nl> struct Vec256 < int32_t > : public Vec256i { <nl> std : : memcpy ( ptr , tmp_values , count * sizeof ( int32_t ) ) ; <nl> } <nl> } <nl> + void dump ( ) const { <nl> + for ( size_t i = 0 ; i < size ( ) ; + + i ) { <nl> + std : : cout < < ( int ) ( ( value_type * ) & values ) [ i ] < < " " ; <nl> + } <nl> + std : : cout < < std : : endl ; <nl> + } <nl> const int32_t & operator [ ] ( int idx ) const = delete ; <nl> int32_t & operator [ ] ( int idx ) = delete ; <nl> Vec256 < int32_t > abs ( ) const { <nl> Vec256 < int16_t > inline clamp_min ( const Vec256 < int16_t > & a , const Vec256 < int16_t > <nl> return _mm256_max_epi16 ( min_val , a ) ; <nl> } <nl> <nl> + template < typename T > <nl> + Vec256 < int32_t > inline convert_to_int32 ( const T * ptr ) { <nl> + return Vec256 < int32_t > : : loadu ( ptr ) ; <nl> + } <nl> + <nl> + template < > <nl> + Vec256 < int32_t > inline convert_to_int32 < int8_t > ( const int8_t * ptr ) { <nl> + return _mm256_cvtepi8_epi32 ( _mm_loadl_epi64 ( reinterpret_cast < const __m128i * > ( ptr ) ) ) ; <nl> + } <nl> + <nl> + template < > <nl> + Vec256 < int32_t > inline convert_to_int32 < uint8_t > ( const uint8_t * ptr ) { <nl> + return _mm256_cvtepu8_epi32 ( _mm_loadl_epi64 ( reinterpret_cast < const __m128i * > ( ptr ) ) ) ; <nl> + } <nl> + <nl> template < typename T > <nl> Vec256 < T > inline intdiv_256 ( const Vec256 < T > & a , const Vec256 < T > & b ) { <nl> T values_a [ Vec256 < T > : : size ( ) ] ; <nl> mmm a / aten / src / ATen / native / native_functions . yaml <nl> ppp b / aten / src / ATen / native / native_functions . yaml <nl> <nl> CPU : adaptive_avg_pool2d_out_cpu <nl> CUDA : adaptive_avg_pool2d_out_cuda <nl> MkldnnCPU : mkldnn_adaptive_avg_pool2d_out <nl> - QuantizedCPU : quantized_adaptive_avg_pool2d_out <nl> <nl> - func : adaptive_avg_pool2d ( Tensor self , int [ 2 ] output_size ) - > Tensor <nl> use_c10_dispatcher : True <nl> <nl> CPU : avg_pool2d_cpu <nl> CUDA : avg_pool2d_cuda <nl> MkldnnCPU : mkldnn_avg_pool2d <nl> + QuantizedCPU : quantized_avg_pool2d <nl> <nl> - func : avg_pool2d_backward . grad_input ( Tensor grad_output , Tensor self , int [ 2 ] kernel_size , int [ 2 ] stride , int [ 2 ] padding , bool ceil_mode , bool count_include_pad , int ? divisor_override , * , Tensor ( a ! ) grad_input ) - > Tensor ( a ! ) <nl> python_module : nn <nl> mmm a / aten / src / ATen / native / quantized / cpu / kernels / QuantizedOpKernels . cpp <nl> ppp b / aten / src / ATen / native / quantized / cpu / kernels / QuantizedOpKernels . 
cpp <nl> void qadd_kernel ( Tensor & out , const Tensor & self , const Tensor & other ) { <nl> } ) ; <nl> } <nl> <nl> - void qmaxpool_2d_nhwc_kernel ( const Tensor & qx , <nl> - int64_t iC , / / input / output channels <nl> - int64_t iH , <nl> - int64_t iW , / / input sizes <nl> - int64_t oH , <nl> - int64_t oW , / / output sizes <nl> - int64_t kH , <nl> - int64_t kW , / / kernel size <nl> - int64_t sH , <nl> - int64_t sW , / / strides <nl> - int64_t pH , <nl> - int64_t pW , / / padding <nl> - int64_t dH , <nl> - int64_t dW , / / dilation <nl> - Tensor & qy ) { <nl> + void qmaxpool_2d_nhwc_kernel ( <nl> + const Tensor & qx , <nl> + int64_t iC , / / input / output channels <nl> + int64_t iH , <nl> + int64_t iW , / / input sizes <nl> + int64_t oH , <nl> + int64_t oW , / / output sizes <nl> + int64_t kH , <nl> + int64_t kW , / / kernel size <nl> + int64_t sH , <nl> + int64_t sW , / / strides <nl> + int64_t pH , <nl> + int64_t pW , / / padding <nl> + int64_t dH , <nl> + int64_t dW , / / dilation <nl> + Tensor & qy ) { <nl> AT_DISPATCH_QINT_TYPES ( qx . scalar_type ( ) , " max_pool2d_nhwc " , [ & ] ( ) { <nl> - scalar_t * idata = static_cast < scalar_t * > ( qx . data_ptr ( ) ) ; <nl> - scalar_t * odata = static_cast < scalar_t * > ( qy . data_ptr ( ) ) ; <nl> + scalar_t * idata = static_cast < scalar_t * > ( qx . data_ptr ( ) ) ; <nl> + scalar_t * odata = static_cast < scalar_t * > ( qy . data_ptr ( ) ) ; <nl> <nl> / / Loop over N <nl> for ( int64_t b = 0 ; b < qx . size ( 0 ) ; + + b ) { <nl> / / Loop over H <nl> - auto * i_p = reinterpret_cast < scalar_t : : underlying * > ( idata + b * iW * iH * iC ) ; <nl> + auto * i_p = <nl> + reinterpret_cast < scalar_t : : underlying * > ( idata + b * iW * iH * iC ) ; <nl> for ( int64_t row = 0 ; row < oH ; + + row ) { <nl> / / Loop over W <nl> for ( int64_t col = 0 ; col < oW ; + + col ) { <nl> / / Pointer to output data for this specific N , H , W position <nl> - auto * o_p = reinterpret_cast < scalar_t : : underlying * > ( odata + b * oH * oW * iC + row * oW * iC + col * iC ) ; <nl> + auto * o_p = reinterpret_cast < scalar_t : : underlying * > ( <nl> + odata + b * oH * oW * iC + row * oW * iC + col * iC ) ; <nl> <nl> / / Loop over reduction block <nl> int64_t h_start = row * sH - pH ; <nl> void qmaxpool_2d_nhwc_kernel ( const Tensor & qx , <nl> <nl> / / Interleaved vector loop 4x <nl> constexpr auto vec_width = Vec256 < scalar_t > : : size ( ) ; <nl> - for ( ; c + 4 * vec_width < = iC ; c + = 4 * vec_width ) { <nl> - Vec256 < scalar_t > acc { scalar_t ( std : : numeric_limits < scalar_t : : underlying > : : lowest ( ) ) } ; <nl> + for ( ; c + 4 * vec_width < = iC ; c + = 4 * vec_width ) { <nl> + Vec256 < scalar_t > acc { <nl> + scalar_t ( std : : numeric_limits < scalar_t : : underlying > : : lowest ( ) ) } ; <nl> Vec256 < scalar_t > accs [ 4 ] = { acc , acc , acc , acc } ; <nl> int64_t tcntr = 0 ; <nl> int64_t x , y ; <nl> void qmaxpool_2d_nhwc_kernel ( const Tensor & qx , <nl> for ( x = w_start ; x < w_end ; x + = dW ) { <nl> for ( int i = 0 ; i < 4 ; + + i ) { <nl> tcntr = y * iW + x ; <nl> - auto vals = Vec256 < scalar_t > : : loadu ( i_p + tcntr * iC + c + Vec256 < scalar_t > : : size ( ) * i ) ; <nl> + auto vals = Vec256 < scalar_t > : : loadu ( <nl> + i_p + tcntr * iC + c + Vec256 < scalar_t > : : size ( ) * i ) ; <nl> accs [ i ] = vec256 : : maximum ( accs [ i ] , vals ) ; <nl> } <nl> - } / / for x <nl> - } / / for y <nl> + } / / for x <nl> + } / / for y <nl> for ( int i = 0 ; i < 4 ; + + i ) { <nl> accs [ i ] . 
store ( o_p + c + Vec256 < scalar_t > : : size ( ) * i ) ; <nl> } <nl> - } / / for c <nl> + } / / for c <nl> <nl> / / Vector loop <nl> - for ( ; c + vec_width < = iC ; c + = vec_width ) { <nl> - Vec256 < scalar_t > acc { scalar_t ( std : : numeric_limits < scalar_t : : underlying > : : lowest ( ) ) } ; <nl> + for ( ; c + vec_width < = iC ; c + = vec_width ) { <nl> + Vec256 < scalar_t > acc { <nl> + scalar_t ( std : : numeric_limits < scalar_t : : underlying > : : lowest ( ) ) } ; <nl> int64_t tcntr = 0 ; <nl> int64_t x , y ; <nl> for ( y = h_start ; y < h_end ; y + = dH ) { <nl> void qmaxpool_2d_nhwc_kernel ( const Tensor & qx , <nl> tcntr = y * iW + x ; <nl> auto vals = Vec256 < scalar_t > : : loadu ( i_p + tcntr * iC + c ) ; <nl> acc = vec256 : : maximum ( acc , vals ) ; <nl> - } / / for x <nl> - } / / for y <nl> + } / / for x <nl> + } / / for y <nl> acc . store ( o_p + c ) ; <nl> - } / / for c <nl> + } / / for c <nl> <nl> for ( ; c < iC ; + + c ) { <nl> auto max_val = std : : numeric_limits < scalar_t : : underlying > : : lowest ( ) ; <nl> void qmaxpool_2d_nhwc_kernel ( const Tensor & qx , <nl> tcntr = y * iW + x ; <nl> auto val = * ( i_p + tcntr * iC + c ) ; <nl> max_val = std : : max ( max_val , val ) ; <nl> - } / / for x <nl> - } / / for y <nl> + } / / for x <nl> + } / / for y <nl> <nl> o_p [ c ] = max_val ; <nl> - } / / for c <nl> - } / / for col <nl> - } / / for row <nl> - } / / for b <nl> + } / / for c <nl> + } / / for col <nl> + } / / for row <nl> + } / / for b <nl> } ) ; <nl> - } <nl> + } <nl> + <nl> + void qadaptive_avg_pool2d_nhwc_kernel ( <nl> + const Tensor & qx , <nl> + Tensor & qy , <nl> + int64_t b , <nl> + int64_t sizeD , <nl> + int64_t isizeH , <nl> + int64_t isizeW , <nl> + int64_t osizeH , <nl> + int64_t osizeW , <nl> + int64_t istrideB , <nl> + int64_t istrideD , <nl> + int64_t istrideH , <nl> + int64_t istrideW ) { <nl> + AT_DISPATCH_QINT_TYPES ( qx . scalar_type ( ) , " adaptive_avg_pool2d_nhwc " , [ & ] ( ) { <nl> + scalar_t * idata = static_cast < scalar_t * > ( qx . data_ptr ( ) ) ; <nl> + scalar_t * odata = static_cast < scalar_t * > ( qy . data_ptr ( ) ) ; <nl> + auto minimum = std : : numeric_limits < scalar_t : : underlying > : : lowest ( ) ; <nl> + auto maximum = std : : numeric_limits < scalar_t : : underlying > : : max ( ) ; <nl> + auto * i_p = <nl> + reinterpret_cast < typename scalar_t : : underlying * > ( idata + b * istrideB ) ; <nl> + for ( int64_t oh = 0 ; oh < osizeH ; oh + + ) { <nl> + int istartH = ( int ) std : : floor ( ( float ) ( oh * isizeH ) / osizeH ) ; <nl> + int iendH = ( int ) std : : ceil ( ( float ) ( ( oh + 1 ) * isizeH ) / osizeH ) ; <nl> + int kH = iendH - istartH ; <nl> + for ( int64_t ow = 0 ; ow < osizeW ; ow + + ) { <nl> + auto * o_p = reinterpret_cast < typename scalar_t : : underlying * > ( <nl> + odata + b * osizeH * osizeW * sizeD + ( oh * osizeW + ow ) * sizeD ) ; <nl> + int istartW = ( int ) std : : floor ( ( float ) ( ow * isizeW ) / osizeW ) ; <nl> + int iendW = ( int ) std : : ceil ( ( float ) ( ( ow + 1 ) * isizeW ) / osizeW ) ; <nl> + int kW = iendW - istartW ; <nl> + int size = kH * kW ; <nl> + float multiplier = qx . q_scale ( ) / qy . q_scale ( ) / size ; <nl> + int64_t c = 0 ; <nl> + / / For int8 or uint8quantization , we implicitly use int32 as <nl> + / / accumulation Or else , it will go to the slow path <nl> + / / TODO : support 16bit , 32bit , and etc . 
<nl> + constexpr auto vec_width = Vec256 < scalar_t > : : size ( ) / 4 ; <nl> + auto * internal_i_p = i_p + istartH * istrideH + istartW * istrideW ; <nl> + <nl> + / / TODO : more vectorization with loop interleaving <nl> + # ifdef __AVX2__ <nl> + if ( vec_width = = 8 ) { <nl> + for ( ; c + vec_width < = sizeD ; c + = vec_width ) { <nl> + int64_t tcntr = 0 ; <nl> + Vec256 < int32_t > acc ( - qx . q_zero_point ( ) * size ) ; <nl> + for ( int64_t ih = 0 ; ih < kH ; ih + + ) { <nl> + for ( int64_t iw = 0 ; iw < kW ; iw + + ) { <nl> + tcntr = ih * istrideH + iw * istrideW ; <nl> + auto vals = <nl> + vec256 : : convert_to_int32 < typename scalar_t : : underlying > ( <nl> + internal_i_p + tcntr + c * istrideD ) ; <nl> + acc = acc + vals ; <nl> + } <nl> + } <nl> + int32_t acc_int [ vec_width ] ; <nl> + float acc_fp [ vec_width ] ; <nl> + acc . store ( acc_int ) ; <nl> + vec256 : : convert ( acc_int , acc_fp , vec_width ) ; <nl> + vec256 : : QuantizeAvx2 < scalar_t > ( <nl> + acc_fp , o_p + c , vec_width , multiplier , qy . q_zero_point ( ) ) ; <nl> + } <nl> + } <nl> + # endif <nl> + / / remainer <nl> + for ( ; c < sizeD ; + + c ) { <nl> + int32_t acc_int32 = - qx . q_zero_point ( ) * size ; <nl> + int64_t tcntr = 0 ; <nl> + for ( int64_t ih = 0 ; ih < kH ; ih + + ) { <nl> + for ( int64_t iw = 0 ; iw < kW ; iw + + ) { <nl> + tcntr = ih * istrideH + iw * istrideW ; <nl> + auto val = * ( internal_i_p + tcntr + c * istrideD ) ; <nl> + acc_int32 + = val ; <nl> + } <nl> + } <nl> + / / clamp <nl> + o_p [ c ] = std : : min < int32_t > ( <nl> + std : : max < int32_t > ( <nl> + std : : nearbyint ( acc_int32 * multiplier + qy . q_zero_point ( ) ) , <nl> + minimum ) , <nl> + maximum ) ; <nl> + } / / c <nl> + } / / oh <nl> + } / / ow <nl> + } ) ; <nl> + } <nl> + <nl> + void qavg_pool2d_nhwc_kernel ( <nl> + const Tensor & qx , <nl> + Tensor & qy , <nl> + int64_t b , <nl> + int64_t nInputPlane , <nl> + int64_t inputWidth , <nl> + int64_t inputHeight , <nl> + int64_t outputWidth , <nl> + int64_t outputHeight , <nl> + int kW , <nl> + int kH , <nl> + int dW , <nl> + int dH , <nl> + int padW , <nl> + int padH , <nl> + bool count_include_pad , <nl> + c10 : : optional < int64_t > divisor_override ) { <nl> + AT_DISPATCH_QINT_TYPES ( qx . scalar_type ( ) , " avg_pool2d_nhwc " , [ & ] ( ) { <nl> + scalar_t * idata = static_cast < scalar_t * > ( qx . data_ptr ( ) ) ; <nl> + scalar_t * odata = static_cast < scalar_t * > ( qy . 
data_ptr ( ) ) ; <nl> + auto minimum = std : : numeric_limits < scalar_t : : underlying > : : lowest ( ) ; <nl> + auto maximum = std : : numeric_limits < scalar_t : : underlying > : : max ( ) ; <nl> + int64_t batch_size = nInputPlane * inputWidth * inputHeight ; <nl> + auto * i_p = reinterpret_cast < typename scalar_t : : underlying * > ( <nl> + idata + b * batch_size ) ; <nl> + <nl> + for ( int64_t oh = 0 ; oh < outputHeight ; oh + + ) { <nl> + for ( int64_t ow = 0 ; ow < outputWidth ; ow + + ) { <nl> + auto * o_p = reinterpret_cast < typename scalar_t : : underlying * > ( <nl> + odata + b * nInputPlane * outputWidth * outputHeight + <nl> + ( oh * outputWidth + ow ) * nInputPlane ) ; <nl> + int64_t hstart = oh * dH - padH ; <nl> + int64_t wstart = ow * dW - padW ; <nl> + int64_t hend = std : : min ( hstart + kH , inputHeight + padH ) ; <nl> + int64_t wend = std : : min ( wstart + kW , inputWidth + padW ) ; <nl> + int64_t pool_size = ( hend - hstart ) * ( wend - wstart ) ; <nl> + hstart = std : : max ( hstart , ( int64_t ) 0 ) ; <nl> + wstart = std : : max ( wstart , ( int64_t ) 0 ) ; <nl> + hend = std : : min ( hend , inputHeight ) ; <nl> + wend = std : : min ( wend , inputWidth ) ; <nl> + <nl> + int64_t size ; <nl> + int64_t divide_factor ; <nl> + if ( divisor_override . has_value ( ) ) { <nl> + divide_factor = divisor_override . value ( ) ; <nl> + size = ( hend - hstart ) * ( wend - wstart ) ; <nl> + } else { <nl> + if ( count_include_pad ) { <nl> + divide_factor = pool_size ; <nl> + } else { <nl> + divide_factor = ( hend - hstart ) * ( wend - wstart ) ; <nl> + } <nl> + size = divide_factor ; <nl> + } <nl> + <nl> + int64_t c = 0 ; <nl> + / / For int8 quantization , we implicitly use int32 as accumulation <nl> + / / Or else , it will go to the slow path <nl> + / / TODO : support 16bit , 32bit , and etc . <nl> + constexpr auto vec_width = Vec256 < scalar_t > : : size ( ) / 4 ; <nl> + float multiplier = qx . q_scale ( ) / qy . q_scale ( ) / divide_factor ; <nl> + # ifdef __AVX2__ <nl> + if ( vec_width = = 8 ) { <nl> + for ( ; c + vec_width < = nInputPlane ; c + = vec_width ) { <nl> + int64_t tcntr = 0 ; <nl> + Vec256 < int32_t > acc ( - qx . q_zero_point ( ) * size ) ; <nl> + for ( int64_t ih = hstart ; ih < hend ; ih + + ) { <nl> + for ( int64_t iw = wstart ; iw < wend ; iw + + ) { <nl> + tcntr = ih * inputWidth + iw ; <nl> + auto vals = <nl> + vec256 : : convert_to_int32 < typename scalar_t : : underlying > ( <nl> + i_p + tcntr * nInputPlane + c ) ; <nl> + acc = acc + vals ; <nl> + } <nl> + } <nl> + int32_t acc_int [ vec_width ] ; <nl> + float acc_fp [ vec_width ] ; <nl> + acc . store ( acc_int ) ; <nl> + vec256 : : convert ( acc_int , acc_fp , vec_width ) ; <nl> + vec256 : : QuantizeAvx2 < scalar_t > ( <nl> + acc_fp , o_p + c , vec_width , multiplier , qy . q_zero_point ( ) ) ; <nl> + } <nl> + } <nl> + # endif <nl> + / / remainer <nl> + for ( ; c < nInputPlane ; + + c ) { <nl> + int32_t acc_int32 = - qx . q_zero_point ( ) * size ; <nl> + int64_t tcntr = 0 ; <nl> + for ( int64_t ih = hstart ; ih < hend ; ih + + ) { <nl> + for ( int64_t iw = wstart ; iw < wend ; iw + + ) { <nl> + tcntr = ih * inputWidth + iw ; <nl> + auto val = * ( i_p + tcntr * nInputPlane + c ) ; <nl> + acc_int32 + = val ; <nl> + } <nl> + } <nl> + double acc_fp = acc_int32 * 1 . 0 ; <nl> + / / clamp <nl> + o_p [ c ] = std : : min < int32_t > ( <nl> + std : : max < int32_t > ( <nl> + std : : nearbyint ( acc_fp * multiplier + qy . 
q_zero_point ( ) ) , <nl> + minimum ) , <nl> + maximum ) ; <nl> + } / / c <nl> + } / / ow <nl> + } / / oh <nl> + } ) ; <nl> + } <nl> <nl> } / / namespace <nl> <nl> REGISTER_DISPATCH ( qrelu6_stub , & qrelu6_kernel ) ; <nl> REGISTER_DISPATCH ( qadd_relu_stub , & qadd_kernel < true > ) ; <nl> REGISTER_DISPATCH ( qadd_stub , & qadd_kernel < false > ) ; <nl> REGISTER_DISPATCH ( qmaxpool_2d_nhwc_stub , & qmaxpool_2d_nhwc_kernel ) ; <nl> + REGISTER_DISPATCH ( <nl> + qadaptive_avg_pool2d_nhwc_stub , <nl> + & qadaptive_avg_pool2d_nhwc_kernel ) ; <nl> + REGISTER_DISPATCH ( qavg_pool2d_nhwc_stub , & qavg_pool2d_nhwc_kernel ) ; <nl> <nl> } / / namespace native <nl> - } / / namespace at <nl> \ No newline at end of file <nl> + } / / namespace at <nl> new file mode 100644 <nl> index 000000000000 . . c76c543ad481 <nl> mmm / dev / null <nl> ppp b / aten / src / ATen / native / quantized / cpu / q_adaavgpool . cpp <nl> <nl> + # include < ATen / ATen . h > <nl> + # include < ATen / NativeFunctions . h > <nl> + # include < ATen / Parallel . h > <nl> + # include < ATen / native / quantized / cpu / quantized_ops . h > <nl> + <nl> + # include < algorithm > <nl> + # include < cmath > <nl> + # include < limits > <nl> + # include < vector > <nl> + <nl> + namespace at { <nl> + namespace native { <nl> + namespace { <nl> + <nl> + DEFINE_DISPATCH ( qadaptive_avg_pool2d_nhwc_stub ) ; <nl> + <nl> + inline int start_index ( int out_idx , int out_len , int in_len ) { <nl> + / * <nl> + * out_idx : the current index of output matrix <nl> + * out_len : the dimension_size of output matrix <nl> + * in_len : the dimension_size of input matrix <nl> + * Basically , in_len / out_len gives the number of <nl> + * elements in each average computation . <nl> + * This functin computes the start index on input matrix . <nl> + * / <nl> + return ( int ) std : : floor ( ( float ) ( out_idx * in_len ) / out_len ) ; <nl> + } <nl> + <nl> + inline int end_index ( int out_idx , int out_len , int in_len ) { <nl> + / * <nl> + * Parameter definition is the same as start_index . <nl> + * This function computes the end index on input matrix . <nl> + * / <nl> + return ( int ) std : : ceil ( ( float ) ( ( out_idx + 1 ) * in_len ) / out_len ) ; <nl> + } <nl> + <nl> + template < typename scalar_t > <nl> + static void adaptive_avg_pool2d_single_out_frame ( <nl> + scalar_t * input_p , <nl> + scalar_t * output_p , <nl> + int64_t sizeD , <nl> + int64_t isizeH , <nl> + int64_t isizeW , <nl> + int64_t osizeH , <nl> + int64_t osizeW , <nl> + int64_t istrideD , <nl> + int64_t istrideH , <nl> + int64_t istrideW ) { <nl> + at : : parallel_for ( 0 , sizeD , 0 , [ & ] ( int64_t start , int64_t end ) { <nl> + for ( auto d = start ; d < end ; d + + ) { <nl> + / * loop over output * / <nl> + int64_t oh , ow ; <nl> + for ( oh = 0 ; oh < osizeH ; oh + + ) { <nl> + int istartH = start_index ( oh , osizeH , isizeH ) ; <nl> + int iendH = end_index ( oh , osizeH , isizeH ) ; <nl> + int kH = iendH - istartH ; <nl> + float kHr = 1 . 
0 / kH ; <nl> + <nl> + for ( ow = 0 ; ow < osizeW ; ow + + ) { <nl> + int istartW = start_index ( ow , osizeW , isizeW ) ; <nl> + int iendW = end_index ( ow , osizeW , isizeW ) ; <nl> + int kW = iendW - istartW ; <nl> + float kHWr = kHr / kW ; <nl> + <nl> + / * local pointers * / <nl> + scalar_t * ip = <nl> + input_p + d * istrideD + istartH * istrideH + istartW * istrideW ; <nl> + scalar_t * op = output_p + d * osizeH * osizeW + oh * osizeW + ow ; <nl> + <nl> + / * compute local average : * / <nl> + int64_t sum = 0 ; <nl> + int ih , iw ; <nl> + for ( ih = 0 ; ih < kH ; ih + + ) { <nl> + for ( iw = 0 ; iw < kW ; iw + + ) { <nl> + int64_t val = ( ip + ih * istrideH + iw * istrideW ) - > val_ ; <nl> + sum + = val ; <nl> + } <nl> + } <nl> + <nl> + / * set output to local average * / <nl> + / / TODO : add the max / min clip <nl> + op - > val_ = static_cast < typename scalar_t : : underlying > ( <nl> + std : : nearbyint ( sum * kHWr ) ) ; <nl> + } <nl> + } <nl> + } <nl> + } ) ; <nl> + } <nl> + <nl> + std : : vector < int64_t > get_output_shape ( <nl> + const Tensor & input , <nl> + IntArrayRef output_size ) { <nl> + for ( int64_t i = 0 ; i < input . dim ( ) ; i + + ) { <nl> + TORCH_CHECK ( <nl> + input . size ( i ) > 0 , <nl> + " adaptive_avg_pooling2d ( ) : expected input to have non - empty spatial " <nl> + " dimensions , but input has sizes " , <nl> + input . sizes ( ) , <nl> + " with dimension " , <nl> + i , <nl> + " being empty " ) ; <nl> + } <nl> + <nl> + TORCH_CHECK ( <nl> + ( input . dim ( ) = = 3 | | input . dim ( ) = = 4 ) , <nl> + " non - empty 3D or 4D ( batch mode ) tensor expected for input " ) ; <nl> + <nl> + / * sizes * / <nl> + int64_t sizeD = input . size ( - 3 ) ; <nl> + const auto osizeH = output_size [ 0 ] ; <nl> + const auto osizeW = output_size [ 1 ] ; <nl> + <nl> + / * resize output * / <nl> + std : : vector < int64_t > output_shape ; <nl> + int64_t sizeB = 0 ; <nl> + if ( input . dim ( ) = = 3 ) { <nl> + output_shape = { sizeD , osizeH , osizeW } ; <nl> + } else { <nl> + sizeB = input . size ( - 4 ) ; <nl> + output_shape = { sizeB , sizeD , osizeH , osizeW } ; <nl> + } <nl> + <nl> + return output_shape ; <nl> + } <nl> + <nl> + template < typename scalar_t > <nl> + Tensor q_adaptive_avg_pool2d ( const Tensor & input , IntArrayRef output_size ) { <nl> + Tensor output ; <nl> + const auto output_shape = get_output_shape ( input , output_size ) ; <nl> + / * sizes * / <nl> + int64_t sizeD = input . size ( - 3 ) ; <nl> + int64_t isizeH = input . size ( - 2 ) ; <nl> + int64_t isizeW = input . size ( - 1 ) ; <nl> + / * strides * / <nl> + int64_t istrideD = input . stride ( - 3 ) ; <nl> + int64_t istrideH = input . stride ( - 2 ) ; <nl> + int64_t istrideW = input . stride ( - 1 ) ; <nl> + <nl> + auto osizeH = output_shape [ output_shape . size ( ) - 2 ] ; <nl> + auto osizeW = output_shape [ output_shape . size ( ) - 1 ] ; <nl> + int64_t sizeB = output_shape . size ( ) = = 3 ? 0 : output_shape [ 0 ] ; <nl> + <nl> + if ( input . is_contiguous ( c10 : : MemoryFormat : : ChannelsLast ) ) { <nl> + / / Fast path for NHWC <nl> + Tensor output = at : : _empty_affine_quantized ( <nl> + output_shape , <nl> + input . options ( ) , <nl> + input . q_scale ( ) , <nl> + input . q_zero_point ( ) , <nl> + input . suggest_memory_format ( ) ) ; <nl> + if ( input . dim ( ) = = 3 | | input . size ( 0 ) = = 1 ) { <nl> + qadaptive_avg_pool2d_nhwc_stub ( <nl> + input . device ( ) . 
type ( ) , <nl> + input , <nl> + output , <nl> + 0 , <nl> + sizeD , <nl> + isizeH , <nl> + isizeW , <nl> + osizeH , <nl> + osizeW , <nl> + 0 , <nl> + istrideD , <nl> + istrideH , <nl> + istrideW ) ; <nl> + } else { <nl> + int64_t istrideB = input . stride ( - 4 ) ; <nl> + at : : parallel_for ( 0 , sizeB , 0 , [ & ] ( int64_t start , int64_t end ) { <nl> + for ( auto b = start ; b < end ; b + + ) { <nl> + qadaptive_avg_pool2d_nhwc_stub ( <nl> + input . device ( ) . type ( ) , <nl> + input , <nl> + output , <nl> + b , <nl> + sizeD , <nl> + isizeH , <nl> + isizeW , <nl> + osizeH , <nl> + osizeW , <nl> + istrideB , <nl> + istrideD , <nl> + istrideH , <nl> + istrideW ) ; <nl> + } <nl> + } ) ; <nl> + } <nl> + return output ; <nl> + } else { <nl> + Tensor output = at : : _empty_affine_quantized ( <nl> + output_shape , input . options ( ) , input . q_scale ( ) , input . q_zero_point ( ) ) ; <nl> + auto input_contig = input . contiguous ( ) ; <nl> + auto input_data = input_contig . data_ptr < scalar_t > ( ) ; <nl> + auto output_data = output . data_ptr < scalar_t > ( ) ; <nl> + <nl> + if ( input . dim ( ) = = 3 | | input . size ( 0 ) = = 1 ) { <nl> + adaptive_avg_pool2d_single_out_frame < scalar_t > ( <nl> + input_data , <nl> + output_data , <nl> + sizeD , <nl> + isizeH , <nl> + isizeW , <nl> + osizeH , <nl> + osizeW , <nl> + istrideD , <nl> + istrideH , <nl> + istrideW ) ; <nl> + } else { <nl> + int64_t istrideB = input . stride ( - 4 ) ; <nl> + at : : parallel_for ( 0 , sizeB , 0 , [ & ] ( int64_t start , int64_t end ) { <nl> + for ( auto b = start ; b < end ; b + + ) { <nl> + adaptive_avg_pool2d_single_out_frame < scalar_t > ( <nl> + input_data + b * istrideB , <nl> + output_data + b * sizeD * osizeH * osizeW , <nl> + sizeD , <nl> + isizeH , <nl> + isizeW , <nl> + osizeH , <nl> + osizeW , <nl> + istrideD , <nl> + istrideH , <nl> + istrideW ) ; <nl> + } <nl> + } ) ; <nl> + } <nl> + return output ; <nl> + } <nl> + } <nl> + } / / namespace <nl> + <nl> + Tensor quantized_adaptive_avg_pool2d ( <nl> + const at : : Tensor & input , <nl> + IntArrayRef output_size ) { <nl> + Tensor output ; <nl> + AT_DISPATCH_QINT_TYPES ( <nl> + input . scalar_type ( ) , " quantized_adaptive_avg_pool2d " , [ & ] ( ) { <nl> + output = q_adaptive_avg_pool2d < scalar_t > ( input , output_size ) ; <nl> + } ) ; <nl> + return output ; <nl> + } <nl> + <nl> + } / / namespace native <nl> + } / / namespace at <nl> mmm a / aten / src / ATen / native / quantized / cpu / q_avgpool . cpp <nl> ppp b / aten / src / ATen / native / quantized / cpu / q_avgpool . cpp <nl> <nl> # include < ATen / ATen . h > <nl> # include < ATen / NativeFunctions . h > <nl> # include < ATen / Parallel . h > <nl> - # include < ATen / core / op_registration / op_registration . h > <nl> + # include < ATen / native / Pool . h > <nl> + # include < ATen / native / quantized / cpu / quantized_ops . 
h > <nl> <nl> # include < algorithm > <nl> # include < cmath > <nl> namespace at { <nl> namespace native { <nl> namespace { <nl> <nl> - inline int start_index ( int a , int b , int c ) { <nl> - return ( int ) std : : floor ( ( float ) ( a * c ) / b ) ; <nl> - } <nl> + DEFINE_DISPATCH ( qavg_pool2d_nhwc_stub ) ; <nl> <nl> - inline int end_index ( int a , int b , int c ) { <nl> - return ( int ) std : : ceil ( ( float ) ( ( a + 1 ) * c ) / b ) ; <nl> - } <nl> + template < typename scalar_t > <nl> + static void avg_pool2d_out_frame ( <nl> + const Tensor & input , <nl> + Tensor & output , <nl> + int64_t b , <nl> + int64_t nInputPlane , <nl> + int64_t inputWidth , <nl> + int64_t inputHeight , <nl> + int64_t outputWidth , <nl> + int64_t outputHeight , <nl> + int kW , <nl> + int kH , <nl> + int dW , <nl> + int dH , <nl> + int padW , <nl> + int padH , <nl> + bool count_include_pad , <nl> + c10 : : optional < int64_t > divisor_override ) { <nl> + at : : parallel_for ( 0 , nInputPlane , 0 , [ & ] ( int64_t start , int64_t end ) { <nl> + for ( auto k = start ; k < end ; k + + ) { <nl> + int64_t xx , yy ; <nl> + / * For all output pixels . . . * / <nl> + auto input_data = input . contiguous ( ) . data_ptr < scalar_t > ( ) ; <nl> + auto output_data = output . data_ptr < scalar_t > ( ) ; <nl> + scalar_t * ptr_output = output_data + <nl> + b * nInputPlane * outputWidth * outputHeight + <nl> + k * outputWidth * outputHeight ; <nl> + const scalar_t * ptr_input = input_data + <nl> + b * nInputPlane * inputWidth * inputHeight + <nl> + k * inputWidth * inputHeight ; <nl> + auto minimum = <nl> + std : : numeric_limits < typename scalar_t : : underlying > : : lowest ( ) ; <nl> + auto maximum = std : : numeric_limits < typename scalar_t : : underlying > : : max ( ) ; <nl> + <nl> + for ( yy = 0 ; yy < outputHeight ; yy + + ) { <nl> + for ( xx = 0 ; xx < outputWidth ; xx + + ) { <nl> + / * Compute the mean of the input image . . . * / <nl> + int64_t hstart = yy * dH - padH ; <nl> + int64_t wstart = xx * dW - padW ; <nl> + int64_t hend = std : : min ( hstart + kH , inputHeight + padH ) ; <nl> + int64_t wend = std : : min ( wstart + kW , inputWidth + padW ) ; <nl> + int64_t pool_size = ( hend - hstart ) * ( wend - wstart ) ; <nl> + hstart = std : : max ( hstart , ( int64_t ) 0 ) ; <nl> + wstart = std : : max ( wstart , ( int64_t ) 0 ) ; <nl> + hend = std : : min ( hend , inputHeight ) ; <nl> + wend = std : : min ( wend , inputWidth ) ; <nl> <nl> - template < typename scalar_t , typename underlying_t > <nl> - static void adaptive_avg_pool2d_single_out_frame ( <nl> - scalar_t * input_p , <nl> - scalar_t * output_p , <nl> - int64_t sizeD , <nl> - int64_t isizeH , <nl> - int64_t isizeW , <nl> - int64_t osizeH , <nl> - int64_t osizeW , <nl> - int64_t istrideD , <nl> - int64_t istrideH , <nl> - int64_t istrideW ) { <nl> - at : : parallel_for ( 0 , sizeD , 0 , [ & ] ( int64_t start , int64_t end ) { <nl> - for ( auto d = start ; d < end ; d + + ) { <nl> - / * loop over output * / <nl> - int64_t oh , ow ; <nl> - for ( oh = 0 ; oh < osizeH ; oh + + ) { <nl> - int istartH = start_index ( oh , osizeH , isizeH ) ; <nl> - int iendH = end_index ( oh , osizeH , isizeH ) ; <nl> - int kH = iendH - istartH ; <nl> - float kHr = 1 . 
0 / kH ; <nl> - <nl> - for ( ow = 0 ; ow < osizeW ; ow + + ) { <nl> - int istartW = start_index ( ow , osizeW , isizeW ) ; <nl> - int iendW = end_index ( ow , osizeW , isizeW ) ; <nl> - int kW = iendW - istartW ; <nl> - float kHWr = kHr / kW ; <nl> - <nl> - / * local pointers * / <nl> - scalar_t * ip = <nl> - input_p + d * istrideD + istartH * istrideH + istartW * istrideW ; <nl> - scalar_t * op = output_p + d * osizeH * osizeW + oh * osizeW + ow ; <nl> - <nl> - / * compute local average : * / <nl> - int64_t sum = 0 ; <nl> - int ih , iw ; <nl> - for ( ih = 0 ; ih < kH ; ih + + ) { <nl> - for ( iw = 0 ; iw < kW ; iw + + ) { <nl> - int64_t val = ( ip + ih * istrideH + iw * istrideW ) - > val_ ; <nl> - sum + = val ; <nl> + int sum_int = 0 ; <nl> + ptr_output - > val_ = 0 ; <nl> + <nl> + int64_t divide_factor ; <nl> + int64_t size ; <nl> + if ( divisor_override . has_value ( ) ) { <nl> + divide_factor = divisor_override . value ( ) ; <nl> + size = ( hend - hstart ) * ( wend - wstart ) ; <nl> + } else { <nl> + if ( count_include_pad ) { <nl> + divide_factor = pool_size ; <nl> + } else { <nl> + divide_factor = ( hend - hstart ) * ( wend - wstart ) ; <nl> } <nl> + size = divide_factor ; <nl> + } <nl> + int64_t kx , ky ; <nl> + for ( ky = hstart ; ky < hend ; ky + + ) { <nl> + for ( kx = wstart ; kx < wend ; kx + + ) <nl> + sum_int + = ( ptr_input + ky * inputWidth + kx ) - > val_ ; <nl> } <nl> + float multiplier = input . q_scale ( ) / output . q_scale ( ) / divide_factor ; <nl> <nl> - / * set output to local average * / <nl> - op - > val_ = static_cast < underlying_t > ( std : : nearbyint ( sum * kHWr ) ) ; <nl> + sum_int - = size * input . q_zero_point ( ) ; <nl> + float sum = sum_int * 1 . 0 ; <nl> + / * Update output by requantizing the result * / <nl> + ptr_output - > val_ = <nl> + static_cast < typename scalar_t : : underlying > ( std : : min < int32_t > ( <nl> + std : : max < int32_t > ( <nl> + std : : nearbyint ( sum * multiplier + output . q_zero_point ( ) ) , <nl> + minimum ) , <nl> + maximum ) ) ; <nl> + ptr_output + + ; <nl> } <nl> } <nl> } <nl> } ) ; <nl> } <nl> <nl> - template < typename scalar_t , typename underlying_t > <nl> - void adaptive_avg_pool2d_out_frame ( <nl> - scalar_t * input_p , <nl> - scalar_t * output_p , <nl> - int64_t sizeB , <nl> - int64_t sizeD , <nl> - int64_t isizeH , <nl> - int64_t isizeW , <nl> - int64_t osizeH , <nl> - int64_t osizeW , <nl> - int64_t istrideB , <nl> - int64_t istrideD , <nl> - int64_t istrideH , <nl> - int64_t istrideW ) { <nl> - at : : parallel_for ( 0 , sizeB , 0 , [ & ] ( int64_t start , int64_t end ) { <nl> - for ( auto b = start ; b < end ; b + + ) { <nl> - adaptive_avg_pool2d_single_out_frame < scalar_t , underlying_t > ( <nl> - input_p + b * istrideB , <nl> - output_p + b * sizeD * osizeH * osizeW , <nl> - sizeD , <nl> - isizeH , <nl> - isizeW , <nl> - osizeH , <nl> - osizeW , <nl> - istrideD , <nl> - istrideH , <nl> - istrideW ) ; <nl> - } <nl> - } ) ; <nl> + inline std : : pair < int , int > get_kernel ( IntArrayRef kernel_size ) { <nl> + TORCH_CHECK ( <nl> + kernel_size . size ( ) = = 1 | | kernel_size . size ( ) = = 2 , <nl> + " avg_pool2d : kernel_size must either be a single int , or a tuple of two ints " ) ; <nl> + const int kH = safe_downcast < int , int64_t > ( kernel_size [ 0 ] ) ; <nl> + const int kW = kernel_size . size ( ) = = 1 <nl> + ? 
kH <nl> + : safe_downcast < int , int64_t > ( kernel_size [ 1 ] ) ; <nl> + return std : : make_pair ( kW , kH ) ; <nl> } <nl> <nl> - void adaptive_avg_pool2d_out_template ( <nl> - Tensor & output , <nl> - Tensor input , <nl> - std : : vector < int64_t > output_shape ) { <nl> - / * sizes * / <nl> - int64_t sizeD = input . size ( - 3 ) ; <nl> - int64_t isizeH = input . size ( - 2 ) ; <nl> - int64_t isizeW = input . size ( - 1 ) ; <nl> - / * strides * / <nl> - int64_t istrideD = input . stride ( - 3 ) ; <nl> - int64_t istrideH = input . stride ( - 2 ) ; <nl> - int64_t istrideW = input . stride ( - 1 ) ; <nl> - <nl> - auto osizeH = output_shape [ output_shape . size ( ) - 2 ] ; <nl> - auto osizeW = output_shape [ output_shape . size ( ) - 1 ] ; <nl> - int64_t sizeB = output_shape . size ( ) = = 3 ? 0 : output_shape [ 0 ] ; <nl> - <nl> - if ( input . dim ( ) = = 3 | | input . size ( 0 ) = = 1 ) { <nl> - AT_DISPATCH_QINT_TYPES ( <nl> - input . scalar_type ( ) , " quantized_adaptive_avg_pool2d " , [ & ] { <nl> - auto input_data = input . data_ptr < scalar_t > ( ) ; <nl> - auto output_data = output . data_ptr < scalar_t > ( ) ; <nl> - adaptive_avg_pool2d_single_out_frame < scalar_t , underlying_t > ( <nl> - input_data , <nl> - output_data , <nl> - sizeD , <nl> - isizeH , <nl> - isizeW , <nl> - osizeH , <nl> - osizeW , <nl> - istrideD , <nl> - istrideH , <nl> - istrideW ) ; <nl> - } ) ; <nl> - } else { <nl> - int64_t istrideB = input . stride ( - 4 ) ; <nl> - <nl> - AT_DISPATCH_QINT_TYPES ( <nl> - input . scalar_type ( ) , " quantized_adaptive_avg_pool2d " , [ & ] { <nl> - auto input_data = input . data_ptr < scalar_t > ( ) ; <nl> - auto output_data = output . data_ptr < scalar_t > ( ) ; <nl> - adaptive_avg_pool2d_out_frame < scalar_t , underlying_t > ( <nl> - input_data , <nl> - output_data , <nl> - sizeB , <nl> - sizeD , <nl> - isizeH , <nl> - isizeW , <nl> - osizeH , <nl> - osizeW , <nl> - istrideB , <nl> - istrideD , <nl> - istrideH , <nl> - istrideW ) ; <nl> - } ) ; <nl> - } <nl> + inline std : : pair < int , int > get_stride ( IntArrayRef stride , int kW , int kH ) { <nl> + TORCH_CHECK ( <nl> + stride . empty ( ) | | stride . size ( ) = = 1 | | stride . size ( ) = = 2 , <nl> + " avg_pool2d : stride must either be omitted , a single int , or a tuple of two ints " ) ; <nl> + const int dH = stride . empty ( ) ? kH : safe_downcast < int , int64_t > ( stride [ 0 ] ) ; <nl> + const int dW = stride . empty ( ) <nl> + ? kW <nl> + : stride . size ( ) = = 1 ? dH : safe_downcast < int , int64_t > ( stride [ 1 ] ) ; <nl> + return std : : make_pair ( dW , dH ) ; <nl> + } <nl> + <nl> + inline std : : pair < int , int > get_padding ( IntArrayRef padding ) { <nl> + TORCH_CHECK ( <nl> + padding . size ( ) = = 1 | | padding . size ( ) = = 2 , <nl> + " avg_pool2d : padding must either be a single int , or a tuple of two ints " ) ; <nl> + const int padH = safe_downcast < int , int64_t > ( padding [ 0 ] ) ; <nl> + const int padW = <nl> + padding . size ( ) = = 1 ? padH : safe_downcast < int , int64_t > ( padding [ 1 ] ) ; <nl> + return std : : make_pair ( padW , padH ) ; <nl> } <nl> <nl> - std : : vector < int64_t > get_output_shape ( Tensor input , IntArrayRef output_size ) { <nl> - for ( int64_t i = 0 ; i < input . dim ( ) ; i + + ) { <nl> - TORCH_CHECK ( <nl> - input . size ( i ) > 0 , <nl> - " adaptive_avg_pooling2d ( ) : expected input to have non - empty spatial " <nl> - " dimensions , but input has sizes " , <nl> - input . 
sizes ( ) , <nl> - " with dimension " , <nl> - i , <nl> - " being empty " ) ; <nl> + std : : vector < int64_t > get_output_shape ( <nl> + const Tensor & input_ , <nl> + int kW , <nl> + int kH , <nl> + int dW , <nl> + int dH , <nl> + int padW , <nl> + int padH , <nl> + bool ceil_mode ) { <nl> + const int64_t nbatch = input_ . ndimension ( ) = = 4 ? input_ . size ( - 4 ) : 1 ; <nl> + const int64_t nInputPlane = input_ . size ( - 3 ) ; <nl> + const int64_t inputHeight = input_ . size ( - 2 ) ; <nl> + const int64_t inputWidth = input_ . size ( - 1 ) ; <nl> + const int64_t outputHeight = <nl> + pooling_output_shape < int64_t > ( inputHeight , kH , padH , dH , 1 , ceil_mode ) ; <nl> + const int64_t outputWidth = <nl> + pooling_output_shape < int64_t > ( inputWidth , kW , padW , dW , 1 , ceil_mode ) ; <nl> + if ( input_ . ndimension ( ) = = 3 ) { <nl> + return { nInputPlane , outputHeight , outputWidth } ; <nl> } <nl> + return { nbatch , nInputPlane , outputHeight , outputWidth } ; <nl> + } <nl> + <nl> + template < typename scalar_t > <nl> + Tensor q_avg_pool2d ( <nl> + const Tensor & input , <nl> + IntArrayRef kernel_size , <nl> + IntArrayRef stride , <nl> + IntArrayRef padding , <nl> + bool ceil_mode , <nl> + bool count_include_pad , <nl> + c10 : : optional < int64_t > divisor_override ) { <nl> + int kW , kH , dW , dH , padW , padH ; <nl> + std : : tie ( kW , kH ) = get_kernel ( kernel_size ) ; <nl> + std : : tie ( dW , dH ) = get_stride ( stride , kW , kH ) ; <nl> + std : : tie ( padW , padH ) = get_padding ( padding ) ; <nl> + <nl> + const int64_t nbatch = input . ndimension ( ) = = 4 ? input . size ( - 4 ) : 1 ; <nl> + const int64_t nInputPlane = input . size ( - 3 ) ; <nl> + const int64_t inputHeight = input . size ( - 2 ) ; <nl> + const int64_t inputWidth = input . size ( - 1 ) ; <nl> <nl> TORCH_CHECK ( <nl> - ( input . dim ( ) = = 3 | | input . dim ( ) = = 4 ) , <nl> - " non - empty 3D or 4D ( batch mode ) tensor expected for input " ) ; <nl> - <nl> - / * sizes * / <nl> - int64_t sizeD = input . size ( - 3 ) ; <nl> - const auto osizeH = output_size [ 0 ] ; <nl> - const auto osizeW = output_size [ 1 ] ; <nl> - <nl> - / * resize output * / <nl> - std : : vector < int64_t > output_shape ; <nl> - int64_t sizeB = 0 ; <nl> - if ( input . dim ( ) = = 3 ) { <nl> - output_shape = { sizeD , osizeH , osizeW } ; <nl> + ! divisor_override . has_value ( ) | | divisor_override . value ( ) ! = 0 , <nl> + " divisor must be not zero " ) ; <nl> + <nl> + auto output_shape = <nl> + get_output_shape ( input , kW , kH , dW , dH , padW , padH , ceil_mode ) ; <nl> + const int64_t outputHeight = output_shape [ output_shape . size ( ) - 2 ] ; <nl> + const int64_t outputWidth = output_shape [ output_shape . size ( ) - 1 ] ; <nl> + if ( input . is_contiguous ( c10 : : MemoryFormat : : ChannelsLast ) ) { <nl> + auto output = at : : _empty_affine_quantized ( <nl> + output_shape , <nl> + input . options ( ) , <nl> + input . q_scale ( ) , <nl> + input . q_zero_point ( ) , <nl> + input . suggest_memory_format ( ) ) ; <nl> + / / fast path for channel last : qavg_pool_2d_nhwc_stub <nl> + if ( output_shape . size ( ) = = 3 ) { <nl> + qavg_pool2d_nhwc_stub ( <nl> + input . device ( ) . 
type ( ) , <nl> + input , <nl> + output , <nl> + 0 , <nl> + nInputPlane , <nl> + inputWidth , <nl> + inputHeight , <nl> + outputWidth , <nl> + outputHeight , <nl> + kW , <nl> + kH , <nl> + dW , <nl> + dH , <nl> + padW , <nl> + padH , <nl> + count_include_pad , <nl> + divisor_override ) ; <nl> + } else { <nl> + at : : parallel_for ( 0 , nbatch , 0 , [ & ] ( int64_t start , int64_t end ) { <nl> + for ( auto b = start ; b < end ; b + + ) { <nl> + qavg_pool2d_nhwc_stub ( <nl> + input . device ( ) . type ( ) , <nl> + input , <nl> + output , <nl> + b , <nl> + nInputPlane , <nl> + inputWidth , <nl> + inputHeight , <nl> + outputWidth , <nl> + outputHeight , <nl> + kW , <nl> + kH , <nl> + dW , <nl> + dH , <nl> + padW , <nl> + padH , <nl> + count_include_pad , <nl> + divisor_override ) ; <nl> + } <nl> + } ) ; <nl> + } <nl> + return output ; <nl> } else { <nl> - sizeB = input . size ( - 4 ) ; <nl> - output_shape = { sizeB , sizeD , osizeH , osizeW } ; <nl> + auto output = at : : _empty_affine_quantized ( <nl> + output_shape , input . options ( ) , input . q_scale ( ) , input . q_zero_point ( ) ) ; <nl> + if ( output_shape . size ( ) = = 3 ) { <nl> + avg_pool2d_out_frame < scalar_t > ( <nl> + input , <nl> + output , <nl> + 0 , <nl> + nInputPlane , <nl> + inputWidth , <nl> + inputHeight , <nl> + outputWidth , <nl> + outputHeight , <nl> + kW , <nl> + kH , <nl> + dW , <nl> + dH , <nl> + padW , <nl> + padH , <nl> + count_include_pad , <nl> + divisor_override ) ; <nl> + } else { <nl> + at : : parallel_for ( 0 , nbatch , 0 , [ & ] ( int64_t start , int64_t end ) { <nl> + for ( auto b = start ; b < end ; b + + ) { <nl> + avg_pool2d_out_frame < scalar_t > ( <nl> + input , <nl> + output , <nl> + b , <nl> + nInputPlane , <nl> + inputWidth , <nl> + inputHeight , <nl> + outputWidth , <nl> + outputHeight , <nl> + kW , <nl> + kH , <nl> + dW , <nl> + dH , <nl> + padW , <nl> + padH , <nl> + count_include_pad , <nl> + divisor_override ) ; <nl> + } <nl> + } ) ; <nl> + } <nl> + return output ; <nl> } <nl> - <nl> - return output_shape ; <nl> } <nl> + <nl> } / / namespace <nl> <nl> - Tensor & quantized_adaptive_avg_pool2d_out ( <nl> - Tensor & output , <nl> + Tensor quantized_avg_pool2d ( <nl> const Tensor & input , <nl> - IntArrayRef output_size ) { <nl> - const auto output_shape = get_output_shape ( input , output_size ) ; <nl> - TORCH_CHECK ( <nl> - output . is_quantized ( ) & & output . sizes ( ) = = output_shape , <nl> - " Output Tensor must be quantized and have a shape of " , <nl> - " { " , <nl> - output_shape , <nl> - " } . " ) ; <nl> - adaptive_avg_pool2d_out_template ( output , input , output_shape ) ; <nl> - return output ; <nl> - } <nl> - <nl> - Tensor quantized_adaptive_avg_pool2d ( <nl> - const at : : Tensor & input , <nl> - IntArrayRef output_size ) { <nl> - const auto output_shape = get_output_shape ( input , output_size ) ; <nl> - Tensor output = at : : _empty_affine_quantized ( <nl> - output_shape , input . options ( ) , input . q_scale ( ) , input . q_zero_point ( ) ) ; <nl> - ; <nl> - adaptive_avg_pool2d_out_template ( output , input , output_shape ) ; <nl> + IntArrayRef kernel_size , <nl> + IntArrayRef stride , <nl> + IntArrayRef padding , <nl> + bool ceil_mode , <nl> + bool count_include_pad , <nl> + c10 : : optional < int64_t > divisor_override ) { <nl> + Tensor output ; <nl> + AT_DISPATCH_QINT_TYPES ( input . 
scalar_type ( ) , " quantized_avg_pool2d " , [ & ] ( ) { <nl> + output = q_avg_pool2d < scalar_t > ( <nl> + input , <nl> + kernel_size , <nl> + stride , <nl> + padding , <nl> + ceil_mode , <nl> + count_include_pad , <nl> + divisor_override ) ; <nl> + } ) ; <nl> return output ; <nl> } <nl> <nl> mmm a / aten / src / ATen / native / quantized / cpu / quantized_ops . h <nl> ppp b / aten / src / ATen / native / quantized / cpu / quantized_ops . h <nl> using qmaxpool_2d_fn = <nl> int64_t dW , / / dilation <nl> Tensor & qy <nl> ) ; <nl> + using qadaptive_avg_pool2d_fn = <nl> + void ( * ) ( const Tensor & qx , <nl> + Tensor & qy , <nl> + int64_t b , <nl> + int64_t sizeD , <nl> + int64_t isizeH , <nl> + int64_t isizeW , <nl> + int64_t osizeH , <nl> + int64_t osizeW , <nl> + int64_t istrideB , <nl> + int64_t istrideD , <nl> + int64_t istrideH , <nl> + int64_t istrideW <nl> + ) ; <nl> + <nl> + using qavg_pool2d_fn = <nl> + void ( * ) ( const Tensor & qx , <nl> + Tensor & qy , <nl> + int64_t b , <nl> + int64_t nInputPlane , <nl> + int64_t inputWidth , <nl> + int64_t inputHeight , <nl> + int64_t outputWidth , <nl> + int64_t outputHeight , <nl> + int kW , <nl> + int kH , <nl> + int dW , <nl> + int dH , <nl> + int padW , <nl> + int padH , <nl> + bool count_include_pad , <nl> + c10 : : optional < int64_t > divisor_override <nl> + ) ; <nl> <nl> + / / using qavg_pool2d_fn <nl> DECLARE_DISPATCH ( qrelu_fn , qrelu_stub ) ; <nl> DECLARE_DISPATCH ( qrelu_fn , qrelu6_stub ) ; <nl> DECLARE_DISPATCH ( qadd_fn , qadd_stub ) ; <nl> DECLARE_DISPATCH ( qadd_fn , qadd_relu_stub ) ; <nl> DECLARE_DISPATCH ( qmaxpool_2d_fn , qmaxpool_2d_nhwc_stub ) ; <nl> + DECLARE_DISPATCH ( qadaptive_avg_pool2d_fn , qadaptive_avg_pool2d_nhwc_stub ) ; <nl> + DECLARE_DISPATCH ( qavg_pool2d_fn , qavg_pool2d_nhwc_stub ) ; <nl> <nl> } / / namespace native <nl> - } / / namespace at <nl> \ No newline at end of file <nl> + } / / namespace at <nl> mmm a / test / test_quantized . py <nl> ppp b / test / test_quantized . py <nl> def test_max_pool2d_nhwc ( self , X , kernel , stride , dilation , padding ) : <nl> self . assertEqual ( a_ref , a_hat . dequantize ( ) , <nl> message = " ops . quantized . max_pool2d results are off " ) <nl> <nl> - @ no_deadline <nl> @ given ( X = hu . tensor ( shapes = hu . array_shapes ( min_dims = 3 , max_dims = 4 , <nl> + min_side = 5 , max_side = 10 ) , <nl> + qparams = hu . qparams ( dtypes = torch . quint8 ) ) , <nl> + kernel = st . sampled_from ( ( 3 , 5 ) ) , <nl> + stride = st . sampled_from ( ( None , 1 , 2 ) ) , <nl> + padding = st . integers ( 0 , 2 ) , <nl> + ceil_mode = st . sampled_from ( ( True , False ) ) , <nl> + count_include_pad = st . sampled_from ( ( True , False ) ) , <nl> + divisor_override = st . sampled_from ( ( None , None ) ) ) <nl> + def test_avg_pool2d ( self , X , kernel , stride , padding , ceil_mode , count_include_pad , divisor_override ) : <nl> + " " " <nl> + Note : we currently cannot test the divisor_override , because quantized op will clamp the result <nl> + within range . However , the float op will not . <nl> + " " " <nl> + X , ( scale , zero_point , torch_type ) = X <nl> + <nl> + assume ( kernel / / 2 > = padding ) # Kernel cannot be overhanging ! <nl> + iH , iW = X . shape [ - 2 : ] <nl> + oH = pool_output_shape ( iH , kernel , padding , stride , 0 ) <nl> + assume ( oH > 0 ) <nl> + oW = pool_output_shape ( iW , kernel , padding , stride , 0 ) <nl> + assume ( oW > 0 ) <nl> + <nl> + X = torch . from_numpy ( X ) <nl> + qX = torch . 
quantize_linear ( X , scale = scale , zero_point = zero_point , <nl> + dtype = torch_type ) <nl> + <nl> + # Run reference on int_repr + round to avoid double rounding error . <nl> + X_ref = torch . nn . functional . avg_pool2d ( <nl> + qX . int_repr ( ) . to ( torch . float ) , kernel_size = kernel , stride = stride , padding = padding , <nl> + ceil_mode = ceil_mode , count_include_pad = count_include_pad , divisor_override = divisor_override ) . round ( ) <nl> + <nl> + ops_under_test = { <nl> + " nn . functional " : torch . nn . functional . avg_pool2d , <nl> + " nn . quantized . functional " : torch . nn . quantized . functional . avg_pool2d <nl> + } <nl> + error_message = r " Results are off for { } : \ n \ tExpected : \ n { } \ n \ tGot : \ n { } " <nl> + for name , op in ops_under_test . items ( ) : <nl> + qX_hat = op ( qX , kernel_size = kernel , stride = stride , padding = padding , ceil_mode = ceil_mode , <nl> + count_include_pad = count_include_pad , divisor_override = divisor_override ) <nl> + self . assertEqual ( X_ref , qX_hat . int_repr ( ) , prec = 1 . 0 , <nl> + message = " { } results are off " . format ( name , qX_hat . int_repr ( ) , X_ref ) ) <nl> + self . assertEqual ( scale , qX_hat . q_scale ( ) , <nl> + message = error_message . format ( name + ' . scale ' , scale , qX_hat . q_scale ( ) ) ) <nl> + self . assertEqual ( zero_point , qX_hat . q_zero_point ( ) , <nl> + message = error_message . format ( name + ' . zero_point ' , scale , <nl> + qX_hat . q_zero_point ( ) ) ) <nl> + <nl> + @ given ( X = hu . tensor ( shapes = hu . array_shapes ( min_dims = 4 , max_dims = 4 , <nl> + min_side = 5 , max_side = 10 ) , <nl> + qparams = hu . qparams ( dtypes = torch . qint8 ) ) , <nl> + kernel = st . sampled_from ( ( 4 , 5 ) ) , <nl> + stride = st . sampled_from ( ( None , 1 , 2 ) ) , <nl> + padding = st . integers ( 0 , 2 ) , <nl> + ceil_mode = st . sampled_from ( ( True , False ) ) , <nl> + count_include_pad = st . sampled_from ( ( True , False ) ) , <nl> + divisor_override = st . sampled_from ( ( None , None ) ) ) <nl> + def test_avg_pool2d_nhwc ( self , X , kernel , stride , padding , ceil_mode , count_include_pad , divisor_override ) : <nl> + " " " <nl> + Note : 1 ) we currently cannot test the divisor_override , because quantized op will clamp the result <nl> + within range . However , the float op will not . <nl> + 2 ) we cannot test the qint32 , since the float point precision is much lower than int32 for big number , <nl> + which will make the test be very flaky . <nl> + " " " <nl> + X , ( scale , zero_point , torch_type ) = X <nl> + H , W = X . shape [ - 2 : ] <nl> + <nl> + if X . shape [ 1 ] < 176 : <nl> + X = np . repeat ( X , 176 / X . shape [ 1 ] , 1 ) <nl> + <nl> + X_nchw = np . ascontiguousarray ( X . transpose ( [ 0 , 2 , 3 , 1 ] ) ) <nl> + X = torch . from_numpy ( X_nchw ) . permute ( [ 0 , 3 , 1 , 2 ] ) <nl> + qX = torch . quantize_linear ( torch . from_numpy ( X_nchw ) , scale = scale , <nl> + zero_point = zero_point , dtype = torch_type ) . permute ( [ 0 , 3 , 1 , 2 ] ) <nl> + <nl> + # Run reference on int_repr + round to avoid double rounding error . <nl> + X_ref = torch . nn . functional . avg_pool2d ( <nl> + qX . int_repr ( ) . to ( torch . double ) , kernel_size = kernel , stride = stride , padding = padding , <nl> + ceil_mode = ceil_mode , count_include_pad = count_include_pad , divisor_override = divisor_override ) . round ( ) <nl> + <nl> + self . assertTrue ( qX . stride ( ) ! = sorted ( qX . stride ( ) ) ) <nl> + ops_under_test = { <nl> + " nn . 
functional " : torch . nn . functional . avg_pool2d , <nl> + " nn . quantized . functional " : torch . nn . quantized . functional . avg_pool2d <nl> + } <nl> + error_message = r " Results are off for { } : \ n \ tExpected : \ n { } \ n \ tGot : \ n { } " <nl> + for name , op in ops_under_test . items ( ) : <nl> + X_hat = op ( qX , kernel_size = kernel , stride = stride , padding = padding , ceil_mode = ceil_mode , <nl> + count_include_pad = count_include_pad , divisor_override = divisor_override ) <nl> + self . assertTrue ( X_hat . stride ( ) ! = sorted ( X_hat . stride ( ) ) ) <nl> + self . assertEqual ( X_ref , X_hat . int_repr ( ) . to ( torch . double ) , prec = 1 . 0 , <nl> + message = " { } results are off " . format ( name ) ) <nl> + self . assertEqual ( scale , X_hat . q_scale ( ) , <nl> + message = error_message . format ( name + ' . scale ' , scale , X_hat . q_scale ( ) ) ) <nl> + self . assertEqual ( zero_point , X_hat . q_zero_point ( ) , <nl> + message = error_message . format ( name + ' . zero_point ' , scale , <nl> + X_hat . q_zero_point ( ) ) ) <nl> + <nl> + @ no_deadline <nl> + @ given ( X = hu . tensor ( shapes = hu . array_shapes ( min_dims = 4 , max_dims = 4 , <nl> min_side = 1 , max_side = 10 ) , <nl> - qparams = hu . qparams ( ) ) , <nl> + qparams = hu . qparams ( dtypes = torch . quint8 ) ) , <nl> + permute = st . sampled_from ( ( [ 0 , 1 , 2 , 3 ] , [ 0 , 2 , 3 , 1 ] , [ 0 , 3 , 1 , 2 ] ) ) , <nl> output_size_h = st . integers ( 1 , 10 ) , <nl> output_size_w = st . integers ( 1 , 10 ) ) <nl> - def test_adaptive_avg_pool2d ( self , X , output_size_h , output_size_w ) : <nl> + def test_adaptive_avg_pool2d ( self , X , permute , output_size_h , output_size_w ) : <nl> X , ( scale , zero_point , torch_type ) = X <nl> <nl> H , W = X . shape [ - 2 : ] <nl> def test_adaptive_avg_pool2d ( self , X , output_size_h , output_size_w ) : <nl> output_size = output_size_h <nl> else : <nl> output_size = ( output_size_h , output_size_w ) <nl> - <nl> X = torch . from_numpy ( X ) <nl> qX = torch . quantize_linear ( X , scale = scale , zero_point = zero_point , <nl> - dtype = torch_type ) <nl> + dtype = torch_type ) . permute ( permute ) <nl> <nl> # Run reference on int_repr + round to avoid double rounding error . <nl> X_ref = torch . nn . functional . adaptive_avg_pool2d ( <nl> def test_adaptive_avg_pool2d ( self , X , output_size_h , output_size_w ) : <nl> message = error_message . format ( name + ' . zero_point ' , scale , <nl> qX_hat . q_zero_point ( ) ) ) <nl> <nl> + " " " Tests adaptive average pool operation on NHWC quantized tensors . " " " <nl> + @ given ( X = hu . tensor ( shapes = hu . array_shapes ( min_dims = 4 , max_dims = 4 , <nl> + min_side = 1 , max_side = 10 ) , <nl> + qparams = hu . qparams ( dtypes = torch . qint8 ) ) , <nl> + output_size_h = st . integers ( 1 , 10 ) , <nl> + output_size_w = st . integers ( 1 , 10 ) ) <nl> + def test_adaptive_avg_pool2d_nhwc ( self , X , output_size_h , output_size_w ) : <nl> + X , ( scale , zero_point , torch_type ) = X <nl> + H , W = X . shape [ - 2 : ] <nl> + assume ( output_size_h < = H ) <nl> + assume ( output_size_w < = W ) <nl> + if output_size_h = = output_size_w : <nl> + output_size = output_size_h <nl> + else : <nl> + output_size = ( output_size_h , output_size_w ) <nl> + <nl> + if X . shape [ 1 ] < 176 : <nl> + X = np . repeat ( X , 176 / X . shape [ 1 ] , 1 ) <nl> + <nl> + X_nchw = np . ascontiguousarray ( X . transpose ( [ 0 , 2 , 3 , 1 ] ) ) <nl> + X = torch . from_numpy ( X_nchw ) . 
permute ( [ 0 , 3 , 1 , 2 ] ) <nl> + qX = torch . quantize_linear ( torch . from_numpy ( X_nchw ) , scale = scale , <nl> + zero_point = zero_point , dtype = torch_type ) . permute ( [ 0 , 3 , 1 , 2 ] ) <nl> + <nl> + # Run reference on int_repr + round to avoid double rounding error . <nl> + X_ref = torch . nn . functional . adaptive_avg_pool2d ( qX . int_repr ( ) . to ( torch . double ) , output_size ) . round ( ) <nl> + <nl> + self . assertTrue ( qX . stride ( ) ! = sorted ( qX . stride ( ) ) ) <nl> + <nl> + ops_under_test = { <nl> + " nn . functional " : torch . nn . functional . adaptive_avg_pool2d , <nl> + " nn . quantized . functional " : <nl> + torch . nn . quantized . functional . adaptive_avg_pool2d <nl> + } <nl> + error_message = r " Results are off for { } : \ n \ tExpected : \ n { } \ n \ tGot : \ n { } " <nl> + for name , op in ops_under_test . items ( ) : <nl> + X_hat = op ( qX , output_size = output_size ) <nl> + self . assertTrue ( X_hat . stride ( ) ! = sorted ( X_hat . stride ( ) ) ) <nl> + self . assertEqual ( X_ref , X_hat . int_repr ( ) , prec = 1 . 0 , <nl> + message = " { } results are off " . format ( name ) ) <nl> + self . assertEqual ( scale , X_hat . q_scale ( ) , <nl> + message = error_message . format ( name + ' . scale ' , scale , X_hat . q_scale ( ) ) ) <nl> + self . assertEqual ( zero_point , X_hat . q_zero_point ( ) , <nl> + message = error_message . format ( name + ' . zero_point ' , scale , <nl> + X_hat . q_zero_point ( ) ) ) <nl> + <nl> " " " Tests quantize concatenation ( both fused and not ) . " " " <nl> @ given ( X = hu . tensor ( shapes = hu . array_shapes ( min_dims = 3 , max_dims = 4 , <nl> min_side = 1 , max_side = 10 ) , <nl> mmm a / torch / nn / quantized / functional . py <nl> ppp b / torch / nn / quantized / functional . py <nl> def max_pool2d ( input , kernel_size , stride = None , padding = 0 , dilation = 1 , <nl> <nl> # TODO ( zaf ) : Add documentation <nl> adaptive_avg_pool2d = torch . nn . functional . adaptive_avg_pool2d <nl> + <nl> + avg_pool2d = torch . nn . functional . avg_pool2d <nl> | Add the quantized average_pool2d support and adaptive_avg_pool2d support ( ) | pytorch/pytorch | 6411b92d6e50a5215dca807dd1d759625f7f183e | 2019-09-20T21:20:16Z |
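
The tests above compute the reference on qX.int_repr() and round once at the end, instead of pooling dequantized floats and re-quantizing, which would introduce a second rounding step and off-by-one mismatches near .5 boundaries. A standalone C++ sketch of the single-rounding pattern (illustrative values only; this is not PyTorch code):

    // Average a window of quantized (uint8) values with a single rounding
    // step, mirroring X_ref = avg_pool2d(qX.int_repr().to(float)).round().
    #include <algorithm>
    #include <cmath>
    #include <cstdint>
    #include <cstdio>

    uint8_t avg_pool_window(const uint8_t* vals, int n) {
      double sum = 0.0;
      for (int i = 0; i < n; ++i) sum += vals[i];   // exact in double
      long rounded = std::lround(sum / n);          // round once, at the end
      return static_cast<uint8_t>(std::min(255L, std::max(0L, rounded)));
    }

    int main() {
      uint8_t window[4] = {10, 11, 11, 11};         // mean 10.75 -> 11
      std::printf("%d\n", avg_pool_window(window, 4));
      return 0;
    }
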
mmm a / admin / static / coffee / cluster . coffee <nl> ppp b / admin / static / coffee / cluster . coffee <nl> apply_diffs = ( updates ) - > <nl> data . protocol = " memcached " <nl> when ' datacenters ' then collection = datacenters <nl> when ' machines ' then collection = machines <nl> - when ' me ' then # do nothing for now <nl> + when ' me ' then continue <nl> else <nl> console . log " Unhandled element update : " + update <nl> return <nl> | Fix a bug with / me . | rethinkdb/rethinkdb | 48be1f0aaac530aab5d081c31fc16248bdbf4086 | 2012-03-13T19:22:02Z |
mmm a / imgui . cpp <nl> ppp b / imgui . cpp <nl> static void ImGui : : UpdateManualResize ( ImGuiWindow * window , const ImVec2 & size_au <nl> window - > Size = window - > SizeFull ; <nl> } <nl> <nl> + void ImGui : : UpdateWindowParentAndRootLinks ( ImGuiWindow * window , ImGuiWindowFlags flags , ImGuiWindow * parent_window ) <nl> + { <nl> + window - > ParentWindow = parent_window ; <nl> + window - > RootWindow = window - > RootWindowForTitleBarHighlight = window - > RootWindowForNav = window ; <nl> + if ( parent_window & & ( flags & ImGuiWindowFlags_ChildWindow ) & & ! ( flags & ImGuiWindowFlags_Tooltip ) ) <nl> + window - > RootWindow = parent_window - > RootWindow ; <nl> + if ( parent_window & & ! ( flags & ImGuiWindowFlags_Modal ) & & ( flags & ( ImGuiWindowFlags_ChildWindow | ImGuiWindowFlags_Popup ) ) ) <nl> + window - > RootWindowForTitleBarHighlight = parent_window - > RootWindowForTitleBarHighlight ; <nl> + while ( window - > RootWindowForNav - > Flags & ImGuiWindowFlags_NavFlattened ) <nl> + window - > RootWindowForNav = window - > RootWindowForNav - > ParentWindow ; <nl> + } <nl> + <nl> / / Push a new ImGui window to add widgets to . <nl> / / - A default window called " Debug " is automatically stacked at the beginning of every frame so you can use widgets without explicitly calling a Begin / End pair . <nl> / / - Begin / End can be called multiple times during the frame with the same window name to append content . <nl> bool ImGui : : Begin ( const char * name , bool * p_open , ImGuiWindowFlags flags ) <nl> / / When reusing window again multiple times a frame , just append content ( don ' t need to setup again ) <nl> if ( first_begin_of_the_frame ) <nl> { <nl> - const bool window_is_child_tooltip = ( flags & ImGuiWindowFlags_ChildWindow ) & & ( flags & ImGuiWindowFlags_Tooltip ) ; / / FIXME - WIP : Undocumented behavior of Child + Tooltip for pinned tooltip ( # 1345 ) <nl> - <nl> / / Initialize <nl> - window - > ParentWindow = parent_window ; <nl> - window - > RootWindow = window - > RootWindowForTitleBarHighlight = window - > RootWindowForNav = window ; <nl> - if ( parent_window & & ( flags & ImGuiWindowFlags_ChildWindow ) & & ! window_is_child_tooltip ) <nl> - window - > RootWindow = parent_window - > RootWindow ; <nl> - if ( parent_window & & ! ( flags & ImGuiWindowFlags_Modal ) & & ( flags & ( ImGuiWindowFlags_ChildWindow | ImGuiWindowFlags_Popup ) ) ) <nl> - window - > RootWindowForTitleBarHighlight = parent_window - > RootWindowForTitleBarHighlight ; <nl> - while ( window - > RootWindowForNav - > Flags & ImGuiWindowFlags_NavFlattened ) <nl> - window - > RootWindowForNav = window - > RootWindowForNav - > ParentWindow ; <nl> + const bool window_is_child_tooltip = ( flags & ImGuiWindowFlags_ChildWindow ) & & ( flags & ImGuiWindowFlags_Tooltip ) ; / / FIXME - WIP : Undocumented behavior of Child + Tooltip for pinned tooltip ( # 1345 ) <nl> + UpdateWindowParentAndRootLinks ( window , flags , parent_window ) ; <nl> <nl> window - > Active = true ; <nl> window - > BeginOrderWithinParent = 0 ; <nl> mmm a / imgui_internal . h <nl> ppp b / imgui_internal . 
h <nl> namespace ImGui <nl> IMGUI_API void FocusWindow ( ImGuiWindow * window ) ; <nl> IMGUI_API void BringWindowToFront ( ImGuiWindow * window ) ; <nl> IMGUI_API void BringWindowToBack ( ImGuiWindow * window ) ; <nl> + IMGUI_API void UpdateWindowParentAndRootLinks ( ImGuiWindow * window , ImGuiWindowFlags flags , ImGuiWindow * parent_window ) ; <nl> IMGUI_API bool IsWindowChildOf ( ImGuiWindow * window , ImGuiWindow * potential_parent ) ; <nl> IMGUI_API bool IsWindowNavFocusable ( ImGuiWindow * window ) ; <nl> - <nl> IMGUI_API void SetCurrentFont ( ImFont * font ) ; <nl> inline ImFont * GetDefaultFont ( ) { ImGuiContext & g = * GImGui ; return g . IO . FontDefault ? g . IO . FontDefault : g . IO . Fonts - > Fonts [ 0 ] ; } <nl> <nl> + / / Init <nl> IMGUI_API void Initialize ( ImGuiContext * context ) ; <nl> IMGUI_API void Shutdown ( ImGuiContext * context ) ; / / Since 1 . 60 this is a _private_ function . You can call DestroyContext ( ) to destroy the context created by CreateContext ( ) . <nl> <nl> + / / NewFrame <nl> IMGUI_API void UpdateHoveredWindowAndCaptureFlags ( ) ; <nl> IMGUI_API void StartMouseMovingWindow ( ImGuiWindow * window ) ; <nl> IMGUI_API void UpdateMouseMovingWindow ( ) ; <nl> <nl> + / / Settings <nl> IMGUI_API void MarkIniSettingsDirty ( ) ; <nl> IMGUI_API void MarkIniSettingsDirty ( ImGuiWindow * window ) ; <nl> IMGUI_API ImGuiSettingsHandler * FindSettingsHandler ( const char * type_name ) ; <nl> IMGUI_API ImGuiWindowSettings * FindWindowSettings ( ImGuiID id ) ; <nl> <nl> + / / Basic Accessors <nl> inline ImGuiID GetItemID ( ) { ImGuiContext & g = * GImGui ; return g . CurrentWindow - > DC . LastItemId ; } <nl> inline ImGuiID GetActiveID ( ) { ImGuiContext & g = * GImGui ; return g . ActiveId ; } <nl> inline ImGuiID GetFocusID ( ) { ImGuiContext & g = * GImGui ; return g . NavId ; } <nl> namespace ImGui <nl> IMGUI_API void KeepAliveID ( ImGuiID id ) ; <nl> IMGUI_API void MarkItemValueChanged ( ImGuiID id ) ; <nl> <nl> + / / Basic Helpers for widget code <nl> IMGUI_API void ItemSize ( const ImVec2 & size , float text_offset_y = 0 . 0f ) ; <nl> IMGUI_API void ItemSize ( const ImRect & bb , float text_offset_y = 0 . 0f ) ; <nl> IMGUI_API bool ItemAdd ( const ImRect & bb , ImGuiID id , const ImRect * nav_bb = NULL ) ; <nl> namespace ImGui <nl> IMGUI_API void PushItemFlag ( ImGuiItemFlags option , bool enabled ) ; <nl> IMGUI_API void PopItemFlag ( ) ; <nl> <nl> + / / Popups , Modals , Tooltips <nl> IMGUI_API void OpenPopupEx ( ImGuiID id ) ; <nl> IMGUI_API void ClosePopup ( ImGuiID id ) ; <nl> IMGUI_API void ClosePopupToLevel ( int remaining ) ; <nl> namespace ImGui <nl> IMGUI_API void BeginTooltipEx ( ImGuiWindowFlags extra_flags , bool override_previous_tooltip = true ) ; <nl> IMGUI_API ImGuiWindow * GetFrontMostPopupModal ( ) ; <nl> <nl> + / / Navigation <nl> IMGUI_API void NavInitWindow ( ImGuiWindow * window , bool force_reinit ) ; <nl> IMGUI_API void NavMoveRequestCancel ( ) ; <nl> IMGUI_API void NavMoveRequestForward ( ImGuiDir move_dir , ImGuiDir clip_dir , const ImRect & bb_rel , ImGuiNavMoveFlags move_flags ) ; <nl> IMGUI_API void NavMoveRequestTryWrapping ( ImGuiWindow * window , ImGuiNavMoveFlags move_flags ) ; <nl> - IMGUI_API void ActivateItem ( ImGuiID id ) ; / / Remotely activate a button , checkbox , tree node etc . given its unique ID . activation is queued and processed on the next frame when the item is encountered again . 
<nl> - <nl> IMGUI_API float GetNavInputAmount ( ImGuiNavInput n , ImGuiInputReadMode mode ) ; <nl> IMGUI_API ImVec2 GetNavInputAmount2d ( ImGuiNavDirSourceFlags dir_sources , ImGuiInputReadMode mode , float slow_factor = 0 . 0f , float fast_factor = 0 . 0f ) ; <nl> IMGUI_API int CalcTypematicPressedRepeatAmount ( float t , float t_prev , float repeat_delay , float repeat_rate ) ; <nl> - <nl> - IMGUI_API void Scrollbar ( ImGuiLayoutType direction ) ; <nl> - IMGUI_API void VerticalSeparator ( ) ; / / Vertical separator , for menu bars ( use current line height ) . not exposed because it is misleading what it doesn ' t have an effect on regular layout . <nl> - IMGUI_API bool SplitterBehavior ( ImGuiID id , const ImRect & bb , ImGuiAxis axis , float * size1 , float * size2 , float min_size1 , float min_size2 , float hover_extend = 0 . 0f , float hover_visibility_delay = 0 . 0f ) ; <nl> - <nl> + IMGUI_API void ActivateItem ( ImGuiID id ) ; / / Remotely activate a button , checkbox , tree node etc . given its unique ID . activation is queued and processed on the next frame when the item is encountered again . <nl> + <nl> + / / Drag and Drop <nl> IMGUI_API bool BeginDragDropTargetCustom ( const ImRect & bb , ImGuiID id ) ; <nl> IMGUI_API void ClearDragDrop ( ) ; <nl> IMGUI_API bool IsDragDropPayloadBeingAccepted ( ) ; <nl> <nl> - / / FIXME - WIP : New Columns API <nl> + / / New Columns API ( FIXME - WIP ) <nl> IMGUI_API void BeginColumns ( const char * str_id , int count , ImGuiColumnsFlags flags = 0 ) ; / / setup number of columns . use an identifier to distinguish multiple column sets . close with EndColumns ( ) . <nl> IMGUI_API void EndColumns ( ) ; / / close columns <nl> IMGUI_API void PushColumnClipRect ( int column_index = - 1 ) ; <nl> <nl> - / / NB : All position are in absolute pixels coordinates ( never using window coordinates internally ) <nl> + / / Render helpers <nl> / / AVOID USING OUTSIDE OF IMGUI . CPP ! NOT FOR PUBLIC CONSUMPTION . THOSE FUNCTIONS ARE A MESS . THEIR SIGNATURE AND BEHAVIOR WILL CHANGE , THEY NEED TO BE REFACTORED INTO SOMETHING DECENT . <nl> + / / NB : All position are in absolute pixels coordinates ( never using window coordinates internally ) <nl> IMGUI_API void RenderText ( ImVec2 pos , const char * text , const char * text_end = NULL , bool hide_text_after_hash = true ) ; <nl> IMGUI_API void RenderTextWrapped ( ImVec2 pos , const char * text , const char * text_end , float wrap_width ) ; <nl> IMGUI_API void RenderTextClipped ( const ImVec2 & pos_min , const ImVec2 & pos_max , const char * text , const char * text_end , const ImVec2 * text_size_if_known , const ImVec2 & align = ImVec2 ( 0 , 0 ) , const ImRect * clip_rect = NULL ) ; <nl> namespace ImGui <nl> IMGUI_API void RenderRectFilledRangeH ( ImDrawList * draw_list , const ImRect & rect , ImU32 col , float x_start_norm , float x_end_norm , float rounding ) ; <nl> IMGUI_API const char * FindRenderedTextEnd ( const char * text , const char * text_end = NULL ) ; / / Find the optional # # from which we stop displaying text . 
<nl> <nl> - IMGUI_API bool ButtonBehavior ( const ImRect & bb , ImGuiID id , bool * out_hovered , bool * out_held , ImGuiButtonFlags flags = 0 ) ; <nl> + / / Widgets <nl> IMGUI_API bool ButtonEx ( const char * label , const ImVec2 & size_arg = ImVec2 ( 0 , 0 ) , ImGuiButtonFlags flags = 0 ) ; <nl> IMGUI_API bool CloseButton ( ImGuiID id , const ImVec2 & pos , float radius ) ; <nl> IMGUI_API bool CollapseButton ( ImGuiID id , const ImVec2 & pos ) ; <nl> IMGUI_API bool ArrowButtonEx ( const char * str_id , ImGuiDir dir , ImVec2 size_arg , ImGuiButtonFlags flags ) ; <nl> + IMGUI_API void Scrollbar ( ImGuiLayoutType direction ) ; <nl> + IMGUI_API void VerticalSeparator ( ) ; / / Vertical separator , for menu bars ( use current line height ) . Not exposed because it is misleading and it doesn ' t have an effect on regular layout . <nl> <nl> + / / Widgets low - level behaviors <nl> + IMGUI_API bool ButtonBehavior ( const ImRect & bb , ImGuiID id , bool * out_hovered , bool * out_held , ImGuiButtonFlags flags = 0 ) ; <nl> IMGUI_API bool DragBehavior ( ImGuiID id , ImGuiDataType data_type , void * v , float v_speed , const void * v_min , const void * v_max , const char * format , float power ) ; <nl> IMGUI_API bool SliderBehavior ( const ImRect & bb , ImGuiID id , ImGuiDataType data_type , void * v , const void * v_min , const void * v_max , const char * format , float power , ImGuiSliderFlags flags = 0 ) ; <nl> + IMGUI_API bool SplitterBehavior ( ImGuiID id , const ImRect & bb , ImGuiAxis axis , float * size1 , float * size2 , float min_size1 , float min_size2 , float hover_extend = 0 . 0f , float hover_visibility_delay = 0 . 0f ) ; <nl> + IMGUI_API bool TreeNodeBehavior ( ImGuiID id , ImGuiTreeNodeFlags flags , const char * label , const char * label_end = NULL ) ; <nl> + IMGUI_API bool TreeNodeBehaviorIsOpen ( ImGuiID id , ImGuiTreeNodeFlags flags = 0 ) ; / / Consume previous SetNextTreeNodeOpened ( ) data , if any . May return true when logging <nl> + IMGUI_API void TreePushRawID ( ImGuiID id ) ; <nl> <nl> IMGUI_API bool InputTextEx ( const char * label , char * buf , int buf_size , const ImVec2 & size_arg , ImGuiInputTextFlags flags , ImGuiTextEditCallback callback = NULL , void * user_data = NULL ) ; <nl> IMGUI_API bool InputScalarAsWidgetReplacement ( const ImRect & bb , ImGuiID id , const char * label , ImGuiDataType data_type , void * data_ptr , const char * format ) ; <nl> namespace ImGui <nl> IMGUI_API void ColorTooltip ( const char * text , const float * col , ImGuiColorEditFlags flags ) ; <nl> IMGUI_API void ColorEditOptionsPopup ( const float * col , ImGuiColorEditFlags flags ) ; <nl> <nl> - IMGUI_API bool TreeNodeBehavior ( ImGuiID id , ImGuiTreeNodeFlags flags , const char * label , const char * label_end = NULL ) ; <nl> - IMGUI_API bool TreeNodeBehaviorIsOpen ( ImGuiID id , ImGuiTreeNodeFlags flags = 0 ) ; / / Consume previous SetNextTreeNodeOpened ( ) data , if any . May return true when logging <nl> - IMGUI_API void TreePushRawID ( ImGuiID id ) ; <nl> - <nl> IMGUI_API void PlotEx ( ImGuiPlotType plot_type , const char * label , float ( * values_getter ) ( void * data , int idx ) , void * data , int values_count , int values_offset , const char * overlay_text , float scale_min , float scale_max , ImVec2 graph_size ) ; <nl> <nl> / / Shade functions ( write over already created vertices ) <nl> | Internals : Extracted part of Begin into UpdateWindowParentAndRootLinks ( ) . 
Useful to call to keep state updated immediately ( namely useful for docking ) + small tidying up of imgui_internal . h | ocornut/imgui | 63df9d6311352c3bda85edd63a006ee08dd35700 | 2018-07-26T21:49:36Z |
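
A plausible caller of the extracted helper (the scenario below is hypothetical; only UpdateWindowParentAndRootLinks() itself comes from this commit): when a window is reparented outside of Begin(), as during docking, the Root* links can be refreshed immediately instead of waiting for the next Begin():

    #include "imgui.h"
    #include "imgui_internal.h"

    // Hypothetical reparenting step: keep RootWindow,
    // RootWindowForTitleBarHighlight and RootWindowForNav consistent
    // right away after attaching `window` under `new_parent`.
    void ReparentForDocking(ImGuiWindow* window, ImGuiWindow* new_parent) {
      ImGuiWindowFlags flags = window->Flags | ImGuiWindowFlags_ChildWindow;
      ImGui::UpdateWindowParentAndRootLinks(window, flags, new_parent);
    }
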
mmm a / modules / perception / obstacle / fusion / probabilistic_fusion / pbf_track . cc <nl> ppp b / modules / perception / obstacle / fusion / probabilistic_fusion / pbf_track . cc <nl> int PbfTrack : : GetNextTrackId ( ) { <nl> } <nl> <nl> bool PbfTrack : : AbleToPublish ( ) { <nl> - AINFO < < s_publish_if_has_lidar_ < < " " < < invisible_in_lidar_ < < " " <nl> + ADEBUG < < s_publish_if_has_lidar_ < < " " < < invisible_in_lidar_ < < " " <nl> < < lidar_objects_ . size ( ) ; <nl> double invisible_period_threshold = 0 . 001 ; <nl> if ( invisible_period_ > invisible_period_threshold & & invisible_in_lidar_ & & <nl> mmm a / modules / perception / obstacle / onboard / fusion_subnode . cc <nl> ppp b / modules / perception / obstacle / onboard / fusion_subnode . cc <nl> Status FusionSubnode : : ProcEvents ( ) { <nl> return Status ( ErrorCode : : PERCEPTION_ERROR , " Subscribe event fail . " ) ; <nl> } <nl> if ( events . empty ( ) ) { <nl> + usleep ( 500 ) ; <nl> continue ; <nl> } <nl> Process ( event_meta , events ) ; <nl> Status FusionSubnode : : Process ( const EventMeta & event_meta , <nl> bool FusionSubnode : : SubscribeEvents ( const EventMeta & event_meta , <nl> std : : vector < Event > * events ) const { <nl> Event event ; <nl> - if ( event_meta . event_id = = pub_driven_event_id_ ) { <nl> - if ( event_manager_ - > Subscribe ( event_meta . event_id , & event , false ) ) { <nl> - events - > push_back ( event ) ; <nl> - } else { <nl> - return false ; <nl> - } <nl> - } else { <nl> - / / no blocking <nl> - while ( event_manager_ - > Subscribe ( event_meta . event_id , & event , true ) ) { <nl> - events - > push_back ( event ) ; <nl> - } <nl> + / / no blocking <nl> + while ( event_manager_ - > Subscribe ( event_meta . event_id , & event , true ) ) { <nl> + events - > push_back ( event ) ; <nl> } <nl> return true ; <nl> } <nl> mmm a / modules / perception / obstacle / radar / modest / conti_radar_util . h <nl> ppp b / modules / perception / obstacle / radar / modest / conti_radar_util . h <nl> class ContiRadarUtil { <nl> const Eigen : : Vector3f & velocity ) { <nl> Eigen : : Vector3f vector_temp1 = main_velocity ; <nl> Eigen : : Vector3f vector_temp2 = velocity ; <nl> - if ( vector_temp1 . head ( 2 ) . norm ( ) > 1e - 5 <nl> - & & vector_temp2 . head ( 2 ) . norm ( ) > 1e - 5 ) { <nl> + if ( vector_temp1 . head ( 2 ) . norm ( ) > 0 . 1 <nl> + & & vector_temp2 . head ( 2 ) . norm ( ) > 0 . 1 ) { <nl> double theta = VectorTheta2dXy ( vector_temp1 , vector_temp2 ) ; <nl> if ( ( theta > 1 . 0 / 4 . 0 * M_PI & & theta < 3 . 0 / 4 . 0 * M_PI ) | | <nl> ( theta > - 3 . 0 / 4 . 0 * M_PI & & theta < - 1 . 0 / 4 . 0 * M_PI ) ) { <nl> mmm a / modules / perception / obstacle / radar / modest / modest_radar_detector . cc <nl> ppp b / modules / perception / obstacle / radar / modest / modest_radar_detector . cc <nl> bool ModestRadarDetector : : CollectRadarResult ( std : : vector < ObjectPtr > * objects ) { <nl> for ( size_t i = 0 ; i < obs_track . size ( ) ; + + i ) { <nl> ObjectPtr object_ptr = ObjectPtr ( new Object ( ) ) ; <nl> const ObjectPtr & object_radar_ptr = obs_track [ i ] . GetObsRadar ( ) ; <nl> - if ( object_radar_ptr - > is_background ) { <nl> + if ( use_fp_filter_ & & object_radar_ptr - > is_background ) { <nl> continue ; <nl> } <nl> object_ptr - > clone ( * object_radar_ptr ) ; <nl> | perception : nonblocking fusion subnode event , use host velocity direction when it is not static | ApolloAuto/apollo | 5eecbb3b51851b87ef25bf5319dd8f5ba84ab895 | 2017-12-13T19:07:57Z |
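
The threshold change above gates the heading comparison on both 2D speeds exceeding 0.1 m/s, because the direction of a near-zero velocity vector is mostly noise. A self-contained sketch of the same check (this assumes VectorTheta2dXy returns the signed 2D heading difference, which is not shown in this diff):

    #include <cmath>
    #include <Eigen/Core>

    // True when the two headings are roughly perpendicular, but only if
    // both planar speeds are large enough for the heading to be trusted.
    bool HeadingsRoughlyPerpendicular(const Eigen::Vector3f& v1,
                                      const Eigen::Vector3f& v2) {
      const float kMinSpeed = 0.1f;  // raised from 1e-5 in this commit
      if (v1.head<2>().norm() <= kMinSpeed ||
          v2.head<2>().norm() <= kMinSpeed) {
        return false;  // too slow: direction is not meaningful
      }
      float theta = std::atan2(v2.y(), v2.x()) - std::atan2(v1.y(), v1.x());
      theta = std::atan2(std::sin(theta), std::cos(theta));  // wrap to (-pi, pi]
      float a = std::fabs(theta);
      return a > M_PI / 4.0 && a < 3.0 * M_PI / 4.0;
    }
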
mmm a / hphp / hack / src / typing / typing . rs <nl> ppp b / hphp / hack / src / typing / typing . rs <nl> pub fn fun < ' a > ( env : & mut Env < ' a > , f : & ' a ast : : Fun_ ) - > tast : : Fun_ < ' a > { <nl> let ast = f . body . ast . iter ( ) . map ( | x | stmt ( env , x ) ) . collect ( ) ; <nl> <nl> / / We put empty vec below for all of those , since real conversion is unimplemented <nl> - assert ! ( f . tparams . is_empty ( ) ) ; <nl> - assert ! ( f . params . is_empty ( ) ) ; <nl> assert ! ( f . user_attributes . is_empty ( ) ) ; <nl> assert ! ( f . file_attributes . is_empty ( ) ) ; <nl> <nl> mmm a / hphp / hack / src / typing / typing_defs_core . rs <nl> ppp b / hphp / hack / src / typing / typing_defs_core . rs <nl> use std : : cmp : : Ordering ; <nl> <nl> use bumpalo : : collections : : Vec ; <nl> use ocamlrep : : { Allocator , FromError , OcamlRep , Value } ; <nl> + use oxidized : : pos : : Pos ; <nl> pub use oxidized : : typing_defs_core : : { DestructureKind , Exact , ParamMode } ; <nl> use oxidized : : { aast_defs , ast_defs , ident , nast , tany_sentinel , typing_defs as oxidized_defs } ; <nl> <nl> impl < ' a > Ty < ' a > { <nl> let Ty ( r , _t ) = self ; <nl> * r <nl> } <nl> - pub fn get_pos ( & self ) - > Option < & ' a oxidized : : pos : : Pos > { <nl> + pub fn get_pos ( & self ) - > Option < & ' a Pos > { <nl> self . get_reason ( ) . pos <nl> } <nl> pub fn is_tyvar ( & self ) - > bool { <nl> impl < ' a > FunType_ < ' a > { <nl> pub fn to_oxidized ( & self ) - > oxidized_defs : : FunType { <nl> / / TODO ( hrust ) proper conversion <nl> use oxidized_defs : : * ; <nl> + let FunType_ { return_ , params } = self ; <nl> <nl> FunType { <nl> is_coroutine : false , <nl> arity : FunArity : : Fstandard ( 0 , 0 ) , <nl> tparams : ( vec ! [ ] , FunTparamsKind : : FTKtparams ) , <nl> where_constraints : vec ! [ ] , <nl> - params : vec ! [ ] , <nl> + params : params . iter ( ) . map ( | p | p . to_oxidized ( ) ) . collect ( ) , <nl> ret : PossiblyEnforcedTy { <nl> enforced : false , <nl> - type_ : self . return_ . to_oxidized ( ) , <nl> + type_ : return_ . to_oxidized ( ) , <nl> } , <nl> fun_kind : ast_defs : : FunKind : : FSync , <nl> reactive : Reactivity : : Nonreactive , <nl> impl < ' a > FunType_ < ' a > { <nl> } <nl> <nl> # [ derive ( Debug , Eq , Ord , PartialEq , PartialOrd ) ] <nl> - pub struct FunParam < ' a > { <nl> + pub struct FunParam_ < ' a > { <nl> / / TODO ( hrust ) missing fields <nl> pub type_ : Ty < ' a > , / / TODO ( hrust ) possibly_enforced_ty <nl> } <nl> + <nl> + pub type FunParam < ' a > = & ' a FunParam_ < ' a > ; <nl> + <nl> + impl < ' a > FunParam_ < ' a > { <nl> + pub fn to_oxidized ( & self ) - > oxidized_defs : : FunParam { <nl> + use oxidized_defs : : * ; <nl> + let FunParam_ { type_ } = self ; <nl> + <nl> + FunParam { <nl> + pos : Pos : : make_none ( ) , <nl> + name : None , <nl> + type_ : PossiblyEnforcedTy { <nl> + enforced : false , <nl> + type_ : type_ . to_oxidized ( ) , <nl> + } , <nl> + kind : ParamMode : : FPnormal , <nl> + accept_disposable : false , <nl> + mutability : None , <nl> + rx_annotation : None , <nl> + } <nl> + } <nl> + } <nl> mmm a / hphp / hack / src / typing / typing_make_type . rs <nl> ppp b / hphp / hack / src / typing / typing_make_type . rs <nl> impl < ' a > TypeBuilder < ' a > { <nl> pub fn constraintty ( & ' a self , ty : ConstraintType < ' a > ) - > InternalType < ' a > { <nl> self . 
alloc ( InternalType_ : : ConstraintType ( ty ) ) <nl> } <nl> - pub fn funtype ( & ' a self , return_ : Ty < ' a > ) - > FunType < ' a > { <nl> - self . alloc ( FunType_ { <nl> - return_ , <nl> - params : vec ! [ in self . alloc ] , <nl> - } ) <nl> + pub fn funparam ( & ' a self , type_ : Ty < ' a > ) - > FunParam < ' a > { <nl> + self . alloc ( FunParam_ { type_ } ) <nl> + } <nl> + pub fn funtype ( & ' a self , params : BVec < ' a , FunParam < ' a > > , return_ : Ty < ' a > ) - > FunType < ' a > { <nl> + self . alloc ( FunType_ { return_ , params } ) <nl> } <nl> } <nl> <nl> mmm a / hphp / hack / src / typing / typing_phase . rs <nl> ppp b / hphp / hack / src / typing / typing_phase . rs <nl> use oxidized : : aast_defs : : Tprim ; <nl> use oxidized : : ast ; <nl> use oxidized : : ast_defs : : Id ; <nl> use oxidized : : pos : : Pos ; <nl> - use oxidized : : typing_defs_core : : { FunType as DFunType , Tparam as DTparam , Ty as DTy , Ty_ as DTy_ } ; <nl> + use oxidized : : typing_defs_core : : { <nl> + FunParam as DFunParam , FunParams as DFunParams , FunType as DFunType , Tparam as DTparam , <nl> + Ty as DTy , Ty_ as DTy_ , <nl> + } ; <nl> use typing_defs_rust : : tast ; <nl> use typing_defs_rust : : typing_defs : : ExpandEnv ; <nl> - use typing_defs_rust : : typing_defs_core : : { FunType , PrimKind , Ty } ; <nl> + use typing_defs_rust : : typing_defs_core : : { FunParam , FunType , PrimKind , Ty } ; <nl> use typing_defs_rust : : typing_reason : : PReason_ ; <nl> <nl> / / / Transforms a declaration phase type into a localized type . This performs <nl> pub fn localize_ft < ' a , ' b > ( <nl> / / TODO ( hrust ) <nl> } <nl> } ; <nl> - / / TODO ( hrust ) : localize params and more . . . <nl> + / * TODO ( hrust ) : <nl> + - set_env_reactive <nl> + - localize tparams <nl> + - localize where constraints <nl> + - check constraints under substs <nl> + - arity <nl> + * / <nl> + let params = localize_funparams ( ety_env , env , & ft . params ) ; <nl> let ret = localize ( ety_env , env , & ft . ret . type_ ) ; <nl> - env . bld ( ) . funtype ( ret ) <nl> + env . bld ( ) . funtype ( params , ret ) <nl> + } <nl> + <nl> + fn localize_funparams < ' a > ( <nl> + ety_env : & mut ExpandEnv < ' a > , <nl> + env : & mut Env < ' a > , <nl> + params : & ' a DFunParams , <nl> + ) - > BVec < ' a , FunParam < ' a > > { <nl> + env . bld ( ) <nl> + . vec_from_iter ( params . iter ( ) . map ( | ty | localize_funparam ( ety_env , env , ty ) ) ) <nl> + } <nl> + <nl> + fn localize_funparam < ' a > ( <nl> + ety_env : & mut ExpandEnv < ' a > , <nl> + env : & mut Env < ' a > , <nl> + param : & ' a DFunParam , <nl> + ) - > FunParam < ' a > { <nl> + let type_ = & param . type_ . type_ ; <nl> + let type_ = localize ( ety_env , env , type_ ) ; <nl> + env . bld ( ) . funparam ( type_ ) <nl> } <nl> <nl> fn localize_tparams < ' a > ( <nl> | localize function parameters | facebook/hhvm | 071116effc7998bcea760da3ce9e34dc21fbe9aa | 2020-04-02T07:31:02Z |
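
The localization above maps a localize step over each declared parameter type and rebuilds the function type from the results. A minimal sketch of that shape (all type names are invented for illustration; the real code threads typing environments through each call):

    #include <functional>
    #include <vector>

    struct DeclTy { int id; };   // declaration-phase type (placeholder)
    struct LocalTy { int id; };  // localized type (placeholder)
    struct LocalFunType {
      std::vector<LocalTy> params;
      LocalTy ret;
    };

    // Localize each parameter independently, then the return type.
    LocalFunType LocalizeFunType(
        const std::vector<DeclTy>& decl_params, const DeclTy& decl_ret,
        const std::function<LocalTy(const DeclTy&)>& localize) {
      LocalFunType ft;
      ft.params.reserve(decl_params.size());
      for (const auto& p : decl_params) ft.params.push_back(localize(p));
      ft.ret = localize(decl_ret);
      return ft;
    }
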
mmm a / modules / planning / lattice / trajectory_generation / BUILD <nl> ppp b / modules / planning / lattice / trajectory_generation / BUILD <nl> cc_library ( <nl> " : end_condition_sampler " , <nl> " : lateral_trajectory_optimizer " , <nl> " : lateral_trajectory_optimizer_interface " , <nl> + " : lateral_osqp_optimizer " , <nl> " / / modules / common " , <nl> " / / modules / planning / common : planning_gflags " , <nl> " / / modules / planning / lattice / behavior : path_time_graph " , <nl> cc_library ( <nl> " / / modules / planning / lattice / trajectory1d : standing_still_trajectory1d " , <nl> " / / modules / planning / math / curve1d : quartic_polynomial_curve1d " , <nl> " / / modules / planning / math / curve1d : quintic_polynomial_curve1d " , <nl> - " / / modules / planning / math / finite_element_qp " , <nl> + " / / modules / planning / math / finite_element_qp : lateral_qp_optimizer " , <nl> " / / modules / planning / proto : lattice_sampling_config_proto " , <nl> " / / modules / planning / proto : lattice_structure_proto " , <nl> ] , <nl> cc_library ( <nl> ] , <nl> ) <nl> <nl> + cc_library ( <nl> + name = " lateral_osqp_optimizer " , <nl> + srcs = [ <nl> + " lateral_osqp_optimizer . cc " , <nl> + ] , <nl> + hdrs = [ <nl> + " lateral_osqp_optimizer . h " , <nl> + ] , <nl> + deps = [ <nl> + " / / modules / planning / math / finite_element_qp : lateral_qp_optimizer " , <nl> + " / / modules / planning / lattice / trajectory1d : piecewise_jerk_trajectory1d " , <nl> + " / / cybertron " , <nl> + " @ eigen " , <nl> + " @ osqp " , <nl> + ] , <nl> + ) <nl> + <nl> cpplint ( ) <nl> mmm a / modules / planning / lattice / trajectory_generation / lateral_osqp_optimizer . cc <nl> ppp b / modules / planning / lattice / trajectory_generation / lateral_osqp_optimizer . cc <nl> <nl> <nl> # include " modules / planning / lattice / trajectory_generation / lateral_osqp_optimizer . h " <nl> <nl> - # include " cybertron / common / log . h " <nl> - # include " qpOASES . hpp " <nl> + # include < algorithm > <nl> <nl> + # include " cybertron / common / log . h " <nl> + # include " modules / common / math / matrix_operations . h " <nl> # include " modules / planning / common / planning_gflags . h " <nl> <nl> namespace apollo { <nl> namespace planning { <nl> bool LateralOSQPOptimizer : : optimize ( <nl> const std : : array < double , 3 > & d_state , const double delta_s , <nl> const std : : vector < std : : pair < double , double > > & d_bounds ) { <nl> - / / TODO ( kechxu ) implement <nl> + std : : vector < c_float > P_data ; <nl> + std : : vector < c_int > P_indices ; <nl> + std : : vector < c_int > P_indptr ; <nl> + CalculateKernel ( d_bounds , & P_data , & P_indices , & P_indptr ) ; <nl> + delta_s_ = delta_s ; <nl> + const int num_var = d_bounds . size ( ) ; <nl> + const int kNumParam = 3 * d_bounds . size ( ) ; <nl> + const int kNumConstraint = kNumParam + 3 * ( num_var - 1 ) + 3 ; <nl> + c_float lower_bounds [ kNumConstraint ] ; <nl> + c_float upper_bounds [ kNumConstraint ] ; <nl> + <nl> + const int prime_offset = num_var ; <nl> + const int pprime_offset = 2 * num_var ; <nl> + <nl> + std : : vector < std : : vector < std : : pair < c_int , c_float > > > columns ; <nl> + columns . resize ( kNumParam ) ; <nl> + <nl> + int constraint_index = 0 ; <nl> + <nl> + / / d_i + 1 ' ' - d_i ' ' <nl> + for ( int i = 0 ; i + 1 < num_var ; + + i ) { <nl> + columns [ pprime_offset + i ] . emplace_back ( constraint_index , - 1 . 0 ) ; <nl> + columns [ pprime_offset + i + 1 ] . emplace_back ( constraint_index , 1 . 
0 ) ; <nl> + <nl> + lower_bounds [ constraint_index ] = <nl> + - FLAGS_lateral_third_order_derivative_max * delta_s ; <nl> + upper_bounds [ constraint_index ] = <nl> + FLAGS_lateral_third_order_derivative_max * delta_s ; <nl> + + + constraint_index ; <nl> + } <nl> + <nl> + / / d_i + 1 ' - d_i ' - 0 . 5 * ds * ( d_i ' ' + d_i + 1 ' ' ) <nl> + for ( int i = 0 ; i + 1 < num_var ; + + i ) { <nl> + columns [ prime_offset + i ] . emplace_back ( constraint_index , - 1 . 0 ) ; <nl> + columns [ prime_offset + i + 1 ] . emplace_back ( constraint_index , 1 . 0 ) ; <nl> + columns [ pprime_offset + i ] . emplace_back ( constraint_index , - 0 . 5 * delta_s ) ; <nl> + columns [ pprime_offset + i + 1 ] . emplace_back ( constraint_index , <nl> + - 0 . 5 * delta_s ) ; <nl> + <nl> + lower_bounds [ constraint_index ] = 0 . 0 ; <nl> + upper_bounds [ constraint_index ] = 0 . 0 ; <nl> + + + constraint_index ; <nl> + } <nl> + <nl> + / / d_i + 1 - d_i - d_i ' * ds - 1 / 3 * d_i ' ' * ds ^ 2 - 1 / 6 * d_i + 1 ' ' * ds ^ 2 <nl> + for ( int i = 0 ; i + 1 < num_var ; + + i ) { <nl> + columns [ i ] . emplace_back ( constraint_index , - 1 . 0 ) ; <nl> + columns [ i + 1 ] . emplace_back ( constraint_index , 1 . 0 ) ; <nl> + columns [ prime_offset + i ] . emplace_back ( constraint_index , - delta_s ) ; <nl> + columns [ pprime_offset + i ] . emplace_back ( constraint_index , <nl> + - delta_s * delta_s / 3 . 0 ) ; <nl> + columns [ pprime_offset + i + 1 ] . emplace_back ( constraint_index , <nl> + - delta_s * delta_s / 6 . 0 ) ; <nl> + <nl> + lower_bounds [ constraint_index ] = 0 . 0 ; <nl> + upper_bounds [ constraint_index ] = 0 . 0 ; <nl> + + + constraint_index ; <nl> + } <nl> + <nl> + columns [ 0 ] . emplace_back ( constraint_index , 1 . 0 ) ; <nl> + lower_bounds [ constraint_index ] = d_state [ 0 ] ; <nl> + upper_bounds [ constraint_index ] = d_state [ 0 ] ; <nl> + + + constraint_index ; <nl> + <nl> + columns [ prime_offset ] . emplace_back ( constraint_index , 1 . 0 ) ; <nl> + lower_bounds [ constraint_index ] = d_state [ 1 ] ; <nl> + upper_bounds [ constraint_index ] = d_state [ 1 ] ; <nl> + + + constraint_index ; <nl> + <nl> + columns [ pprime_offset ] . emplace_back ( constraint_index , 1 . 0 ) ; <nl> + lower_bounds [ constraint_index ] = d_state [ 2 ] ; <nl> + upper_bounds [ constraint_index ] = d_state [ 2 ] ; <nl> + + + constraint_index ; <nl> + <nl> + const double LARGE_VALUE = 2 . 0 ; <nl> + for ( int i = 0 ; i < kNumParam ; + + i ) { <nl> + columns [ i ] . emplace_back ( constraint_index , 1 . 0 ) ; <nl> + if ( i < num_var ) { <nl> + lower_bounds [ constraint_index ] = d_bounds [ i ] . first ; <nl> + upper_bounds [ constraint_index ] = d_bounds [ i ] . second ; <nl> + } else { <nl> + lower_bounds [ constraint_index ] = - LARGE_VALUE ; <nl> + upper_bounds [ constraint_index ] = LARGE_VALUE ; <nl> + } <nl> + + + constraint_index ; <nl> + } <nl> + <nl> + CHECK_EQ ( constraint_index , kNumConstraint ) ; <nl> + <nl> + / / change affine_constraint to CSC format <nl> + std : : vector < c_float > A_data ; <nl> + std : : vector < c_int > A_indices ; <nl> + std : : vector < c_int > A_indptr ; <nl> + int ind_p = 0 ; <nl> + for ( int j = 0 ; j < kNumParam ; + + j ) { <nl> + A_indptr . push_back ( ind_p ) ; <nl> + for ( const auto & row_data_pair : columns [ j ] ) { <nl> + A_data . push_back ( row_data_pair . second ) ; <nl> + A_indices . push_back ( row_data_pair . first ) ; <nl> + + + ind_p ; <nl> + } <nl> + } <nl> + A_indptr . 
push_back ( ind_p ) ; <nl> + <nl> + / / offset <nl> + double q [ kNumParam ] ; <nl> + for ( int i = 0 ; i < kNumParam ; + + i ) { <nl> + if ( i < num_var ) { <nl> + q [ i ] = - 2 . 0 * FLAGS_weight_lateral_obstacle_distance * <nl> + ( d_bounds [ i ] . first + d_bounds [ i ] . second ) ; <nl> + } else { <nl> + q [ i ] = 0 . 0 ; <nl> + } <nl> + } <nl> + <nl> + / / Problem settings <nl> + OSQPSettings * settings = <nl> + reinterpret_cast < OSQPSettings * > ( c_malloc ( sizeof ( OSQPSettings ) ) ) ; <nl> + <nl> + / / Define Solver settings as default <nl> + osqp_set_default_settings ( settings ) ; <nl> + settings - > alpha = 1 . 0 ; / / Change alpha parameter <nl> + settings - > eps_abs = 1 . 0e - 05 ; <nl> + settings - > eps_rel = 1 . 0e - 05 ; <nl> + settings - > max_iter = 5000 ; <nl> + settings - > polish = true ; <nl> + settings - > verbose = FLAGS_enable_osqp_debug ; <nl> + <nl> + / / Populate data <nl> + OSQPData * data = reinterpret_cast < OSQPData * > ( c_malloc ( sizeof ( OSQPData ) ) ) ; <nl> + data - > n = kNumParam ; <nl> + data - > m = kNumConstraint ; <nl> + data - > P = csc_matrix ( data - > n , data - > n , P_data . size ( ) , P_data . data ( ) , <nl> + P_indices . data ( ) , P_indptr . data ( ) ) ; <nl> + data - > q = q ; <nl> + data - > A = csc_matrix ( data - > m , data - > n , A_data . size ( ) , A_data . data ( ) , <nl> + A_indices . data ( ) , A_indptr . data ( ) ) ; <nl> + data - > l = lower_bounds ; <nl> + data - > u = upper_bounds ; <nl> + <nl> + / / Workspace <nl> + OSQPWorkspace * work = osqp_setup ( data , settings ) ; <nl> + <nl> + / / Solve Problem <nl> + osqp_solve ( work ) ; <nl> + <nl> + / / extract primal results <nl> + for ( int i = 0 ; i < num_var ; + + i ) { <nl> + opt_d_ . push_back ( work - > solution - > x [ i ] ) ; <nl> + opt_d_prime_ . push_back ( work - > solution - > x [ i + num_var ] ) ; <nl> + opt_d_pprime_ . push_back ( work - > solution - > x [ i + 2 * num_var ] ) ; <nl> + } <nl> + opt_d_prime_ [ num_var - 1 ] = 0 . 0 ; <nl> + opt_d_pprime_ [ num_var - 1 ] = 0 . 0 ; <nl> + <nl> + / / Cleanup <nl> + osqp_cleanup ( work ) ; <nl> + c_free ( data - > A ) ; <nl> + c_free ( data - > P ) ; <nl> + c_free ( data ) ; <nl> + c_free ( settings ) ; <nl> + <nl> return true ; <nl> } <nl> <nl> + void LateralOSQPOptimizer : : CalculateKernel ( <nl> + const std : : vector < std : : pair < double , double > > & d_bounds , <nl> + std : : vector < c_float > * P_data , std : : vector < c_int > * P_indices , <nl> + std : : vector < c_int > * P_indptr ) { <nl> + const int kNumParam = 3 * d_bounds . size ( ) ; <nl> + P_data - > resize ( kNumParam ) ; <nl> + P_indices - > resize ( kNumParam ) ; <nl> + P_indptr - > resize ( kNumParam + 1 ) ; <nl> + <nl> + for ( int i = 0 ; i < kNumParam ; + + i ) { <nl> + if ( i < static_cast < int > ( d_bounds . size ( ) ) ) { <nl> + P_data - > at ( i ) = 2 . 0 * FLAGS_weight_lateral_offset + <nl> + 2 . 0 * FLAGS_weight_lateral_obstacle_distance ; <nl> + } else if ( i < 2 * static_cast < int > ( d_bounds . size ( ) ) ) { <nl> + P_data - > at ( i ) = 2 . 0 * FLAGS_weight_lateral_derivative ; <nl> + } else { <nl> + P_data - > at ( i ) = 2 . 
0 * FLAGS_weight_lateral_second_order_derivative ; <nl> + } <nl> + P_indices - > at ( i ) = i ; <nl> + P_indptr - > at ( i ) = i ; <nl> + } <nl> + P_indptr - > at ( kNumParam ) = kNumParam ; <nl> + CHECK_EQ ( P_data - > size ( ) , P_indices - > size ( ) ) ; <nl> + } <nl> + <nl> } / / namespace planning <nl> } / / namespace apollo <nl> mmm a / modules / planning / lattice / trajectory_generation / lateral_osqp_optimizer . h <nl> ppp b / modules / planning / lattice / trajectory_generation / lateral_osqp_optimizer . h <nl> <nl> # include < utility > <nl> # include < vector > <nl> <nl> + # include " Eigen / Core " <nl> + # include " osqp / include / osqp . h " <nl> + <nl> + # include " modules / planning / math / finite_element_qp / lateral_qp_optimizer . h " <nl> # include " modules / planning / lattice / trajectory1d / piecewise_jerk_trajectory1d . h " <nl> - # include " modules / planning / lattice / trajectory_generation / lateral_qp_optimizer . h " <nl> <nl> namespace apollo { <nl> namespace planning { <nl> class LateralOSQPOptimizer : public LateralQPOptimizer { <nl> bool optimize ( <nl> const std : : array < double , 3 > & d_state , const double delta_s , <nl> const std : : vector < std : : pair < double , double > > & d_bounds ) override ; <nl> + <nl> + private : <nl> + void CalculateKernel ( const std : : vector < std : : pair < double , double > > & d_bounds , <nl> + std : : vector < c_float > * P_data , <nl> + std : : vector < c_int > * P_indices , <nl> + std : : vector < c_int > * P_indptr ) ; <nl> + <nl> + double delta_s_ = 0 . 0 ; <nl> } ; <nl> <nl> } / / namespace planning <nl> mmm a / modules / planning / lattice / trajectory_generation / trajectory1d_generator . cc <nl> ppp b / modules / planning / lattice / trajectory_generation / trajectory1d_generator . cc <nl> <nl> # include " modules / planning / lattice / trajectory1d / piecewise_jerk_trajectory1d . h " <nl> # include " modules / planning / lattice / trajectory1d / standing_still_trajectory1d . h " <nl> # include " modules / planning / lattice / trajectory_generation / lateral_trajectory_optimizer . h " <nl> - # include " modules / planning / math / finite_element_qp / osqp_lateral_qp_optimizer . h " <nl> + # include " modules / planning / math / finite_element_qp / lateral_qp_optimizer . h " <nl> + # include " modules / planning / lattice / trajectory_generation / lateral_osqp_optimizer . h " <nl> <nl> namespace apollo { <nl> namespace planning { <nl> void Trajectory1dGenerator : : GenerateLateralTrajectoryBundle ( <nl> <nl> / / LateralTrajectoryOptimizer lateral_optimizer ; <nl> std : : unique_ptr < LateralQPOptimizer > lateral_optimizer ( <nl> - new OsqpLateralQPOptimizer ) ; <nl> + new LateralOSQPOptimizer ) ; <nl> <nl> lateral_optimizer - > optimize ( init_lat_state_ , delta_s , lateral_bounds ) ; <nl> <nl> mmm a / modules / planning / math / finite_element_qp / BUILD <nl> ppp b / modules / planning / math / finite_element_qp / BUILD <nl> cc_library ( <nl> " : lateral_qp_optimizer " , <nl> " : osqp_lateral_jerk_qp_optimizer " , <nl> " : osqp_lateral_linear_qp_optimizer " , <nl> - " : osqp_lateral_qp_optimizer " , <nl> ] , <nl> ) <nl> <nl> cc_library ( <nl> ] , <nl> ) <nl> <nl> - cc_library ( <nl> - name = " osqp_lateral_qp_optimizer " , <nl> - srcs = [ <nl> - " osqp_lateral_qp_optimizer . cc " , <nl> - ] , <nl> - hdrs = [ <nl> - " osqp_lateral_qp_optimizer . 
h " , <nl> - ] , <nl> - deps = [ <nl> - " : lateral_qp_optimizer " , <nl> - " @ eigen " , <nl> - " @ osqp " , <nl> - ] , <nl> - ) <nl> - <nl> cc_library ( <nl> name = " osqp_lateral_linear_qp_optimizer " , <nl> srcs = [ <nl> cc_library ( <nl> ] , <nl> ) <nl> <nl> + <nl> cc_library ( <nl> name = " osqp_lateral_jerk_qp_optimizer " , <nl> srcs = [ <nl> deleted file mode 100644 <nl> index da5bcc0805b . . 00000000000 <nl> mmm a / modules / planning / math / finite_element_qp / osqp_lateral_qp_optimizer . cc <nl> ppp / dev / null <nl> <nl> - / * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * <nl> - * Copyright 2018 The Apollo Authors . All Rights Reserved . <nl> - * <nl> - * Licensed under the Apache License , Version 2 . 0 ( the " License " ) ; <nl> - * you may not use this file except in compliance with the License . <nl> - * You may obtain a copy of the License at <nl> - * <nl> - * http : / / www . apache . org / licenses / LICENSE - 2 . 0 <nl> - * <nl> - * Unless required by applicable law or agreed to in writing , software <nl> - * distributed under the License is distributed on an " AS IS " BASIS , <nl> - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND , either express or implied . <nl> - * See the License for the specific language governing permissions and <nl> - * limitations under the License . <nl> - * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * / <nl> - <nl> - # include " modules / planning / math / finite_element_qp / osqp_lateral_qp_optimizer . h " <nl> - <nl> - # include < algorithm > <nl> - <nl> - # include " cybertron / common / log . h " <nl> - # include " modules / common / math / matrix_operations . h " <nl> - # include " modules / planning / common / planning_gflags . h " <nl> - <nl> - namespace apollo { <nl> - namespace planning { <nl> - <nl> - using Eigen : : MatrixXd ; <nl> - using apollo : : common : : math : : DenseToCSCMatrix ; <nl> - <nl> - bool OsqpLateralQPOptimizer : : optimize ( <nl> - const std : : array < double , 3 > & d_state , const double delta_s , <nl> - const std : : vector < std : : pair < double , double > > & d_bounds ) { <nl> - std : : vector < c_float > P_data ; <nl> - std : : vector < c_int > P_indices ; <nl> - std : : vector < c_int > P_indptr ; <nl> - CalculateKernel ( d_bounds , & P_data , & P_indices , & P_indptr ) ; <nl> - delta_s_ = delta_s ; <nl> - const int num_var = d_bounds . size ( ) ; <nl> - const int kNumParam = 3 * d_bounds . size ( ) ; <nl> - const int kNumConstraint = kNumParam + 3 * ( num_var - 1 ) + 3 ; <nl> - c_float lower_bounds [ kNumConstraint ] ; <nl> - c_float upper_bounds [ kNumConstraint ] ; <nl> - <nl> - const int prime_offset = num_var ; <nl> - const int pprime_offset = 2 * num_var ; <nl> - <nl> - std : : vector < std : : vector < std : : pair < c_int , c_float > > > columns ; <nl> - columns . resize ( kNumParam ) ; <nl> - <nl> - int constraint_index = 0 ; <nl> - <nl> - / / d_i + 1 ' ' - d_i ' ' <nl> - for ( int i = 0 ; i + 1 < num_var ; + + i ) { <nl> - columns [ pprime_offset + i ] . emplace_back ( constraint_index , - 1 . 0 ) ; <nl> - columns [ pprime_offset + i + 1 ] . emplace_back ( constraint_index , 1 . 
0 ) ; <nl> - <nl> - lower_bounds [ constraint_index ] = <nl> - - FLAGS_lateral_third_order_derivative_max * delta_s ; <nl> - upper_bounds [ constraint_index ] = <nl> - FLAGS_lateral_third_order_derivative_max * delta_s ; <nl> - + + constraint_index ; <nl> - } <nl> - <nl> - / / d_i + 1 ' - d_i ' - 0 . 5 * ds * ( d_i ' ' + d_i + 1 ' ' ) <nl> - for ( int i = 0 ; i + 1 < num_var ; + + i ) { <nl> - columns [ prime_offset + i ] . emplace_back ( constraint_index , - 1 . 0 ) ; <nl> - columns [ prime_offset + i + 1 ] . emplace_back ( constraint_index , 1 . 0 ) ; <nl> - columns [ pprime_offset + i ] . emplace_back ( constraint_index , - 0 . 5 * delta_s ) ; <nl> - columns [ pprime_offset + i + 1 ] . emplace_back ( constraint_index , <nl> - - 0 . 5 * delta_s ) ; <nl> - <nl> - lower_bounds [ constraint_index ] = 0 . 0 ; <nl> - upper_bounds [ constraint_index ] = 0 . 0 ; <nl> - + + constraint_index ; <nl> - } <nl> - <nl> - / / d_i + 1 - d_i - d_i ' * ds - 1 / 3 * d_i ' ' * ds ^ 2 - 1 / 6 * d_i + 1 ' ' * ds ^ 2 <nl> - for ( int i = 0 ; i + 1 < num_var ; + + i ) { <nl> - columns [ i ] . emplace_back ( constraint_index , - 1 . 0 ) ; <nl> - columns [ i + 1 ] . emplace_back ( constraint_index , 1 . 0 ) ; <nl> - columns [ prime_offset + i ] . emplace_back ( constraint_index , - delta_s ) ; <nl> - columns [ pprime_offset + i ] . emplace_back ( constraint_index , <nl> - - delta_s * delta_s / 3 . 0 ) ; <nl> - columns [ pprime_offset + i + 1 ] . emplace_back ( constraint_index , <nl> - - delta_s * delta_s / 6 . 0 ) ; <nl> - <nl> - lower_bounds [ constraint_index ] = 0 . 0 ; <nl> - upper_bounds [ constraint_index ] = 0 . 0 ; <nl> - + + constraint_index ; <nl> - } <nl> - <nl> - columns [ 0 ] . emplace_back ( constraint_index , 1 . 0 ) ; <nl> - lower_bounds [ constraint_index ] = d_state [ 0 ] ; <nl> - upper_bounds [ constraint_index ] = d_state [ 0 ] ; <nl> - + + constraint_index ; <nl> - <nl> - columns [ prime_offset ] . emplace_back ( constraint_index , 1 . 0 ) ; <nl> - lower_bounds [ constraint_index ] = d_state [ 1 ] ; <nl> - upper_bounds [ constraint_index ] = d_state [ 1 ] ; <nl> - + + constraint_index ; <nl> - <nl> - columns [ pprime_offset ] . emplace_back ( constraint_index , 1 . 0 ) ; <nl> - lower_bounds [ constraint_index ] = d_state [ 2 ] ; <nl> - upper_bounds [ constraint_index ] = d_state [ 2 ] ; <nl> - + + constraint_index ; <nl> - <nl> - const double LARGE_VALUE = 2 . 0 ; <nl> - for ( int i = 0 ; i < kNumParam ; + + i ) { <nl> - columns [ i ] . emplace_back ( constraint_index , 1 . 0 ) ; <nl> - if ( i < num_var ) { <nl> - lower_bounds [ constraint_index ] = d_bounds [ i ] . first ; <nl> - upper_bounds [ constraint_index ] = d_bounds [ i ] . second ; <nl> - } else { <nl> - lower_bounds [ constraint_index ] = - LARGE_VALUE ; <nl> - upper_bounds [ constraint_index ] = LARGE_VALUE ; <nl> - } <nl> - + + constraint_index ; <nl> - } <nl> - <nl> - CHECK_EQ ( constraint_index , kNumConstraint ) ; <nl> - <nl> - / / change affine_constraint to CSC format <nl> - std : : vector < c_float > A_data ; <nl> - std : : vector < c_int > A_indices ; <nl> - std : : vector < c_int > A_indptr ; <nl> - int ind_p = 0 ; <nl> - for ( int j = 0 ; j < kNumParam ; + + j ) { <nl> - A_indptr . push_back ( ind_p ) ; <nl> - for ( const auto & row_data_pair : columns [ j ] ) { <nl> - A_data . push_back ( row_data_pair . second ) ; <nl> - A_indices . push_back ( row_data_pair . first ) ; <nl> - + + ind_p ; <nl> - } <nl> - } <nl> - A_indptr . 
push_back ( ind_p ) ; <nl> - <nl> - / / offset <nl> - double q [ kNumParam ] ; <nl> - for ( int i = 0 ; i < kNumParam ; + + i ) { <nl> - if ( i < num_var ) { <nl> - q [ i ] = - 2 . 0 * FLAGS_weight_lateral_obstacle_distance * <nl> - ( d_bounds [ i ] . first + d_bounds [ i ] . second ) ; <nl> - } else { <nl> - q [ i ] = 0 . 0 ; <nl> - } <nl> - } <nl> - <nl> - / / Problem settings <nl> - OSQPSettings * settings = <nl> - reinterpret_cast < OSQPSettings * > ( c_malloc ( sizeof ( OSQPSettings ) ) ) ; <nl> - <nl> - / / Define Solver settings as default <nl> - osqp_set_default_settings ( settings ) ; <nl> - settings - > alpha = 1 . 0 ; / / Change alpha parameter <nl> - settings - > eps_abs = 1 . 0e - 05 ; <nl> - settings - > eps_rel = 1 . 0e - 05 ; <nl> - settings - > max_iter = 5000 ; <nl> - settings - > polish = true ; <nl> - settings - > verbose = FLAGS_enable_osqp_debug ; <nl> - <nl> - / / Populate data <nl> - OSQPData * data = reinterpret_cast < OSQPData * > ( c_malloc ( sizeof ( OSQPData ) ) ) ; <nl> - data - > n = kNumParam ; <nl> - data - > m = kNumConstraint ; <nl> - data - > P = csc_matrix ( data - > n , data - > n , P_data . size ( ) , P_data . data ( ) , <nl> - P_indices . data ( ) , P_indptr . data ( ) ) ; <nl> - data - > q = q ; <nl> - data - > A = csc_matrix ( data - > m , data - > n , A_data . size ( ) , A_data . data ( ) , <nl> - A_indices . data ( ) , A_indptr . data ( ) ) ; <nl> - data - > l = lower_bounds ; <nl> - data - > u = upper_bounds ; <nl> - <nl> - / / Workspace <nl> - OSQPWorkspace * work = osqp_setup ( data , settings ) ; <nl> - <nl> - / / Solve Problem <nl> - osqp_solve ( work ) ; <nl> - <nl> - / / extract primal results <nl> - for ( int i = 0 ; i < num_var ; + + i ) { <nl> - opt_d_ . push_back ( work - > solution - > x [ i ] ) ; <nl> - opt_d_prime_ . push_back ( work - > solution - > x [ i + num_var ] ) ; <nl> - opt_d_pprime_ . push_back ( work - > solution - > x [ i + 2 * num_var ] ) ; <nl> - } <nl> - opt_d_prime_ [ num_var - 1 ] = 0 . 0 ; <nl> - opt_d_pprime_ [ num_var - 1 ] = 0 . 0 ; <nl> - <nl> - / / Cleanup <nl> - osqp_cleanup ( work ) ; <nl> - c_free ( data - > A ) ; <nl> - c_free ( data - > P ) ; <nl> - c_free ( data ) ; <nl> - c_free ( settings ) ; <nl> - <nl> - return true ; <nl> - } <nl> - <nl> - void OsqpLateralQPOptimizer : : CalculateKernel ( <nl> - const std : : vector < std : : pair < double , double > > & d_bounds , <nl> - std : : vector < c_float > * P_data , std : : vector < c_int > * P_indices , <nl> - std : : vector < c_int > * P_indptr ) { <nl> - const int kNumParam = 3 * d_bounds . size ( ) ; <nl> - P_data - > resize ( kNumParam ) ; <nl> - P_indices - > resize ( kNumParam ) ; <nl> - P_indptr - > resize ( kNumParam + 1 ) ; <nl> - <nl> - for ( int i = 0 ; i < kNumParam ; + + i ) { <nl> - if ( i < static_cast < int > ( d_bounds . size ( ) ) ) { <nl> - P_data - > at ( i ) = 2 . 0 * FLAGS_weight_lateral_offset + <nl> - 2 . 0 * FLAGS_weight_lateral_obstacle_distance ; <nl> - } else if ( i < 2 * static_cast < int > ( d_bounds . size ( ) ) ) { <nl> - P_data - > at ( i ) = 2 . 0 * FLAGS_weight_lateral_derivative ; <nl> - } else { <nl> - P_data - > at ( i ) = 2 . 0 * FLAGS_weight_lateral_second_order_derivative ; <nl> - } <nl> - P_indices - > at ( i ) = i ; <nl> - P_indptr - > at ( i ) = i ; <nl> - } <nl> - P_indptr - > at ( kNumParam ) = kNumParam ; <nl> - CHECK_EQ ( P_data - > size ( ) , P_indices - > size ( ) ) ; <nl> - } <nl> - <nl> - } / / namespace planning <nl> - } / / namespace apollo <nl> deleted file mode 100644 <nl> index aff6b23468e . . 
00000000000 <nl> mmm a / modules / planning / math / finite_element_qp / osqp_lateral_qp_optimizer . h <nl> ppp / dev / null <nl> <nl> - / * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * <nl> - * Copyright 2018 The Apollo Authors . All Rights Reserved . <nl> - * <nl> - * Licensed under the Apache License , Version 2 . 0 ( the " License " ) ; <nl> - * you may not use this file except in compliance with the License . <nl> - * You may obtain a copy of the License at <nl> - * <nl> - * http : / / www . apache . org / licenses / LICENSE - 2 . 0 <nl> - * <nl> - * Unless required by applicable law or agreed to in writing , software <nl> - * distributed under the License is distributed on an " AS IS " BASIS , <nl> - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND , either express or implied . <nl> - * See the License for the specific language governing permissions and <nl> - * limitations under the License . <nl> - * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * / <nl> - <nl> - / * * <nl> - * @ file <nl> - * * / <nl> - <nl> - # pragma once <nl> - <nl> - # include < array > <nl> - # include < utility > <nl> - # include < vector > <nl> - <nl> - # include " Eigen / Core " <nl> - # include " osqp / include / osqp . h " <nl> - <nl> - # include " modules / planning / lattice / trajectory1d / piecewise_jerk_trajectory1d . h " <nl> - # include " modules / planning / math / finite_element_qp / lateral_qp_optimizer . h " <nl> - <nl> - namespace apollo { <nl> - namespace planning { <nl> - <nl> - class OsqpLateralQPOptimizer : public LateralQPOptimizer { <nl> - public : <nl> - OsqpLateralQPOptimizer ( ) = default ; <nl> - <nl> - virtual ~ OsqpLateralQPOptimizer ( ) = default ; <nl> - <nl> - bool optimize ( <nl> - const std : : array < double , 3 > & d_state , const double delta_s , <nl> - const std : : vector < std : : pair < double , double > > & d_bounds ) override ; <nl> - <nl> - private : <nl> - void CalculateKernel ( const std : : vector < std : : pair < double , double > > & d_bounds , <nl> - std : : vector < c_float > * P_data , <nl> - std : : vector < c_int > * P_indices , <nl> - std : : vector < c_int > * P_indptr ) ; <nl> - double delta_s_ = 0 . 0 ; <nl> - } ; <nl> - <nl> - } / / namespace planning <nl> - } / / namespace apollo <nl> mmm a / modules / planning / toolkits / optimizers / qp_piecewise_jerk_path / qp_piecewise_jerk_path_optimizer . cc <nl> ppp b / modules / planning / toolkits / optimizers / qp_piecewise_jerk_path / qp_piecewise_jerk_path_optimizer . cc <nl> <nl> # include " modules / common / time / time . h " <nl> # include " modules / planning / math / finite_element_qp / osqp_lateral_jerk_qp_optimizer . h " <nl> # include " modules / planning / math / finite_element_qp / osqp_lateral_linear_qp_optimizer . h " <nl> - # include " modules / planning / math / finite_element_qp / osqp_lateral_qp_optimizer . h " <nl> <nl> namespace apollo { <nl> namespace planning { <nl> | Planning : move osqp_lateral_qp_optimizer to lattice to make fem math lib clean | ApolloAuto/apollo | 2a3f8d9276e10e4885752183c0c338cc9cfae29a | 2018-12-13T23:17:27Z |
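
The optimizer above packs P and A into compressed sparse column form by hand before calling OSQP. A minimal standalone example of the same C API sequence (setup, solve, read work->solution->x, cleanup, following the same API version as the code in this commit) on a two-variable toy QP, minimize x0^2 + x1^2 - 2*x0 - 2*x1 subject to 0 <= x <= 1, whose optimum is x = (1, 1):

    #include <cstdio>
    #include "osqp/include/osqp.h"

    int main() {
      // P = diag(2, 2) in upper-triangular CSC form, as OSQP expects.
      c_float P_x[2] = {2.0, 2.0};
      c_int P_i[2] = {0, 1};
      c_int P_p[3] = {0, 1, 2};
      c_float q[2] = {-2.0, -2.0};
      // A = identity, so l <= x <= u are simple box constraints.
      c_float A_x[2] = {1.0, 1.0};
      c_int A_i[2] = {0, 1};
      c_int A_p[3] = {0, 1, 2};
      c_float l[2] = {0.0, 0.0};
      c_float u[2] = {1.0, 1.0};

      OSQPSettings* settings =
          reinterpret_cast<OSQPSettings*>(c_malloc(sizeof(OSQPSettings)));
      osqp_set_default_settings(settings);

      OSQPData* data = reinterpret_cast<OSQPData*>(c_malloc(sizeof(OSQPData)));
      data->n = 2;
      data->m = 2;
      data->P = csc_matrix(data->n, data->n, 2, P_x, P_i, P_p);
      data->q = q;
      data->A = csc_matrix(data->m, data->n, 2, A_x, A_i, A_p);
      data->l = l;
      data->u = u;

      OSQPWorkspace* work = osqp_setup(data, settings);
      osqp_solve(work);
      std::printf("x = (%f, %f)\n",
                  work->solution->x[0], work->solution->x[1]);

      osqp_cleanup(work);
      c_free(data->A);
      c_free(data->P);
      c_free(data);
      c_free(settings);
      return 0;
    }
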
mmm a / src / common / object_pool . h <nl> ppp b / src / common / object_pool . h <nl> <nl> # ifndef MXNET_COMMON_OBJECT_POOL_H_ <nl> # define MXNET_COMMON_OBJECT_POOL_H_ <nl> # include < dmlc / logging . h > <nl> - # include < malloc . h > <nl> # include < cstdlib > <nl> # include < mutex > <nl> # include < utility > <nl> + # include < vector > <nl> <nl> namespace common { <nl> <nl> class ObjectPool { <nl> * \ brief Head of free list . <nl> * / <nl> LinkedList * head_ { nullptr } ; <nl> + / * ! <nl> + * \ brief Pages allocated . <nl> + * / <nl> + std : : vector < void * > allocated_ ; <nl> + / * ! <nl> + * \ brief Private constructor . <nl> + * / <nl> ObjectPool ( ) ; <nl> / * ! <nl> * \ brief Allocate a page of raw objects . <nl> struct ObjectPoolAllocatable { <nl> * \ brief Create new object . <nl> * \ return Pointer to the new object . <nl> * / <nl> - static T * New ( ) ; <nl> + template < typename . . . Args > <nl> + static T * New ( Args & & . . . args ) ; <nl> / * ! <nl> * \ brief Delete an existing object . <nl> * \ param ptr The pointer to delete . <nl> struct ObjectPoolAllocatable { <nl> static void Delete ( T * ptr ) ; <nl> } ; / / struct ObjectPoolAllocatable <nl> <nl> - / / TODO ( hotpxl ) free all memory allocated <nl> template < typename T > <nl> - ObjectPool < T > : : ~ ObjectPool ( ) = default ; <nl> + ObjectPool < T > : : ~ ObjectPool ( ) { <nl> + for ( auto i : allocated_ ) { <nl> + free ( i ) ; <nl> + } <nl> + } <nl> <nl> template < typename T > <nl> template < typename . . . Args > <nl> void ObjectPool < T > : : AllocateChunk ( ) { <nl> void * new_chunk_ptr ; <nl> int ret = posix_memalign ( & new_chunk_ptr , kPageSize , kPageSize ) ; <nl> CHECK_EQ ( ret , 0 ) < < " Allocation failed " ; <nl> - auto & & new_chunk = static_cast < LinkedList * > ( new_chunk_ptr ) ; <nl> + allocated_ . emplace_back ( new_chunk_ptr ) ; <nl> + auto new_chunk = static_cast < LinkedList * > ( new_chunk_ptr ) ; <nl> auto size = kPageSize / sizeof ( LinkedList ) ; <nl> for ( std : : size_t i = 0 ; i < size - 1 ; + + i ) { <nl> new_chunk [ i ] . next = & new_chunk [ i + 1 ] ; <nl> void ObjectPool < T > : : AllocateChunk ( ) { <nl> } <nl> <nl> template < typename T > <nl> - T * ObjectPoolAllocatable < T > : : New ( ) { <nl> - return ObjectPool < T > : : Get ( ) - > New ( ) ; <nl> + template < typename . . . Args > <nl> + T * ObjectPoolAllocatable < T > : : New ( Args & & . . . args ) { <nl> + return ObjectPool < T > : : Get ( ) - > New ( std : : forward < Args > ( args ) . . . ) ; <nl> } <nl> <nl> template < typename T > <nl> void ObjectPoolAllocatable < T > : : Delete ( T * ptr ) { <nl> - return ObjectPool < T > : : Get ( ) - > Delete ( ptr ) ; <nl> + ObjectPool < T > : : Get ( ) - > Delete ( ptr ) ; <nl> } <nl> <nl> } / / namespace common <nl> mmm a / src / dag_engine / threaded_engine . cc <nl> ppp b / src / dag_engine / threaded_engine . cc <nl> <nl> # include < dmlc / logging . h > <nl> # include < cassert > <nl> # include < algorithm > <nl> - # include < utility > <nl> # include < condition_variable > <nl> # include < mutex > <nl> + # include < utility > <nl> # include " . . / common / cuda_utils . 
h " <nl> <nl> namespace mxnet { <nl> std : : atomic < std : : size_t > ThreadedVar : : counter { 0 } ; <nl> std : : atomic < std : : size_t > ThreadedOpr : : counter { 0 } ; <nl> # endif / / DAG_ENGINE_DEBUG <nl> <nl> + ThreadedVar : : ThreadedVar ( VersionedVarBlock * head ) : head_ { head } { <nl> + # if DAG_ENGINE_DEBUG <nl> + LOG ( INFO ) < < __func__ < < " " < < + + counter ; <nl> + # endif / / DAG_ENGINE_DEBUG <nl> + } <nl> + void ThreadedVar : : AppendReadDependency ( OprBlock * opr_block ) { <nl> + std : : lock_guard < std : : mutex > lock { m_ } ; <nl> + if ( ready_to_read_ ) { <nl> + assert ( pending_write_ = = nullptr ) ; <nl> + + + num_pending_reads_ ; <nl> + - - opr_block - > wait ; <nl> + } else { <nl> + auto & & new_var_block = VersionedVarBlock : : New ( ) ; <nl> + assert ( head_ - > next = = nullptr ) ; <nl> + assert ( head_ - > trigger = = nullptr ) ; <nl> + assert ( head_ - > write = = false ) ; <nl> + head_ - > next = new_var_block ; <nl> + head_ - > trigger = opr_block ; <nl> + head_ = new_var_block ; <nl> + } <nl> + } <nl> + <nl> + void ThreadedVar : : AppendWriteDependency ( OprBlock * opr_block ) { <nl> + std : : lock_guard < std : : mutex > lock { m_ } ; <nl> + auto & & new_var_block = VersionedVarBlock : : New ( ) ; <nl> + head_ - > next = new_var_block ; <nl> + head_ - > trigger = opr_block ; <nl> + head_ - > write = true ; <nl> + if ( ready_to_read_ ) { <nl> + / * ! <nl> + * Raise ` num_pending_reads_ ` temporarily to avoid premature triggering . <nl> + * / <nl> + + + num_pending_reads_ ; <nl> + pending_write_ = head_ ; <nl> + if ( - - num_pending_reads_ = = 0 ) { <nl> + - - opr_block - > wait ; <nl> + } <nl> + ready_to_read_ = false ; <nl> + } <nl> + head_ = new_var_block ; <nl> + } <nl> + <nl> + void ThreadedVar : : CompleteReadDependency ( <nl> + dmlc : : ConcurrentBlockingQueue < OprBlock * > & task_queue ) { <nl> + std : : lock_guard < std : : mutex > lock { m_ } ; <nl> + if ( - - num_pending_reads_ = = 0 ) { <nl> + if ( pending_write_ ! = nullptr & & - - pending_write_ - > trigger - > wait = = 0 ) { <nl> + task_queue . Push ( pending_write_ - > trigger ) ; <nl> + } <nl> + } <nl> + } <nl> + <nl> + bool ThreadedVar : : CompleteWriteDependency ( <nl> + dmlc : : ConcurrentBlockingQueue < OprBlock * > & task_queue ) { <nl> + std : : lock_guard < std : : mutex > lock { m_ } ; <nl> + assert ( ready_to_read_ = = false ) ; <nl> + auto cur_head = pending_write_ - > next ; <nl> + VersionedVarBlock : : Delete ( pending_write_ ) ; <nl> + pending_write_ = nullptr ; <nl> + if ( to_delete_ ) { <nl> + assert ( cur_head - > next = = nullptr ) ; <nl> + VersionedVarBlock : : Delete ( cur_head ) ; <nl> + return true ; <nl> + } else { <nl> + while ( true ) { <nl> + if ( cur_head - > write = = true ) { <nl> + + + num_pending_reads_ ; <nl> + pending_write_ = cur_head ; <nl> + if ( - - num_pending_reads_ = = 0 ) { <nl> + if ( - - cur_head - > trigger - > wait = = 0 ) { <nl> + task_queue . Push ( cur_head - > trigger ) ; <nl> + } <nl> + } <nl> + break ; <nl> + } else if ( cur_head - > next = = nullptr ) { <nl> + ready_to_read_ = true ; <nl> + break ; <nl> + } else { <nl> + + + num_pending_reads_ ; <nl> + if ( - - cur_head - > trigger - > wait = = 0 ) { <nl> + task_queue . 
Push ( cur_head - > trigger ) ; <nl> + } <nl> + auto prev = cur_head ; <nl> + cur_head = cur_head - > next ; <nl> + VersionedVarBlock : : Delete ( prev ) ; <nl> + } <nl> + } <nl> + return false ; <nl> + } <nl> + } <nl> + <nl> + void ThreadedVar : : SetToDelete ( ) { <nl> + std : : lock_guard < std : : mutex > lock { m_ } ; <nl> + to_delete_ = true ; <nl> + } <nl> + <nl> ThreadedVar * ThreadedVar : : CastFromBase ( Var * v ) { <nl> return v - > Cast < ThreadedVar > ( ) ; <nl> } <nl> ThreadedEngine : : ~ ThreadedEngine ( ) noexcept ( false ) { <nl> } <nl> <nl> ThreadedVar * ThreadedEngine : : NewVar ( ) { <nl> - auto ret = ThreadedVar : : New ( ) ; <nl> - ret - > head = VersionedVarBlock : : New ( ) ; <nl> + auto ret = ThreadedVar : : New ( VersionedVarBlock : : New ( ) ) ; <nl> return ret ; <nl> } <nl> <nl> void ThreadedEngine : : Push ( OprHandle op , Context exec_ctx ) { <nl> + + pending_ ; <nl> / / Add read dependencies . <nl> for ( auto & & i : threaded_opr - > use_vars ) { <nl> - std : : lock_guard < std : : mutex > lock { i - > m } ; <nl> - if ( i - > ready_to_read ) { <nl> - assert ( i - > pending_write = = nullptr ) ; <nl> - + + i - > num_pending_reads ; <nl> - - - opr_block - > wait ; <nl> - } else { <nl> - auto & & new_var_block = VersionedVarBlock : : New ( ) ; <nl> - assert ( i - > head - > next = = nullptr ) ; <nl> - assert ( i - > head - > trigger = = nullptr ) ; <nl> - assert ( i - > head - > write = = false ) ; <nl> - i - > head - > next = new_var_block ; <nl> - i - > head - > trigger = opr_block ; <nl> - i - > head = new_var_block ; <nl> - } <nl> + i - > AppendReadDependency ( opr_block ) ; <nl> } <nl> / / Add write dependencies . <nl> for ( auto & & i : threaded_opr - > mutate_vars ) { <nl> - std : : lock_guard < std : : mutex > lock { i - > m } ; <nl> - auto & & new_var_block = VersionedVarBlock : : New ( ) ; <nl> - i - > head - > next = new_var_block ; <nl> - i - > head - > trigger = opr_block ; <nl> - i - > head - > write = true ; <nl> - if ( i - > ready_to_read ) { <nl> - / * ! <nl> - * Raise ` num_pending_reads ` temporarily to avoid premature triggering . <nl> - * / <nl> - + + i - > num_pending_reads ; <nl> - i - > pending_write = i - > head ; <nl> - if ( - - i - > num_pending_reads = = 0 ) { <nl> - - - opr_block - > wait ; <nl> - } <nl> - i - > ready_to_read = false ; <nl> - } <nl> - i - > head = new_var_block ; <nl> + i - > AppendWriteDependency ( opr_block ) ; <nl> } <nl> if ( - - opr_block - > wait = = 0 ) { <nl> task_queue_ . Push ( opr_block ) ; <nl> void ThreadedEngine : : PushDelete ( Fn delete_fn , Context exec_ctx , Variable var ) { <nl> auto & & func = [ delete_fn , threaded_var ] ( RunContext ctx ) { <nl> / * ! <nl> * Mark variable as orphan , so during ` ThreadedEngine : : OnComplete ` it could <nl> - * be <nl> - * recycled . <nl> + * be recycled . <nl> * / <nl> - threaded_var - > to_delete = true ; <nl> + threaded_var - > SetToDelete ( ) ; <nl> delete_fn ( ctx ) ; <nl> } ; <nl> Push ( func , exec_ctx , { } , { var } ) ; <nl> void ThreadedEngine : : OnComplete ( ThreadedOpr * threaded_opr ) { <nl> * Mark complete for read variables . <nl> * / <nl> for ( auto & & i : threaded_opr - > use_vars ) { <nl> - std : : lock_guard < std : : mutex > lock { i - > m } ; <nl> - if ( - - i - > num_pending_reads = = 0 ) { <nl> - if ( i - > pending_write ! = nullptr & & <nl> - - - i - > pending_write - > trigger - > wait = = 0 ) { <nl> - task_queue_ . 
Push ( i - > pending_write - > trigger ) ; <nl> - } <nl> - } <nl> + i - > CompleteReadDependency ( task_queue_ ) ; <nl> } <nl> / * ! <nl> * Mark complete for write variables . <nl> * / <nl> for ( auto & & i : threaded_opr - > mutate_vars ) { <nl> - bool to_delete = false ; <nl> - { <nl> - std : : lock_guard < std : : mutex > lock { i - > m } ; <nl> - assert ( i - > ready_to_read = = false ) ; <nl> - auto head = i - > pending_write - > next ; <nl> - VersionedVarBlock : : Delete ( i - > pending_write ) ; <nl> - i - > pending_write = nullptr ; <nl> - if ( i - > to_delete ) { <nl> - assert ( head - > next = = nullptr ) ; <nl> - VersionedVarBlock : : Delete ( head ) ; <nl> - to_delete = true ; <nl> - } else { <nl> - while ( true ) { <nl> - if ( head - > write = = true ) { <nl> - + + i - > num_pending_reads ; <nl> - i - > pending_write = head ; <nl> - if ( - - i - > num_pending_reads = = 0 ) { <nl> - if ( - - head - > trigger - > wait = = 0 ) { <nl> - task_queue_ . Push ( head - > trigger ) ; <nl> - } <nl> - } <nl> - break ; <nl> - } else if ( head - > next = = nullptr ) { <nl> - i - > ready_to_read = true ; <nl> - break ; <nl> - } else { <nl> - + + i - > num_pending_reads ; <nl> - if ( - - head - > trigger - > wait = = 0 ) { <nl> - task_queue_ . Push ( head - > trigger ) ; <nl> - } <nl> - auto prev = head ; <nl> - head = head - > next ; <nl> - VersionedVarBlock : : Delete ( prev ) ; <nl> - } <nl> - } <nl> - } <nl> - } <nl> + bool to_delete = i - > CompleteWriteDependency ( task_queue_ ) ; <nl> if ( to_delete ) { <nl> ThreadedVar : : Delete ( i ) ; <nl> } <nl> mmm a / src / dag_engine / threaded_engine . h <nl> ppp b / src / dag_engine / threaded_engine . h <nl> struct VersionedVarBlock <nl> / * ! <nl> * \ brief Variable implementation . <nl> * / <nl> - struct ThreadedVar final : public Var , <nl> + class ThreadedVar final : public Var , <nl> public common : : ObjectPoolAllocatable < ThreadedVar > { <nl> + public : <nl> # if DAG_ENGINE_DEBUG <nl> static std : : atomic < std : : size_t > counter ; <nl> - ThreadedVar ( ) { LOG ( INFO ) < < __func__ < < " " < < + + counter ; } <nl> ~ ThreadedVar ( ) { LOG ( INFO ) < < __func__ < < " " < < - - counter ; } <nl> # endif / / DAG_ENGINE_DEBUG <nl> - std : : mutex m ; <nl> - std : : size_t num_pending_reads { 0 } ; <nl> - VersionedVarBlock * head { nullptr } ; <nl> - VersionedVarBlock * pending_write { nullptr } ; <nl> + ThreadedVar ( VersionedVarBlock * head ) ; <nl> + void AppendReadDependency ( OprBlock * opr_block ) ; <nl> + void AppendWriteDependency ( OprBlock * opr_block ) ; <nl> + void CompleteReadDependency ( dmlc : : ConcurrentBlockingQueue < OprBlock * > & ) ; <nl> + bool CompleteWriteDependency ( dmlc : : ConcurrentBlockingQueue < OprBlock * > & ) ; <nl> + void SetToDelete ( ) ; <nl> + <nl> + static ThreadedVar * CastFromBase ( Var * ptr ) ; <nl> + <nl> + private : <nl> + / / TODO ( hotpxl ) change this to spinlock for faster runtime <nl> + std : : mutex m_ ; <nl> + std : : size_t num_pending_reads_ { 0 } ; <nl> + VersionedVarBlock * head_ { nullptr } ; <nl> + VersionedVarBlock * pending_write_ { nullptr } ; <nl> / * ! <nl> * If true , then there are no current or future processing of the chain . <nl> * / <nl> - bool ready_to_read { true } ; <nl> + bool ready_to_read_ { true } ; <nl> / * ! <nl> * If true , delete after operation completes . <nl> * / <nl> - bool to_delete { false } ; <nl> - <nl> - static ThreadedVar * CastFromBase ( Var * ptr ) ; <nl> + bool to_delete_ { false } ; <nl> } ; / / struct ThreadedVar <nl> <nl> / * ! 
<nl> | [ engine - refactor ] refactor using var queue abstraction | apache/incubator-mxnet | 94c70ed2143e6959d4e92a1ea617fd8aaeec0829 | 2015-09-05T13:26:41Z |
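The MXNet refactor above combines three pool idioms: pages obtained with posix_memalign are remembered in a std::vector so the destructor can free them, recycled slots are threaded onto an intrusive free list, and New() perfect-forwards constructor arguments into placement new. A minimal, self-contained sketch of the same pattern follows; SimplePool, Node, and kPageSize are illustrative names rather than MXNet's, and it assumes sizeof(T) is small relative to the page.

#include <stdlib.h>

#include <cassert>
#include <cstddef>
#include <new>
#include <utility>
#include <vector>

template <typename T>
class SimplePool {
 public:
  ~SimplePool() {
    for (void* page : pages_) free(page);  // release every page ever allocated
  }

  template <typename... Args>
  T* New(Args&&... args) {
    if (head_ == nullptr) AllocatePage();
    Node* node = head_;
    head_ = head_->next;
    return new (node) T(std::forward<Args>(args)...);  // placement-construct in the slot
  }

  void Delete(T* ptr) {
    ptr->~T();
    Node* node = reinterpret_cast<Node*>(ptr);
    node->next = head_;  // thread the slot back onto the free list
    head_ = node;
  }

 private:
  union Node {
    Node* next;                                   // valid while the slot is free
    alignas(T) unsigned char storage[sizeof(T)];  // valid while the slot is live
  };
  static constexpr std::size_t kPageSize = 4096;

  void AllocatePage() {
    void* page = nullptr;
    int ret = posix_memalign(&page, kPageSize, kPageSize);
    assert(ret == 0);
    (void)ret;
    pages_.push_back(page);  // remembered so ~SimplePool can free it
    Node* nodes = static_cast<Node*>(page);
    std::size_t n = kPageSize / sizeof(Node);
    for (std::size_t i = 0; i + 1 < n; ++i) nodes[i].next = &nodes[i + 1];
    nodes[n - 1].next = head_;
    head_ = nodes;
  }

  Node* head_ = nullptr;
  std::vector<void*> pages_;
};

int main() {
  SimplePool<int> pool;
  int* p = pool.New(42);  // argument forwarded to the constructor, as in the patch
  pool.Delete(p);         // slot returns to the free list, no allocator call
  return 0;
}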
mmm a / src / heap / mark - compact . cc <nl> ppp b / src / heap / mark - compact . cc <nl> class MarkCompactCollector : : EvacuateNewSpacePageVisitor final <nl> public : <nl> EvacuateNewSpacePageVisitor ( ) : promoted_size_ ( 0 ) { } <nl> <nl> - static void MoveToOldSpace ( Page * page , PagedSpace * owner ) { <nl> - page - > heap ( ) - > new_space ( ) - > ReplaceWithEmptyPage ( page ) ; <nl> - Page * new_page = Page : : ConvertNewToOld ( page , owner ) ; <nl> - new_page - > SetFlag ( Page : : PAGE_NEW_OLD_PROMOTION ) ; <nl> + static void TryMoveToOldSpace ( Page * page , PagedSpace * owner ) { <nl> + if ( page - > heap ( ) - > new_space ( ) - > ReplaceWithEmptyPage ( page ) ) { <nl> + Page * new_page = Page : : ConvertNewToOld ( page , owner ) ; <nl> + new_page - > SetFlag ( Page : : PAGE_NEW_OLD_PROMOTION ) ; <nl> + } <nl> } <nl> <nl> inline bool Visit ( HeapObject * object ) { <nl> void MarkCompactCollector : : EvacuatePagesInParallel ( ) { <nl> ( page - > LiveBytes ( ) > Evacuator : : PageEvacuationThreshold ( ) ) & & <nl> page - > IsFlagSet ( MemoryChunk : : NEW_SPACE_BELOW_AGE_MARK ) & & <nl> ! page - > Contains ( age_mark ) ) { <nl> - EvacuateNewSpacePageVisitor : : MoveToOldSpace ( page , heap ( ) - > old_space ( ) ) ; <nl> + EvacuateNewSpacePageVisitor : : TryMoveToOldSpace ( page , heap ( ) - > old_space ( ) ) ; <nl> } <nl> job . AddPage ( page , & abandoned_pages ) ; <nl> } <nl> mmm a / src / heap / spaces . cc <nl> ppp b / src / heap / spaces . cc <nl> void SemiSpace : : Reset ( ) { <nl> current_page_ = anchor_ . next_page ( ) ; <nl> } <nl> <nl> - void SemiSpace : : ReplaceWithEmptyPage ( Page * old_page ) { <nl> + bool SemiSpace : : ReplaceWithEmptyPage ( Page * old_page ) { <nl> + / / TODO ( mlippautz ) : We do not have to get a new page here when the semispace <nl> + / / is uncommitted later on . <nl> Page * new_page = heap ( ) - > memory_allocator ( ) - > AllocatePage ( <nl> Page : : kAllocatableMemory , this , executable ( ) ) ; <nl> + if ( new_page = = nullptr ) return false ; <nl> Bitmap : : Clear ( new_page ) ; <nl> new_page - > SetFlags ( old_page - > GetFlags ( ) , Page : : kCopyAllFlags ) ; <nl> new_page - > set_next_page ( old_page - > next_page ( ) ) ; <nl> void SemiSpace : : ReplaceWithEmptyPage ( Page * old_page ) { <nl> old_page - > prev_page ( ) - > set_next_page ( new_page ) ; <nl> heap ( ) - > CreateFillerObjectAt ( new_page - > area_start ( ) , new_page - > area_size ( ) , <nl> ClearRecordedSlots : : kNo ) ; <nl> + return true ; <nl> } <nl> <nl> void SemiSpace : : Swap ( SemiSpace * from , SemiSpace * to ) { <nl> mmm a / src / heap / spaces . h <nl> ppp b / src / heap / spaces . h <nl> class SemiSpace : public Space { <nl> / / Resets the space to using the first page . <nl> void Reset ( ) ; <nl> <nl> - void ReplaceWithEmptyPage ( Page * page ) ; <nl> + bool ReplaceWithEmptyPage ( Page * page ) ; <nl> <nl> / / Age mark accessors . <nl> Address age_mark ( ) { return age_mark_ ; } <nl> class NewSpace : public Space { <nl> <nl> inline size_t AllocatedSinceLastGC ( ) ; <nl> <nl> - void ReplaceWithEmptyPage ( Page * page ) { <nl> + bool ReplaceWithEmptyPage ( Page * page ) { <nl> / / This method is called after flipping the semispace . <nl> DCHECK ( page - > InFromSpace ( ) ) ; <nl> - from_space_ . ReplaceWithEmptyPage ( page ) ; <nl> + return from_space_ . ReplaceWithEmptyPage ( page ) ; <nl> } <nl> <nl> / / Return the maximum capacity of a semispace . 
<nl> | [ heap ] Bail out to regular evacuation if new - > old fails to allocate a page | v8/v8 | 4c880dec6291041d53302d595db9bc948ebf6f52 | 2016-04-28T08:01:55Z |
mmm a / src / serializer / log / data_block_manager . cc <nl> ppp b / src / serializer / log / data_block_manager . cc <nl> <nl> / / Copyright 2010 - 2013 RethinkDB , all rights reserved . <nl> # define __STDC_FORMAT_MACROS <nl> + # define __STDC_LIMIT_MACROS <nl> # include " serializer / log / data_block_manager . hpp " <nl> <nl> + # include < inttypes . h > <nl> # include < sys / uio . h > <nl> <nl> # include " utils . hpp " <nl> data_block_manager_t : : gimme_some_new_offsets ( const std : : vector < buf_write_info_t > <nl> <nl> std : : vector < counted_t < ls_block_token_pointee_t > > tokens ; <nl> for ( auto it = writes . begin ( ) ; it ! = writes . end ( ) ; + + it ) { <nl> - uint32_t relative_offset ; <nl> - unsigned int block_index ; <nl> + uint32_t relative_offset = valgrind_undefined < uint32_t > ( UINT32_MAX ) ; <nl> + unsigned int block_index = valgrind_undefined < unsigned int > ( UINT_MAX ) ; <nl> if ( ! active_extent - > new_offset ( it - > block_size , <nl> & relative_offset , & block_index ) ) { <nl> / / Move the active_extent gc_entry_t to the young extent queue , and make a <nl> | Fixed release mode complaint that values might be uninitialized . | rethinkdb/rethinkdb | 5ba41fea7a7d7ef62c5334e0c52d000d0d2b1d58 | 2013-07-25T21:43:14Z |
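The one-line RethinkDB fix above initializes the out-parameters with sentinels (through the project's valgrind_undefined wrapper, which still lets Valgrind flag reads of the value) so release builds cannot complain about a maybe-uninitialized read on the failure path of new_offset. A stripped-down illustration of the same defensive pattern; new_offset_stub is a stand-in, not the real gc_entry_t API, and the Valgrind annotation is dropped here.

#include <climits>
#include <cstdint>
#include <cstdio>

// Fallible producer in the style of gc_entry_t::new_offset: it writes its
// out-parameters only on success.
static bool new_offset_stub(uint32_t block_size, uint32_t *relative_offset,
                            unsigned int *block_index) {
    if (block_size == 0) return false;  // failure path leaves outputs untouched
    *relative_offset = block_size * 2u;
    *block_index = 1u;
    return true;
}

int main() {
    // Sentinel-initialize the outputs so every path reads defined values,
    // which is what silences the release-mode "may be uninitialized" warning.
    uint32_t relative_offset = UINT32_MAX;
    unsigned int block_index = UINT_MAX;
    if (!new_offset_stub(0, &relative_offset, &block_index)) {
        // fall back to another extent here; the sentinels also make any
        // accidental use of the outputs easy to spot in a debugger
    }
    std::printf("%lu %u\n", static_cast<unsigned long>(relative_offset),
                block_index);
    return 0;
}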
mmm a / tensorflow / core / kernels / data / repeat_dataset_op_test . cc <nl> ppp b / tensorflow / core / kernels / data / repeat_dataset_op_test . cc <nl> class RepeatDatasetOpTest : public DatasetOpsTestBase { <nl> NodeDef node_def_ ; <nl> } ; <nl> <nl> - struct TestParam { <nl> + struct TestCase { <nl> std : : vector < Tensor > input_tensors ; <nl> int64 count ; <nl> std : : vector < Tensor > expected_outputs ; <nl> struct TestParam { <nl> std : : vector < int > breakpoints ; <nl> } ; <nl> <nl> - / / Test case 1 : finite repetition . <nl> - TestParam TestCase1 ( ) { <nl> + TestCase FiniteRepeatTestCase ( ) { <nl> return { <nl> / * input_tensors * / <nl> { DatasetOpsTestBase : : CreateTensor < int64 > ( TensorShape { 2 , 2 } , { 1 , 2 , 3 , 4 } ) , <nl> TestParam TestCase1 ( ) { <nl> / * breakpoints * / { 0 , 1 , 3 } } ; <nl> } <nl> <nl> - / / Test case 2 : empty repetition . <nl> - TestParam TestCase2 ( ) { <nl> + TestCase EmptyRepeatTestCase ( ) { <nl> return { <nl> / * input_tensors * / <nl> { DatasetOpsTestBase : : CreateTensor < int64 > ( TensorShape { 2 , 2 } , { 1 , 2 , 3 , 4 } ) , <nl> TestParam TestCase2 ( ) { <nl> / * breakpoints * / { 0 , 1 , 3 } } ; <nl> } <nl> <nl> - / / Test case 3 : infinite repetition . <nl> - TestParam TestCase3 ( ) { <nl> + TestCase ForeverRepeatTestCase ( ) { <nl> return { / * input_tensors * / <nl> { DatasetOpsTestBase : : CreateTensor < int64 > ( TensorShape { 2 , 1 } , { 1 , 2 } ) } , <nl> / * count * / - 1 , <nl> class RepeatDatasetOpTestHelper : public RepeatDatasetOpTest { <nl> if ( dataset_ ) dataset_ - > Unref ( ) ; <nl> } <nl> <nl> + using DatasetOpsTestBase : : CreateDataset ; <nl> + <nl> protected : <nl> / / Creates ` TensorSliceDataset ` variant tensor from the input vector of <nl> / / tensors . <nl> class RepeatDatasetOpTestHelper : public RepeatDatasetOpTest { <nl> return Status : : OK ( ) ; <nl> } <nl> <nl> - Status CreateDatasetFromTestCase ( const TestParam & test_case ) { <nl> + Status CreateDataset ( const TestCase & test_case ) { <nl> Tensor tensor_slice_dataset_tensor ( DT_VARIANT , TensorShape ( { } ) ) ; <nl> std : : vector < Tensor > input_tensors = test_case . input_tensors ; <nl> TF_RETURN_IF_ERROR ( CreateTensorSliceDatasetTensor ( <nl> class RepeatDatasetOpTestHelper : public RepeatDatasetOpTest { <nl> Tensor count = CreateTensor < int64 > ( TensorShape { } , { test_case . count } ) ; <nl> inputs . emplace_back ( & tensor_slice_dataset_tensor ) ; <nl> inputs . emplace_back ( & count ) ; <nl> + std : : unique_ptr < OpKernel > dataset_kernel ; <nl> TF_RETURN_IF_ERROR ( CreateRepeatDatasetKernel ( <nl> test_case . expected_output_dtypes , test_case . expected_output_shapes , <nl> - & dataset_kernel_ ) ) ; <nl> - TF_RETURN_IF_ERROR ( CreateRepeatDatasetContext ( <nl> - dataset_kernel_ . get ( ) , & inputs , & dataset_kernel_ctx_ ) ) ; <nl> - TF_RETURN_IF_ERROR ( CreateDataset ( dataset_kernel_ . get ( ) , <nl> + & dataset_kernel ) ) ; <nl> + TF_RETURN_IF_ERROR ( CreateRepeatDatasetContext ( dataset_kernel . get ( ) , & inputs , <nl> + & dataset_kernel_ctx_ ) ) ; <nl> + TF_RETURN_IF_ERROR ( CreateDataset ( dataset_kernel . get ( ) , <nl> dataset_kernel_ctx_ . 
get ( ) , & dataset_ ) ) ; <nl> return Status : : OK ( ) ; <nl> } <nl> <nl> - Status CreateIteratorFromTestCase ( const TestParam & test_case ) { <nl> - TF_RETURN_IF_ERROR ( CreateDatasetFromTestCase ( test_case ) ) ; <nl> + Status CreateIterator ( const TestCase & test_case ) { <nl> + TF_RETURN_IF_ERROR ( CreateDataset ( test_case ) ) ; <nl> TF_RETURN_IF_ERROR ( <nl> CreateIteratorContext ( dataset_kernel_ctx_ . get ( ) , & iterator_ctx_ ) ) ; <nl> TF_RETURN_IF_ERROR ( <nl> class RepeatDatasetOpTestHelper : public RepeatDatasetOpTest { <nl> return Status : : OK ( ) ; <nl> } <nl> <nl> - std : : unique_ptr < OpKernel > dataset_kernel_ ; <nl> std : : unique_ptr < OpKernelContext > dataset_kernel_ctx_ ; <nl> DatasetBase * dataset_ = nullptr ; / / owned by this class . <nl> std : : unique_ptr < IteratorContext > iterator_ctx_ ; <nl> class RepeatDatasetOpTestHelper : public RepeatDatasetOpTest { <nl> <nl> class ParameterizedDatasetTest <nl> : public RepeatDatasetOpTestHelper , <nl> - public : : testing : : WithParamInterface < TestParam > { } ; <nl> + public : : testing : : WithParamInterface < TestCase > { } ; <nl> <nl> TEST_P ( ParameterizedDatasetTest , GetNext ) { <nl> int thread_num = 2 , cpu_num = 2 ; <nl> TF_ASSERT_OK ( InitThreadPool ( thread_num ) ) ; <nl> TF_ASSERT_OK ( InitFunctionLibraryRuntime ( { } , cpu_num ) ) ; <nl> - const TestParam & test_case = GetParam ( ) ; <nl> - TF_ASSERT_OK ( CreateIteratorFromTestCase ( test_case ) ) ; <nl> + const TestCase & test_case = GetParam ( ) ; <nl> + TF_ASSERT_OK ( CreateIterator ( test_case ) ) ; <nl> <nl> auto expected_outputs_it = test_case . expected_outputs . begin ( ) ; <nl> bool end_of_sequence = false ; <nl> std : : vector < Tensor > out_tensors ; <nl> <nl> if ( test_case . count < 0 ) { <nl> - int fake_infinite_repetition = 100 ; <nl> - while ( fake_infinite_repetition > 0 ) { <nl> + / / We test only a finite number of steps of the infinite sequence . <nl> + for ( int i = 0 ; i < 100 ; + + i ) { <nl> TF_EXPECT_OK ( iterator_ - > GetNext ( iterator_ctx_ . get ( ) , & out_tensors , <nl> & end_of_sequence ) ) ; <nl> for ( const auto & tensor : out_tensors ) { <nl> TEST_P ( ParameterizedDatasetTest , GetNext ) { <nl> expected_outputs_it = test_case . expected_outputs . begin ( ) ; <nl> } <nl> } <nl> - fake_infinite_repetition - - ; <nl> } <nl> EXPECT_FALSE ( end_of_sequence ) ; <nl> } else { <nl> TEST_F ( RepeatDatasetOpTestHelper , DatasetName ) { <nl> int thread_num = 2 , cpu_num = 2 ; <nl> TF_ASSERT_OK ( InitThreadPool ( thread_num ) ) ; <nl> TF_ASSERT_OK ( InitFunctionLibraryRuntime ( { } , cpu_num ) ) ; <nl> - TF_ASSERT_OK ( CreateDatasetFromTestCase ( TestCase1 ( ) ) ) ; <nl> + TF_ASSERT_OK ( CreateDataset ( FiniteRepeatTestCase ( ) ) ) ; <nl> <nl> EXPECT_EQ ( dataset_ - > type_string ( ) , kOpName ) ; <nl> } <nl> TEST_P ( ParameterizedDatasetTest , DatasetOutputDtypes ) { <nl> int thread_num = 2 , cpu_num = 2 ; <nl> TF_ASSERT_OK ( InitThreadPool ( thread_num ) ) ; <nl> TF_ASSERT_OK ( InitFunctionLibraryRuntime ( { } , cpu_num ) ) ; <nl> - const TestParam & test_case = GetParam ( ) ; <nl> - TF_ASSERT_OK ( CreateDatasetFromTestCase ( test_case ) ) ; <nl> + const TestCase & test_case = GetParam ( ) ; <nl> + TF_ASSERT_OK ( CreateDataset ( test_case ) ) ; <nl> TF_EXPECT_OK ( VerifyTypesMatch ( dataset_ - > output_dtypes ( ) , <nl> test_case . 
expected_output_dtypes ) ) ; <nl> } <nl> TEST_P ( ParameterizedDatasetTest , DatasetOutputShapes ) { <nl> int thread_num = 2 , cpu_num = 2 ; <nl> TF_ASSERT_OK ( InitThreadPool ( thread_num ) ) ; <nl> TF_ASSERT_OK ( InitFunctionLibraryRuntime ( { } , cpu_num ) ) ; <nl> - const TestParam & test_case = GetParam ( ) ; <nl> - TF_ASSERT_OK ( CreateDatasetFromTestCase ( test_case ) ) ; <nl> + const TestCase & test_case = GetParam ( ) ; <nl> + TF_ASSERT_OK ( CreateDataset ( test_case ) ) ; <nl> TF_EXPECT_OK ( VerifyShapesCompatible ( dataset_ - > output_shapes ( ) , <nl> test_case . expected_output_shapes ) ) ; <nl> } <nl> TEST_P ( ParameterizedDatasetTest , Cardinality ) { <nl> int thread_num = 2 , cpu_num = 2 ; <nl> TF_ASSERT_OK ( InitThreadPool ( thread_num ) ) ; <nl> TF_ASSERT_OK ( InitFunctionLibraryRuntime ( { } , cpu_num ) ) ; <nl> - const TestParam & test_case = GetParam ( ) ; <nl> - TF_ASSERT_OK ( CreateDatasetFromTestCase ( test_case ) ) ; <nl> + const TestCase & test_case = GetParam ( ) ; <nl> + TF_ASSERT_OK ( CreateDataset ( test_case ) ) ; <nl> <nl> EXPECT_EQ ( dataset_ - > Cardinality ( ) , GetParam ( ) . expected_cardinality ) ; <nl> } <nl> TEST_F ( RepeatDatasetOpTestHelper , DatasetSave ) { <nl> int thread_num = 2 , cpu_num = 2 ; <nl> TF_ASSERT_OK ( InitThreadPool ( thread_num ) ) ; <nl> TF_ASSERT_OK ( InitFunctionLibraryRuntime ( { } , cpu_num ) ) ; <nl> - TF_ASSERT_OK ( CreateDatasetFromTestCase ( TestCase1 ( ) ) ) ; <nl> + TF_ASSERT_OK ( CreateDataset ( FiniteRepeatTestCase ( ) ) ) ; <nl> <nl> std : : unique_ptr < SerializationContext > serialization_ctx ; <nl> TF_ASSERT_OK ( CreateSerializationContext ( & serialization_ctx ) ) ; <nl> TEST_P ( ParameterizedDatasetTest , IteratorOutputDtypes ) { <nl> int thread_num = 2 , cpu_num = 2 ; <nl> TF_ASSERT_OK ( InitThreadPool ( thread_num ) ) ; <nl> TF_ASSERT_OK ( InitFunctionLibraryRuntime ( { } , cpu_num ) ) ; <nl> - const TestParam & test_case = GetParam ( ) ; <nl> - TF_ASSERT_OK ( CreateIteratorFromTestCase ( test_case ) ) ; <nl> + const TestCase & test_case = GetParam ( ) ; <nl> + TF_ASSERT_OK ( CreateIterator ( test_case ) ) ; <nl> TF_EXPECT_OK ( VerifyTypesMatch ( iterator_ - > output_dtypes ( ) , <nl> test_case . expected_output_dtypes ) ) ; <nl> } <nl> TEST_P ( ParameterizedDatasetTest , IteratorOutputShapes ) { <nl> int thread_num = 2 , cpu_num = 2 ; <nl> TF_ASSERT_OK ( InitThreadPool ( thread_num ) ) ; <nl> TF_ASSERT_OK ( InitFunctionLibraryRuntime ( { } , cpu_num ) ) ; <nl> - const TestParam & test_case = GetParam ( ) ; <nl> - TF_ASSERT_OK ( CreateIteratorFromTestCase ( test_case ) ) ; <nl> + const TestCase & test_case = GetParam ( ) ; <nl> + TF_ASSERT_OK ( CreateIterator ( test_case ) ) ; <nl> TF_EXPECT_OK ( VerifyShapesCompatible ( iterator_ - > output_shapes ( ) , <nl> test_case . expected_output_shapes ) ) ; <nl> } <nl> TEST_P ( ParameterizedDatasetTest , IteratorOutputPrefix ) { <nl> int thread_num = 2 , cpu_num = 2 ; <nl> TF_ASSERT_OK ( InitThreadPool ( thread_num ) ) ; <nl> TF_ASSERT_OK ( InitFunctionLibraryRuntime ( { } , cpu_num ) ) ; <nl> - const TestParam & test_case = GetParam ( ) ; <nl> - TF_ASSERT_OK ( CreateIteratorFromTestCase ( test_case ) ) ; <nl> + const TestCase & test_case = GetParam ( ) ; <nl> + TF_ASSERT_OK ( CreateIterator ( test_case ) ) ; <nl> if ( test_case . count < 0 ) { <nl> EXPECT_EQ ( iterator_ - > prefix ( ) , " Iterator : : ForeverRepeat " ) ; <nl> } else if ( test_case . 
count = = 0 ) { <nl> TEST_P ( ParameterizedDatasetTest , Roundtrip ) { <nl> int thread_num = 2 , cpu_num = 2 ; <nl> TF_ASSERT_OK ( InitThreadPool ( thread_num ) ) ; <nl> TF_ASSERT_OK ( InitFunctionLibraryRuntime ( { } , cpu_num ) ) ; <nl> - const TestParam & test_case = GetParam ( ) ; <nl> + const TestCase & test_case = GetParam ( ) ; <nl> auto expected_outputs_it = test_case . expected_outputs . begin ( ) ; <nl> - TF_ASSERT_OK ( CreateIteratorFromTestCase ( test_case ) ) ; <nl> + TF_ASSERT_OK ( CreateIterator ( test_case ) ) ; <nl> <nl> std : : unique_ptr < SerializationContext > serialization_ctx ; <nl> TF_ASSERT_OK ( CreateSerializationContext ( & serialization_ctx ) ) ; <nl> TEST_P ( ParameterizedDatasetTest , Roundtrip ) { <nl> } <nl> <nl> INSTANTIATE_TEST_SUITE_P ( RepeatDatasetOpTest , ParameterizedDatasetTest , <nl> - : : testing : : ValuesIn ( std : : vector < TestParam > ( <nl> - { TestCase1 ( ) , TestCase2 ( ) , TestCase3 ( ) } ) ) ) ; <nl> + : : testing : : ValuesIn ( std : : vector < TestCase > ( <nl> + { FiniteRepeatTestCase ( ) , EmptyRepeatTestCase ( ) , <nl> + ForeverRepeatTestCase ( ) } ) ) ) ; <nl> <nl> } / / namespace <nl> } / / namespace data <nl> | Address the comments | tensorflow/tensorflow | 24ca9eaadf33d5f7b7a1ccaa5d1eb8e2396a02a5 | 2019-03-09T06:29:55Z |
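The TensorFlow change above is mostly a renaming (TestParam to TestCase, numbered cases to descriptive factory functions), but the underlying value-parameterized-test pattern is worth seeing in isolation. A minimal googletest sketch under the same conventions; RepeatCase, FiniteCase, and EmptyCase are invented stand-ins for the dataset-op fixtures, and the simpler TestWithParam base is used instead of the multiple-inheritance WithParamInterface form from the patch.

#include <vector>

#include "gtest/gtest.h"

struct RepeatCase {
  int count;                  // number of repetitions the op should produce
  std::vector<int> expected;  // expected flattened output
};

static RepeatCase FiniteCase() { return {2, {1, 2, 1, 2}}; }
static RepeatCase EmptyCase() { return {0, {}}; }

class ParameterizedRepeatTest : public ::testing::TestWithParam<RepeatCase> {};

TEST_P(ParameterizedRepeatTest, ProducesExpectedSequence) {
  const RepeatCase& tc = GetParam();
  std::vector<int> out;
  for (int i = 0; i < tc.count; ++i) {
    out.push_back(1);  // a trivial "dataset" of {1, 2}, repeated count times
    out.push_back(2);
  }
  EXPECT_EQ(out, tc.expected);
}

INSTANTIATE_TEST_SUITE_P(RepeatTests, ParameterizedRepeatTest,
                         ::testing::ValuesIn(std::vector<RepeatCase>(
                             {FiniteCase(), EmptyCase()})));

Each factory function doubles as a readable test name in failure output, which is the design motivation visible in the rename above.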
mmm a / googletest / include / gtest / internal / gtest - port . h <nl> ppp b / googletest / include / gtest / internal / gtest - port . h <nl> <nl> / / <nl> / / Regular expressions : <nl> / / RE - a simple regular expression class using the POSIX <nl> - / / Extended Regular Expression syntax on UNIX - like <nl> - / / platforms , or a reduced regular exception syntax on <nl> - / / other platforms , including Windows . <nl> + / / Extended Regular Expression syntax on UNIX - like platforms <nl> + / / or a reduced regular exception syntax on other <nl> + / / platforms , including Windows . <nl> / / <nl> / / Logging : <nl> / / GTEST_LOG_ ( ) - logs messages at the specified severity level . <nl> typedef struct _RTL_CRITICAL_SECTION GTEST_CRITICAL_SECTION ; <nl> # if GTEST_OS_LINUX_ANDROID & & defined ( _STLPORT_MAJOR ) <nl> / / STLport , provided with the Android NDK , has neither < tr1 / tuple > or < tuple > . <nl> # define GTEST_HAS_TR1_TUPLE 0 <nl> - # elif defined ( _MSC_VER ) & & ( _MSC_VER > = 1910 ) <nl> - / / Prevent ` warning C4996 : ' std : : tr1 ' : warning STL4002 : The non - Standard std : : tr1 namespace and TR1 - only machinery are deprecated and will be REMOVED . ` <nl> + # elif defined ( _LIBCPP_VERSION ) | | ( defined ( _MSC_VER ) & & ( _MSC_VER > = 1910 ) ) <nl> + / / libc + + doesn ' t support TR1 , and MCVS causes warning C4996 <nl> # define GTEST_HAS_TR1_TUPLE 0 <nl> # else <nl> / / The user didn ' t tell us not to do it , so we assume it ' s OK . <nl> typedef struct _RTL_CRITICAL_SECTION GTEST_CRITICAL_SECTION ; <nl> <nl> # endif / / GTEST_USE_OWN_TR1_TUPLE <nl> <nl> - / / To avoid conditional compilation everywhere , we make it <nl> - / / gtest - port . h ' s responsibility to # include the header implementing <nl> - / / tuple . <nl> + / / To avoid conditional compilation we make it gtest - port . h ' s responsibility <nl> + / / to # include the header implementing tuple . <nl> # if GTEST_HAS_STD_TUPLE_ <nl> # include < tuple > / / IWYU pragma : export <nl> # define GTEST_TUPLE_NAMESPACE_ : : std <nl> using : : std : : tuple_size ; <nl> / / If the compiler is not GCC 4 . 0 + , we assume the user is using a <nl> / / spec - conforming TR1 implementation . <nl> # include < tuple > / / IWYU pragma : export / / NOLINT <nl> + <nl> # endif / / GTEST_USE_OWN_TR1_TUPLE <nl> <nl> # endif / / GTEST_HAS_TR1_TUPLE <nl> class MutexBase { <nl> extern : : testing : : internal : : MutexBase mutex <nl> <nl> / / Defines and statically ( i . e . at link time ) initializes a static mutex . <nl> + / / The initialization list here does not explicitly initialize each field , <nl> + / / instead relying on default initialization for the unspecified fields . In <nl> + / / particular , the owner_ field ( a pthread_t ) is not explicitly initialized . <nl> + / / This allows initialization to work whether pthread_t is a scalar or struct . <nl> + / / The flag - Wmissing - field - initializers must not be specified for this to work . <nl> # define GTEST_DEFINE_STATIC_MUTEX_ ( mutex ) \ <nl> - : : testing : : internal : : MutexBase mutex = { PTHREAD_MUTEX_INITIALIZER , false , pthread_t ( ) } <nl> + : : testing : : internal : : MutexBase mutex = { PTHREAD_MUTEX_INITIALIZER , false } <nl> <nl> / / The Mutex class can only be used for mutexes created at runtime . It <nl> / / shares its API with MutexBase otherwise . <nl> | Merge pull request from gennadiycivil / master | google/googletest | 9dde7c4e8c46a65ad6215440bb396ad219e06ed4 | 2018-03-27T19:39:32Z |
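The new comment on GTEST_DEFINE_STATIC_MUTEX_ above leans on a core aggregate-initialization rule: members omitted from a braced initializer are value-initialized, so the pthread_t owner field needs no explicit initializer whether pthread_t is a scalar or a struct. A standalone illustration; this MutexBase is a simplified stand-in for the real gtest type, and, exactly as the comment warns, -Wmissing-field-initializers would flag the deliberately short initializer list.

#include <pthread.h>

struct MutexBase {
  pthread_mutex_t mutex;
  bool has_owner;
  pthread_t owner;  // intentionally omitted below: value-initialized either way
};

// Statically (at link time) initialized: no runtime constructor required.
static MutexBase g_mutex = {PTHREAD_MUTEX_INITIALIZER, false};

int main() {
  pthread_mutex_lock(&g_mutex.mutex);
  g_mutex.has_owner = true;   // owner bookkeeping would go here
  g_mutex.has_owner = false;
  pthread_mutex_unlock(&g_mutex.mutex);
  return 0;
}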
mmm a / src / app / commands / filters / filter_manager_impl . cpp <nl> ppp b / src / app / commands / filters / filter_manager_impl . cpp <nl> void FilterManagerImpl : : flush ( ) <nl> m_y + m_offset_y + m_row - 1 ) ) , <nl> gfx : : Size ( <nl> editor - > zoom ( ) . apply ( m_w ) , <nl> - editor - > zoom ( ) . apply ( 1 ) ) ) ; <nl> + ( editor - > zoom ( ) . scale ( ) > = 1 ? editor - > zoom ( ) . apply ( 1 ) : <nl> + editor - > zoom ( ) . remove ( 1 ) ) ) ) ; <nl> <nl> gfx : : Region reg1 ( rect ) ; <nl> gfx : : Region reg2 ; <nl> | Fix filter preview when zoom level < 100 % | aseprite/aseprite | c12cb26875253b41bf5cca7e1f0cdb336ecce431 | 2015-10-30T20:58:47Z |
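The Aseprite fix above addresses integer truncation: when the zoom scale is below 1, applying it to a 1-pixel-tall strip yields 0, so the filter-preview invalidation rectangle for each processed row collapsed to nothing. A self-contained sketch of the corrected height computation; this Zoom is a simplified stand-in (Aseprite's render::Zoom is not a plain double wrapper).

#include <iostream>

struct Zoom {
  double scale_;  // e.g. 0.5 for 50%, 2.0 for 200%
  double scale() const { return scale_; }
  int apply(int v) const { return static_cast<int>(v * scale_); }   // document -> screen
  int remove(int v) const { return static_cast<int>(v / scale_); }  // screen -> document
};

// Height of the screen strip to invalidate for one processed document row.
int dirtyRowHeight(const Zoom& z) {
  return z.scale() >= 1 ? z.apply(1) : z.remove(1);
}

int main() {
  std::cout << dirtyRowHeight(Zoom{2.0}) << "\n";  // 2: one row covers two screen pixels
  std::cout << dirtyRowHeight(Zoom{0.5}) << "\n";  // 2: grow the strip instead of
                                                   // collapsing it to apply(1) == 0
  return 0;
}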
mmm a / src / mongo / db / SConscript <nl> ppp b / src / mongo / db / SConscript <nl> env . Library ( <nl> ' log_process_details . cpp ' , <nl> ] , <nl> LIBDEPS = [ <nl> + ' repl / repl_coordinator_global ' , <nl> + ' repl / repl_coordinator_interface ' , <nl> + ' repl / replica_set_messages ' , <nl> + ' server_options ' , <nl> ' $ BUILD_DIR / mongo / base ' , <nl> ' $ BUILD_DIR / mongo / util / net / network ' , <nl> - ' server_options ' , <nl> ] , <nl> ) <nl> <nl> mmm a / src / mongo / db / log_process_details . cpp <nl> ppp b / src / mongo / db / log_process_details . cpp <nl> <nl> <nl> # include " mongo / db / log_process_details . h " <nl> <nl> + # include " mongo / db / repl / repl_set_config . h " <nl> + # include " mongo / db / repl / replication_coordinator . h " <nl> + # include " mongo / db / repl / replication_coordinator_global . h " <nl> # include " mongo / db / server_options . h " <nl> # include " mongo / db / server_options_helpers . h " <nl> # include " mongo / util / log . h " <nl> void logProcessDetails ( ) { <nl> auto & & vii = VersionInfoInterface : : instance ( ) ; <nl> log ( ) < < mongodVersion ( vii ) ; <nl> vii . logBuildInfo ( ) ; <nl> + <nl> printCommandLineOpts ( ) ; <nl> } <nl> <nl> void logProcessDetailsForLogRotate ( ) { <nl> < < ( is32bit ( ) ? " 32 " : " 64 " ) < < " - bit " <nl> < < " host = " < < getHostNameCached ( ) ; <nl> <nl> + auto replCoord = repl : : getGlobalReplicationCoordinator ( ) ; <nl> + if ( replCoord ! = nullptr & & <nl> + replCoord - > getReplicationMode ( ) = = repl : : ReplicationCoordinator : : modeReplSet ) { <nl> + auto rsConfig = replCoord - > getConfig ( ) ; <nl> + <nl> + if ( rsConfig . isInitialized ( ) ) { <nl> + log ( ) < < " Replica Set Config : " < < rsConfig . toBSON ( ) ; <nl> + log ( ) < < " Replica Set Member State : " < < ( replCoord - > getMemberState ( ) ) . toString ( ) ; <nl> + } else { <nl> + log ( ) < < " Node currently has no Replica Set Config . " ; <nl> + } <nl> + } <nl> + <nl> logProcessDetails ( ) ; <nl> } <nl> <nl> | SERVER - 30715 print replSetConfig in rotateLogs | mongodb/mongo | 033086469e2bb85795576ea9748d4dda2915fb5b | 2017-11-03T17:02:35Z |
mmm a / 3rdParty / velocypack / include / velocypack / Builder . h <nl> ppp b / 3rdParty / velocypack / include / velocypack / Builder . h <nl> class Builder { <nl> } <nl> <nl> / / Return a Slice of the result : <nl> - inline Slice slice ( ) const noexcept { <nl> + inline Slice slice ( ) const { <nl> if ( isEmpty ( ) ) { <nl> return Slice ( ) ; <nl> } <nl> mmm a / 3rdParty / velocypack / include / velocypack / Slice . h <nl> ppp b / 3rdParty / velocypack / include / velocypack / Slice . h <nl> namespace velocypack { <nl> <nl> class SliceScope ; <nl> <nl> - class SliceStaticData { <nl> - friend class Slice ; <nl> + struct SliceStaticData { <nl> static uint8_t const FixedTypeLengths [ 256 ] ; <nl> static ValueType const TypeMap [ 256 ] ; <nl> static unsigned int const WidthMap [ 32 ] ; <nl> static unsigned int const FirstSubMap [ 32 ] ; <nl> + static uint64_t const PrecalculatedHashesForDefaultSeed [ 256 ] ; <nl> } ; <nl> <nl> class Slice { <nl> class Slice { <nl> uint8_t const * _start ; <nl> <nl> public : <nl> + static constexpr uint64_t defaultSeed = 0xdeadbeef ; <nl> <nl> / / constructor for an empty Value of type None <nl> constexpr Slice ( ) noexcept : Slice ( " \ x00 " ) { } <nl> class Slice { <nl> inline uint8_t head ( ) const noexcept { return * _start ; } <nl> <nl> / / hashes the binary representation of a value <nl> - inline uint64_t hash ( uint64_t seed = 0xdeadbeef ) const { <nl> - return VELOCYPACK_HASH ( start ( ) , checkOverflow ( byteSize ( ) ) , seed ) ; <nl> + inline uint64_t hash ( uint64_t seed = defaultSeed ) const { <nl> + size_t const size = checkOverflow ( byteSize ( ) ) ; <nl> + if ( seed = = defaultSeed & & size = = 1 ) { <nl> + uint64_t h = SliceStaticData : : PrecalculatedHashesForDefaultSeed [ head ( ) ] ; <nl> + VELOCYPACK_ASSERT ( h ! = 0 ) ; <nl> + return h ; <nl> + } <nl> + return VELOCYPACK_HASH ( start ( ) , size , seed ) ; <nl> } <nl> <nl> / / hashes the value , normalizing different representations of <nl> / / arrays , objects and numbers . this function may produce different <nl> / / hash values than the binary hash ( ) function <nl> - uint64_t normalizedHash ( uint64_t seed = 0xdeadbeef ) const ; <nl> + uint64_t normalizedHash ( uint64_t seed = defaultSeed ) const ; <nl> <nl> / / hashes the binary representation of a String slice . No check <nl> / / is done if the Slice value is actually of type String <nl> - inline uint64_t hashString ( uint64_t seed = 0xdeadbeef ) const noexcept { <nl> + inline uint64_t hashString ( uint64_t seed = defaultSeed ) const noexcept { <nl> return VELOCYPACK_HASH ( start ( ) , static_cast < size_t > ( stringSliceLength ( ) ) , seed ) ; <nl> } <nl> <nl> mmm a / 3rdParty / velocypack / include / velocypack / velocypack - common . h <nl> ppp b / 3rdParty / velocypack / include / velocypack / velocypack - common . 
h <nl> bool assemblerFunctionsDisabled ( ) ; <nl> std : : size_t checkOverflow ( ValueLength ) ; <nl> # else <nl> / / on a 64 bit platform , the following function is probably a no - op <nl> - static inline constexpr std : : size_t checkOverflow ( ValueLength length ) { <nl> + static inline constexpr std : : size_t checkOverflow ( ValueLength length ) noexcept { <nl> return static_cast < std : : size_t > ( length ) ; <nl> } <nl> # endif <nl> static inline ValueLength getVariableValueLength ( ValueLength value ) noexcept { <nl> <nl> / / read a variable length integer in unsigned LEB128 format <nl> template < bool reverse > <nl> - static inline ValueLength readVariableValueLength ( uint8_t const * source ) { <nl> + static inline ValueLength readVariableValueLength ( uint8_t const * source ) noexcept { <nl> ValueLength len = 0 ; <nl> uint8_t v ; <nl> ValueLength p = 0 ; <nl> static inline ValueLength readVariableValueLength ( uint8_t const * source ) { <nl> <nl> / / store a variable length integer in unsigned LEB128 format <nl> template < bool reverse > <nl> - static inline void storeVariableValueLength ( uint8_t * dst , ValueLength value ) { <nl> + static inline void storeVariableValueLength ( uint8_t * dst , ValueLength value ) noexcept { <nl> VELOCYPACK_ASSERT ( value > 0 ) ; <nl> <nl> if ( reverse ) { <nl> mmm a / 3rdParty / velocypack / include / velocypack / velocypack - version - number . h <nl> ppp b / 3rdParty / velocypack / include / velocypack / velocypack - version - number . h <nl> <nl> # ifndef VELOCYPACK_VERSION_NUMBER_H <nl> # define VELOCYPACK_VERSION_NUMBER_H 1 <nl> <nl> - # define VELOCYPACK_VERSION " 0 . 1 . 30 " <nl> + # define VELOCYPACK_VERSION " 0 . 1 . 31 " <nl> <nl> # define VELOCYPACK_VERSION_MAJOR 0 <nl> # define VELOCYPACK_VERSION_MINOR 1 <nl> - # define VELOCYPACK_VERSION_PATCH 30 <nl> + # define VELOCYPACK_VERSION_PATCH 31 <nl> <nl> # endif <nl> mmm a / 3rdParty / velocypack / src / Slice . cpp <nl> ppp b / 3rdParty / velocypack / src / Slice . cpp <nl> unsigned int const SliceStaticData : : FirstSubMap [ 32 ] = { <nl> 0 , / / 0x14 , compact object , no index table - note : the offset is dynamic ! 
<nl> 0 } ; <nl> <nl> + uint64_t const SliceStaticData : : PrecalculatedHashesForDefaultSeed [ 256 ] = { <nl> + # ifdef VELOCYPACK_XXHASH <nl> + / * 0x00 * / 0xe2b56fa571b3a544 , / * 0x01 * / 0xda3cfd1dc58389d8 , <nl> + / * 0x02 * / 0x0000000000000000 , / * 0x03 * / 0x0000000000000000 , <nl> + / * 0x04 * / 0x0000000000000000 , / * 0x05 * / 0x0000000000000000 , <nl> + / * 0x06 * / 0x0000000000000000 , / * 0x07 * / 0x0000000000000000 , <nl> + / * 0x08 * / 0x0000000000000000 , / * 0x09 * / 0x0000000000000000 , <nl> + / * 0x0a * / 0xd296bf393ef8d5f5 , / * 0x0b * / 0x0000000000000000 , <nl> + / * 0x0c * / 0x0000000000000000 , / * 0x0d * / 0x0000000000000000 , <nl> + / * 0x0e * / 0x0000000000000000 , / * 0x0f * / 0x0000000000000000 , <nl> + / * 0x10 * / 0x0000000000000000 , / * 0x11 * / 0x0000000000000000 , <nl> + / * 0x12 * / 0x0000000000000000 , / * 0x13 * / 0x0000000000000000 , <nl> + / * 0x14 * / 0x0000000000000000 , / * 0x15 * / 0x0000000000000000 , <nl> + / * 0x16 * / 0x0000000000000000 , / * 0x17 * / 0x24db76da0ebbd8bb , <nl> + / * 0x18 * / 0x1a6a668fb2aa030a , / * 0x19 * / 0xed545328fb397fed , <nl> + / * 0x1a * / 0xfd783491fceeb46b , / * 0x1b * / 0x0000000000000000 , <nl> + / * 0x1c * / 0x0000000000000000 , / * 0x1d * / 0x0000000000000000 , <nl> + / * 0x1e * / 0x505ec293ddfeec5e , / * 0x1f * / 0x9fab26ba108e2fbf , <nl> + / * 0x20 * / 0x0000000000000000 , / * 0x21 * / 0x0000000000000000 , <nl> + / * 0x22 * / 0x0000000000000000 , / * 0x23 * / 0x0000000000000000 , <nl> + / * 0x24 * / 0x0000000000000000 , / * 0x25 * / 0x0000000000000000 , <nl> + / * 0x26 * / 0x0000000000000000 , / * 0x27 * / 0x0000000000000000 , <nl> + / * 0x28 * / 0x0000000000000000 , / * 0x29 * / 0x0000000000000000 , <nl> + / * 0x2a * / 0x0000000000000000 , / * 0x2b * / 0x0000000000000000 , <nl> + / * 0x2c * / 0x0000000000000000 , / * 0x2d * / 0x0000000000000000 , <nl> + / * 0x2e * / 0x0000000000000000 , / * 0x2f * / 0x0000000000000000 , <nl> + / * 0x30 * / 0xcdf3868203041650 , / * 0x31 * / 0x7c9133bfa6f828a6 , <nl> + / * 0x32 * / 0x62e05b34c4ed7ce4 , / * 0x33 * / 0xb79b6530d263533d , <nl> + / * 0x34 * / 0xf05f611558114f31 , / * 0x35 * / 0xc941f9afc86cdcd5 , <nl> + / * 0x36 * / 0x79873ccd694a9f90 , / * 0x37 * / 0xd360268d66bf8c1f , <nl> + / * 0x38 * / 0x19e870f3e36185fe , / * 0x39 * / 0xd154aeb6ba9114e5 , <nl> + / * 0x3a * / 0xa4d6434557b5b885 , / * 0x3b * / 0x91584221ca9eda5b , <nl> + / * 0x3c * / 0xb39b55e7252de481 , / * 0x3d * / 0xe6bf494f0a40618e , <nl> + / * 0x3e * / 0xd2ce603b7dceb6ea , / * 0x3f * / 0xec71f69fe56368f1 , <nl> + / * 0x40 * / 0x142191d3f9a23bce , / * 0x41 * / 0x0000000000000000 , <nl> + / * 0x42 * / 0x0000000000000000 , / * 0x43 * / 0x0000000000000000 , <nl> + / * 0x44 * / 0x0000000000000000 , / * 0x45 * / 0x0000000000000000 , <nl> + / * 0x46 * / 0x0000000000000000 , / * 0x47 * / 0x0000000000000000 , <nl> + / * 0x48 * / 0x0000000000000000 , / * 0x49 * / 0x0000000000000000 , <nl> + / * 0x4a * / 0x0000000000000000 , / * 0x4b * / 0x0000000000000000 , <nl> + / * 0x4c * / 0x0000000000000000 , / * 0x4d * / 0x0000000000000000 , <nl> + / * 0x4e * / 0x0000000000000000 , / * 0x4f * / 0x0000000000000000 , <nl> + / * 0x50 * / 0x0000000000000000 , / * 0x51 * / 0x0000000000000000 , <nl> + / * 0x52 * / 0x0000000000000000 , / * 0x53 * / 0x0000000000000000 , <nl> + / * 0x54 * / 0x0000000000000000 , / * 0x55 * / 0x0000000000000000 , <nl> + / * 0x56 * / 0x0000000000000000 , / * 0x57 * / 0x0000000000000000 , <nl> + / * 0x58 * / 0x0000000000000000 , / * 0x59 * / 0x0000000000000000 , <nl> + / * 0x5a * / 0x0000000000000000 , / 
* 0x5b * / 0x0000000000000000 , <nl> + / * 0x5c * / 0x0000000000000000 , / * 0x5d * / 0x0000000000000000 , <nl> + / * 0x5e * / 0x0000000000000000 , / * 0x5f * / 0x0000000000000000 , <nl> + / * 0x60 * / 0x0000000000000000 , / * 0x61 * / 0x0000000000000000 , <nl> + / * 0x62 * / 0x0000000000000000 , / * 0x63 * / 0x0000000000000000 , <nl> + / * 0x64 * / 0x0000000000000000 , / * 0x65 * / 0x0000000000000000 , <nl> + / * 0x66 * / 0x0000000000000000 , / * 0x67 * / 0x0000000000000000 , <nl> + / * 0x68 * / 0x0000000000000000 , / * 0x69 * / 0x0000000000000000 , <nl> + / * 0x6a * / 0x0000000000000000 , / * 0x6b * / 0x0000000000000000 , <nl> + / * 0x6c * / 0x0000000000000000 , / * 0x6d * / 0x0000000000000000 , <nl> + / * 0x6e * / 0x0000000000000000 , / * 0x6f * / 0x0000000000000000 , <nl> + / * 0x70 * / 0x0000000000000000 , / * 0x71 * / 0x0000000000000000 , <nl> + / * 0x72 * / 0x0000000000000000 , / * 0x73 * / 0x0000000000000000 , <nl> + / * 0x74 * / 0x0000000000000000 , / * 0x75 * / 0x0000000000000000 , <nl> + / * 0x76 * / 0x0000000000000000 , / * 0x77 * / 0x0000000000000000 , <nl> + / * 0x78 * / 0x0000000000000000 , / * 0x79 * / 0x0000000000000000 , <nl> + / * 0x7a * / 0x0000000000000000 , / * 0x7b * / 0x0000000000000000 , <nl> + / * 0x7c * / 0x0000000000000000 , / * 0x7d * / 0x0000000000000000 , <nl> + / * 0x7e * / 0x0000000000000000 , / * 0x7f * / 0x0000000000000000 , <nl> + / * 0x80 * / 0x0000000000000000 , / * 0x81 * / 0x0000000000000000 , <nl> + / * 0x82 * / 0x0000000000000000 , / * 0x83 * / 0x0000000000000000 , <nl> + / * 0x84 * / 0x0000000000000000 , / * 0x85 * / 0x0000000000000000 , <nl> + / * 0x86 * / 0x0000000000000000 , / * 0x87 * / 0x0000000000000000 , <nl> + / * 0x88 * / 0x0000000000000000 , / * 0x89 * / 0x0000000000000000 , <nl> + / * 0x8a * / 0x0000000000000000 , / * 0x8b * / 0x0000000000000000 , <nl> + / * 0x8c * / 0x0000000000000000 , / * 0x8d * / 0x0000000000000000 , <nl> + / * 0x8e * / 0x0000000000000000 , / * 0x8f * / 0x0000000000000000 , <nl> + / * 0x90 * / 0x0000000000000000 , / * 0x91 * / 0x0000000000000000 , <nl> + / * 0x92 * / 0x0000000000000000 , / * 0x93 * / 0x0000000000000000 , <nl> + / * 0x94 * / 0x0000000000000000 , / * 0x95 * / 0x0000000000000000 , <nl> + / * 0x96 * / 0x0000000000000000 , / * 0x97 * / 0x0000000000000000 , <nl> + / * 0x98 * / 0x0000000000000000 , / * 0x99 * / 0x0000000000000000 , <nl> + / * 0x9a * / 0x0000000000000000 , / * 0x9b * / 0x0000000000000000 , <nl> + / * 0x9c * / 0x0000000000000000 , / * 0x9d * / 0x0000000000000000 , <nl> + / * 0x9e * / 0x0000000000000000 , / * 0x9f * / 0x0000000000000000 , <nl> + / * 0xa0 * / 0x0000000000000000 , / * 0xa1 * / 0x0000000000000000 , <nl> + / * 0xa2 * / 0x0000000000000000 , / * 0xa3 * / 0x0000000000000000 , <nl> + / * 0xa4 * / 0x0000000000000000 , / * 0xa5 * / 0x0000000000000000 , <nl> + / * 0xa6 * / 0x0000000000000000 , / * 0xa7 * / 0x0000000000000000 , <nl> + / * 0xa8 * / 0x0000000000000000 , / * 0xa9 * / 0x0000000000000000 , <nl> + / * 0xaa * / 0x0000000000000000 , / * 0xab * / 0x0000000000000000 , <nl> + / * 0xac * / 0x0000000000000000 , / * 0xad * / 0x0000000000000000 , <nl> + / * 0xae * / 0x0000000000000000 , / * 0xaf * / 0x0000000000000000 , <nl> + / * 0xb0 * / 0x0000000000000000 , / * 0xb1 * / 0x0000000000000000 , <nl> + / * 0xb2 * / 0x0000000000000000 , / * 0xb3 * / 0x0000000000000000 , <nl> + / * 0xb4 * / 0x0000000000000000 , / * 0xb5 * / 0x0000000000000000 , <nl> + / * 0xb6 * / 0x0000000000000000 , / * 0xb7 * / 0x0000000000000000 , <nl> + / * 0xb8 * / 0x0000000000000000 , / * 0xb9 * / 0x0000000000000000 
, <nl> + / * 0xba * / 0x0000000000000000 , / * 0xbb * / 0x0000000000000000 , <nl> + / * 0xbc * / 0x0000000000000000 , / * 0xbd * / 0x0000000000000000 , <nl> + / * 0xbe * / 0x0000000000000000 , / * 0xbf * / 0x0000000000000000 , <nl> + / * 0xc0 * / 0x0000000000000000 , / * 0xc1 * / 0x0000000000000000 , <nl> + / * 0xc2 * / 0x0000000000000000 , / * 0xc3 * / 0x0000000000000000 , <nl> + / * 0xc4 * / 0x0000000000000000 , / * 0xc5 * / 0x0000000000000000 , <nl> + / * 0xc6 * / 0x0000000000000000 , / * 0xc7 * / 0x0000000000000000 , <nl> + / * 0xc8 * / 0x0000000000000000 , / * 0xc9 * / 0x0000000000000000 , <nl> + / * 0xca * / 0x0000000000000000 , / * 0xcb * / 0x0000000000000000 , <nl> + / * 0xcc * / 0x0000000000000000 , / * 0xcd * / 0x0000000000000000 , <nl> + / * 0xce * / 0x0000000000000000 , / * 0xcf * / 0x0000000000000000 , <nl> + / * 0xd0 * / 0x0000000000000000 , / * 0xd1 * / 0x0000000000000000 , <nl> + / * 0xd2 * / 0x0000000000000000 , / * 0xd3 * / 0x0000000000000000 , <nl> + / * 0xd4 * / 0x0000000000000000 , / * 0xd5 * / 0x0000000000000000 , <nl> + / * 0xd6 * / 0x0000000000000000 , / * 0xd7 * / 0x0000000000000000 , <nl> + / * 0xd8 * / 0x0000000000000000 , / * 0xd9 * / 0x0000000000000000 , <nl> + / * 0xda * / 0x0000000000000000 , / * 0xdb * / 0x0000000000000000 , <nl> + / * 0xdc * / 0x0000000000000000 , / * 0xdd * / 0x0000000000000000 , <nl> + / * 0xde * / 0x0000000000000000 , / * 0xdf * / 0x0000000000000000 , <nl> + / * 0xe0 * / 0x0000000000000000 , / * 0xe1 * / 0x0000000000000000 , <nl> + / * 0xe2 * / 0x0000000000000000 , / * 0xe3 * / 0x0000000000000000 , <nl> + / * 0xe4 * / 0x0000000000000000 , / * 0xe5 * / 0x0000000000000000 , <nl> + / * 0xe6 * / 0x0000000000000000 , / * 0xe7 * / 0x0000000000000000 , <nl> + / * 0xe8 * / 0x0000000000000000 , / * 0xe9 * / 0x0000000000000000 , <nl> + / * 0xea * / 0x0000000000000000 , / * 0xeb * / 0x0000000000000000 , <nl> + / * 0xec * / 0x0000000000000000 , / * 0xed * / 0x0000000000000000 , <nl> + / * 0xee * / 0x0000000000000000 , / * 0xef * / 0x0000000000000000 , <nl> + / * 0xf0 * / 0x0000000000000000 , / * 0xf1 * / 0x0000000000000000 , <nl> + / * 0xf2 * / 0x0000000000000000 , / * 0xf3 * / 0x0000000000000000 , <nl> + / * 0xf4 * / 0x0000000000000000 , / * 0xf5 * / 0x0000000000000000 , <nl> + / * 0xf6 * / 0x0000000000000000 , / * 0xf7 * / 0x0000000000000000 , <nl> + / * 0xf8 * / 0x0000000000000000 , / * 0xf9 * / 0x0000000000000000 , <nl> + / * 0xfa * / 0x0000000000000000 , / * 0xfb * / 0x0000000000000000 , <nl> + / * 0xfc * / 0x0000000000000000 , / * 0xfd * / 0x0000000000000000 , <nl> + / * 0xfe * / 0x0000000000000000 , / * 0xff * / 0x0000000000000000 <nl> + # endif <nl> + # ifdef VELOCYPACK_FASTHASH <nl> + / * 0x00 * / 0xf747d9afd5fc13cd , / * 0x01 * / 0x9dd59a0795d72dae , <nl> + / * 0x02 * / 0x0000000000000000 , / * 0x03 * / 0x0000000000000000 , <nl> + / * 0x04 * / 0x0000000000000000 , / * 0x05 * / 0x0000000000000000 , <nl> + / * 0x06 * / 0x0000000000000000 , / * 0x07 * / 0x0000000000000000 , <nl> + / * 0x08 * / 0x0000000000000000 , / * 0x09 * / 0x0000000000000000 , <nl> + / * 0x0a * / 0x651f231e0822a1f2 , / * 0x0b * / 0x0000000000000000 , <nl> + / * 0x0c * / 0x0000000000000000 , / * 0x0d * / 0x0000000000000000 , <nl> + / * 0x0e * / 0x0000000000000000 , / * 0x0f * / 0x0000000000000000 , <nl> + / * 0x10 * / 0x0000000000000000 , / * 0x11 * / 0x0000000000000000 , <nl> + / * 0x12 * / 0x0000000000000000 , / * 0x13 * / 0x0000000000000000 , <nl> + / * 0x14 * / 0x0000000000000000 , / * 0x15 * / 0x0000000000000000 , <nl> + / * 0x16 * / 0x0000000000000000 , / * 0x17 * / 
0x423211fec79af09b , <nl> + / * 0x18 * / 0xd43a065f33b14e52 , / * 0x19 * / 0xf1adc756c139e443 , <nl> + / * 0x1a * / 0x6bf229fb02c686b , / * 0x1b * / 0x0000000000000000 , <nl> + / * 0x1c * / 0x0000000000000000 , / * 0x1d * / 0x0000000000000000 , <nl> + / * 0x1e * / 0xc36f498e29ef2aba , / * 0x1f * / 0x1ad28762083cdc7d , <nl> + / * 0x20 * / 0x0000000000000000 , / * 0x21 * / 0x0000000000000000 , <nl> + / * 0x22 * / 0x0000000000000000 , / * 0x23 * / 0x0000000000000000 , <nl> + / * 0x24 * / 0x0000000000000000 , / * 0x25 * / 0x0000000000000000 , <nl> + / * 0x26 * / 0x0000000000000000 , / * 0x27 * / 0x0000000000000000 , <nl> + / * 0x28 * / 0x0000000000000000 , / * 0x29 * / 0x0000000000000000 , <nl> + / * 0x2a * / 0x0000000000000000 , / * 0x2b * / 0x0000000000000000 , <nl> + / * 0x2c * / 0x0000000000000000 , / * 0x2d * / 0x0000000000000000 , <nl> + / * 0x2e * / 0x0000000000000000 , / * 0x2f * / 0x0000000000000000 , <nl> + / * 0x30 * / 0xbb1c99a88abf76d2 , / * 0x31 * / 0x8d3e0efdb932c4b8 , <nl> + / * 0x32 * / 0xcd41672e3cd8f76e , / * 0x33 * / 0x3f831eadbd6628f8 , <nl> + / * 0x34 * / 0x9e4ea5d4abe810ae , / * 0x35 * / 0x836489847293c2d6 , <nl> + / * 0x36 * / 0x270883ef450bf1c8 , / * 0x37 * / 0x4abf5dea3bb7fb98 , <nl> + / * 0x38 * / 0x5a6892806deadcb2 , / * 0x39 * / 0xdeaa9c7264ffdad0 , <nl> + / * 0x3a * / 0xabd49df61b8b4756 , / * 0x3b * / 0xc068ab004a6dc8de , <nl> + / * 0x3c * / 0xc2a9f41025e2711b , / * 0x3d * / 0x47cb9c887443ad40 , <nl> + / * 0x3e * / 0xa57497643e705316 , / * 0x3f * / 0xea4688cdf868a142 , <nl> + / * 0x40 * / 0x49e51044202c2999 , / * 0x41 * / 0x0000000000000000 , <nl> + / * 0x42 * / 0x0000000000000000 , / * 0x43 * / 0x0000000000000000 , <nl> + / * 0x44 * / 0x0000000000000000 , / * 0x45 * / 0x0000000000000000 , <nl> + / * 0x46 * / 0x0000000000000000 , / * 0x47 * / 0x0000000000000000 , <nl> + / * 0x48 * / 0x0000000000000000 , / * 0x49 * / 0x0000000000000000 , <nl> + / * 0x4a * / 0x0000000000000000 , / * 0x4b * / 0x0000000000000000 , <nl> + / * 0x4c * / 0x0000000000000000 , / * 0x4d * / 0x0000000000000000 , <nl> + / * 0x4e * / 0x0000000000000000 , / * 0x4f * / 0x0000000000000000 , <nl> + / * 0x50 * / 0x0000000000000000 , / * 0x51 * / 0x0000000000000000 , <nl> + / * 0x52 * / 0x0000000000000000 , / * 0x53 * / 0x0000000000000000 , <nl> + / * 0x54 * / 0x0000000000000000 , / * 0x55 * / 0x0000000000000000 , <nl> + / * 0x56 * / 0x0000000000000000 , / * 0x57 * / 0x0000000000000000 , <nl> + / * 0x58 * / 0x0000000000000000 , / * 0x59 * / 0x0000000000000000 , <nl> + / * 0x5a * / 0x0000000000000000 , / * 0x5b * / 0x0000000000000000 , <nl> + / * 0x5c * / 0x0000000000000000 , / * 0x5d * / 0x0000000000000000 , <nl> + / * 0x5e * / 0x0000000000000000 , / * 0x5f * / 0x0000000000000000 , <nl> + / * 0x60 * / 0x0000000000000000 , / * 0x61 * / 0x0000000000000000 , <nl> + / * 0x62 * / 0x0000000000000000 , / * 0x63 * / 0x0000000000000000 , <nl> + / * 0x64 * / 0x0000000000000000 , / * 0x65 * / 0x0000000000000000 , <nl> + / * 0x66 * / 0x0000000000000000 , / * 0x67 * / 0x0000000000000000 , <nl> + / * 0x68 * / 0x0000000000000000 , / * 0x69 * / 0x0000000000000000 , <nl> + / * 0x6a * / 0x0000000000000000 , / * 0x6b * / 0x0000000000000000 , <nl> + / * 0x6c * / 0x0000000000000000 , / * 0x6d * / 0x0000000000000000 , <nl> + / * 0x6e * / 0x0000000000000000 , / * 0x6f * / 0x0000000000000000 , <nl> + / * 0x70 * / 0x0000000000000000 , / * 0x71 * / 0x0000000000000000 , <nl> + / * 0x72 * / 0x0000000000000000 , / * 0x73 * / 0x0000000000000000 , <nl> + / * 0x74 * / 0x0000000000000000 , / * 0x75 * / 0x0000000000000000 , <nl> + / 
* 0x76 * / 0x0000000000000000 , / * 0x77 * / 0x0000000000000000 , <nl> + / * 0x78 * / 0x0000000000000000 , / * 0x79 * / 0x0000000000000000 , <nl> + / * 0x7a * / 0x0000000000000000 , / * 0x7b * / 0x0000000000000000 , <nl> + / * 0x7c * / 0x0000000000000000 , / * 0x7d * / 0x0000000000000000 , <nl> + / * 0x7e * / 0x0000000000000000 , / * 0x7f * / 0x0000000000000000 , <nl> + / * 0x80 * / 0x0000000000000000 , / * 0x81 * / 0x0000000000000000 , <nl> + / * 0x82 * / 0x0000000000000000 , / * 0x83 * / 0x0000000000000000 , <nl> + / * 0x84 * / 0x0000000000000000 , / * 0x85 * / 0x0000000000000000 , <nl> + / * 0x86 * / 0x0000000000000000 , / * 0x87 * / 0x0000000000000000 , <nl> + / * 0x88 * / 0x0000000000000000 , / * 0x89 * / 0x0000000000000000 , <nl> + / * 0x8a * / 0x0000000000000000 , / * 0x8b * / 0x0000000000000000 , <nl> + / * 0x8c * / 0x0000000000000000 , / * 0x8d * / 0x0000000000000000 , <nl> + / * 0x8e * / 0x0000000000000000 , / * 0x8f * / 0x0000000000000000 , <nl> + / * 0x90 * / 0x0000000000000000 , / * 0x91 * / 0x0000000000000000 , <nl> + / * 0x92 * / 0x0000000000000000 , / * 0x93 * / 0x0000000000000000 , <nl> + / * 0x94 * / 0x0000000000000000 , / * 0x95 * / 0x0000000000000000 , <nl> + / * 0x96 * / 0x0000000000000000 , / * 0x97 * / 0x0000000000000000 , <nl> + / * 0x98 * / 0x0000000000000000 , / * 0x99 * / 0x0000000000000000 , <nl> + / * 0x9a * / 0x0000000000000000 , / * 0x9b * / 0x0000000000000000 , <nl> + / * 0x9c * / 0x0000000000000000 , / * 0x9d * / 0x0000000000000000 , <nl> + / * 0x9e * / 0x0000000000000000 , / * 0x9f * / 0x0000000000000000 , <nl> + / * 0xa0 * / 0x0000000000000000 , / * 0xa1 * / 0x0000000000000000 , <nl> + / * 0xa2 * / 0x0000000000000000 , / * 0xa3 * / 0x0000000000000000 , <nl> + / * 0xa4 * / 0x0000000000000000 , / * 0xa5 * / 0x0000000000000000 , <nl> + / * 0xa6 * / 0x0000000000000000 , / * 0xa7 * / 0x0000000000000000 , <nl> + / * 0xa8 * / 0x0000000000000000 , / * 0xa9 * / 0x0000000000000000 , <nl> + / * 0xaa * / 0x0000000000000000 , / * 0xab * / 0x0000000000000000 , <nl> + / * 0xac * / 0x0000000000000000 , / * 0xad * / 0x0000000000000000 , <nl> + / * 0xae * / 0x0000000000000000 , / * 0xaf * / 0x0000000000000000 , <nl> + / * 0xb0 * / 0x0000000000000000 , / * 0xb1 * / 0x0000000000000000 , <nl> + / * 0xb2 * / 0x0000000000000000 , / * 0xb3 * / 0x0000000000000000 , <nl> + / * 0xb4 * / 0x0000000000000000 , / * 0xb5 * / 0x0000000000000000 , <nl> + / * 0xb6 * / 0x0000000000000000 , / * 0xb7 * / 0x0000000000000000 , <nl> + / * 0xb8 * / 0x0000000000000000 , / * 0xb9 * / 0x0000000000000000 , <nl> + / * 0xba * / 0x0000000000000000 , / * 0xbb * / 0x0000000000000000 , <nl> + / * 0xbc * / 0x0000000000000000 , / * 0xbd * / 0x0000000000000000 , <nl> + / * 0xbe * / 0x0000000000000000 , / * 0xbf * / 0x0000000000000000 , <nl> + / * 0xc0 * / 0x0000000000000000 , / * 0xc1 * / 0x0000000000000000 , <nl> + / * 0xc2 * / 0x0000000000000000 , / * 0xc3 * / 0x0000000000000000 , <nl> + / * 0xc4 * / 0x0000000000000000 , / * 0xc5 * / 0x0000000000000000 , <nl> + / * 0xc6 * / 0x0000000000000000 , / * 0xc7 * / 0x0000000000000000 , <nl> + / * 0xc8 * / 0x0000000000000000 , / * 0xc9 * / 0x0000000000000000 , <nl> + / * 0xca * / 0x0000000000000000 , / * 0xcb * / 0x0000000000000000 , <nl> + / * 0xcc * / 0x0000000000000000 , / * 0xcd * / 0x0000000000000000 , <nl> + / * 0xce * / 0x0000000000000000 , / * 0xcf * / 0x0000000000000000 , <nl> + / * 0xd0 * / 0x0000000000000000 , / * 0xd1 * / 0x0000000000000000 , <nl> + / * 0xd2 * / 0x0000000000000000 , / * 0xd3 * / 0x0000000000000000 , <nl> + / * 0xd4 * / 0x0000000000000000 
, / * 0xd5 * / 0x0000000000000000 , <nl> + / * 0xd6 * / 0x0000000000000000 , / * 0xd7 * / 0x0000000000000000 , <nl> + / * 0xd8 * / 0x0000000000000000 , / * 0xd9 * / 0x0000000000000000 , <nl> + / * 0xda * / 0x0000000000000000 , / * 0xdb * / 0x0000000000000000 , <nl> + / * 0xdc * / 0x0000000000000000 , / * 0xdd * / 0x0000000000000000 , <nl> + / * 0xde * / 0x0000000000000000 , / * 0xdf * / 0x0000000000000000 , <nl> + / * 0xe0 * / 0x0000000000000000 , / * 0xe1 * / 0x0000000000000000 , <nl> + / * 0xe2 * / 0x0000000000000000 , / * 0xe3 * / 0x0000000000000000 , <nl> + / * 0xe4 * / 0x0000000000000000 , / * 0xe5 * / 0x0000000000000000 , <nl> + / * 0xe6 * / 0x0000000000000000 , / * 0xe7 * / 0x0000000000000000 , <nl> + / * 0xe8 * / 0x0000000000000000 , / * 0xe9 * / 0x0000000000000000 , <nl> + / * 0xea * / 0x0000000000000000 , / * 0xeb * / 0x0000000000000000 , <nl> + / * 0xec * / 0x0000000000000000 , / * 0xed * / 0x0000000000000000 , <nl> + / * 0xee * / 0x0000000000000000 , / * 0xef * / 0x0000000000000000 , <nl> + / * 0xf0 * / 0x0000000000000000 , / * 0xf1 * / 0x0000000000000000 , <nl> + / * 0xf2 * / 0x0000000000000000 , / * 0xf3 * / 0x0000000000000000 , <nl> + / * 0xf4 * / 0x0000000000000000 , / * 0xf5 * / 0x0000000000000000 , <nl> + / * 0xf6 * / 0x0000000000000000 , / * 0xf7 * / 0x0000000000000000 , <nl> + / * 0xf8 * / 0x0000000000000000 , / * 0xf9 * / 0x0000000000000000 , <nl> + / * 0xfa * / 0x0000000000000000 , / * 0xfb * / 0x0000000000000000 , <nl> + / * 0xfc * / 0x0000000000000000 , / * 0xfd * / 0x0000000000000000 , <nl> + / * 0xfe * / 0x0000000000000000 , / * 0xff * / 0x0000000000000000 <nl> + # endif <nl> + } ; <nl> + <nl> / / creates a Slice from Json and adds it to a scope <nl> Slice Slice : : fromJson ( SliceScope & scope , std : : string const & json , <nl> Options const * options ) { <nl> uint64_t Slice : : normalizedHash ( uint64_t seed ) const { <nl> } else if ( isArray ( ) ) { <nl> / / normalize arrays by hashing array length and iterating <nl> / / over all array members <nl> - uint64_t const n = length ( ) ^ 0xba5bedf00d ; <nl> + ArrayIterator it ( * this ) ; <nl> + uint64_t const n = it . size ( ) ^ 0xba5bedf00d ; <nl> value = VELOCYPACK_HASH ( & n , sizeof ( n ) , seed ) ; <nl> - for ( auto const & it : ArrayIterator ( * this ) ) { <nl> - value ^ = it . normalizedHash ( value ) ; <nl> + while ( it . valid ( ) ) { <nl> + value ^ = it . value ( ) . normalizedHash ( value ) ; <nl> + it . next ( ) ; <nl> } <nl> } else if ( isObject ( ) ) { <nl> / / normalize objects by hashing object length and iterating <nl> / / over all object members <nl> - uint64_t const n = length ( ) ^ 0xf00ba44ba5 ; <nl> + ObjectIterator it ( * this , true ) ; <nl> + uint64_t const n = it . size ( ) ^ 0xf00ba44ba5 ; <nl> uint64_t seed2 = VELOCYPACK_HASH ( & n , sizeof ( n ) , seed ) ; <nl> value = seed2 ; <nl> - for ( auto const & it : ObjectIterator ( * this , true ) ) { <nl> - uint64_t seed3 = it . key . makeKey ( ) . normalizedHash ( seed2 ) ; <nl> + while ( it . valid ( ) ) { <nl> + auto current = ( * it ) ; <nl> + uint64_t seed3 = current . key . normalizedHash ( seed2 ) ; <nl> value ^ = seed3 ; <nl> - value ^ = it . value . normalizedHash ( seed3 ) ; <nl> + value ^ = current . value . normalizedHash ( seed3 ) ; <nl> + it . next ( ) ; <nl> } <nl> } else { <nl> / / fall back to regular hash function <nl> | update velocypack library to latest version ( ) | arangodb/arangodb | af7299691d893f71f8a735fc82b48213bf3bed73 | 2018-06-18T09:58:57Z |
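The large PrecalculatedHashesForDefaultSeed table above is straightforward memoization: a one-byte VelocyPack value hashed with the default seed depends only on its head byte, so all 256 possible results can be tabulated and Slice::hash() reduced to one array load on that fast path. A small sketch of the same idea; fnv1a here is an ordinary stand-in hash (not VELOCYPACK_HASH, which the diff shows is xxHash or fasthash depending on build flags), and the table is filled at startup rather than baked in as a constant the way the library does.

#include <cstddef>
#include <cstdint>
#include <cstdio>

static uint64_t fnv1a(const uint8_t* data, std::size_t len, uint64_t seed) {
  uint64_t h = seed ^ 0xcbf29ce484222325ULL;
  for (std::size_t i = 0; i < len; ++i) {
    h ^= data[i];
    h *= 0x100000001b3ULL;
  }
  return h;
}

static constexpr uint64_t kDefaultSeed = 0xdeadbeef;  // mirrors Slice::defaultSeed
static uint64_t precalculated[256];

static void initTable() {
  for (int b = 0; b < 256; ++b) {
    const uint8_t byte = static_cast<uint8_t>(b);
    precalculated[b] = fnv1a(&byte, 1, kDefaultSeed);
  }
}

static uint64_t hashValue(const uint8_t* data, std::size_t len, uint64_t seed) {
  if (seed == kDefaultSeed && len == 1) {
    return precalculated[data[0]];  // fast path: one load instead of a hash run
  }
  return fnv1a(data, len, seed);  // general path, e.g. for custom seeds
}

int main() {
  initTable();
  const uint8_t head = 0x18;  // a type byte whose whole encoding is one byte
  std::printf("%llu\n",
              static_cast<unsigned long long>(hashValue(&head, 1, kDefaultSeed)));
  return 0;
}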
mmm a / AUTHORS <nl> ppp b / AUTHORS <nl> Developers : <nl> <nl> flamingo ( flaming0 ) <nl> Null pointer check in order to prevent crashes . <nl> + Updating spine - runtime to EsotericSoftware / spine - runtimes @ 5f90386 . <nl> <nl> rtissera ( Romain TISSERAND ) <nl> Adding missing JNIEXPORT / JNICALL declarations . <nl> Developers : <nl> Lee , Jae - Hong ( pyrasis ) <nl> Maintainer of tizen port . <nl> <nl> + lumendes <nl> + Updating spine - runtime to EsotericSoftware / spine - runtimes @ 5f90386 . <nl> + <nl> Retired Core Developers : <nl> WenSheng Yang <nl> Author of windows port , CCTextField , <nl> mmm a / extensions / Android . mk <nl> ppp b / extensions / Android . mk <nl> spine / Skin . cpp \ <nl> spine / Slot . cpp \ <nl> spine / SlotData . cpp \ <nl> spine / extension . cpp \ <nl> + spine / CCSkeletonAnimation . cpp \ <nl> + spine / CCSkeleton . cpp \ <nl> spine / spine - cocos2dx . cpp <nl> <nl> LOCAL_WHOLE_STATIC_LIBRARIES : = cocos2dx_static <nl> mmm a / extensions / proj . linux / Makefile <nl> ppp b / extensions / proj . linux / Makefile <nl> SOURCES = . . / CCBReader / CCBFileLoader . cpp \ <nl> . . / spine / Slot . cpp \ <nl> . . / spine / SlotData . cpp \ <nl> . . / spine / extension . cpp \ <nl> - . . / spine / spine - cocos2dx . cpp <nl> + . . / spine / spine - cocos2dx . cpp \ <nl> + . . / spine / CCSkeleton . cpp \ <nl> + . . / spine / CCSkeletonAnimation . cpp <nl> <nl> include $ ( COCOS_ROOT ) / cocos2dx / proj . linux / cocos2dx . mk <nl> <nl> mmm a / extensions / proj . nacl / Makefile <nl> ppp b / extensions / proj . nacl / Makefile <nl> EXTENSIONS_SOURCES = . . / CCBReader / CCBFileLoader . cpp \ <nl> . . / spine / Slot . cpp \ <nl> . . / spine / SlotData . cpp \ <nl> . . / spine / extension . cpp \ <nl> + . . / spine / CCSkeleton . cpp \ <nl> + . . / spine / CCSkeletonAnimation . cpp \ <nl> . . / spine / spine - cocos2dx . cpp <nl> <nl> all : <nl> mmm a / extensions / proj . win32 / libExtensions . vcxproj <nl> ppp b / extensions / proj . win32 / libExtensions . vcxproj <nl> <nl> < ClCompile Include = " . . \ spine \ AttachmentLoader . cpp " / > <nl> < ClCompile Include = " . . \ spine \ Bone . cpp " / > <nl> < ClCompile Include = " . . \ spine \ BoneData . cpp " / > <nl> + < ClCompile Include = " . . \ spine \ CCSkeleton . cpp " / > <nl> + < ClCompile Include = " . . \ spine \ CCSkeletonAnimation . cpp " / > <nl> < ClCompile Include = " . . \ spine \ extension . cpp " / > <nl> < ClCompile Include = " . . \ spine \ Json . cpp " / > <nl> < ClCompile Include = " . . \ spine \ RegionAttachment . cpp " / > <nl> <nl> < ClInclude Include = " . . \ spine \ AttachmentLoader . h " / > <nl> < ClInclude Include = " . . \ spine \ Bone . h " / > <nl> < ClInclude Include = " . . \ spine \ BoneData . h " / > <nl> + < ClInclude Include = " . . \ spine \ CCSkeleton . h " / > <nl> + < ClInclude Include = " . . \ spine \ CCSkeletonAnimation . h " / > <nl> < ClInclude Include = " . . \ spine \ extension . h " / > <nl> < ClInclude Include = " . . \ spine \ Json . h " / > <nl> < ClInclude Include = " . . \ spine \ RegionAttachment . h " / > <nl> mmm a / extensions / proj . win32 / libExtensions . vcxproj . filters <nl> ppp b / extensions / proj . win32 / libExtensions . vcxproj . filters <nl> <nl> < ClCompile Include = " . . \ spine \ spine - cocos2dx . cpp " > <nl> < Filter > spine < / Filter > <nl> < / ClCompile > <nl> + < ClCompile Include = " . . \ spine \ CCSkeleton . cpp " > <nl> + < Filter > spine < / Filter > <nl> + < / ClCompile > <nl> + < ClCompile Include = " . . 
\ spine \ CCSkeletonAnimation . cpp " > <nl> + < Filter > spine < / Filter > <nl> + < / ClCompile > <nl> < ClCompile Include = " . . \ network \ Websocket . cpp " > <nl> < Filter > network < / Filter > <nl> < / ClCompile > <nl> <nl> < ClInclude Include = " . . \ spine \ spine . h " > <nl> < Filter > spine < / Filter > <nl> < / ClInclude > <nl> + < ClInclude Include = " . . \ spine \ CCSkeleton . h " > <nl> + < Filter > spine < / Filter > <nl> + < / ClInclude > <nl> + < ClInclude Include = " . . \ spine \ CCSkeletonAnimation . h " > <nl> + < Filter > spine < / Filter > <nl> + < / ClInclude > <nl> < ClInclude Include = " . . \ network \ Websocket . h " > <nl> < Filter > network < / Filter > <nl> < / ClInclude > <nl> < / ItemGroup > <nl> - < / Project > <nl> \ No newline at end of file <nl> + < / Project > <nl> mmm a / extensions / spine / Animation . cpp <nl> ppp b / extensions / spine / Animation . cpp <nl> void Animation_dispose ( Animation * self ) { <nl> for ( i = 0 ; i < self - > timelineCount ; + + i ) <nl> Timeline_dispose ( self - > timelines [ i ] ) ; <nl> FREE ( self - > timelines ) ; <nl> + FREE ( self - > name ) ; <nl> FREE ( self ) ; <nl> } <nl> <nl> void Animation_apply ( const Animation * self , Skeleton * skeleton , float time , int / * bool * / loop ) { <nl> + int i , n = self - > timelineCount ; <nl> + <nl> + # ifdef __STDC_VERSION__ <nl> if ( loop & & self - > duration ) time = fmodf ( time , self - > duration ) ; <nl> + # else <nl> + if ( loop & & self - > duration ) time = ( float ) fmod ( time , self - > duration ) ; <nl> + # endif <nl> <nl> - int i , n = self - > timelineCount ; <nl> for ( i = 0 ; i < n ; + + i ) <nl> Timeline_apply ( self - > timelines [ i ] , skeleton , time , 1 ) ; <nl> } <nl> <nl> void Animation_mix ( const Animation * self , Skeleton * skeleton , float time , int / * bool * / loop , float alpha ) { <nl> + int i , n = self - > timelineCount ; <nl> + <nl> + # ifdef __STDC_VERSION__ <nl> if ( loop & & self - > duration ) time = fmodf ( time , self - > duration ) ; <nl> + # else <nl> + if ( loop & & self - > duration ) time = ( float ) fmod ( time , self - > duration ) ; <nl> + # endif <nl> <nl> - int i , n = self - > timelineCount ; <nl> for ( i = 0 ; i < n ; + + i ) <nl> Timeline_apply ( self - > timelines [ i ] , skeleton , time , alpha ) ; <nl> } <nl> typedef struct _TimelineVtable { <nl> void ( * dispose ) ( Timeline * self ) ; <nl> } _TimelineVtable ; <nl> <nl> - void _Timeline_init ( Timeline * self , / / <nl> - void ( * dispose ) ( Timeline * self ) , / / <nl> + void _Timeline_init ( Timeline * self , / * * / <nl> + void ( * dispose ) ( Timeline * self ) , / * * / <nl> void ( * apply ) ( const Timeline * self , Skeleton * skeleton , float time , float alpha ) ) { <nl> - CONST_CAST ( void * , self - > vtable ) = NEW ( _TimelineVtable ) ; <nl> + CONST_CAST ( _TimelineVtable * , self - > vtable ) = NEW ( _TimelineVtable ) ; <nl> VTABLE ( Timeline , self ) - > dispose = dispose ; <nl> VTABLE ( Timeline , self ) - > apply = apply ; <nl> } <nl> static const float CURVE_LINEAR = 0 ; <nl> static const float CURVE_STEPPED = - 1 ; <nl> static const int CURVE_SEGMENTS = 10 ; <nl> <nl> - void _CurveTimeline_init ( CurveTimeline * self , int frameCount , / / <nl> - void ( * dispose ) ( Timeline * self ) , / / <nl> + void _CurveTimeline_init ( CurveTimeline * self , int frameCount , / * * / <nl> + void ( * dispose ) ( Timeline * self ) , / * * / <nl> void ( * apply ) ( const Timeline * self , Skeleton * skeleton , float time , float alpha ) ) { <nl> 
_Timeline_init ( SUPER ( self ) , dispose , apply ) ; <nl> self - > curves = CALLOC ( float , ( frameCount - 1 ) * 6 ) ; <nl> void CurveTimeline_setCurve ( CurveTimeline * self , int frameIndex , float cx1 , flo <nl> } <nl> <nl> float CurveTimeline_getCurvePercent ( const CurveTimeline * self , int frameIndex , float percent ) { <nl> + float dfy ; <nl> + float ddfx ; <nl> + float ddfy ; <nl> + float dddfx ; <nl> + float dddfy ; <nl> + float x , y ; <nl> + int i ; <nl> int curveIndex = frameIndex * 6 ; <nl> float dfx = self - > curves [ curveIndex ] ; <nl> if ( dfx = = CURVE_LINEAR ) return percent ; <nl> if ( dfx = = CURVE_STEPPED ) return 0 ; <nl> - float dfy = self - > curves [ curveIndex + 1 ] ; <nl> - float ddfx = self - > curves [ curveIndex + 2 ] ; <nl> - float ddfy = self - > curves [ curveIndex + 3 ] ; <nl> - float dddfx = self - > curves [ curveIndex + 4 ] ; <nl> - float dddfy = self - > curves [ curveIndex + 5 ] ; <nl> - float x = dfx , y = dfy ; <nl> - int i = CURVE_SEGMENTS - 2 ; <nl> + dfy = self - > curves [ curveIndex + 1 ] ; <nl> + ddfx = self - > curves [ curveIndex + 2 ] ; <nl> + ddfy = self - > curves [ curveIndex + 3 ] ; <nl> + dddfx = self - > curves [ curveIndex + 4 ] ; <nl> + dddfy = self - > curves [ curveIndex + 5 ] ; <nl> + x = dfx , y = dfy ; <nl> + i = CURVE_SEGMENTS - 2 ; <nl> while ( 1 ) { <nl> if ( x > = percent ) { <nl> float lastX = x - dfx ; <nl> float CurveTimeline_getCurvePercent ( const CurveTimeline * self , int frameIndex , <nl> <nl> / * @ param target After the first and before the last entry . * / <nl> static int binarySearch ( float * values , int valuesLength , float target , int step ) { <nl> - int low = 0 ; <nl> + int low = 0 , current ; <nl> int high = valuesLength / step - 2 ; <nl> if ( high = = 0 ) return step ; <nl> - int current = high > > 1 ; <nl> + current = high > > 1 ; <nl> while ( 1 ) { <nl> if ( values [ ( current + 1 ) * step ] < = target ) <nl> low = current + 1 ; <nl> void _BaseTimeline_dispose ( Timeline * timeline ) { <nl> } <nl> <nl> / * Many timelines have structure identical to struct BaseTimeline and extend CurveTimeline . * * / <nl> - struct BaseTimeline * _BaseTimeline_create ( int frameCount , int frameSize , / / <nl> + struct BaseTimeline * _BaseTimeline_create ( int frameCount , int frameSize , / * * / <nl> void ( * apply ) ( const Timeline * self , Skeleton * skeleton , float time , float alpha ) ) { <nl> <nl> struct BaseTimeline * self = NEW ( struct BaseTimeline ) ; <nl> static const int ROTATE_LAST_FRAME_TIME = - 2 ; <nl> static const int ROTATE_FRAME_VALUE = 1 ; <nl> <nl> void _RotateTimeline_apply ( const Timeline * timeline , Skeleton * skeleton , float time , float alpha ) { <nl> + Bone * bone ; <nl> + int frameIndex ; <nl> + float lastFrameValue , frameTime , percent , amount ; <nl> + <nl> RotateTimeline * self = SUB_CAST ( RotateTimeline , timeline ) ; <nl> <nl> if ( time < self - > frames [ 0 ] ) return ; / * Time is before first frame . * / <nl> <nl> - Bone * bone = skeleton - > bones [ self - > boneIndex ] ; <nl> + bone = skeleton - > bones [ self - > boneIndex ] ; <nl> <nl> if ( time > = self - > frames [ self - > framesLength - 2 ] ) { / * Time is after last frame . * / <nl> float amount = bone - > data - > rotation + self - > frames [ self - > framesLength - 1 ] - bone - > rotation ; <nl> void _RotateTimeline_apply ( const Timeline * timeline , Skeleton * skeleton , float <nl> } <nl> <nl> / * Interpolate between the last frame and the current frame . 
* / <nl> - int frameIndex = binarySearch ( self - > frames , self - > framesLength , time , 2 ) ; <nl> - float lastFrameValue = self - > frames [ frameIndex - 1 ] ; <nl> - float frameTime = self - > frames [ frameIndex ] ; <nl> - float percent = 1 - ( time - frameTime ) / ( self - > frames [ frameIndex + ROTATE_LAST_FRAME_TIME ] - frameTime ) ; <nl> + frameIndex = binarySearch ( self - > frames , self - > framesLength , time , 2 ) ; <nl> + lastFrameValue = self - > frames [ frameIndex - 1 ] ; <nl> + frameTime = self - > frames [ frameIndex ] ; <nl> + percent = 1 - ( time - frameTime ) / ( self - > frames [ frameIndex + ROTATE_LAST_FRAME_TIME ] - frameTime ) ; <nl> percent = CurveTimeline_getCurvePercent ( SUPER ( self ) , frameIndex / 2 - 1 , percent < 0 ? 0 : ( percent > 1 ? 1 : percent ) ) ; <nl> <nl> - float amount = self - > frames [ frameIndex + ROTATE_FRAME_VALUE ] - lastFrameValue ; <nl> + amount = self - > frames [ frameIndex + ROTATE_FRAME_VALUE ] - lastFrameValue ; <nl> while ( amount > 180 ) <nl> amount - = 360 ; <nl> while ( amount < - 180 ) <nl> static const int TRANSLATE_FRAME_X = 1 ; <nl> static const int TRANSLATE_FRAME_Y = 2 ; <nl> <nl> void _TranslateTimeline_apply ( const Timeline * timeline , Skeleton * skeleton , float time , float alpha ) { <nl> + Bone * bone ; <nl> + int frameIndex ; <nl> + float lastFrameX , lastFrameY , frameTime , percent ; <nl> + <nl> TranslateTimeline * self = SUB_CAST ( TranslateTimeline , timeline ) ; <nl> <nl> if ( time < self - > frames [ 0 ] ) return ; / * Time is before first frame . * / <nl> <nl> - Bone * bone = skeleton - > bones [ self - > boneIndex ] ; <nl> + bone = skeleton - > bones [ self - > boneIndex ] ; <nl> <nl> if ( time > = self - > frames [ self - > framesLength - 3 ] ) { / * Time is after last frame . * / <nl> bone - > x + = ( bone - > data - > x + self - > frames [ self - > framesLength - 2 ] - bone - > x ) * alpha ; <nl> void _TranslateTimeline_apply ( const Timeline * timeline , Skeleton * skeleton , flo <nl> } <nl> <nl> / * Interpolate between the last frame and the current frame . * / <nl> - int frameIndex = binarySearch ( self - > frames , self - > framesLength , time , 3 ) ; <nl> - float lastFrameX = self - > frames [ frameIndex - 2 ] ; <nl> - float lastFrameY = self - > frames [ frameIndex - 1 ] ; <nl> - float frameTime = self - > frames [ frameIndex ] ; <nl> - float percent = 1 - ( time - frameTime ) / ( self - > frames [ frameIndex + TRANSLATE_LAST_FRAME_TIME ] - frameTime ) ; <nl> + frameIndex = binarySearch ( self - > frames , self - > framesLength , time , 3 ) ; <nl> + lastFrameX = self - > frames [ frameIndex - 2 ] ; <nl> + lastFrameY = self - > frames [ frameIndex - 1 ] ; <nl> + frameTime = self - > frames [ frameIndex ] ; <nl> + percent = 1 - ( time - frameTime ) / ( self - > frames [ frameIndex + TRANSLATE_LAST_FRAME_TIME ] - frameTime ) ; <nl> percent = CurveTimeline_getCurvePercent ( SUPER ( self ) , frameIndex / 3 - 1 , percent < 0 ? 0 : ( percent > 1 ? 
1 : percent ) ) ; <nl> <nl> bone - > x + = ( bone - > data - > x + lastFrameX + ( self - > frames [ frameIndex + TRANSLATE_FRAME_X ] - lastFrameX ) * percent - bone - > x ) <nl> void TranslateTimeline_setFrame ( TranslateTimeline * self , int frameIndex , float <nl> / * * / <nl> <nl> void _ScaleTimeline_apply ( const Timeline * timeline , Skeleton * skeleton , float time , float alpha ) { <nl> - ScaleTimeline * self = SUB_CAST ( ScaleTimeline , timeline ) ; <nl> + Bone * bone ; <nl> + int frameIndex ; <nl> + float lastFrameX , lastFrameY , frameTime , percent ; <nl> <nl> + ScaleTimeline * self = SUB_CAST ( ScaleTimeline , timeline ) ; <nl> + <nl> if ( time < self - > frames [ 0 ] ) return ; / * Time is before first frame . * / <nl> <nl> - Bone * bone = skeleton - > bones [ self - > boneIndex ] ; <nl> + bone = skeleton - > bones [ self - > boneIndex ] ; <nl> if ( time > = self - > frames [ self - > framesLength - 3 ] ) { / * Time is after last frame . * / <nl> bone - > scaleX + = ( bone - > data - > scaleX - 1 + self - > frames [ self - > framesLength - 2 ] - bone - > scaleX ) * alpha ; <nl> bone - > scaleY + = ( bone - > data - > scaleY - 1 + self - > frames [ self - > framesLength - 1 ] - bone - > scaleY ) * alpha ; <nl> void _ScaleTimeline_apply ( const Timeline * timeline , Skeleton * skeleton , float t <nl> } <nl> <nl> / * Interpolate between the last frame and the current frame . * / <nl> - int frameIndex = binarySearch ( self - > frames , self - > framesLength , time , 3 ) ; <nl> - float lastFrameX = self - > frames [ frameIndex - 2 ] ; <nl> - float lastFrameY = self - > frames [ frameIndex - 1 ] ; <nl> - float frameTime = self - > frames [ frameIndex ] ; <nl> - float percent = 1 - ( time - frameTime ) / ( self - > frames [ frameIndex + TRANSLATE_LAST_FRAME_TIME ] - frameTime ) ; <nl> + frameIndex = binarySearch ( self - > frames , self - > framesLength , time , 3 ) ; <nl> + lastFrameX = self - > frames [ frameIndex - 2 ] ; <nl> + lastFrameY = self - > frames [ frameIndex - 1 ] ; <nl> + frameTime = self - > frames [ frameIndex ] ; <nl> + percent = 1 - ( time - frameTime ) / ( self - > frames [ frameIndex + TRANSLATE_LAST_FRAME_TIME ] - frameTime ) ; <nl> percent = CurveTimeline_getCurvePercent ( SUPER ( self ) , frameIndex / 3 - 1 , percent < 0 ? 0 : ( percent > 1 ? 1 : percent ) ) ; <nl> <nl> bone - > scaleX + = ( bone - > data - > scaleX - 1 + lastFrameX + ( self - > frames [ frameIndex + TRANSLATE_FRAME_X ] - lastFrameX ) * percent <nl> static const int COLOR_FRAME_B = 3 ; <nl> static const int COLOR_FRAME_A = 4 ; <nl> <nl> void _ColorTimeline_apply ( const Timeline * timeline , Skeleton * skeleton , float time , float alpha ) { <nl> + Slot * slot ; <nl> + int frameIndex ; <nl> + float lastFrameR , lastFrameG , lastFrameB , lastFrameA , percent , frameTime ; <nl> + float r , g , b , a ; <nl> ColorTimeline * self = ( ColorTimeline * ) timeline ; <nl> <nl> if ( time < self - > frames [ 0 ] ) return ; / * Time is before first frame . * / <nl> <nl> - Slot * slot = skeleton - > slots [ self - > slotIndex ] ; <nl> + slot = skeleton - > slots [ self - > slotIndex ] ; <nl> <nl> if ( time > = self - > frames [ self - > framesLength - 5 ] ) { / * Time is after last frame . * / <nl> int i = self - > framesLength - 1 ; <nl> void _ColorTimeline_apply ( const Timeline * timeline , Skeleton * skeleton , float t <nl> } <nl> <nl> / * Interpolate between the last frame and the current frame . 
* / <nl> - int frameIndex = binarySearch ( self - > frames , self - > framesLength , time , 5 ) ; <nl> - float lastFrameR = self - > frames [ frameIndex - 4 ] ; <nl> - float lastFrameG = self - > frames [ frameIndex - 3 ] ; <nl> - float lastFrameB = self - > frames [ frameIndex - 2 ] ; <nl> - float lastFrameA = self - > frames [ frameIndex - 1 ] ; <nl> - float frameTime = self - > frames [ frameIndex ] ; <nl> - float percent = 1 - ( time - frameTime ) / ( self - > frames [ frameIndex + COLOR_LAST_FRAME_TIME ] - frameTime ) ; <nl> + frameIndex = binarySearch ( self - > frames , self - > framesLength , time , 5 ) ; <nl> + lastFrameR = self - > frames [ frameIndex - 4 ] ; <nl> + lastFrameG = self - > frames [ frameIndex - 3 ] ; <nl> + lastFrameB = self - > frames [ frameIndex - 2 ] ; <nl> + lastFrameA = self - > frames [ frameIndex - 1 ] ; <nl> + frameTime = self - > frames [ frameIndex ] ; <nl> + percent = 1 - ( time - frameTime ) / ( self - > frames [ frameIndex + COLOR_LAST_FRAME_TIME ] - frameTime ) ; <nl> percent = CurveTimeline_getCurvePercent ( SUPER ( self ) , frameIndex / 5 - 1 , percent < 0 ? 0 : ( percent > 1 ? 1 : percent ) ) ; <nl> <nl> - float r = lastFrameR + ( self - > frames [ frameIndex + COLOR_FRAME_R ] - lastFrameR ) * percent ; <nl> - float g = lastFrameG + ( self - > frames [ frameIndex + COLOR_FRAME_G ] - lastFrameG ) * percent ; <nl> - float b = lastFrameB + ( self - > frames [ frameIndex + COLOR_FRAME_B ] - lastFrameB ) * percent ; <nl> - float a = lastFrameA + ( self - > frames [ frameIndex + COLOR_FRAME_A ] - lastFrameA ) * percent ; <nl> + r = lastFrameR + ( self - > frames [ frameIndex + COLOR_FRAME_R ] - lastFrameR ) * percent ; <nl> + g = lastFrameG + ( self - > frames [ frameIndex + COLOR_FRAME_G ] - lastFrameG ) * percent ; <nl> + b = lastFrameB + ( self - > frames [ frameIndex + COLOR_FRAME_B ] - lastFrameB ) * percent ; <nl> + a = lastFrameA + ( self - > frames [ frameIndex + COLOR_FRAME_A ] - lastFrameA ) * percent ; <nl> if ( alpha < 1 ) { <nl> slot - > r + = ( r - slot - > r ) * alpha ; <nl> slot - > g + = ( g - slot - > g ) * alpha ; <nl> void ColorTimeline_setFrame ( ColorTimeline * self , int frameIndex , float time , fl <nl> / * * / <nl> <nl> void _AttachmentTimeline_apply ( const Timeline * timeline , Skeleton * skeleton , float time , float alpha ) { <nl> + int frameIndex ; <nl> + const char * attachmentName ; <nl> AttachmentTimeline * self = ( AttachmentTimeline * ) timeline ; <nl> <nl> if ( time < self - > frames [ 0 ] ) return ; / * Time is before first frame . * / <nl> <nl> - int frameIndex ; <nl> if ( time > = self - > frames [ self - > framesLength - 1 ] ) / * Time is after last frame . * / <nl> frameIndex = self - > framesLength - 1 ; <nl> else <nl> frameIndex = binarySearch ( self - > frames , self - > framesLength , time , 1 ) - 1 ; <nl> <nl> - const char * attachmentName = self - > attachmentNames [ frameIndex ] ; <nl> + attachmentName = self - > attachmentNames [ frameIndex ] ; <nl> Slot_setAttachment ( skeleton - > slots [ self - > slotIndex ] , <nl> attachmentName ? 
Skeleton_getAttachmentForSlotIndex ( skeleton , self - > slotIndex , attachmentName ) : 0 ) ; <nl> } <nl> <nl> void _AttachmentTimeline_dispose ( Timeline * timeline ) { <nl> + AttachmentTimeline * self ; <nl> + int i ; <nl> + <nl> _Timeline_deinit ( timeline ) ; <nl> - AttachmentTimeline * self = ( AttachmentTimeline * ) timeline ; <nl> + self = ( AttachmentTimeline * ) timeline ; <nl> <nl> - int i ; <nl> for ( i = 0 ; i < self - > framesLength ; + + i ) <nl> FREE ( self - > attachmentNames [ i ] ) ; <nl> FREE ( self - > attachmentNames ) ; <nl> - <nl> + FREE ( self - > frames ) ; <nl> FREE ( self ) ; <nl> } <nl> <nl> void AttachmentTimeline_setFrame ( AttachmentTimeline * self , int frameIndex , floa <nl> self - > attachmentNames [ frameIndex ] = 0 ; <nl> } <nl> <nl> - } } / / namespace cocos2d { namespace extension { <nl> + } } / / namespace cocos2d { namespace extension { <nl> \ No newline at end of file <nl> mmm a / extensions / spine / AnimationState . cpp <nl> ppp b / extensions / spine / AnimationState . cpp <nl> <nl> <nl> # include < spine / AnimationState . h > <nl> # include < spine / extension . h > <nl> + # include < stdio . h > <nl> <nl> namespace cocos2d { namespace extension { <nl> <nl> + typedef struct _Entry _Entry ; <nl> + struct _Entry { <nl> + Animation * animation ; <nl> + int / * bool * / loop ; <nl> + float delay ; <nl> + _Entry * next ; <nl> + } ; <nl> + <nl> typedef struct { <nl> AnimationState super ; <nl> Animation * previous ; <nl> typedef struct { <nl> int / * bool * / previousLoop ; <nl> float mixTime ; <nl> float mixDuration ; <nl> + _Entry * queue ; <nl> } _Internal ; <nl> <nl> AnimationState * AnimationState_create ( AnimationStateData * data ) { <nl> AnimationState * AnimationState_create ( AnimationStateData * data ) { <nl> return self ; <nl> } <nl> <nl> + void _AnimationState_clearQueue ( AnimationState * self ) { <nl> + _Internal * internal = SUB_CAST ( _Internal , self ) ; <nl> + _Entry * entry = internal - > queue ; <nl> + while ( entry ) { <nl> + _Entry * nextEntry = entry - > next ; <nl> + FREE ( entry ) ; <nl> + entry = nextEntry ; <nl> + } <nl> + internal - > queue = 0 ; <nl> + } <nl> + <nl> void AnimationState_dispose ( AnimationState * self ) { <nl> + _AnimationState_clearQueue ( self ) ; <nl> FREE ( self ) ; <nl> } <nl> <nl> - void AnimationState_update ( AnimationState * self , float delta ) { <nl> - self - > time + = delta ; <nl> - SUB_CAST ( _Internal , self ) - > previousTime + = delta ; <nl> - SUB_CAST ( _Internal , self ) - > mixTime + = delta ; <nl> - } <nl> + void AnimationState_addAnimation ( AnimationState * self , Animation * animation , int / * bool * / loop , float delay ) { <nl> + _Entry * existingEntry ; <nl> + Animation * previousAnimation ; <nl> <nl> - void AnimationState_apply ( AnimationState * self , Skeleton * skeleton ) { <nl> - if ( ! 
self - > animation ) return ; <nl> _Internal * internal = SUB_CAST ( _Internal , self ) ; <nl> - if ( internal - > previous ) { <nl> - Animation_apply ( internal - > previous , skeleton , internal - > previousTime , internal - > previousLoop ) ; <nl> - float alpha = internal - > mixTime / internal - > mixDuration ; <nl> - if ( alpha > = 1 ) { <nl> - alpha = 1 ; <nl> - internal - > previous = 0 ; <nl> - } <nl> - Animation_mix ( self - > animation , skeleton , self - > time , self - > loop , alpha ) ; <nl> - } else <nl> - Animation_apply ( self - > animation , skeleton , self - > time , self - > loop ) ; <nl> + _Entry * entry = NEW ( _Entry ) ; <nl> + entry - > animation = animation ; <nl> + entry - > loop = loop ; <nl> + <nl> + existingEntry = internal - > queue ; <nl> + if ( existingEntry ) { <nl> + while ( existingEntry - > next ) <nl> + existingEntry = existingEntry - > next ; <nl> + existingEntry - > next = entry ; <nl> + previousAnimation = existingEntry - > animation ; <nl> + } else { <nl> + internal - > queue = entry ; <nl> + previousAnimation = self - > animation ; <nl> + } <nl> + <nl> + if ( delay < = 0 ) { <nl> + if ( previousAnimation ) <nl> + delay = previousAnimation - > duration - AnimationStateData_getMix ( self - > data , previousAnimation , animation ) + delay ; <nl> + else <nl> + delay = 0 ; <nl> + } <nl> + entry - > delay = delay ; <nl> } <nl> <nl> - void AnimationState_setAnimationByName ( AnimationState * self , const char * animationName , int / * * / loop ) { <nl> - Animation * animation = SkeletonData_findAnimation ( self - > data - > skeletonData , animationName ) ; <nl> - AnimationState_setAnimation ( self , animation , loop ) ; <nl> + void AnimationState_addAnimationByName ( AnimationState * self , const char * animationName , int / * bool * / loop , float delay ) { <nl> + Animation * animation = animationName ? SkeletonData_findAnimation ( self - > data - > skeletonData , animationName ) : 0 ; <nl> + AnimationState_addAnimation ( self , animation , loop , delay ) ; <nl> } <nl> <nl> - void AnimationState_setAnimation ( AnimationState * self , Animation * newAnimation , int / * * / loop ) { <nl> + void _AnimationState_setAnimation ( AnimationState * self , Animation * newAnimation , int / * bool * / loop ) { <nl> _Internal * internal = SUB_CAST ( _Internal , self ) ; <nl> internal - > previous = 0 ; <nl> if ( newAnimation & & self - > animation & & self - > data ) { <nl> void AnimationState_setAnimation ( AnimationState * self , Animation * newAnimation , <nl> self - > time = 0 ; <nl> } <nl> <nl> + void AnimationState_setAnimation ( AnimationState * self , Animation * newAnimation , int / * bool * / loop ) { <nl> + _AnimationState_clearQueue ( self ) ; <nl> + _AnimationState_setAnimation ( self , newAnimation , loop ) ; <nl> + } <nl> + <nl> + void AnimationState_setAnimationByName ( AnimationState * self , const char * animationName , int / * bool * / loop ) { <nl> + Animation * animation = animationName ? 
SkeletonData_findAnimation ( self - > data - > skeletonData , animationName ) : 0 ; <nl> + AnimationState_setAnimation ( self , animation , loop ) ; <nl> + } <nl> + <nl> void AnimationState_clearAnimation ( AnimationState * self ) { <nl> - SUB_CAST ( _Internal , self ) - > previous = 0 ; <nl> + _Internal * internal = SUB_CAST ( _Internal , self ) ; <nl> + internal - > previous = 0 ; <nl> CONST_CAST ( Animation * , self - > animation ) = 0 ; <nl> + _AnimationState_clearQueue ( self ) ; <nl> + } <nl> + <nl> + void AnimationState_update ( AnimationState * self , float delta ) { <nl> + _Entry * next ; <nl> + _Internal * internal = SUB_CAST ( _Internal , self ) ; <nl> + <nl> + self - > time + = delta ; <nl> + internal - > previousTime + = delta ; <nl> + internal - > mixTime + = delta ; <nl> + <nl> + if ( internal - > queue & & self - > time > = internal - > queue - > delay ) { <nl> + _AnimationState_setAnimation ( self , internal - > queue - > animation , internal - > queue - > loop ) ; <nl> + next = internal - > queue - > next ; <nl> + FREE ( internal - > queue ) ; <nl> + internal - > queue = next ; <nl> + } <nl> + } <nl> + <nl> + void AnimationState_apply ( AnimationState * self , Skeleton * skeleton ) { <nl> + _Internal * internal ; <nl> + float alpha ; <nl> + <nl> + if ( ! self - > animation ) return ; <nl> + internal = SUB_CAST ( _Internal , self ) ; <nl> + if ( internal - > previous ) { <nl> + Animation_apply ( internal - > previous , skeleton , internal - > previousTime , internal - > previousLoop ) ; <nl> + alpha = internal - > mixTime / internal - > mixDuration ; <nl> + if ( alpha > = 1 ) { <nl> + alpha = 1 ; <nl> + internal - > previous = 0 ; <nl> + } <nl> + Animation_mix ( self - > animation , skeleton , self - > time , self - > loop , alpha ) ; <nl> + } else <nl> + Animation_apply ( self - > animation , skeleton , self - > time , self - > loop ) ; <nl> } <nl> <nl> int / * bool * / AnimationState_isComplete ( AnimationState * self ) { <nl> return ! self - > animation | | self - > time > = self - > animation - > duration ; <nl> } <nl> <nl> - } } / / namespace cocos2d { namespace extension { <nl> + } } / / namespace cocos2d { namespace extension { <nl> \ No newline at end of file <nl> mmm a / extensions / spine / AnimationState . h <nl> ppp b / extensions / spine / AnimationState . h <nl> void AnimationState_update ( AnimationState * self , float delta ) ; <nl> void AnimationState_apply ( AnimationState * self , struct Skeleton * skeleton ) ; <nl> <nl> / * @ param animationName May be 0 . * / <nl> - void AnimationState_setAnimationByName ( AnimationState * self , const char * animationName , int / * * / loop ) ; <nl> + void AnimationState_setAnimationByName ( AnimationState * self , const char * animationName , int / * bool * / loop ) ; <nl> / * @ param animation May be 0 . * / <nl> - void AnimationState_setAnimation ( AnimationState * self , Animation * animation , int / * * / loop ) ; <nl> + void AnimationState_setAnimation ( AnimationState * self , Animation * animation , int / * bool * / loop ) ; <nl> + <nl> + / * * @ param animationName May be 0 . <nl> + * @ param delay May be < = 0 to use duration of previous animation minus any mix duration plus the negative delay . * / <nl> + void AnimationState_addAnimationByName ( AnimationState * self , const char * animationName , int / * bool * / loop , float delay ) ; <nl> + / * * @ param animation May be 0 . <nl> + * @ param delay May be < = 0 to use duration of previous animation minus any mix duration plus the negative delay . 
* / <nl> + void AnimationState_addAnimation ( AnimationState * self , Animation * animation , int / * bool * / loop , float delay ) ; <nl> + <nl> void AnimationState_clearAnimation ( AnimationState * self ) ; <nl> <nl> int / * bool * / AnimationState_isComplete ( AnimationState * self ) ; <nl> mmm a / extensions / spine / AnimationStateData . cpp <nl> ppp b / extensions / spine / AnimationStateData . cpp <nl> AnimationStateData * AnimationStateData_create ( SkeletonData * skeletonData ) { <nl> } <nl> <nl> void AnimationStateData_dispose ( AnimationStateData * self ) { <nl> + _ToEntry * toEntry ; <nl> + _ToEntry * nextToEntry ; <nl> + _FromEntry * nextFromEntry ; <nl> + <nl> _FromEntry * fromEntry = ( _FromEntry * ) self - > entries ; <nl> while ( fromEntry ) { <nl> - _ToEntry * toEntry = fromEntry - > toEntries ; <nl> + toEntry = fromEntry - > toEntries ; <nl> while ( toEntry ) { <nl> - _ToEntry * next = toEntry - > next ; <nl> + nextToEntry = toEntry - > next ; <nl> _ToEntry_dispose ( toEntry ) ; <nl> - toEntry = next ; <nl> + toEntry = nextToEntry ; <nl> } <nl> - _FromEntry * next = fromEntry - > next ; <nl> + nextFromEntry = fromEntry - > next ; <nl> _FromEntry_dispose ( fromEntry ) ; <nl> - fromEntry = next ; <nl> + fromEntry = nextFromEntry ; <nl> } <nl> <nl> FREE ( self ) ; <nl> } <nl> <nl> void AnimationStateData_setMixByName ( AnimationStateData * self , const char * fromName , const char * toName , float duration ) { <nl> + Animation * to ; <nl> Animation * from = SkeletonData_findAnimation ( self - > skeletonData , fromName ) ; <nl> if ( ! from ) return ; <nl> - Animation * to = SkeletonData_findAnimation ( self - > skeletonData , toName ) ; <nl> + to = SkeletonData_findAnimation ( self - > skeletonData , toName ) ; <nl> if ( ! to ) return ; <nl> AnimationStateData_setMix ( self , from , to , duration ) ; <nl> } <nl> <nl> void AnimationStateData_setMix ( AnimationStateData * self , Animation * from , Animation * to , float duration ) { <nl> / * Find existing FromEntry . * / <nl> + _ToEntry * toEntry ; <nl> _FromEntry * fromEntry = ( _FromEntry * ) self - > entries ; <nl> while ( fromEntry ) { <nl> if ( fromEntry - > animation = = from ) { <nl> / * Find existing ToEntry . * / <nl> - _ToEntry * toEntry = fromEntry - > toEntries ; <nl> + toEntry = fromEntry - > toEntries ; <nl> while ( toEntry ) { <nl> if ( toEntry - > animation = = to ) { <nl> toEntry - > duration = duration ; <nl> void AnimationStateData_setMix ( AnimationStateData * self , Animation * from , Anima <nl> if ( ! fromEntry ) { <nl> fromEntry = _FromEntry_create ( from ) ; <nl> fromEntry - > next = ( _FromEntry * ) self - > entries ; <nl> - CONST_CAST ( void * , self - > entries ) = fromEntry ; <nl> + CONST_CAST ( _FromEntry * , self - > entries ) = fromEntry ; <nl> } <nl> - _ToEntry * toEntry = _ToEntry_create ( to , duration ) ; <nl> + toEntry = _ToEntry_create ( to , duration ) ; <nl> toEntry - > next = fromEntry - > toEntries ; <nl> fromEntry - > toEntries = toEntry ; <nl> } <nl> float AnimationStateData_getMix ( AnimationStateData * self , Animation * from , Anim <nl> return 0 ; <nl> } <nl> <nl> - } } / / namespace cocos2d { namespace extension { <nl> + } } / / namespace cocos2d { namespace extension { <nl> \ No newline at end of file <nl> mmm a / extensions / spine / Atlas . cpp <nl> ppp b / extensions / spine / Atlas . 
cpp <nl> AtlasPage * AtlasPage_create ( const char * name ) { <nl> } <nl> <nl> void AtlasPage_dispose ( AtlasPage * self ) { <nl> - FREE ( self - > name ) ; <nl> _AtlasPage_disposeTexture ( self ) ; <nl> + FREE ( self - > name ) ; <nl> + FREE ( self ) ; <nl> } <nl> <nl> / * * / <nl> void AtlasRegion_dispose ( AtlasRegion * self ) { <nl> <nl> / * * / <nl> <nl> - typedef struct _struct_Str { <nl> + typedef struct { <nl> const char * begin ; <nl> const char * end ; <nl> - <nl> - _struct_Str ( ) <nl> - : begin ( NULL ) <nl> - , end ( NULL ) { <nl> - } <nl> } Str ; <nl> <nl> static void trim ( Str * str ) { <nl> static int readValue ( const char * end , Str * str ) { <nl> <nl> / * Returns the number of tuple values read ( 2 , 4 , or 0 for failure ) . * / <nl> static int readTuple ( const char * end , Str tuple [ ] ) { <nl> + int i ; <nl> Str str ; <nl> readLine ( 0 , end , & str ) ; <nl> if ( ! beginPast ( & str , ' : ' ) ) return 0 ; <nl> - int i = 0 ; <nl> + <nl> for ( i = 0 ; i < 3 ; + + i ) { <nl> tuple [ i ] . begin = str . begin ; <nl> if ( ! beginPast ( & str , ' , ' ) ) { <nl> static const char * textureFilterNames [ ] = { " Nearest " , " Linear " , " MipMap " , " MipMa <nl> " MipMapNearestLinear " , " MipMapLinearLinear " } ; <nl> <nl> Atlas * Atlas_readAtlas ( const char * begin , int length , const char * dir ) { <nl> + int count ; <nl> const char * end = begin + length ; <nl> int dirLength = strlen ( dir ) ; <nl> int needsSlash = dirLength > 0 & & dir [ dirLength - 1 ] ! = ' / ' & & dir [ dirLength - 1 ] ! = ' \ \ ' ; <nl> Atlas * Atlas_readAtlas ( const char * begin , int length , const char * dir ) { <nl> <nl> region - > u = region - > x / ( float ) page - > width ; <nl> region - > v = region - > y / ( float ) page - > height ; <nl> - region - > u2 = ( region - > x + region - > width ) / ( float ) page - > width ; <nl> - region - > v2 = ( region - > y + region - > height ) / ( float ) page - > height ; <nl> + if ( region - > rotate ) { <nl> + region - > u2 = ( region - > x + region - > height ) / ( float ) page - > width ; <nl> + region - > v2 = ( region - > y + region - > width ) / ( float ) page - > height ; <nl> + } else { <nl> + region - > u2 = ( region - > x + region - > width ) / ( float ) page - > width ; <nl> + region - > v2 = ( region - > y + region - > height ) / ( float ) page - > height ; <nl> + } <nl> <nl> - int count ; <nl> if ( ! ( count = readTuple ( end , tuple ) ) ) return abortAtlas ( self ) ; <nl> if ( count = = 4 ) { / * split is optional * / <nl> region - > splits = MALLOC ( int , 4 ) ; <nl> Atlas * Atlas_readAtlas ( const char * begin , int length , const char * dir ) { <nl> region - > originalHeight = toInt ( tuple + 1 ) ; <nl> <nl> readTuple ( end , tuple ) ; <nl> - region - > offsetX = ( float ) toInt ( tuple ) ; <nl> - region - > offsetY = ( float ) toInt ( tuple + 1 ) ; <nl> + region - > offsetX = toInt ( tuple ) ; <nl> + region - > offsetY = toInt ( tuple + 1 ) ; <nl> <nl> if ( ! readValue ( end , & str ) ) return abortAtlas ( self ) ; <nl> region - > index = toInt ( & str ) ; <nl> Atlas * Atlas_readAtlas ( const char * begin , int length , const char * dir ) { <nl> } <nl> <nl> Atlas * Atlas_readAtlasFile ( const char * path ) { <nl> + int dirLength ; <nl> + char * dir ; <nl> + int length ; <nl> + const char * data ; <nl> + <nl> Atlas * atlas = 0 ; <nl> <nl> / * Get directory from atlas path . 
* / <nl> Atlas * Atlas_readAtlasFile ( const char * path ) { <nl> const char * lastBackwardSlash = strrchr ( path , ' \ \ ' ) ; <nl> const char * lastSlash = lastForwardSlash > lastBackwardSlash ? lastForwardSlash : lastBackwardSlash ; <nl> if ( lastSlash = = path ) lastSlash + + ; / * Never drop starting slash . * / <nl> - int dirLength = lastSlash ? lastSlash - path : 0 ; <nl> - char * dir = MALLOC ( char , dirLength + 1 ) ; <nl> + dirLength = lastSlash ? lastSlash - path : 0 ; <nl> + dir = MALLOC ( char , dirLength + 1 ) ; <nl> memcpy ( dir , path , dirLength ) ; <nl> dir [ dirLength ] = ' \ 0 ' ; <nl> <nl> - int length ; <nl> - const char * data = _Util_readFile ( path , & length ) ; <nl> + data = _Util_readFile ( path , & length ) ; <nl> if ( data ) atlas = Atlas_readAtlas ( data , length , dir ) ; <nl> <nl> FREE ( data ) ; <nl> Atlas * Atlas_readAtlasFile ( const char * path ) { <nl> } <nl> <nl> void Atlas_dispose ( Atlas * self ) { <nl> + AtlasRegion * region , * nextRegion ; <nl> AtlasPage * page = self - > pages ; <nl> while ( page ) { <nl> AtlasPage * nextPage = page - > next ; <nl> void Atlas_dispose ( Atlas * self ) { <nl> page = nextPage ; <nl> } <nl> <nl> - AtlasRegion * region = self - > regions ; <nl> + region = self - > regions ; <nl> while ( region ) { <nl> - AtlasRegion * nextRegion = region - > next ; <nl> + nextRegion = region - > next ; <nl> AtlasRegion_dispose ( region ) ; <nl> region = nextRegion ; <nl> } <nl> AtlasRegion * Atlas_findRegion ( const Atlas * self , const char * name ) { <nl> return 0 ; <nl> } <nl> <nl> - } } / / namespace cocos2d { namespace extension { <nl> + } } / / namespace cocos2d { namespace extension { <nl> \ No newline at end of file <nl> mmm a / extensions / spine / Atlas . h <nl> ppp b / extensions / spine / Atlas . h <nl> struct AtlasPage { <nl> AtlasFilter minFilter , magFilter ; <nl> AtlasWrap uWrap , vWrap ; <nl> <nl> - void * texture ; <nl> + void * rendererObject ; <nl> int width , height ; <nl> <nl> AtlasPage * next ; <nl> struct AtlasRegion { <nl> const char * name ; <nl> int x , y , width , height ; <nl> float u , v , u2 , v2 ; <nl> - float offsetX , offsetY ; <nl> + int offsetX , offsetY ; <nl> int originalWidth , originalHeight ; <nl> int index ; <nl> int / * bool * / rotate ; <nl> mmm a / extensions / spine / AtlasAttachmentLoader . cpp <nl> ppp b / extensions / spine / AtlasAttachmentLoader . cpp <nl> Attachment * _AtlasAttachmentLoader_newAttachment ( AttachmentLoader * loader , Skin <nl> AtlasAttachmentLoader * self = SUB_CAST ( AtlasAttachmentLoader , loader ) ; <nl> switch ( type ) { <nl> case ATTACHMENT_REGION : { <nl> + RegionAttachment * attachment ; <nl> AtlasRegion * region = Atlas_findRegion ( self - > atlas , name ) ; <nl> if ( ! 
region ) { <nl> _AttachmentLoader_setError ( loader , " Region not found : " , name ) ; <nl> return 0 ; <nl> } <nl> - RegionAttachment * attachment = RegionAttachment_create ( name ) ; <nl> - attachment - > region = region ; <nl> + attachment = RegionAttachment_create ( name ) ; <nl> + attachment - > rendererObject = region ; <nl> + RegionAttachment_setUVs ( attachment , region - > u , region - > v , region - > u2 , region - > v2 , region - > rotate ) ; <nl> + attachment - > regionOffsetX = region - > offsetX ; <nl> + attachment - > regionOffsetY = region - > offsetY ; <nl> + attachment - > regionWidth = region - > width ; <nl> + attachment - > regionHeight = region - > height ; <nl> + attachment - > regionOriginalWidth = region - > originalWidth ; <nl> + attachment - > regionOriginalHeight = region - > originalHeight ; <nl> return SUPER ( attachment ) ; <nl> } <nl> default : <nl> AtlasAttachmentLoader * AtlasAttachmentLoader_create ( Atlas * atlas ) { <nl> return self ; <nl> } <nl> <nl> - } } / / namespace cocos2d { namespace extension { <nl> + } } / / namespace cocos2d { namespace extension { <nl> \ No newline at end of file <nl> mmm a / extensions / spine / Attachment . cpp <nl> ppp b / extensions / spine / Attachment . cpp <nl> typedef struct _AttachmentVtable { <nl> void ( * dispose ) ( Attachment * self ) ; <nl> } _AttachmentVtable ; <nl> <nl> - void _Attachment_init ( Attachment * self , const char * name , AttachmentType type , / / <nl> + void _Attachment_init ( Attachment * self , const char * name , AttachmentType type , / * * / <nl> void ( * dispose ) ( Attachment * self ) ) { <nl> <nl> - CONST_CAST ( void * , self - > vtable ) = NEW ( _AttachmentVtable ) ; <nl> + CONST_CAST ( _AttachmentVtable * , self - > vtable ) = NEW ( _AttachmentVtable ) ; <nl> VTABLE ( Attachment , self ) - > dispose = dispose ; <nl> <nl> MALLOC_STR ( self - > name , name ) ; <nl> void _Attachment_deinit ( Attachment * self ) { <nl> <nl> void Attachment_dispose ( Attachment * self ) { <nl> VTABLE ( Attachment , self ) - > dispose ( self ) ; <nl> + FREE ( self ) ; <nl> } <nl> <nl> - } } / / namespace cocos2d { namespace extension { <nl> + } } / / namespace cocos2d { namespace extension { <nl> \ No newline at end of file <nl> mmm a / extensions / spine / AttachmentLoader . cpp <nl> ppp b / extensions / spine / AttachmentLoader . 
cpp <nl> typedef struct _AttachmentLoaderVtable { <nl> void ( * dispose ) ( AttachmentLoader * self ) ; <nl> } _AttachmentLoaderVtable ; <nl> <nl> - void _AttachmentLoader_init ( AttachmentLoader * self , / / <nl> - void ( * dispose ) ( AttachmentLoader * self ) , / / <nl> + void _AttachmentLoader_init ( AttachmentLoader * self , / * * / <nl> + void ( * dispose ) ( AttachmentLoader * self ) , / * * / <nl> Attachment * ( * newAttachment ) ( AttachmentLoader * self , Skin * skin , AttachmentType type , const char * name ) ) { <nl> - CONST_CAST ( void * , self - > vtable ) = NEW ( _AttachmentLoaderVtable ) ; <nl> + CONST_CAST ( _AttachmentLoaderVtable * , self - > vtable ) = NEW ( _AttachmentLoaderVtable ) ; <nl> VTABLE ( AttachmentLoader , self ) - > dispose = dispose ; <nl> VTABLE ( AttachmentLoader , self ) - > newAttachment = newAttachment ; <nl> } <nl> void _AttachmentLoader_deinit ( AttachmentLoader * self ) { <nl> <nl> void AttachmentLoader_dispose ( AttachmentLoader * self ) { <nl> VTABLE ( AttachmentLoader , self ) - > dispose ( self ) ; <nl> + FREE ( self ) ; <nl> } <nl> <nl> Attachment * AttachmentLoader_newAttachment ( AttachmentLoader * self , Skin * skin , AttachmentType type , const char * name ) { <nl> void _AttachmentLoader_setUnknownTypeError ( AttachmentLoader * self , AttachmentTy <nl> _AttachmentLoader_setError ( self , " Unknown attachment type : " , buffer ) ; <nl> } <nl> <nl> - } } / / namespace cocos2d { namespace extension { <nl> + } } / / namespace cocos2d { namespace extension { <nl> \ No newline at end of file <nl> mmm a / extensions / spine / AttachmentLoader . h <nl> ppp b / extensions / spine / AttachmentLoader . h <nl> struct AttachmentLoader { <nl> const char * error2 ; <nl> <nl> const void * const vtable ; <nl> + # ifdef __cplusplus <nl> + AttachmentLoader ( ) : error1 ( 0 ) , error2 ( 0 ) , vtable ( 0 ) { } <nl> + # endif <nl> } ; <nl> <nl> void AttachmentLoader_dispose ( AttachmentLoader * self ) ; <nl> mmm a / extensions / spine / Bone . cpp <nl> ppp b / extensions / spine / Bone . 
cpp <nl> Bone * Bone_create ( BoneData * data , Bone * parent ) { <nl> Bone * self = NEW ( Bone ) ; <nl> CONST_CAST ( BoneData * , self - > data ) = data ; <nl> CONST_CAST ( Bone * , self - > parent ) = parent ; <nl> - Bone_setToBindPose ( self ) ; <nl> + Bone_setToSetupPose ( self ) ; <nl> return self ; <nl> } <nl> <nl> void Bone_dispose ( Bone * self ) { <nl> FREE ( self ) ; <nl> } <nl> <nl> - void Bone_setToBindPose ( Bone * self ) { <nl> + void Bone_setToSetupPose ( Bone * self ) { <nl> self - > x = self - > data - > x ; <nl> self - > y = self - > data - > y ; <nl> self - > rotation = self - > data - > rotation ; <nl> void Bone_setToBindPose ( Bone * self ) { <nl> } <nl> <nl> void Bone_updateWorldTransform ( Bone * self , int flipX , int flipY ) { <nl> + float radians , cosine , sine ; <nl> if ( self - > parent ) { <nl> CONST_CAST ( float , self - > worldX ) = self - > x * self - > parent - > m00 + self - > y * self - > parent - > m01 + self - > parent - > worldX ; <nl> CONST_CAST ( float , self - > worldY ) = self - > x * self - > parent - > m10 + self - > y * self - > parent - > m11 + self - > parent - > worldY ; <nl> void Bone_updateWorldTransform ( Bone * self , int flipX , int flipY ) { <nl> CONST_CAST ( float , self - > worldScaleY ) = self - > parent - > worldScaleY * self - > scaleY ; <nl> CONST_CAST ( float , self - > worldRotation ) = self - > parent - > worldRotation + self - > rotation ; <nl> } else { <nl> - CONST_CAST ( float , self - > worldX ) = self - > x ; <nl> - CONST_CAST ( float , self - > worldY ) = self - > y ; <nl> + CONST_CAST ( float , self - > worldX ) = flipX ? - self - > x : self - > x ; <nl> + CONST_CAST ( float , self - > worldY ) = flipX ? - self - > y : self - > y ; <nl> CONST_CAST ( float , self - > worldScaleX ) = self - > scaleX ; <nl> CONST_CAST ( float , self - > worldScaleY ) = self - > scaleY ; <nl> CONST_CAST ( float , self - > worldRotation ) = self - > rotation ; <nl> } <nl> - float radians = ( float ) ( self - > worldRotation * 3 . 1415926535897932385 / 180 ) ; <nl> - float cosine = cosf ( radians ) ; <nl> - float sine = sinf ( radians ) ; <nl> + radians = ( float ) ( self - > worldRotation * 3 . 1415926535897932385 / 180 ) ; <nl> + # ifdef __STDC_VERSION__ <nl> + cosine = cosf ( radians ) ; <nl> + sine = sinf ( radians ) ; <nl> + # else <nl> + cosine = ( float ) cos ( radians ) ; <nl> + sine = ( float ) sin ( radians ) ; <nl> + # endif <nl> CONST_CAST ( float , self - > m00 ) = cosine * self - > worldScaleX ; <nl> CONST_CAST ( float , self - > m10 ) = sine * self - > worldScaleX ; <nl> CONST_CAST ( float , self - > m01 ) = - sine * self - > worldScaleY ; <nl> void Bone_updateWorldTransform ( Bone * self , int flipX , int flipY ) { <nl> } <nl> } <nl> <nl> - } } / / namespace cocos2d { namespace extension { <nl> + } } / / namespace cocos2d { namespace extension { <nl> \ No newline at end of file <nl> mmm a / extensions / spine / Bone . h <nl> ppp b / extensions / spine / Bone . h <nl> void Bone_setYDown ( int / * bool * / yDown ) ; <nl> Bone * Bone_create ( BoneData * data , Bone * parent ) ; <nl> void Bone_dispose ( Bone * self ) ; <nl> <nl> - void Bone_setToBindPose ( Bone * self ) ; <nl> + void Bone_setToSetupPose ( Bone * self ) ; <nl> <nl> void Bone_updateWorldTransform ( Bone * self , int / * bool * / flipX , int / * bool * / flipY ) ; <nl> <nl> mmm a / extensions / spine / BoneData . cpp <nl> ppp b / extensions / spine / BoneData . 
cpp <nl> void BoneData_dispose ( BoneData * self ) { <nl> FREE ( self ) ; <nl> } <nl> <nl> - } } / / namespace cocos2d { namespace extension { <nl> + } } / / namespace cocos2d { namespace extension { <nl> \ No newline at end of file <nl> new file mode 100644 <nl> index 000000000000 . . 844ee494543c <nl> mmm / dev / null <nl> ppp b / extensions / spine / CCSkeleton . cpp <nl> <nl> + / * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * <nl> + * Copyright ( c ) 2013 , Esoteric Software <nl> + * All rights reserved . <nl> + * <nl> + * Redistribution and use in source and binary forms , with or without <nl> + * modification , are permitted provided that the following conditions are met : <nl> + * <nl> + * 1 . Redistributions of source code must retain the above copyright notice , this <nl> + * list of conditions and the following disclaimer . <nl> + * 2 . Redistributions in binary form must reproduce the above copyright notice , <nl> + * this list of conditions and the following disclaimer in the documentation <nl> + * and / or other materials provided with the distribution . <nl> + * <nl> + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS " AS IS " AND <nl> + * ANY EXPRESS OR IMPLIED WARRANTIES , INCLUDING , BUT NOT LIMITED TO , THE IMPLIED <nl> + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE <nl> + * DISCLAIMED . IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR <nl> + * ANY DIRECT , INDIRECT , INCIDENTAL , SPECIAL , EXEMPLARY , OR CONSEQUENTIAL DAMAGES <nl> + * ( INCLUDING , BUT NOT LIMITED TO , PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES ; <nl> + * LOSS OF USE , DATA , OR PROFITS ; OR BUSINESS INTERRUPTION ) HOWEVER CAUSED AND <nl> + * ON ANY THEORY OF LIABILITY , WHETHER IN CONTRACT , STRICT LIABILITY , OR TORT <nl> + * ( INCLUDING NEGLIGENCE OR OTHERWISE ) ARISING IN ANY WAY OUT OF THE USE OF THIS <nl> + * SOFTWARE , EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE . <nl> + * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * / <nl> + <nl> + # include < spine / CCSkeleton . h > <nl> + # include < spine / spine - cocos2dx . h > <nl> + <nl> + USING_NS_CC ; <nl> + using std : : min ; <nl> + using std : : max ; <nl> + <nl> + namespace cocos2d { namespace extension { <nl> + <nl> + CCSkeleton * CCSkeleton : : createWithData ( SkeletonData * skeletonData , bool ownsSkeletonData ) { <nl> + CCSkeleton * node = new CCSkeleton ( skeletonData , ownsSkeletonData ) ; <nl> + node - > autorelease ( ) ; <nl> + return node ; <nl> + } <nl> + <nl> + CCSkeleton * CCSkeleton : : createWithFile ( const char * skeletonDataFile , Atlas * atlas , float scale ) { <nl> + CCSkeleton * node = new CCSkeleton ( skeletonDataFile , atlas , scale ) ; <nl> + node - > autorelease ( ) ; <nl> + return node ; <nl> + } <nl> + <nl> + CCSkeleton * CCSkeleton : : createWithFile ( const char * skeletonDataFile , const char * atlasFile , float scale ) { <nl> + CCSkeleton * node = new CCSkeleton ( skeletonDataFile , atlasFile , scale ) ; <nl> + node - > autorelease ( ) ; <nl> + return node ; <nl> + } <nl> + <nl> + void CCSkeleton : : initialize ( ) { <nl> + atlas = 0 ; <nl> + debugSlots = false ; <nl> + debugBones = false ; <nl> + timeScale = 1 ; <nl> + <nl> + blendFunc . src = GL_ONE ; <nl> + blendFunc . 
dst = GL_ONE_MINUS_SRC_ALPHA ; <nl> + setOpacityModifyRGB ( true ) ; <nl> + <nl> + setShaderProgram ( CCShaderCache : : sharedShaderCache ( ) - > programForKey ( kCCShader_PositionTextureColor ) ) ; <nl> + scheduleUpdate ( ) ; <nl> + } <nl> + <nl> + void CCSkeleton : : setSkeletonData ( SkeletonData * skeletonData , bool ownsSkeletonData ) { <nl> + skeleton = Skeleton_create ( skeletonData ) ; <nl> + rootBone = skeleton - > bones [ 0 ] ; <nl> + this - > ownsSkeletonData = ownsSkeletonData ; <nl> + } <nl> + <nl> + CCSkeleton : : CCSkeleton ( ) { <nl> + initialize ( ) ; <nl> + } <nl> + <nl> + CCSkeleton : : CCSkeleton ( SkeletonData * skeletonData , bool ownsSkeletonData ) { <nl> + initialize ( ) ; <nl> + <nl> + setSkeletonData ( skeletonData , ownsSkeletonData ) ; <nl> + } <nl> + <nl> + CCSkeleton : : CCSkeleton ( const char * skeletonDataFile , Atlas * atlas , float scale ) { <nl> + initialize ( ) ; <nl> + <nl> + SkeletonJson * json = SkeletonJson_create ( atlas ) ; <nl> + json - > scale = scale ; <nl> + SkeletonData * skeletonData = SkeletonJson_readSkeletonDataFile ( json , skeletonDataFile ) ; <nl> + CCAssert ( skeletonData , json - > error ? json - > error : " Error reading skeleton data . " ) ; <nl> + SkeletonJson_dispose ( json ) ; <nl> + <nl> + setSkeletonData ( skeletonData , true ) ; <nl> + } <nl> + <nl> + CCSkeleton : : CCSkeleton ( const char * skeletonDataFile , const char * atlasFile , float scale ) { <nl> + initialize ( ) ; <nl> + <nl> + atlas = Atlas_readAtlasFile ( atlasFile ) ; <nl> + CCAssert ( atlas , " Error reading atlas file . " ) ; <nl> + <nl> + SkeletonJson * json = SkeletonJson_create ( atlas ) ; <nl> + json - > scale = scale ; <nl> + SkeletonData * skeletonData = SkeletonJson_readSkeletonDataFile ( json , skeletonDataFile ) ; <nl> + CCAssert ( skeletonData , json - > error ? json - > error : " Error reading skeleton data file . " ) ; <nl> + SkeletonJson_dispose ( json ) ; <nl> + <nl> + setSkeletonData ( skeletonData , true ) ; <nl> + } <nl> + <nl> + CCSkeleton : : ~ CCSkeleton ( ) { <nl> + if ( ownsSkeletonData ) SkeletonData_dispose ( skeleton - > data ) ; <nl> + if ( atlas ) Atlas_dispose ( atlas ) ; <nl> + Skeleton_dispose ( skeleton ) ; <nl> + } <nl> + <nl> + void CCSkeleton : : update ( float deltaTime ) { <nl> + Skeleton_update ( skeleton , deltaTime * timeScale ) ; <nl> + } <nl> + <nl> + void CCSkeleton : : draw ( ) { <nl> + CC_NODE_DRAW_SETUP ( ) ; <nl> + <nl> + ccGLBlendFunc ( blendFunc . src , blendFunc . dst ) ; <nl> + ccColor3B color = getColor ( ) ; <nl> + skeleton - > r = color . r / ( float ) 255 ; <nl> + skeleton - > g = color . g / ( float ) 255 ; <nl> + skeleton - > b = color . b / ( float ) 255 ; <nl> + skeleton - > a = getOpacity ( ) / ( float ) 255 ; <nl> + if ( premultipliedAlpha ) { <nl> + skeleton - > r * = skeleton - > a ; <nl> + skeleton - > g * = skeleton - > a ; <nl> + skeleton - > b * = skeleton - > a ; <nl> + } <nl> + <nl> + CCTextureAtlas * textureAtlas = 0 ; <nl> + ccV3F_C4B_T2F_Quad quad ; <nl> + quad . tl . vertices . z = 0 ; <nl> + quad . tr . vertices . z = 0 ; <nl> + quad . bl . vertices . z = 0 ; <nl> + quad . br . vertices . z = 0 ; <nl> + for ( int i = 0 , n = skeleton - > slotCount ; i < n ; i + + ) { <nl> + Slot * slot = skeleton - > slots [ i ] ; <nl> + if ( ! slot - > attachment | | slot - > attachment - > type ! 
= ATTACHMENT_REGION ) continue ; <nl> + RegionAttachment * attachment = ( RegionAttachment * ) slot - > attachment ; <nl> + CCTextureAtlas * regionTextureAtlas = getTextureAtlas ( attachment ) ; <nl> + if ( regionTextureAtlas ! = textureAtlas ) { <nl> + if ( textureAtlas ) { <nl> + textureAtlas - > drawQuads ( ) ; <nl> + textureAtlas - > removeAllQuads ( ) ; <nl> + } <nl> + } <nl> + textureAtlas = regionTextureAtlas ; <nl> + if ( textureAtlas - > getCapacity ( ) = = textureAtlas - > getTotalQuads ( ) & & <nl> + ! textureAtlas - > resizeCapacity ( textureAtlas - > getCapacity ( ) * 2 ) ) return ; <nl> + RegionAttachment_updateQuad ( attachment , slot , & quad , premultipliedAlpha ) ; <nl> + textureAtlas - > updateQuad ( & quad , textureAtlas - > getTotalQuads ( ) ) ; <nl> + } <nl> + if ( textureAtlas ) { <nl> + textureAtlas - > drawQuads ( ) ; <nl> + textureAtlas - > removeAllQuads ( ) ; <nl> + } <nl> + <nl> + if ( debugSlots ) { <nl> + / / Slots . <nl> + ccDrawColor4B ( 0 , 0 , 255 , 255 ) ; <nl> + glLineWidth ( 1 ) ; <nl> + CCPoint points [ 4 ] ; <nl> + ccV3F_C4B_T2F_Quad quad ; <nl> + for ( int i = 0 , n = skeleton - > slotCount ; i < n ; i + + ) { <nl> + Slot * slot = skeleton - > slots [ i ] ; <nl> + if ( ! slot - > attachment | | slot - > attachment - > type ! = ATTACHMENT_REGION ) continue ; <nl> + RegionAttachment * attachment = ( RegionAttachment * ) slot - > attachment ; <nl> + RegionAttachment_updateQuad ( attachment , slot , & quad ) ; <nl> + points [ 0 ] = ccp ( quad . bl . vertices . x , quad . bl . vertices . y ) ; <nl> + points [ 1 ] = ccp ( quad . br . vertices . x , quad . br . vertices . y ) ; <nl> + points [ 2 ] = ccp ( quad . tr . vertices . x , quad . tr . vertices . y ) ; <nl> + points [ 3 ] = ccp ( quad . tl . vertices . x , quad . tl . vertices . y ) ; <nl> + ccDrawPoly ( points , 4 , true ) ; <nl> + } <nl> + } <nl> + if ( debugBones ) { <nl> + / / Bone lengths . <nl> + glLineWidth ( 2 ) ; <nl> + ccDrawColor4B ( 255 , 0 , 0 , 255 ) ; <nl> + for ( int i = 0 , n = skeleton - > boneCount ; i < n ; i + + ) { <nl> + Bone * bone = skeleton - > bones [ i ] ; <nl> + float x = bone - > data - > length * bone - > m00 + bone - > worldX ; <nl> + float y = bone - > data - > length * bone - > m10 + bone - > worldY ; <nl> + ccDrawLine ( ccp ( bone - > worldX , bone - > worldY ) , ccp ( x , y ) ) ; <nl> + } <nl> + / / Bone origins . <nl> + ccPointSize ( 4 ) ; <nl> + ccDrawColor4B ( 0 , 0 , 255 , 255 ) ; / / Root bone is blue . <nl> + for ( int i = 0 , n = skeleton - > boneCount ; i < n ; i + + ) { <nl> + Bone * bone = skeleton - > bones [ i ] ; <nl> + ccDrawPoint ( ccp ( bone - > worldX , bone - > worldY ) ) ; <nl> + if ( i = = 0 ) ccDrawColor4B ( 0 , 255 , 0 , 255 ) ; <nl> + } <nl> + } <nl> + } <nl> + <nl> + CCTextureAtlas * CCSkeleton : : getTextureAtlas ( RegionAttachment * regionAttachment ) const { <nl> + return ( CCTextureAtlas * ) ( ( AtlasRegion * ) regionAttachment - > rendererObject ) - > page - > rendererObject ; <nl> + } <nl> + <nl> + CCRect CCSkeleton : : boundingBox ( ) { <nl> + float minX = FLT_MAX , minY = FLT_MAX , maxX = FLT_MIN , maxY = FLT_MIN ; <nl> + float scaleX = getScaleX ( ) ; <nl> + float scaleY = getScaleY ( ) ; <nl> + float vertices [ 8 ] ; <nl> + for ( int i = 0 ; i < skeleton - > slotCount ; + + i ) { <nl> + Slot * slot = skeleton - > slots [ i ] ; <nl> + if ( ! slot - > attachment | | slot - > attachment - > type ! 
= ATTACHMENT_REGION ) continue ; <nl> + RegionAttachment * attachment = ( RegionAttachment * ) slot - > attachment ; <nl> + RegionAttachment_computeVertices ( attachment , slot - > skeleton - > x , slot - > skeleton - > y , slot - > bone , vertices ) ; <nl> + minX = min ( minX , vertices [ VERTEX_X1 ] * scaleX ) ; <nl> + minY = min ( minY , vertices [ VERTEX_Y1 ] * scaleY ) ; <nl> + maxX = max ( maxX , vertices [ VERTEX_X1 ] * scaleX ) ; <nl> + maxY = max ( maxY , vertices [ VERTEX_Y1 ] * scaleY ) ; <nl> + minX = min ( minX , vertices [ VERTEX_X4 ] * scaleX ) ; <nl> + minY = min ( minY , vertices [ VERTEX_Y4 ] * scaleY ) ; <nl> + maxX = max ( maxX , vertices [ VERTEX_X4 ] * scaleX ) ; <nl> + maxY = max ( maxY , vertices [ VERTEX_Y4 ] * scaleY ) ; <nl> + minX = min ( minX , vertices [ VERTEX_X2 ] * scaleX ) ; <nl> + minY = min ( minY , vertices [ VERTEX_Y2 ] * scaleY ) ; <nl> + maxX = max ( maxX , vertices [ VERTEX_X2 ] * scaleX ) ; <nl> + maxY = max ( maxY , vertices [ VERTEX_Y2 ] * scaleY ) ; <nl> + minX = min ( minX , vertices [ VERTEX_X3 ] * scaleX ) ; <nl> + minY = min ( minY , vertices [ VERTEX_Y3 ] * scaleY ) ; <nl> + maxX = max ( maxX , vertices [ VERTEX_X3 ] * scaleX ) ; <nl> + maxY = max ( maxY , vertices [ VERTEX_Y3 ] * scaleY ) ; <nl> + } <nl> + CCPoint position = getPosition ( ) ; <nl> + return CCRectMake ( position . x + minX , position . y + minY , maxX - minX , maxY - minY ) ; <nl> + } <nl> + <nl> + / / mmm Convenience methods for Skeleton_ * functions . <nl> + <nl> + void CCSkeleton : : updateWorldTransform ( ) { <nl> + Skeleton_updateWorldTransform ( skeleton ) ; <nl> + } <nl> + <nl> + void CCSkeleton : : setToSetupPose ( ) { <nl> + Skeleton_setToSetupPose ( skeleton ) ; <nl> + } <nl> + void CCSkeleton : : setBonesToSetupPose ( ) { <nl> + Skeleton_setBonesToSetupPose ( skeleton ) ; <nl> + } <nl> + void CCSkeleton : : setSlotsToSetupPose ( ) { <nl> + Skeleton_setSlotsToSetupPose ( skeleton ) ; <nl> + } <nl> + <nl> + Bone * CCSkeleton : : findBone ( const char * boneName ) const { <nl> + return Skeleton_findBone ( skeleton , boneName ) ; <nl> + } <nl> + <nl> + Slot * CCSkeleton : : findSlot ( const char * slotName ) const { <nl> + return Skeleton_findSlot ( skeleton , slotName ) ; <nl> + } <nl> + <nl> + bool CCSkeleton : : setSkin ( const char * skinName ) { <nl> + return Skeleton_setSkinByName ( skeleton , skinName ) ? true : false ; <nl> + } <nl> + <nl> + Attachment * CCSkeleton : : getAttachment ( const char * slotName , const char * attachmentName ) const { <nl> + return Skeleton_getAttachmentForSlotName ( skeleton , slotName , attachmentName ) ; <nl> + } <nl> + bool CCSkeleton : : setAttachment ( const char * slotName , const char * attachmentName ) { <nl> + return Skeleton_setAttachment ( skeleton , slotName , attachmentName ) ? true : false ; <nl> + } <nl> + <nl> + / / mmm CCBlendProtocol <nl> + <nl> + ccBlendFunc CCSkeleton : : getBlendFunc ( ) { <nl> + return blendFunc ; <nl> + } <nl> + <nl> + void CCSkeleton : : setBlendFunc ( ccBlendFunc blendFunc ) { <nl> + this - > blendFunc = blendFunc ; <nl> + } <nl> + <nl> + void CCSkeleton : : setOpacityModifyRGB ( bool value ) { <nl> + premultipliedAlpha = value ; <nl> + } <nl> + <nl> + bool CCSkeleton : : isOpacityModifyRGB ( ) { <nl> + return premultipliedAlpha ; <nl> + } <nl> + <nl> + } } / / namespace cocos2d { namespace extension { <nl> new file mode 100644 <nl> index 000000000000 . . 12bc66664fcb <nl> mmm / dev / null <nl> ppp b / extensions / spine / CCSkeleton . 
h <nl> <nl> + / * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * <nl> + * Copyright ( c ) 2013 , Esoteric Software <nl> + * All rights reserved . <nl> + * <nl> + * Redistribution and use in source and binary forms , with or without <nl> + * modification , are permitted provided that the following conditions are met : <nl> + * <nl> + * 1 . Redistributions of source code must retain the above copyright notice , this <nl> + * list of conditions and the following disclaimer . <nl> + * 2 . Redistributions in binary form must reproduce the above copyright notice , <nl> + * this list of conditions and the following disclaimer in the documentation <nl> + * and / or other materials provided with the distribution . <nl> + * <nl> + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS " AS IS " AND <nl> + * ANY EXPRESS OR IMPLIED WARRANTIES , INCLUDING , BUT NOT LIMITED TO , THE IMPLIED <nl> + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE <nl> + * DISCLAIMED . IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR <nl> + * ANY DIRECT , INDIRECT , INCIDENTAL , SPECIAL , EXEMPLARY , OR CONSEQUENTIAL DAMAGES <nl> + * ( INCLUDING , BUT NOT LIMITED TO , PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES ; <nl> + * LOSS OF USE , DATA , OR PROFITS ; OR BUSINESS INTERRUPTION ) HOWEVER CAUSED AND <nl> + * ON ANY THEORY OF LIABILITY , WHETHER IN CONTRACT , STRICT LIABILITY , OR TORT <nl> + * ( INCLUDING NEGLIGENCE OR OTHERWISE ) ARISING IN ANY WAY OUT OF THE USE OF THIS <nl> + * SOFTWARE , EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE . <nl> + * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * / <nl> + <nl> + # ifndef SPINE_CCSKELETON_H_ <nl> + # define SPINE_CCSKELETON_H_ <nl> + <nl> + # include < spine / spine . h > <nl> + # include " cocos2d . h " <nl> + <nl> + namespace cocos2d { namespace extension { <nl> + <nl> + / * * <nl> + Draws a skeleton . <nl> + * / <nl> + class CCSkeleton : public cocos2d : : CCNodeRGBA , public cocos2d : : CCBlendProtocol { <nl> + public : <nl> + Skeleton * skeleton ; <nl> + Bone * rootBone ; <nl> + float timeScale ; <nl> + bool debugSlots ; <nl> + bool debugBones ; <nl> + bool premultipliedAlpha ; <nl> + <nl> + static CCSkeleton * createWithData ( SkeletonData * skeletonData , bool ownsSkeletonData = false ) ; <nl> + static CCSkeleton * createWithFile ( const char * skeletonDataFile , Atlas * atlas , float scale = 1 ) ; <nl> + static CCSkeleton * createWithFile ( const char * skeletonDataFile , const char * atlasFile , float scale = 1 ) ; <nl> + <nl> + CCSkeleton ( SkeletonData * skeletonData , bool ownsSkeletonData = false ) ; <nl> + CCSkeleton ( const char * skeletonDataFile , Atlas * atlas , float scale = 1 ) ; <nl> + CCSkeleton ( const char * skeletonDataFile , const char * atlasFile , float scale = 1 ) ; <nl> + <nl> + virtual ~ CCSkeleton ( ) ; <nl> + <nl> + virtual void update ( float deltaTime ) ; <nl> + virtual void draw ( ) ; <nl> + virtual cocos2d : : CCRect boundingBox ( ) ; <nl> + <nl> + / / mmm Convenience methods for common Skeleton_ * functions . <nl> + void updateWorldTransform ( ) ; <nl> + <nl> + void setToSetupPose ( ) ; <nl> + void setBonesToSetupPose ( ) ; <nl> + void setSlotsToSetupPose ( ) ; <nl> + <nl> + / * Returns 0 if the bone was not found . 
* / <nl> + Bone * findBone ( const char * boneName ) const ; <nl> + / * Returns 0 if the slot was not found . * / <nl> + Slot * findSlot ( const char * slotName ) const ; <nl> + <nl> + / * Sets the skin used to look up attachments not found in the SkeletonData defaultSkin . Attachments from the new skin are <nl> + * attached if the corresponding attachment from the old skin was attached . Returns false if the skin was not found . <nl> + * @ param skin May be 0 . * / <nl> + bool setSkin ( const char * skinName ) ; <nl> + <nl> + / * Returns 0 if the slot or attachment was not found . * / <nl> + Attachment * getAttachment ( const char * slotName , const char * attachmentName ) const ; <nl> + / * Returns false if the slot or attachment was not found . * / <nl> + bool setAttachment ( const char * slotName , const char * attachmentName ) ; <nl> + <nl> + / / mmm CCBlendProtocol <nl> + CC_PROPERTY ( cocos2d : : ccBlendFunc , blendFunc , BlendFunc ) ; <nl> + virtual void setOpacityModifyRGB ( bool value ) ; <nl> + virtual bool isOpacityModifyRGB ( ) ; <nl> + <nl> + protected : <nl> + CCSkeleton ( ) ; <nl> + void setSkeletonData ( SkeletonData * skeletonData , bool ownsSkeletonData ) ; <nl> + cocos2d : : CCTextureAtlas * getTextureAtlas ( RegionAttachment * regionAttachment ) const ; <nl> + <nl> + private : <nl> + bool ownsSkeletonData ; <nl> + Atlas * atlas ; <nl> + void initialize ( ) ; <nl> + } ; <nl> + <nl> + } } / / namespace cocos2d { namespace extension { <nl> + <nl> + # endif / * SPINE_CCSKELETON_H_ * / <nl> new file mode 100644 <nl> index 000000000000 . . 28f87e0f7489 <nl> mmm / dev / null <nl> ppp b / extensions / spine / CCSkeletonAnimation . cpp <nl> <nl> + / * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * <nl> + * Copyright ( c ) 2013 , Esoteric Software <nl> + * All rights reserved . <nl> + * <nl> + * Redistribution and use in source and binary forms , with or without <nl> + * modification , are permitted provided that the following conditions are met : <nl> + * <nl> + * 1 . Redistributions of source code must retain the above copyright notice , this <nl> + * list of conditions and the following disclaimer . <nl> + * 2 . Redistributions in binary form must reproduce the above copyright notice , <nl> + * this list of conditions and the following disclaimer in the documentation <nl> + * and / or other materials provided with the distribution . <nl> + * <nl> + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS " AS IS " AND <nl> + * ANY EXPRESS OR IMPLIED WARRANTIES , INCLUDING , BUT NOT LIMITED TO , THE IMPLIED <nl> + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE <nl> + * DISCLAIMED . IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR <nl> + * ANY DIRECT , INDIRECT , INCIDENTAL , SPECIAL , EXEMPLARY , OR CONSEQUENTIAL DAMAGES <nl> + * ( INCLUDING , BUT NOT LIMITED TO , PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES ; <nl> + * LOSS OF USE , DATA , OR PROFITS ; OR BUSINESS INTERRUPTION ) HOWEVER CAUSED AND <nl> + * ON ANY THEORY OF LIABILITY , WHETHER IN CONTRACT , STRICT LIABILITY , OR TORT <nl> + * ( INCLUDING NEGLIGENCE OR OTHERWISE ) ARISING IN ANY WAY OUT OF THE USE OF THIS <nl> + * SOFTWARE , EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE . 
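[Editor's aside: illustrative addition, not part of the commit diff.] The CCSkeleton header introduced above declares this commit's public surface: file-based factories, skin and attachment lookup, setup-pose helpers, and the debugSlots/debugBones flags. A minimal usage sketch follows; the asset names "spineboy.json" and "spineboy.atlas" are hypothetical, and the null check assumes createWithFile returns 0 on a failed load, as the removed implementation later in this diff does.

    #include <spine/CCSkeleton.h>

    USING_NS_CC;
    using namespace cocos2d::extension;

    void addSkeletonNode(CCNode* parent) {
        /* Load the skeleton JSON and its texture atlas; scale 0.5f halves the
           authored pixel size of every bone and attachment. */
        CCSkeleton* node = CCSkeleton::createWithFile("spineboy.json", "spineboy.atlas", 0.5f);
        if (!node) return;               /* assumed: 0 when loading fails */
        node->debugBones = true;         /* draw bone lengths and origins */
        node->setSkin("default");        /* returns false if the skin is missing */
        node->updateWorldTransform();    /* recompute bone world matrices */
        parent->addChild(node);
    }

[End of aside; the diff resumes below.]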
<nl> + * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * / <nl> + <nl> + # include < spine / CCSkeletonAnimation . h > <nl> + # include < spine / extension . h > <nl> + # include < spine / spine - cocos2dx . h > <nl> + <nl> + USING_NS_CC ; <nl> + using std : : min ; <nl> + using std : : max ; <nl> + using std : : vector ; <nl> + <nl> + namespace cocos2d { namespace extension { <nl> + <nl> + CCSkeletonAnimation * CCSkeletonAnimation : : createWithData ( SkeletonData * skeletonData ) { <nl> + CCSkeletonAnimation * node = new CCSkeletonAnimation ( skeletonData ) ; <nl> + node - > autorelease ( ) ; <nl> + return node ; <nl> + } <nl> + <nl> + CCSkeletonAnimation * CCSkeletonAnimation : : createWithFile ( const char * skeletonDataFile , Atlas * atlas , float scale ) { <nl> + CCSkeletonAnimation * node = new CCSkeletonAnimation ( skeletonDataFile , atlas , scale ) ; <nl> + node - > autorelease ( ) ; <nl> + return node ; <nl> + } <nl> + <nl> + CCSkeletonAnimation * CCSkeletonAnimation : : createWithFile ( const char * skeletonDataFile , const char * atlasFile , float scale ) { <nl> + CCSkeletonAnimation * node = new CCSkeletonAnimation ( skeletonDataFile , atlasFile , scale ) ; <nl> + node - > autorelease ( ) ; <nl> + return node ; <nl> + } <nl> + <nl> + CCSkeletonAnimation : : CCSkeletonAnimation ( SkeletonData * skeletonData ) <nl> + : CCSkeleton ( skeletonData ) { <nl> + addAnimationState ( ) ; <nl> + } <nl> + <nl> + CCSkeletonAnimation : : CCSkeletonAnimation ( const char * skeletonDataFile , Atlas * atlas , float scale ) <nl> + : CCSkeleton ( skeletonDataFile , atlas , scale ) { <nl> + addAnimationState ( ) ; <nl> + } <nl> + <nl> + CCSkeletonAnimation : : CCSkeletonAnimation ( const char * skeletonDataFile , const char * atlasFile , float scale ) <nl> + : CCSkeleton ( skeletonDataFile , atlasFile , scale ) { <nl> + addAnimationState ( ) ; <nl> + } <nl> + <nl> + CCSkeletonAnimation : : ~ CCSkeletonAnimation ( ) { <nl> + for ( std : : vector < AnimationStateData * > : : iterator iter = stateDatas . begin ( ) ; iter ! = stateDatas . end ( ) ; + + iter ) <nl> + AnimationStateData_dispose ( * iter ) ; <nl> + <nl> + for ( std : : vector < AnimationState * > : : iterator iter = states . begin ( ) ; iter ! = states . end ( ) ; + + iter ) <nl> + AnimationState_dispose ( * iter ) ; <nl> + } <nl> + <nl> + void CCSkeletonAnimation : : update ( float deltaTime ) { <nl> + super : : update ( deltaTime ) ; <nl> + <nl> + deltaTime * = timeScale ; <nl> + for ( std : : vector < AnimationState * > : : iterator iter = states . begin ( ) ; iter ! = states . end ( ) ; + + iter ) { <nl> + AnimationState_update ( * iter , deltaTime ) ; <nl> + AnimationState_apply ( * iter , skeleton ) ; <nl> + } <nl> + Skeleton_updateWorldTransform ( skeleton ) ; <nl> + } <nl> + <nl> + void CCSkeletonAnimation : : addAnimationState ( AnimationStateData * stateData ) { <nl> + if ( ! stateData ) { <nl> + stateData = AnimationStateData_create ( skeleton - > data ) ; <nl> + stateDatas . push_back ( stateData ) ; <nl> + } <nl> + AnimationState * state = AnimationState_create ( stateData ) ; <nl> + states . push_back ( state ) ; <nl> + } <nl> + <nl> + void CCSkeletonAnimation : : setAnimationStateData ( AnimationStateData * stateData , int stateIndex ) { <nl> + CCAssert ( stateIndex > = 0 & & stateIndex < ( int ) states . size ( ) , " stateIndex out of range . " ) ; <nl> + CCAssert ( stateData , " stateData cannot be null . 
" ) ; <nl> + <nl> + AnimationState * state = states [ stateIndex ] ; <nl> + for ( std : : vector < AnimationStateData * > : : iterator iter = stateDatas . begin ( ) ; iter ! = stateDatas . end ( ) ; + + iter ) { <nl> + if ( state - > data = = * iter ) { <nl> + AnimationStateData_dispose ( state - > data ) ; <nl> + stateDatas . erase ( iter ) ; <nl> + break ; <nl> + } <nl> + } <nl> + for ( std : : vector < AnimationState * > : : iterator iter = states . begin ( ) ; iter ! = states . end ( ) ; + + iter ) { <nl> + if ( state = = * iter ) { <nl> + states . erase ( iter ) ; <nl> + break ; <nl> + } <nl> + } <nl> + AnimationState_dispose ( state ) ; <nl> + <nl> + state = AnimationState_create ( stateData ) ; <nl> + states [ stateIndex ] = state ; <nl> + } <nl> + <nl> + void CCSkeletonAnimation : : setMix ( const char * fromAnimation , const char * toAnimation , float duration , int stateIndex ) { <nl> + CCAssert ( stateIndex > = 0 & & stateIndex < ( int ) states . size ( ) , " stateIndex out of range . " ) ; <nl> + AnimationStateData_setMixByName ( states [ stateIndex ] - > data , fromAnimation , toAnimation , duration ) ; <nl> + } <nl> + <nl> + void CCSkeletonAnimation : : setAnimation ( const char * name , bool loop , int stateIndex ) { <nl> + CCAssert ( stateIndex > = 0 & & stateIndex < ( int ) states . size ( ) , " stateIndex out of range . " ) ; <nl> + AnimationState_setAnimationByName ( states [ stateIndex ] , name , loop ) ; <nl> + } <nl> + <nl> + void CCSkeletonAnimation : : addAnimation ( const char * name , bool loop , float delay , int stateIndex ) { <nl> + CCAssert ( stateIndex > = 0 & & stateIndex < ( int ) states . size ( ) , " stateIndex out of range . " ) ; <nl> + AnimationState_addAnimationByName ( states [ stateIndex ] , name , loop , delay ) ; <nl> + } <nl> + <nl> + void CCSkeletonAnimation : : clearAnimation ( int stateIndex ) { <nl> + CCAssert ( stateIndex > = 0 & & stateIndex < ( int ) states . size ( ) , " stateIndex out of range . " ) ; <nl> + AnimationState_clearAnimation ( states [ stateIndex ] ) ; <nl> + } <nl> + <nl> + } } / / namespace cocos2d { namespace extension { <nl> \ No newline at end of file <nl> new file mode 100644 <nl> index 000000000000 . . e38ef9c19e1a <nl> mmm / dev / null <nl> ppp b / extensions / spine / CCSkeletonAnimation . h <nl> <nl> + / * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * <nl> + * Copyright ( c ) 2013 , Esoteric Software <nl> + * All rights reserved . <nl> + * <nl> + * Redistribution and use in source and binary forms , with or without <nl> + * modification , are permitted provided that the following conditions are met : <nl> + * <nl> + * 1 . Redistributions of source code must retain the above copyright notice , this <nl> + * list of conditions and the following disclaimer . <nl> + * 2 . Redistributions in binary form must reproduce the above copyright notice , <nl> + * this list of conditions and the following disclaimer in the documentation <nl> + * and / or other materials provided with the distribution . <nl> + * <nl> + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS " AS IS " AND <nl> + * ANY EXPRESS OR IMPLIED WARRANTIES , INCLUDING , BUT NOT LIMITED TO , THE IMPLIED <nl> + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE <nl> + * DISCLAIMED . 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR <nl> + * ANY DIRECT , INDIRECT , INCIDENTAL , SPECIAL , EXEMPLARY , OR CONSEQUENTIAL DAMAGES <nl> + * ( INCLUDING , BUT NOT LIMITED TO , PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES ; <nl> + * LOSS OF USE , DATA , OR PROFITS ; OR BUSINESS INTERRUPTION ) HOWEVER CAUSED AND <nl> + * ON ANY THEORY OF LIABILITY , WHETHER IN CONTRACT , STRICT LIABILITY , OR TORT <nl> + * ( INCLUDING NEGLIGENCE OR OTHERWISE ) ARISING IN ANY WAY OUT OF THE USE OF THIS <nl> + * SOFTWARE , EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE . <nl> + * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * / <nl> + <nl> + # ifndef SPINE_CCSKELETONANIMATION_H_ <nl> + # define SPINE_CCSKELETONANIMATION_H_ <nl> + <nl> + # include < spine / spine . h > <nl> + # include < spine / CCSkeleton . h > <nl> + # include " cocos2d . h " <nl> + <nl> + namespace cocos2d { namespace extension { <nl> + <nl> + / * * <nl> + Draws an animated skeleton , providing a simple API for applying one or more animations and queuing animations to be played later . <nl> + * / <nl> + class CCSkeletonAnimation : public CCSkeleton { <nl> + public : <nl> + std : : vector < AnimationState * > states ; <nl> + <nl> + static CCSkeletonAnimation * createWithData ( SkeletonData * skeletonData ) ; <nl> + static CCSkeletonAnimation * createWithFile ( const char * skeletonDataFile , Atlas * atlas , float scale = 1 ) ; <nl> + static CCSkeletonAnimation * createWithFile ( const char * skeletonDataFile , const char * atlasFile , float scale = 1 ) ; <nl> + <nl> + CCSkeletonAnimation ( SkeletonData * skeletonData ) ; <nl> + CCSkeletonAnimation ( const char * skeletonDataFile , Atlas * atlas , float scale = 1 ) ; <nl> + CCSkeletonAnimation ( const char * skeletonDataFile , const char * atlasFile , float scale = 1 ) ; <nl> + <nl> + virtual ~ CCSkeletonAnimation ( ) ; <nl> + <nl> + virtual void update ( float deltaTime ) ; <nl> + <nl> + void addAnimationState ( AnimationStateData * stateData = 0 ) ; <nl> + void setAnimationStateData ( AnimationStateData * stateData , int stateIndex = 0 ) ; <nl> + void setMix ( const char * fromAnimation , const char * toAnimation , float duration , int stateIndex = 0 ) ; <nl> + void setAnimation ( const char * name , bool loop , int stateIndex = 0 ) ; <nl> + void addAnimation ( const char * name , bool loop , float delay = 0 , int stateIndex = 0 ) ; <nl> + void clearAnimation ( int stateIndex = 0 ) ; <nl> + <nl> + protected : <nl> + CCSkeletonAnimation ( ) ; <nl> + <nl> + private : <nl> + typedef CCSkeleton super ; <nl> + std : : vector < AnimationStateData * > stateDatas ; <nl> + <nl> + void initialize ( ) ; <nl> + } ; <nl> + <nl> + } } / / namespace cocos2d { namespace extension { <nl> + <nl> + # endif / * SPINE_CCSKELETONANIMATION_H_ * / <nl> mmm a / extensions / spine / Json . cpp <nl> ppp b / extensions / spine / Json . cpp <nl> static const char * parse_number ( Json * item , const char * num ) { <nl> subscale = ( subscale * 10 ) + ( * num + + - ' 0 ' ) ; / * Number ? * / <nl> } <nl> <nl> - n = sign * n * pow ( 10 . 0f , ( scale + subscale * signsubscale ) ) ; / * number = + / - number . fraction * 10 ^ + / - exponent * / <nl> + n = sign * n * ( float ) pow ( 10 . 0f , ( scale + subscale * signsubscale ) ) ; / * number = + / - number . 
fraction * 10 ^ + / - exponent * / <nl> <nl> item - > valuefloat = n ; <nl> item - > valueint = ( int ) n ; <nl> int Json_getInt ( Json * value , const char * name , int defaultValue ) { <nl> return value ? ( int ) value - > valuefloat : defaultValue ; <nl> } <nl> <nl> - } } / / namespace cocos2d { namespace extension { <nl> + } } / / namespace cocos2d { namespace extension { <nl> \ No newline at end of file <nl> mmm a / extensions / spine / RegionAttachment . cpp <nl> ppp b / extensions / spine / RegionAttachment . cpp <nl> RegionAttachment * RegionAttachment_create ( const char * name ) { <nl> return self ; <nl> } <nl> <nl> - void RegionAttachment_updateOffset ( RegionAttachment * self ) { <nl> - float localX2 = self - > width / 2 ; <nl> - float localY2 = self - > height / 2 ; <nl> - float localX = - localX2 ; <nl> - float localY = - localY2 ; <nl> - if ( self - > region - > rotate ) { <nl> - localX + = self - > region - > offsetX / self - > region - > originalWidth * self - > height ; <nl> - localY + = self - > region - > offsetY / self - > region - > originalHeight * self - > width ; <nl> - localX2 - = ( self - > region - > originalWidth - self - > region - > offsetX - self - > region - > height ) / self - > region - > originalWidth * self - > width ; <nl> - localY2 - = ( self - > region - > originalHeight - self - > region - > offsetY - self - > region - > width ) / self - > region - > originalHeight * self - > height ; <nl> + void RegionAttachment_setUVs ( RegionAttachment * self , float u , float v , float u2 , float v2 , int / * bool * / rotate ) { <nl> + if ( rotate ) { <nl> + self - > uvs [ VERTEX_X2 ] = u ; <nl> + self - > uvs [ VERTEX_Y2 ] = v2 ; <nl> + self - > uvs [ VERTEX_X3 ] = u ; <nl> + self - > uvs [ VERTEX_Y3 ] = v ; <nl> + self - > uvs [ VERTEX_X4 ] = u2 ; <nl> + self - > uvs [ VERTEX_Y4 ] = v ; <nl> + self - > uvs [ VERTEX_X1 ] = u2 ; <nl> + self - > uvs [ VERTEX_Y1 ] = v2 ; <nl> } else { <nl> - localX + = self - > region - > offsetX / self - > region - > originalWidth * self - > width ; <nl> - localY + = self - > region - > offsetY / self - > region - > originalHeight * self - > height ; <nl> - localX2 - = ( self - > region - > originalWidth - self - > region - > offsetX - self - > region - > width ) / self - > region - > originalWidth * self - > width ; <nl> - localY2 - = ( self - > region - > originalHeight - self - > region - > offsetY - self - > region - > height ) / self - > region - > originalHeight * self - > height ; <nl> + self - > uvs [ VERTEX_X1 ] = u ; <nl> + self - > uvs [ VERTEX_Y1 ] = v2 ; <nl> + self - > uvs [ VERTEX_X2 ] = u ; <nl> + self - > uvs [ VERTEX_Y2 ] = v ; <nl> + self - > uvs [ VERTEX_X3 ] = u2 ; <nl> + self - > uvs [ VERTEX_Y3 ] = v ; <nl> + self - > uvs [ VERTEX_X4 ] = u2 ; <nl> + self - > uvs [ VERTEX_Y4 ] = v2 ; <nl> } <nl> - localX * = self - > scaleX ; <nl> - localY * = self - > scaleY ; <nl> - localX2 * = self - > scaleX ; <nl> - localY2 * = self - > scaleY ; <nl> + } <nl> + <nl> + void RegionAttachment_updateOffset ( RegionAttachment * self ) { <nl> + float regionScaleX = self - > width / self - > regionOriginalWidth * self - > scaleX ; <nl> + float regionScaleY = self - > height / self - > regionOriginalHeight * self - > scaleY ; <nl> + float localX = - self - > width / 2 * self - > scaleX + self - > regionOffsetX * regionScaleX ; <nl> + float localY = - self - > height / 2 * self - > scaleY + self - > regionOffsetY * regionScaleY ; <nl> + float localX2 = localX + self - > regionWidth * regionScaleX ; <nl> + float localY2 = localY 
+ self - > regionHeight * regionScaleY ; <nl> float radians = ( float ) ( self - > rotation * 3 . 1415926535897932385 / 180 ) ; <nl> + # ifdef __STDC_VERSION__ <nl> float cosine = cosf ( radians ) ; <nl> float sine = sinf ( radians ) ; <nl> + # else <nl> + float cosine = ( float ) cos ( radians ) ; <nl> + float sine = ( float ) sin ( radians ) ; <nl> + # endif <nl> float localXCos = localX * cosine + self - > x ; <nl> float localXSin = localX * sine ; <nl> float localYCos = localY * cosine + self - > y ; <nl> void RegionAttachment_updateOffset ( RegionAttachment * self ) { <nl> self - > offset [ VERTEX_Y4 ] = localYCos + localX2Sin ; <nl> } <nl> <nl> - void RegionAttachment_updateVertices ( RegionAttachment * self , Slot * slot ) { <nl> + void RegionAttachment_computeVertices ( RegionAttachment * self , float x , float y , Bone * bone , float * vertices ) { <nl> float * offset = self - > offset ; <nl> - Bone * bone = slot - > bone ; <nl> - self - > vertices [ VERTEX_X1 ] = offset [ VERTEX_X1 ] * bone - > m00 + offset [ VERTEX_Y1 ] * bone - > m01 + bone - > worldX ; <nl> - self - > vertices [ VERTEX_Y1 ] = offset [ VERTEX_X1 ] * bone - > m10 + offset [ VERTEX_Y1 ] * bone - > m11 + bone - > worldY ; <nl> - self - > vertices [ VERTEX_X2 ] = offset [ VERTEX_X2 ] * bone - > m00 + offset [ VERTEX_Y2 ] * bone - > m01 + bone - > worldX ; <nl> - self - > vertices [ VERTEX_Y2 ] = offset [ VERTEX_X2 ] * bone - > m10 + offset [ VERTEX_Y2 ] * bone - > m11 + bone - > worldY ; <nl> - self - > vertices [ VERTEX_X3 ] = offset [ VERTEX_X3 ] * bone - > m00 + offset [ VERTEX_Y3 ] * bone - > m01 + bone - > worldX ; <nl> - self - > vertices [ VERTEX_Y3 ] = offset [ VERTEX_X3 ] * bone - > m10 + offset [ VERTEX_Y3 ] * bone - > m11 + bone - > worldY ; <nl> - self - > vertices [ VERTEX_X4 ] = offset [ VERTEX_X4 ] * bone - > m00 + offset [ VERTEX_Y4 ] * bone - > m01 + bone - > worldX ; <nl> - self - > vertices [ VERTEX_Y4 ] = offset [ VERTEX_X4 ] * bone - > m10 + offset [ VERTEX_Y4 ] * bone - > m11 + bone - > worldY ; <nl> + x + = bone - > worldX ; <nl> + y + = bone - > worldY ; <nl> + vertices [ VERTEX_X1 ] = offset [ VERTEX_X1 ] * bone - > m00 + offset [ VERTEX_Y1 ] * bone - > m01 + x ; <nl> + vertices [ VERTEX_Y1 ] = offset [ VERTEX_X1 ] * bone - > m10 + offset [ VERTEX_Y1 ] * bone - > m11 + y ; <nl> + vertices [ VERTEX_X2 ] = offset [ VERTEX_X2 ] * bone - > m00 + offset [ VERTEX_Y2 ] * bone - > m01 + x ; <nl> + vertices [ VERTEX_Y2 ] = offset [ VERTEX_X2 ] * bone - > m10 + offset [ VERTEX_Y2 ] * bone - > m11 + y ; <nl> + vertices [ VERTEX_X3 ] = offset [ VERTEX_X3 ] * bone - > m00 + offset [ VERTEX_Y3 ] * bone - > m01 + x ; <nl> + vertices [ VERTEX_Y3 ] = offset [ VERTEX_X3 ] * bone - > m10 + offset [ VERTEX_Y3 ] * bone - > m11 + y ; <nl> + vertices [ VERTEX_X4 ] = offset [ VERTEX_X4 ] * bone - > m00 + offset [ VERTEX_Y4 ] * bone - > m01 + x ; <nl> + vertices [ VERTEX_Y4 ] = offset [ VERTEX_X4 ] * bone - > m10 + offset [ VERTEX_Y4 ] * bone - > m11 + y ; <nl> } <nl> <nl> - } } / / namespace cocos2d { namespace extension { <nl> + } } / / namespace cocos2d { namespace extension { <nl> \ No newline at end of file <nl> mmm a / extensions / spine / RegionAttachment . h <nl> ppp b / extensions / spine / RegionAttachment . 
h <nl> typedef struct RegionAttachment RegionAttachment ; <nl> struct RegionAttachment { <nl> Attachment super ; <nl> float x , y , scaleX , scaleY , rotation , width , height ; <nl> - AtlasRegion * region ; <nl> + <nl> + void * rendererObject ; <nl> + int regionOffsetX , regionOffsetY ; / * Pixels stripped from the bottom left , unrotated . * / <nl> + int regionWidth , regionHeight ; / * Unrotated , stripped pixel size . * / <nl> + int regionOriginalWidth , regionOriginalHeight ; / * Unrotated , unstripped pixel size . * / <nl> + <nl> float offset [ 8 ] ; <nl> - float vertices [ 8 ] ; <nl> float uvs [ 8 ] ; <nl> } ; <nl> <nl> RegionAttachment * RegionAttachment_create ( const char * name ) ; <nl> - <nl> + void RegionAttachment_setUVs ( RegionAttachment * self , float u , float v , float u2 , float v2 , int / * bool * / rotate ) ; <nl> void RegionAttachment_updateOffset ( RegionAttachment * self ) ; <nl> - void RegionAttachment_updateVertices ( RegionAttachment * self , Slot * slot ) ; <nl> + void RegionAttachment_computeVertices ( RegionAttachment * self , float x , float y , Bone * bone , float * vertices ) ; <nl> <nl> } } / / namespace cocos2d { namespace extension { <nl> <nl> mmm a / extensions / spine / Skeleton . cpp <nl> ppp b / extensions / spine / Skeleton . cpp <nl> <nl> namespace cocos2d { namespace extension { <nl> <nl> Skeleton * Skeleton_create ( SkeletonData * data ) { <nl> + int i , ii ; <nl> + <nl> Skeleton * self = NEW ( Skeleton ) ; <nl> CONST_CAST ( SkeletonData * , self - > data ) = data ; <nl> <nl> self - > boneCount = self - > data - > boneCount ; <nl> self - > bones = MALLOC ( Bone * , self - > boneCount ) ; <nl> - int i , ii ; <nl> + <nl> for ( i = 0 ; i < self - > boneCount ; + + i ) { <nl> BoneData * boneData = self - > data - > bones [ i ] ; <nl> Bone * parent = 0 ; <nl> Skeleton * Skeleton_create ( SkeletonData * data ) { <nl> SlotData * slotData = data - > slots [ i ] ; <nl> <nl> / * Find bone for the slotData ' s boneData . 
* / <nl> - Bone * bone = NULL ; <nl> + Bone * bone = 0 ; <nl> for ( ii = 0 ; ii < self - > boneCount ; + + ii ) { <nl> if ( data - > bones [ ii ] = = slotData - > boneData ) { <nl> bone = self - > bones [ ii ] ; <nl> break ; <nl> } <nl> } <nl> - <nl> self - > slots [ i ] = Slot_create ( slotData , self , bone ) ; <nl> } <nl> <nl> void Skeleton_dispose ( Skeleton * self ) { <nl> FREE ( self - > slots ) ; <nl> <nl> FREE ( self - > drawOrder ) ; <nl> + FREE ( self ) ; <nl> } <nl> <nl> void Skeleton_updateWorldTransform ( const Skeleton * self ) { <nl> void Skeleton_updateWorldTransform ( const Skeleton * self ) { <nl> Bone_updateWorldTransform ( self - > bones [ i ] , self - > flipX , self - > flipY ) ; <nl> } <nl> <nl> - void Skeleton_setToBindPose ( const Skeleton * self ) { <nl> - Skeleton_setBonesToBindPose ( self ) ; <nl> - Skeleton_setSlotsToBindPose ( self ) ; <nl> + void Skeleton_setToSetupPose ( const Skeleton * self ) { <nl> + Skeleton_setBonesToSetupPose ( self ) ; <nl> + Skeleton_setSlotsToSetupPose ( self ) ; <nl> } <nl> <nl> - void Skeleton_setBonesToBindPose ( const Skeleton * self ) { <nl> + void Skeleton_setBonesToSetupPose ( const Skeleton * self ) { <nl> int i ; <nl> for ( i = 0 ; i < self - > boneCount ; + + i ) <nl> - Bone_setToBindPose ( self - > bones [ i ] ) ; <nl> + Bone_setToSetupPose ( self - > bones [ i ] ) ; <nl> } <nl> <nl> - void Skeleton_setSlotsToBindPose ( const Skeleton * self ) { <nl> + void Skeleton_setSlotsToSetupPose ( const Skeleton * self ) { <nl> int i ; <nl> for ( i = 0 ; i < self - > slotCount ; + + i ) <nl> - Slot_setToBindPose ( self - > slots [ i ] ) ; <nl> + Slot_setToSetupPose ( self - > slots [ i ] ) ; <nl> } <nl> <nl> Bone * Skeleton_findBone ( const Skeleton * self , const char * boneName ) { <nl> int Skeleton_findSlotIndex ( const Skeleton * self , const char * slotName ) { <nl> } <nl> <nl> int Skeleton_setSkinByName ( Skeleton * self , const char * skinName ) { <nl> + Skin * skin ; <nl> if ( ! skinName ) { <nl> Skeleton_setSkin ( self , 0 ) ; <nl> return 1 ; <nl> } <nl> - Skin * skin = SkeletonData_findSkin ( self - > data , skinName ) ; <nl> + skin = SkeletonData_findSkin ( self - > data , skinName ) ; <nl> if ( ! skin ) return 0 ; <nl> Skeleton_setSkin ( self , skin ) ; <nl> return 1 ; <nl> void Skeleton_update ( Skeleton * self , float deltaTime ) { <nl> self - > time + = deltaTime ; <nl> } <nl> <nl> - } } / / namespace cocos2d { namespace extension { <nl> + } } / / namespace cocos2d { namespace extension { <nl> \ No newline at end of file <nl> mmm a / extensions / spine / Skeleton . h <nl> ppp b / extensions / spine / Skeleton . h <nl> struct Skeleton { <nl> float r , g , b , a ; <nl> float time ; <nl> int / * bool * / flipX , flipY ; <nl> + float x , y ; <nl> } ; <nl> <nl> Skeleton * Skeleton_create ( SkeletonData * data ) ; <nl> void Skeleton_dispose ( Skeleton * self ) ; <nl> <nl> void Skeleton_updateWorldTransform ( const Skeleton * self ) ; <nl> <nl> - void Skeleton_setToBindPose ( const Skeleton * self ) ; <nl> - void Skeleton_setBonesToBindPose ( const Skeleton * self ) ; <nl> - void Skeleton_setSlotsToBindPose ( const Skeleton * self ) ; <nl> + void Skeleton_setToSetupPose ( const Skeleton * self ) ; <nl> + void Skeleton_setBonesToSetupPose ( const Skeleton * self ) ; <nl> + void Skeleton_setSlotsToSetupPose ( const Skeleton * self ) ; <nl> <nl> / * Returns 0 if the bone was not found . * / <nl> Bone * Skeleton_findBone ( const Skeleton * self , const char * boneName ) ; <nl> mmm a / extensions / spine / SkeletonData . 
cpp <nl> ppp b / extensions / spine / SkeletonData . cpp <nl> Animation * SkeletonData_findAnimation ( const SkeletonData * self , const char * ani <nl> return 0 ; <nl> } <nl> <nl> - } } / / namespace cocos2d { namespace extension { <nl> + } } / / namespace cocos2d { namespace extension { <nl> \ No newline at end of file <nl> mmm a / extensions / spine / SkeletonJson . cpp <nl> ppp b / extensions / spine / SkeletonJson . cpp <nl> void SkeletonJson_dispose ( SkeletonJson * self ) { <nl> } <nl> <nl> void _SkeletonJson_setError ( SkeletonJson * self , Json * root , const char * value1 , const char * value2 ) { <nl> - FREE ( self - > error ) ; <nl> char message [ 256 ] ; <nl> + int length ; <nl> + FREE ( self - > error ) ; <nl> strcpy ( message , value1 ) ; <nl> - int length = strlen ( value1 ) ; <nl> + length = strlen ( value1 ) ; <nl> if ( value2 ) strncat ( message + length , value2 , 256 - length ) ; <nl> MALLOC_STR ( self - > error , message ) ; <nl> if ( root ) Json_dispose ( root ) ; <nl> } <nl> <nl> static float toColor ( const char * value , int index ) { <nl> + char digits [ 3 ] ; <nl> + char * error ; <nl> + int color ; <nl> + <nl> if ( strlen ( value ) ! = 8 ) return - 1 ; <nl> value + = index * 2 ; <nl> - char digits [ 3 ] ; <nl> + <nl> digits [ 0 ] = * value ; <nl> digits [ 1 ] = * ( value + 1 ) ; <nl> digits [ 2 ] = ' \ 0 ' ; <nl> - char * error ; <nl> - int color = strtoul ( digits , & error , 16 ) ; <nl> + color = strtoul ( digits , & error , 16 ) ; <nl> if ( * error ! = 0 ) return - 1 ; <nl> return color / ( float ) 255 ; <nl> } <nl> static void readCurve ( CurveTimeline * timeline , int frameIndex , Json * frame ) { <nl> } <nl> <nl> static Animation * _SkeletonJson_readAnimation ( SkeletonJson * self , Json * root , SkeletonData * skeletonData ) { <nl> + Animation * animation ; <nl> + <nl> Json * bones = Json_getItem ( root , " bones " ) ; <nl> int boneCount = bones ? 
Json_getSize ( bones ) : 0 ; <nl> <nl> static Animation * _SkeletonJson_readAnimation ( SkeletonJson * self , Json * root , S <nl> timelineCount + = Json_getSize ( Json_getItemAt ( bones , i ) ) ; <nl> for ( i = 0 ; i < slotCount ; + + i ) <nl> timelineCount + = Json_getSize ( Json_getItemAt ( slots , i ) ) ; <nl> - Animation * animation = Animation_create ( root - > name , timelineCount ) ; <nl> + animation = Animation_create ( root - > name , timelineCount ) ; <nl> animation - > timelineCount = 0 ; <nl> skeletonData - > animations [ skeletonData - > animationCount ] = animation ; <nl> skeletonData - > animationCount + + ; <nl> <nl> for ( i = 0 ; i < boneCount ; + + i ) { <nl> + int timelineCount ; <nl> Json * boneMap = Json_getItemAt ( bones , i ) ; <nl> <nl> const char * boneName = boneMap - > name ; <nl> static Animation * _SkeletonJson_readAnimation ( SkeletonJson * self , Json * root , S <nl> return 0 ; <nl> } <nl> <nl> - int timelineCount = Json_getSize ( boneMap ) ; <nl> + timelineCount = Json_getSize ( boneMap ) ; <nl> for ( ii = 0 ; ii < timelineCount ; + + ii ) { <nl> + float duration ; <nl> Json * timelineArray = Json_getItemAt ( boneMap , ii ) ; <nl> int frameCount = Json_getSize ( timelineArray ) ; <nl> const char * timelineType = timelineArray - > name ; <nl> <nl> if ( strcmp ( timelineType , " rotate " ) = = 0 ) { <nl> + <nl> RotateTimeline * timeline = RotateTimeline_create ( frameCount ) ; <nl> timeline - > boneIndex = boneIndex ; <nl> for ( iii = 0 ; iii < frameCount ; + + iii ) { <nl> static Animation * _SkeletonJson_readAnimation ( SkeletonJson * self , Json * root , S <nl> readCurve ( SUPER ( timeline ) , iii , frame ) ; <nl> } <nl> animation - > timelines [ animation - > timelineCount + + ] = ( Timeline * ) timeline ; <nl> - float duration = timeline - > frames [ frameCount * 2 - 2 ] ; <nl> + duration = timeline - > frames [ frameCount * 2 - 2 ] ; <nl> if ( duration > animation - > duration ) animation - > duration = duration ; <nl> <nl> } else { <nl> int isScale = strcmp ( timelineType , " scale " ) = = 0 ; <nl> if ( isScale | | strcmp ( timelineType , " translate " ) = = 0 ) { <nl> - TranslateTimeline * timeline = isScale ? ScaleTimeline_create ( frameCount ) : TranslateTimeline_create ( frameCount ) ; <nl> float scale = isScale ? 1 : self - > scale ; <nl> + TranslateTimeline * timeline = isScale ? 
ScaleTimeline_create ( frameCount ) : TranslateTimeline_create ( frameCount ) ; <nl> timeline - > boneIndex = boneIndex ; <nl> for ( iii = 0 ; iii < frameCount ; + + iii ) { <nl> Json * frame = Json_getItemAt ( timelineArray , iii ) ; <nl> static Animation * _SkeletonJson_readAnimation ( SkeletonJson * self , Json * root , S <nl> readCurve ( SUPER ( timeline ) , iii , frame ) ; <nl> } <nl> animation - > timelines [ animation - > timelineCount + + ] = ( Timeline * ) timeline ; <nl> - float duration = timeline - > frames [ frameCount * 3 - 3 ] ; <nl> + duration = timeline - > frames [ frameCount * 3 - 3 ] ; <nl> if ( duration > animation - > duration ) animation - > duration = duration ; <nl> } else { <nl> Animation_dispose ( animation ) ; <nl> static Animation * _SkeletonJson_readAnimation ( SkeletonJson * self , Json * root , S <nl> } <nl> <nl> for ( i = 0 ; i < slotCount ; + + i ) { <nl> + int timelineCount ; <nl> Json * slotMap = Json_getItemAt ( slots , i ) ; <nl> const char * slotName = slotMap - > name ; <nl> <nl> static Animation * _SkeletonJson_readAnimation ( SkeletonJson * self , Json * root , S <nl> return 0 ; <nl> } <nl> <nl> - int timelineCount = Json_getSize ( slotMap ) ; <nl> + timelineCount = Json_getSize ( slotMap ) ; <nl> for ( ii = 0 ; ii < timelineCount ; + + ii ) { <nl> + float duration ; <nl> Json * timelineArray = Json_getItemAt ( slotMap , ii ) ; <nl> int frameCount = Json_getSize ( timelineArray ) ; <nl> const char * timelineType = timelineArray - > name ; <nl> static Animation * _SkeletonJson_readAnimation ( SkeletonJson * self , Json * root , S <nl> readCurve ( SUPER ( timeline ) , iii , frame ) ; <nl> } <nl> animation - > timelines [ animation - > timelineCount + + ] = ( Timeline * ) timeline ; <nl> - float duration = timeline - > frames [ frameCount * 5 - 5 ] ; <nl> + duration = timeline - > frames [ frameCount * 5 - 5 ] ; <nl> if ( duration > animation - > duration ) animation - > duration = duration ; <nl> <nl> } else if ( strcmp ( timelineType , " attachment " ) = = 0 ) { <nl> static Animation * _SkeletonJson_readAnimation ( SkeletonJson * self , Json * root , S <nl> name - > type = = Json_NULL ? 0 : name - > valuestring ) ; <nl> } <nl> animation - > timelines [ animation - > timelineCount + + ] = ( Timeline * ) timeline ; <nl> - float duration = timeline - > frames [ frameCount - 1 ] ; <nl> + duration = timeline - > frames [ frameCount - 1 ] ; <nl> if ( duration > animation - > duration ) animation - > duration = duration ; <nl> <nl> } else { <nl> static Animation * _SkeletonJson_readAnimation ( SkeletonJson * self , Json * root , S <nl> <nl> SkeletonData * SkeletonJson_readSkeletonDataFile ( SkeletonJson * self , const char * path ) { <nl> int length ; <nl> + SkeletonData * skeletonData ; <nl> const char * json = _Util_readFile ( path , & length ) ; <nl> if ( ! 
json ) { <nl> _SkeletonJson_setError ( self , 0 , " Unable to read skeleton file : " , path ) ; <nl> return 0 ; <nl> } <nl> - SkeletonData * skeletonData = SkeletonJson_readSkeletonData ( self , json ) ; <nl> + skeletonData = SkeletonJson_readSkeletonData ( self , json ) ; <nl> FREE ( json ) ; <nl> return skeletonData ; <nl> } <nl> <nl> SkeletonData * SkeletonJson_readSkeletonData ( SkeletonJson * self , const char * json ) { <nl> + SkeletonData * skeletonData ; <nl> + Json * root , * bones ; <nl> + int i , ii , iii , boneCount ; <nl> + Json * slots ; <nl> + Json * skinsMap ; <nl> + Json * animations ; <nl> + <nl> FREE ( self - > error ) ; <nl> CONST_CAST ( char * , self - > error ) = 0 ; <nl> <nl> - Json * root = Json_create ( json ) ; <nl> + root = Json_create ( json ) ; <nl> if ( ! root ) { <nl> _SkeletonJson_setError ( self , 0 , " Invalid skeleton JSON : " , Json_getError ( ) ) ; <nl> return 0 ; <nl> } <nl> <nl> - SkeletonData * skeletonData = SkeletonData_create ( ) ; <nl> - int i , ii , iii ; <nl> + skeletonData = SkeletonData_create ( ) ; <nl> <nl> - Json * bones = Json_getItem ( root , " bones " ) ; <nl> - int boneCount = Json_getSize ( bones ) ; <nl> + bones = Json_getItem ( root , " bones " ) ; <nl> + boneCount = Json_getSize ( bones ) ; <nl> skeletonData - > bones = MALLOC ( BoneData * , boneCount ) ; <nl> for ( i = 0 ; i < boneCount ; + + i ) { <nl> Json * boneMap = Json_getItemAt ( bones , i ) ; <nl> + BoneData * boneData ; <nl> <nl> const char * boneName = Json_getString ( boneMap , " name " , 0 ) ; <nl> <nl> SkeletonData * SkeletonJson_readSkeletonData ( SkeletonJson * self , const char * jso <nl> } <nl> } <nl> <nl> - BoneData * boneData = BoneData_create ( boneName , parent ) ; <nl> + boneData = BoneData_create ( boneName , parent ) ; <nl> boneData - > length = Json_getFloat ( boneMap , " length " , 0 ) * self - > scale ; <nl> boneData - > x = Json_getFloat ( boneMap , " x " , 0 ) * self - > scale ; <nl> boneData - > y = Json_getFloat ( boneMap , " y " , 0 ) * self - > scale ; <nl> SkeletonData * SkeletonJson_readSkeletonData ( SkeletonJson * self , const char * jso <nl> skeletonData - > boneCount + + ; <nl> } <nl> <nl> - Json * slots = Json_getItem ( root , " slots " ) ; <nl> + slots = Json_getItem ( root , " slots " ) ; <nl> if ( slots ) { <nl> int slotCount = Json_getSize ( slots ) ; <nl> skeletonData - > slots = MALLOC ( SlotData * , slotCount ) ; <nl> for ( i = 0 ; i < slotCount ; + + i ) { <nl> + SlotData * slotData ; <nl> + const char * color ; <nl> + Json * attachmentItem ; <nl> Json * slotMap = Json_getItemAt ( slots , i ) ; <nl> <nl> const char * slotName = Json_getString ( slotMap , " name " , 0 ) ; <nl> SkeletonData * SkeletonJson_readSkeletonData ( SkeletonJson * self , const char * jso <nl> return 0 ; <nl> } <nl> <nl> - SlotData * slotData = SlotData_create ( slotName , boneData ) ; <nl> + slotData = SlotData_create ( slotName , boneData ) ; <nl> <nl> - const char * color = Json_getString ( slotMap , " color " , 0 ) ; <nl> + color = Json_getString ( slotMap , " color " , 0 ) ; <nl> if ( color ) { <nl> slotData - > r = toColor ( color , 0 ) ; <nl> slotData - > g = toColor ( color , 1 ) ; <nl> SkeletonData * SkeletonJson_readSkeletonData ( SkeletonJson * self , const char * jso <nl> slotData - > a = toColor ( color , 3 ) ; <nl> } <nl> <nl> - Json * attachmentItem = Json_getItem ( slotMap , " attachment " ) ; <nl> + attachmentItem = Json_getItem ( slotMap , " attachment " ) ; <nl> if ( attachmentItem ) SlotData_setAttachmentName ( slotData , attachmentItem - > 
valuestring ) ; <nl> <nl> skeletonData - > slots [ i ] = slotData ; <nl> SkeletonData * SkeletonJson_readSkeletonData ( SkeletonJson * self , const char * jso <nl> } <nl> } <nl> <nl> - Json * skinsMap = Json_getItem ( root , " skins " ) ; <nl> + skinsMap = Json_getItem ( root , " skins " ) ; <nl> if ( skinsMap ) { <nl> int skinCount = Json_getSize ( skinsMap ) ; <nl> skeletonData - > skins = MALLOC ( Skin * , skinCount ) ; <nl> SkeletonData * SkeletonJson_readSkeletonData ( SkeletonJson * self , const char * jso <nl> Json * slotMap = Json_getItemAt ( skinsMap , i ) ; <nl> const char * skinName = slotMap - > name ; <nl> Skin * skin = Skin_create ( skinName ) ; <nl> + int slotNameCount ; <nl> + <nl> skeletonData - > skins [ i ] = skin ; <nl> skeletonData - > skinCount + + ; <nl> if ( strcmp ( skinName , " default " ) = = 0 ) skeletonData - > defaultSkin = skin ; <nl> <nl> - int slotNameCount = Json_getSize ( slotMap ) ; <nl> + slotNameCount = Json_getSize ( slotMap ) ; <nl> for ( ii = 0 ; ii < slotNameCount ; + + ii ) { <nl> Json * attachmentsMap = Json_getItemAt ( slotMap , ii ) ; <nl> const char * slotName = attachmentsMap - > name ; <nl> SkeletonData * SkeletonJson_readSkeletonData ( SkeletonJson * self , const char * jso <nl> <nl> int attachmentCount = Json_getSize ( attachmentsMap ) ; <nl> for ( iii = 0 ; iii < attachmentCount ; + + iii ) { <nl> + Attachment * attachment ; <nl> Json * attachmentMap = Json_getItemAt ( attachmentsMap , iii ) ; <nl> const char * skinAttachmentName = attachmentMap - > name ; <nl> const char * attachmentName = Json_getString ( attachmentMap , " name " , skinAttachmentName ) ; <nl> SkeletonData * SkeletonJson_readSkeletonData ( SkeletonJson * self , const char * jso <nl> return 0 ; <nl> } <nl> <nl> - Attachment * attachment = AttachmentLoader_newAttachment ( self - > attachmentLoader , skin , type , attachmentName ) ; <nl> + attachment = AttachmentLoader_newAttachment ( self - > attachmentLoader , skin , type , attachmentName ) ; <nl> if ( ! attachment ) { <nl> if ( self - > attachmentLoader - > error1 ) { <nl> SkeletonData_dispose ( skeletonData ) ; <nl> SkeletonData * SkeletonJson_readSkeletonData ( SkeletonJson * self , const char * jso <nl> } <nl> } <nl> <nl> - Json * animations = Json_getItem ( root , " animations " ) ; <nl> + animations = Json_getItem ( root , " animations " ) ; <nl> if ( animations ) { <nl> int animationCount = Json_getSize ( animations ) ; <nl> skeletonData - > animations = MALLOC ( Animation * , animationCount ) ; <nl> SkeletonData * SkeletonJson_readSkeletonData ( SkeletonJson * self , const char * jso <nl> return skeletonData ; <nl> } <nl> <nl> - } } / / namespace cocos2d { namespace extension { <nl> + } } / / namespace cocos2d { namespace extension { <nl> \ No newline at end of file <nl> mmm a / extensions / spine / Skin . cpp <nl> ppp b / extensions / spine / Skin . cpp <nl> <nl> <nl> # include < spine / Skin . h > <nl> # include < spine / extension . h > <nl> - # include < stdio . 
h > <nl> <nl> namespace cocos2d { namespace extension { <nl> <nl> Skin * Skin_create ( const char * name ) { <nl> } <nl> <nl> void Skin_dispose ( Skin * self ) { <nl> - _Entry * entry = SUB_CAST ( _Internal , self ) - > entries ; <nl> + _Entry * entry = SUB_CAST ( _Internal , self ) - > entries ; <nl> while ( entry ) { <nl> - _Entry * nextEtry = entry - > next ; <nl> + _Entry * nextEntry = entry - > next ; <nl> _Entry_dispose ( entry ) ; <nl> - entry = nextEtry ; <nl> + entry = nextEntry ; <nl> } <nl> <nl> FREE ( self - > name ) ; <nl> void Skin_dispose ( Skin * self ) { <nl> <nl> void Skin_addAttachment ( Skin * self , int slotIndex , const char * name , Attachment * attachment ) { <nl> _Entry * newEntry = _Entry_create ( slotIndex , name , attachment ) ; <nl> - newEntry - > next = SUB_CAST ( _Internal , self ) - > entries ; <nl> - SUB_CAST ( _Internal , self ) - > entries = newEntry ; <nl> + newEntry - > next = SUB_CAST ( _Internal , self ) - > entries ; <nl> + SUB_CAST ( _Internal , self ) - > entries = newEntry ; <nl> } <nl> <nl> Attachment * Skin_getAttachment ( const Skin * self , int slotIndex , const char * name ) { <nl> Attachment * Skin_getAttachment ( const Skin * self , int slotIndex , const char * nam <nl> return 0 ; <nl> } <nl> <nl> + const char * Skin_getAttachmentName ( const Skin * self , int slotIndex , int attachmentIndex ) { <nl> + const _Entry * entry = SUB_CAST ( _Internal , self ) - > entries ; <nl> + int i = 0 ; <nl> + while ( entry ) { <nl> + if ( entry - > slotIndex = = slotIndex ) { <nl> + if ( i = = attachmentIndex ) return entry - > name ; <nl> + i + + ; <nl> + } <nl> + entry = entry - > next ; <nl> + } <nl> + return 0 ; <nl> + } <nl> + <nl> void Skin_attachAll ( const Skin * self , Skeleton * skeleton , const Skin * oldSkin ) { <nl> const _Entry * entry = SUB_CAST ( _Internal , oldSkin ) - > entries ; <nl> while ( entry ) { <nl> void Skin_attachAll ( const Skin * self , Skeleton * skeleton , const Skin * oldSkin ) <nl> } <nl> } <nl> <nl> - } } / / namespace cocos2d { namespace extension { <nl> + } } / / namespace cocos2d { namespace extension { <nl> \ No newline at end of file <nl> mmm a / extensions / spine / Skin . h <nl> ppp b / extensions / spine / Skin . h <nl> void Skin_addAttachment ( Skin * self , int slotIndex , const char * name , Attachment <nl> / * Returns 0 if the attachment was not found . * / <nl> Attachment * Skin_getAttachment ( const Skin * self , int slotIndex , const char * name ) ; <nl> <nl> + / * Returns 0 if the slot or attachment was not found . * / <nl> + const char * Skin_getAttachmentName ( const Skin * self , int slotIndex , int attachmentIndex ) ; <nl> + <nl> / * * Attach each attachment in this skin if the corresponding attachment in oldSkin is currently attached . * / <nl> void Skin_attachAll ( const Skin * self , struct Skeleton * skeleton , const Skin * oldSkin ) ; <nl> <nl> mmm a / extensions / spine / Slot . cpp <nl> ppp b / extensions / spine / Slot . 
cpp <nl> Slot * Slot_create ( SlotData * data , Skeleton * skeleton , Bone * bone ) { <nl> CONST_CAST ( SlotData * , self - > data ) = data ; <nl> CONST_CAST ( Skeleton * , self - > skeleton ) = skeleton ; <nl> CONST_CAST ( Bone * , self - > bone ) = bone ; <nl> - Slot_setToBindPose ( self ) ; <nl> + Slot_setToSetupPose ( self ) ; <nl> return self ; <nl> } <nl> <nl> float Slot_getAttachmentTime ( const Slot * self ) { <nl> return self - > skeleton - > time - SUB_CAST ( _Internal , self ) - > attachmentTime ; <nl> } <nl> <nl> - void Slot_setToBindPose ( Slot * self ) { <nl> + void Slot_setToSetupPose ( Slot * self ) { <nl> + Attachment * attachment = 0 ; <nl> self - > r = self - > data - > r ; <nl> self - > g = self - > data - > g ; <nl> self - > b = self - > data - > b ; <nl> self - > a = self - > data - > a ; <nl> <nl> - Attachment * attachment = 0 ; <nl> if ( self - > data - > attachmentName ) { <nl> / * Find slot index . * / <nl> int i ; <nl> void Slot_setToBindPose ( Slot * self ) { <nl> Slot_setAttachment ( self , attachment ) ; <nl> } <nl> <nl> - } } / / namespace cocos2d { namespace extension { <nl> + } } / / namespace cocos2d { namespace extension { <nl> \ No newline at end of file <nl> mmm a / extensions / spine / Slot . h <nl> ppp b / extensions / spine / Slot . h <nl> void Slot_setAttachment ( Slot * self , Attachment * attachment ) ; <nl> void Slot_setAttachmentTime ( Slot * self , float time ) ; <nl> float Slot_getAttachmentTime ( const Slot * self ) ; <nl> <nl> - void Slot_setToBindPose ( Slot * self ) ; <nl> + void Slot_setToSetupPose ( Slot * self ) ; <nl> <nl> } } / / namespace cocos2d { namespace extension { <nl> <nl> mmm a / extensions / spine / SlotData . cpp <nl> ppp b / extensions / spine / SlotData . cpp <nl> void SlotData_setAttachmentName ( SlotData * self , const char * attachmentName ) { <nl> CONST_CAST ( char * , self - > attachmentName ) = 0 ; <nl> } <nl> <nl> - } } / / namespace cocos2d { namespace extension { <nl> + } } / / namespace cocos2d { namespace extension { <nl> \ No newline at end of file <nl> mmm a / extensions / spine / SlotData . h <nl> ppp b / extensions / spine / SlotData . h <nl> typedef struct { <nl> SlotData * SlotData_create ( const char * name , BoneData * boneData ) ; <nl> void SlotData_dispose ( SlotData * self ) ; <nl> <nl> - / * @ param attachmentName May be 0 for no bind pose attachment . * / <nl> + / * @ param attachmentName May be 0 for no setup pose attachment . * / <nl> void SlotData_setAttachmentName ( SlotData * self , const char * attachmentName ) ; <nl> <nl> } } / / namespace cocos2d { namespace extension { <nl> mmm a / extensions / spine / extension . cpp <nl> ppp b / extensions / spine / extension . cpp <nl> void _setFree ( void ( * free ) ( void * ptr ) ) { <nl> } <nl> <nl> char * _readFile ( const char * path , int * length ) { <nl> + char * data ; <nl> FILE * file = fopen ( path , " rb " ) ; <nl> if ( ! file ) return 0 ; <nl> <nl> char * _readFile ( const char * path , int * length ) { <nl> * length = ftell ( file ) ; <nl> fseek ( file , 0 , SEEK_SET ) ; <nl> <nl> - char * data = MALLOC ( char , * length ) ; <nl> - int rtn = fread ( data , 1 , * length , file ) ; <nl> + data = MALLOC ( char , * length ) ; <nl> + fread ( data , 1 , * length , file ) ; <nl> fclose ( file ) ; <nl> - if ( rtn ! 
= * length ) return 0 ; <nl> <nl> return data ; <nl> } <nl> <nl> - } } / / namespace cocos2d { namespace extension { <nl> + } } / / namespace cocos2d { namespace extension { <nl> \ No newline at end of file <nl> mmm a / extensions / spine / extension . h <nl> ppp b / extensions / spine / extension . h <nl> char * _readFile ( const char * path , int * length ) ; <nl> <nl> / * * / <nl> <nl> - void _AttachmentLoader_init ( AttachmentLoader * self , / / <nl> - void ( * dispose ) ( AttachmentLoader * self ) , / / <nl> + void _AttachmentLoader_init ( AttachmentLoader * self , / * * / <nl> + void ( * dispose ) ( AttachmentLoader * self ) , / * * / <nl> Attachment * ( * newAttachment ) ( AttachmentLoader * self , Skin * skin , AttachmentType type , const char * name ) ) ; <nl> void _AttachmentLoader_deinit ( AttachmentLoader * self ) ; <nl> void _AttachmentLoader_setError ( AttachmentLoader * self , const char * error1 , const char * error2 ) ; <nl> void _AttachmentLoader_setUnknownTypeError ( AttachmentLoader * self , AttachmentTy <nl> <nl> / * * / <nl> <nl> - void _Attachment_init ( Attachment * self , const char * name , AttachmentType type , / / <nl> + void _Attachment_init ( Attachment * self , const char * name , AttachmentType type , / * * / <nl> void ( * dispose ) ( Attachment * self ) ) ; <nl> void _Attachment_deinit ( Attachment * self ) ; <nl> <nl> / * * / <nl> <nl> - void _Timeline_init ( Timeline * self , / / <nl> - void ( * dispose ) ( Timeline * self ) , / / <nl> + void _Timeline_init ( Timeline * self , / * * / <nl> + void ( * dispose ) ( Timeline * self ) , / * * / <nl> void ( * apply ) ( const Timeline * self , Skeleton * skeleton , float time , float alpha ) ) ; <nl> void _Timeline_deinit ( Timeline * self ) ; <nl> <nl> / * * / <nl> <nl> - void _CurveTimeline_init ( CurveTimeline * self , int frameCount , / / <nl> - void ( * dispose ) ( Timeline * self ) , / / <nl> + void _CurveTimeline_init ( CurveTimeline * self , int frameCount , / * * / <nl> + void ( * dispose ) ( Timeline * self ) , / * * / <nl> void ( * apply ) ( const Timeline * self , Skeleton * skeleton , float time , float alpha ) ) ; <nl> void _CurveTimeline_deinit ( CurveTimeline * self ) ; <nl> <nl> mmm a / extensions / spine / spine - cocos2dx . cpp <nl> ppp b / extensions / spine / spine - cocos2dx . cpp <nl> <nl> # include < spine / extension . h > <nl> <nl> USING_NS_CC ; <nl> - using std : : min ; <nl> - using std : : max ; <nl> + <nl> namespace cocos2d { namespace extension { <nl> <nl> void _AtlasPage_createTexture ( AtlasPage * self , const char * path ) { <nl> CCTexture2D * texture = CCTextureCache : : sharedTextureCache ( ) - > addImage ( path ) ; <nl> CCTextureAtlas * textureAtlas = CCTextureAtlas : : createWithTexture ( texture , 4 ) ; <nl> textureAtlas - > retain ( ) ; <nl> - self - > texture = textureAtlas ; <nl> - <nl> - / / Using getContentSize to make it supports the strategy of loading resources in cocos2d - x . <nl> - <nl> - / / self - > width = texture - > getPixelsWide ( ) ; <nl> - / / self - > height = texture - > getPixelsHigh ( ) ; <nl> - <nl> - self - > width = texture - > getContentSize ( ) . width ; <nl> - self - > height = texture - > getContentSize ( ) . 
height ; <nl> + self - > rendererObject = textureAtlas ; <nl> + self - > width = texture - > getPixelsWide ( ) ; <nl> + self - > height = texture - > getPixelsHigh ( ) ; <nl> } <nl> <nl> void _AtlasPage_disposeTexture ( AtlasPage * self ) { <nl> - ( ( CCTextureAtlas * ) self - > texture ) - > release ( ) ; <nl> + ( ( CCTextureAtlas * ) self - > rendererObject ) - > release ( ) ; <nl> } <nl> <nl> char * _Util_readFile ( const char * path , int * length ) { <nl> unsigned long size ; <nl> - char * data = reinterpret_cast < char * > ( CCFileUtils : : sharedFileUtils ( ) - > getFileData ( path , " r " , & size ) ) ; <nl> + char * data = reinterpret_cast < char * > ( CCFileUtils : : sharedFileUtils ( ) - > getFileData ( <nl> + CCFileUtils : : sharedFileUtils ( ) - > fullPathForFilename ( path ) . c_str ( ) , " r " , & size ) ) ; <nl> * length = size ; <nl> return data ; <nl> } <nl> <nl> / * * / <nl> <nl> - void RegionAttachment_updateQuad ( RegionAttachment * self , Slot * slot , ccV3F_C4B_T2F_Quad * quad ) { <nl> - RegionAttachment_updateVertices ( self , slot ) ; <nl> + void RegionAttachment_updateQuad ( RegionAttachment * self , Slot * slot , ccV3F_C4B_T2F_Quad * quad , bool premultipliedAlpha ) { <nl> + float vertices [ 8 ] ; <nl> + RegionAttachment_computeVertices ( self , slot - > skeleton - > x , slot - > skeleton - > y , slot - > bone , vertices ) ; <nl> <nl> GLubyte r = slot - > skeleton - > r * slot - > r * 255 ; <nl> GLubyte g = slot - > skeleton - > g * slot - > g * 255 ; <nl> GLubyte b = slot - > skeleton - > b * slot - > b * 255 ; <nl> - GLubyte a = slot - > skeleton - > a * slot - > a * 255 ; <nl> + float normalizedAlpha = slot - > skeleton - > a * slot - > a ; <nl> + if ( premultipliedAlpha ) { <nl> + r * = normalizedAlpha ; <nl> + g * = normalizedAlpha ; <nl> + b * = normalizedAlpha ; <nl> + } <nl> + GLubyte a = normalizedAlpha * 255 ; <nl> quad - > bl . colors . r = r ; <nl> quad - > bl . colors . g = g ; <nl> quad - > bl . colors . b = b ; <nl> void RegionAttachment_updateQuad ( RegionAttachment * self , Slot * slot , ccV3F_C4B_ <nl> quad - > br . colors . b = b ; <nl> quad - > br . colors . a = a ; <nl> <nl> - quad - > bl . vertices . x = self - > vertices [ VERTEX_X1 ] ; <nl> - quad - > bl . vertices . y = self - > vertices [ VERTEX_Y1 ] ; <nl> - quad - > tl . vertices . x = self - > vertices [ VERTEX_X2 ] ; <nl> - quad - > tl . vertices . y = self - > vertices [ VERTEX_Y2 ] ; <nl> - quad - > tr . vertices . x = self - > vertices [ VERTEX_X3 ] ; <nl> - quad - > tr . vertices . y = self - > vertices [ VERTEX_Y3 ] ; <nl> - quad - > br . vertices . x = self - > vertices [ VERTEX_X4 ] ; <nl> - quad - > br . vertices . y = self - > vertices [ VERTEX_Y4 ] ; <nl> - <nl> - if ( self - > region - > rotate ) { <nl> - quad - > tl . texCoords . u = self - > region - > u ; <nl> - quad - > tl . texCoords . v = self - > region - > v2 ; <nl> - quad - > tr . texCoords . u = self - > region - > u ; <nl> - quad - > tr . texCoords . v = self - > region - > v ; <nl> - quad - > br . texCoords . u = self - > region - > u2 ; <nl> - quad - > br . texCoords . v = self - > region - > v ; <nl> - quad - > bl . texCoords . u = self - > region - > u2 ; <nl> - quad - > bl . texCoords . v = self - > region - > v2 ; <nl> - } else { <nl> - quad - > bl . texCoords . u = self - > region - > u ; <nl> - quad - > bl . texCoords . v = self - > region - > v2 ; <nl> - quad - > tl . texCoords . u = self - > region - > u ; <nl> - quad - > tl . texCoords . v = self - > region - > v ; <nl> - quad - > tr . texCoords . 
u = self - > region - > u2 ; <nl> - quad - > tr . texCoords . v = self - > region - > v ; <nl> - quad - > br . texCoords . u = self - > region - > u2 ; <nl> - quad - > br . texCoords . v = self - > region - > v2 ; <nl> - } <nl> - } <nl> - <nl> - / * * / <nl> - <nl> - CCSkeleton * CCSkeleton : : createWithFile ( const char * skeletonDataFile , Atlas * atlas , float scale ) { <nl> - SkeletonJson * json = SkeletonJson_create ( atlas ) ; <nl> - json - > scale = scale ; <nl> - SkeletonData * skeletonData = SkeletonJson_readSkeletonDataFile ( json , skeletonDataFile ) ; <nl> - SkeletonJson_dispose ( json ) ; <nl> - CCSkeleton * node = skeletonData ? createWithData ( skeletonData ) : 0 ; <nl> - node - > ownsSkeleton = true ; <nl> - return node ; <nl> - } <nl> - <nl> - CCSkeleton * CCSkeleton : : createWithFile ( const char * skeletonDataFile , const char * atlasFile , float scale ) { <nl> - Atlas * atlas = Atlas_readAtlasFile ( atlasFile ) ; <nl> - if ( ! atlas ) return 0 ; <nl> - SkeletonJson * json = SkeletonJson_create ( atlas ) ; <nl> - json - > scale = scale ; <nl> - SkeletonData * skeletonData = SkeletonJson_readSkeletonDataFile ( json , skeletonDataFile ) ; <nl> - SkeletonJson_dispose ( json ) ; <nl> - if ( ! skeletonData ) { <nl> - Atlas_dispose ( atlas ) ; <nl> - return 0 ; <nl> - } <nl> - CCSkeleton * node = createWithData ( skeletonData ) ; <nl> - node - > ownsSkeleton = true ; <nl> - node - > atlas = atlas ; <nl> - return node ; <nl> - } <nl> - <nl> - CCSkeleton * CCSkeleton : : createWithData ( SkeletonData * skeletonData , AnimationStateData * stateData ) { <nl> - CCSkeleton * node = new CCSkeleton ( skeletonData , stateData ) ; <nl> - node - > autorelease ( ) ; <nl> - return node ; <nl> - } <nl> - <nl> - CCSkeleton : : CCSkeleton ( SkeletonData * skeletonData , AnimationStateData * stateData ) : <nl> - ownsSkeleton ( false ) , ownsStateData ( false ) , atlas ( 0 ) , <nl> - skeleton ( 0 ) , state ( 0 ) , debugSlots ( false ) , debugBones ( false ) { <nl> - CONST_CAST ( Skeleton * , skeleton ) = Skeleton_create ( skeletonData ) ; <nl> - <nl> - if ( ! stateData ) { <nl> - stateData = AnimationStateData_create ( skeletonData ) ; <nl> - ownsStateData = true ; <nl> - } <nl> - CONST_CAST ( AnimationState * , state ) = AnimationState_create ( stateData ) ; <nl> - <nl> - blendFunc . src = GL_ONE ; <nl> - blendFunc . dst = GL_ONE_MINUS_SRC_ALPHA ; <nl> - <nl> - timeScale = 1 ; <nl> - <nl> - setShaderProgram ( CCShaderCache : : sharedShaderCache ( ) - > programForKey ( kCCShader_PositionTextureColor ) ) ; <nl> - scheduleUpdate ( ) ; <nl> - } <nl> - <nl> - CCSkeleton : : ~ CCSkeleton ( ) { <nl> - if ( ownsSkeleton ) Skeleton_dispose ( skeleton ) ; <nl> - if ( ownsStateData ) AnimationStateData_dispose ( state - > data ) ; <nl> - if ( atlas ) Atlas_dispose ( atlas ) ; <nl> - AnimationState_dispose ( state ) ; <nl> - } <nl> - <nl> - void CCSkeleton : : update ( float deltaTime ) { <nl> - Skeleton_update ( skeleton , deltaTime ) ; <nl> - AnimationState_update ( state , deltaTime * timeScale ) ; <nl> - AnimationState_apply ( state , skeleton ) ; <nl> - Skeleton_updateWorldTransform ( skeleton ) ; <nl> - } <nl> - <nl> - void CCSkeleton : : draw ( ) { <nl> - CC_NODE_DRAW_SETUP ( ) ; <nl> - <nl> - ccGLBlendFunc ( blendFunc . src , blendFunc . dst ) ; <nl> - ccColor3B color = getColor ( ) ; <nl> - skeleton - > r = color . r / ( float ) 255 ; <nl> - skeleton - > g = color . g / ( float ) 255 ; <nl> - skeleton - > b = color . 
b / ( float ) 255 ; <nl> - skeleton - > a = getOpacity ( ) / ( float ) 255 ; <nl> - <nl> - CCTextureAtlas * textureAtlas = 0 ; <nl> - ccV3F_C4B_T2F_Quad quad ; <nl> - quad . tl . vertices . z = 0 ; <nl> - quad . tr . vertices . z = 0 ; <nl> - quad . bl . vertices . z = 0 ; <nl> - quad . br . vertices . z = 0 ; <nl> - for ( int i = 0 , n = skeleton - > slotCount ; i < n ; i + + ) { <nl> - Slot * slot = skeleton - > slots [ i ] ; <nl> - if ( ! slot - > attachment | | slot - > attachment - > type ! = ATTACHMENT_REGION ) continue ; <nl> - RegionAttachment * attachment = ( RegionAttachment * ) slot - > attachment ; <nl> - CCTextureAtlas * regionTextureAtlas = ( CCTextureAtlas * ) attachment - > region - > page - > texture ; <nl> - if ( regionTextureAtlas ! = textureAtlas ) { <nl> - if ( textureAtlas ) { <nl> - textureAtlas - > drawQuads ( ) ; <nl> - textureAtlas - > removeAllQuads ( ) ; <nl> - } <nl> - } <nl> - textureAtlas = regionTextureAtlas ; <nl> - if ( textureAtlas - > getCapacity ( ) = = textureAtlas - > getTotalQuads ( ) & & <nl> - ! textureAtlas - > resizeCapacity ( textureAtlas - > getCapacity ( ) * 2 ) ) return ; <nl> - RegionAttachment_updateQuad ( attachment , slot , & quad ) ; <nl> - textureAtlas - > updateQuad ( & quad , textureAtlas - > getTotalQuads ( ) ) ; <nl> - } <nl> - if ( textureAtlas ) { <nl> - textureAtlas - > drawQuads ( ) ; <nl> - textureAtlas - > removeAllQuads ( ) ; <nl> - } <nl> - <nl> - if ( debugSlots ) { <nl> - / / Slots . <nl> - ccDrawColor4B ( 0 , 0 , 255 , 255 ) ; <nl> - glLineWidth ( 1 ) ; <nl> - CCPoint points [ 4 ] ; <nl> - ccV3F_C4B_T2F_Quad quad ; <nl> - for ( int i = 0 , n = skeleton - > slotCount ; i < n ; i + + ) { <nl> - Slot * slot = skeleton - > slots [ i ] ; <nl> - if ( ! slot - > attachment | | slot - > attachment - > type ! = ATTACHMENT_REGION ) continue ; <nl> - RegionAttachment * attachment = ( RegionAttachment * ) slot - > attachment ; <nl> - RegionAttachment_updateQuad ( attachment , slot , & quad ) ; <nl> - points [ 0 ] = ccp ( quad . bl . vertices . x , quad . bl . vertices . y ) ; <nl> - points [ 1 ] = ccp ( quad . br . vertices . x , quad . br . vertices . y ) ; <nl> - points [ 2 ] = ccp ( quad . tr . vertices . x , quad . tr . vertices . y ) ; <nl> - points [ 3 ] = ccp ( quad . tl . vertices . x , quad . tl . vertices . y ) ; <nl> - ccDrawPoly ( points , 4 , true ) ; <nl> - } <nl> - } <nl> - if ( debugBones ) { <nl> - / / Bone lengths . <nl> - glLineWidth ( 2 ) ; <nl> - ccDrawColor4B ( 255 , 0 , 0 , 255 ) ; <nl> - for ( int i = 0 , n = skeleton - > boneCount ; i < n ; i + + ) { <nl> - Bone * bone = skeleton - > bones [ i ] ; <nl> - float x = bone - > data - > length * bone - > m00 + bone - > worldX ; <nl> - float y = bone - > data - > length * bone - > m10 + bone - > worldY ; <nl> - ccDrawLine ( ccp ( bone - > worldX , bone - > worldY ) , ccp ( x , y ) ) ; <nl> - } <nl> - / / Bone origins . <nl> - ccPointSize ( 4 ) ; <nl> - ccDrawColor4B ( 0 , 0 , 255 , 255 ) ; / / Root bone is blue . 
<nl> - for ( int i = 0 , n = skeleton - > boneCount ; i < n ; i + + ) { <nl> - Bone * bone = skeleton - > bones [ i ] ; <nl> - ccDrawPoint ( ccp ( bone - > worldX , bone - > worldY ) ) ; <nl> - if ( i = = 0 ) ccDrawColor4B ( 0 , 255 , 0 , 255 ) ; <nl> - } <nl> - } <nl> - } <nl> - <nl> - CCRect CCSkeleton : : boundingBox ( ) { <nl> - float minX = FLT_MAX , minY = FLT_MAX , maxX = FLT_MIN , maxY = FLT_MIN ; <nl> - float scaleX = getScaleX ( ) ; <nl> - float scaleY = getScaleY ( ) ; <nl> - ccV3F_C4B_T2F_Quad quad ; <nl> - for ( int i = 0 ; i < skeleton - > slotCount ; + + i ) { <nl> - Slot * slot = skeleton - > slots [ i ] ; <nl> - if ( ! slot - > attachment | | slot - > attachment - > type ! = ATTACHMENT_REGION ) continue ; <nl> - RegionAttachment * attachment = ( RegionAttachment * ) slot - > attachment ; <nl> - RegionAttachment_updateQuad ( attachment , slot , & quad ) ; <nl> - minX = min ( minX , quad . bl . vertices . x * scaleX ) ; <nl> - minY = min ( minY , quad . bl . vertices . y * scaleY ) ; <nl> - maxX = max ( maxX , quad . bl . vertices . x * scaleX ) ; <nl> - maxY = max ( maxY , quad . bl . vertices . y * scaleY ) ; <nl> - minX = min ( minX , quad . br . vertices . x * scaleX ) ; <nl> - minY = min ( minY , quad . br . vertices . y * scaleY ) ; <nl> - maxX = max ( maxX , quad . br . vertices . x * scaleX ) ; <nl> - maxY = max ( maxY , quad . br . vertices . y * scaleY ) ; <nl> - minX = min ( minX , quad . tl . vertices . x * scaleX ) ; <nl> - minY = min ( minY , quad . tl . vertices . y * scaleY ) ; <nl> - maxX = max ( maxX , quad . tl . vertices . x * scaleX ) ; <nl> - maxY = max ( maxY , quad . tl . vertices . y * scaleY ) ; <nl> - minX = min ( minX , quad . tr . vertices . x * scaleX ) ; <nl> - minY = min ( minY , quad . tr . vertices . y * scaleY ) ; <nl> - maxX = max ( maxX , quad . tr . vertices . x * scaleX ) ; <nl> - maxY = max ( maxY , quad . tr . vertices . y * scaleY ) ; <nl> - } <nl> - CCPoint position = getPosition ( ) ; <nl> - minX = position . x + minX ; <nl> - minY = position . y + minY ; <nl> - maxX = position . x + maxX ; <nl> - maxY = position . y + maxY ; <nl> - return CCRectMake ( minX , minY , maxX - minX , maxY - minY ) ; <nl> - } <nl> - <nl> - / / CCBlendProtocol <nl> - <nl> - ccBlendFunc CCSkeleton : : getBlendFunc ( ) { <nl> - return blendFunc ; <nl> - } <nl> - <nl> - void CCSkeleton : : setBlendFunc ( ccBlendFunc blendFunc ) { <nl> - this - > blendFunc = blendFunc ; <nl> + quad - > bl . vertices . x = vertices [ VERTEX_X1 ] ; <nl> + quad - > bl . vertices . y = vertices [ VERTEX_Y1 ] ; <nl> + quad - > tl . vertices . x = vertices [ VERTEX_X2 ] ; <nl> + quad - > tl . vertices . y = vertices [ VERTEX_Y2 ] ; <nl> + quad - > tr . vertices . x = vertices [ VERTEX_X3 ] ; <nl> + quad - > tr . vertices . y = vertices [ VERTEX_Y3 ] ; <nl> + quad - > br . vertices . x = vertices [ VERTEX_X4 ] ; <nl> + quad - > br . vertices . y = vertices [ VERTEX_Y4 ] ; <nl> + <nl> + quad - > bl . texCoords . u = self - > uvs [ VERTEX_X1 ] ; <nl> + quad - > bl . texCoords . v = self - > uvs [ VERTEX_Y1 ] ; <nl> + quad - > tl . texCoords . u = self - > uvs [ VERTEX_X2 ] ; <nl> + quad - > tl . texCoords . v = self - > uvs [ VERTEX_Y2 ] ; <nl> + quad - > tr . texCoords . u = self - > uvs [ VERTEX_X3 ] ; <nl> + quad - > tr . texCoords . v = self - > uvs [ VERTEX_Y3 ] ; <nl> + quad - > br . texCoords . u = self - > uvs [ VERTEX_X4 ] ; <nl> + quad - > br . texCoords . 
v = self - > uvs [ VERTEX_Y4 ] ; <nl> } <nl> <nl> - } } / / namespace cocos2d { namespace extension { <nl> + } } / / namespace cocos2d { namespace extension { <nl> \ No newline at end of file <nl> mmm a / extensions / spine / spine - cocos2dx . h <nl> ppp b / extensions / spine / spine - cocos2dx . h <nl> <nl> <nl> # include < spine / spine . h > <nl> # include " cocos2d . h " <nl> + # include < spine / CCSkeleton . h > <nl> + # include < spine / CCSkeletonAnimation . h > <nl> <nl> namespace cocos2d { namespace extension { <nl> <nl> - class CCSkeleton : public cocos2d : : CCNodeRGBA , public cocos2d : : CCBlendProtocol { <nl> - private : <nl> - bool ownsSkeleton ; <nl> - bool ownsStateData ; <nl> - Atlas * atlas ; <nl> - <nl> - public : <nl> - Skeleton * const skeleton ; <nl> - AnimationState * const state ; <nl> - float timeScale ; <nl> - bool debugSlots ; <nl> - bool debugBones ; <nl> - <nl> - static CCSkeleton * createWithFile ( const char * skeletonDataFile , Atlas * atlas , float scale = 1 ) ; <nl> - static CCSkeleton * createWithFile ( const char * skeletonDataFile , const char * atlasFile , float scale = 1 ) ; <nl> - static CCSkeleton * createWithData ( SkeletonData * skeletonData , AnimationStateData * stateData = 0 ) ; <nl> - <nl> - CCSkeleton ( SkeletonData * skeletonData , AnimationStateData * stateData = 0 ) ; <nl> - virtual ~ CCSkeleton ( ) ; <nl> - <nl> - virtual void update ( float deltaTime ) ; <nl> - virtual void draw ( ) ; <nl> - virtual cocos2d : : CCRect boundingBox ( ) ; <nl> - <nl> - / / CCBlendProtocol <nl> - CC_PROPERTY ( cocos2d : : ccBlendFunc , blendFunc , BlendFunc ) ; <nl> - } ; <nl> - <nl> - / * * / <nl> - <nl> - void RegionAttachment_updateQuad ( RegionAttachment * self , Slot * slot , cocos2d : : ccV3F_C4B_T2F_Quad * quad ) ; <nl> + void RegionAttachment_updateQuad ( RegionAttachment * self , Slot * slot , cocos2d : : ccV3F_C4B_T2F_Quad * quad , bool premultiplied = false ) ; <nl> <nl> } } / / namespace cocos2d { namespace extension { <nl> <nl> mmm a / samples / Cpp / TestCpp / Classes / SpineTest / SpineTest . cpp <nl> ppp b / samples / Cpp / TestCpp / Classes / SpineTest / SpineTest . cpp <nl> void SpineTestScene : : runThisTest ( ) <nl> bool SpineTestLayer : : init ( ) { <nl> if ( ! CCLayer : : init ( ) ) return false ; <nl> <nl> - skeletonNode = CCSkeleton : : createWithFile ( " spine / spineboy . json " , " spine / spineboy . atlas " ) ; <nl> - AnimationStateData_setMixByName ( skeletonNode - > state - > data , " walk " , " jump " , 0 . 4f ) ; <nl> - AnimationStateData_setMixByName ( skeletonNode - > state - > data , " jump " , " walk " , 0 . 4f ) ; <nl> - AnimationState_setAnimationByName ( skeletonNode - > state , " walk " , true ) ; <nl> + skeletonNode = CCSkeletonAnimation : : createWithFile ( " spine / spineboy . json " , " spine / spineboy . atlas " ) ; <nl> + skeletonNode - > setMix ( " walk " , " jump " , 0 . 4f ) ; <nl> + skeletonNode - > setMix ( " jump " , " walk " , 0 . 4f ) ; <nl> + skeletonNode - > setAnimation ( " walk " , true ) ; <nl> + <nl> skeletonNode - > timeScale = 0 . 
3f ; <nl> skeletonNode - > debugBones = true ; <nl> <nl> bool SpineTestLayer : : init ( ) { <nl> } <nl> <nl> void SpineTestLayer : : update ( float deltaTime ) { <nl> - if ( skeletonNode - > state - > loop ) { <nl> - if ( skeletonNode - > state - > time > 2 ) AnimationState_setAnimationByName ( skeletonNode - > state , " jump " , false ) ; <nl> + if ( skeletonNode - > states [ 0 ] - > loop ) { <nl> + if ( skeletonNode - > states [ 0 ] - > time > 2 ) <nl> + skeletonNode - > setAnimation ( " jump " , false ) ; <nl> } else { <nl> - if ( skeletonNode - > state - > time > 1 ) AnimationState_setAnimationByName ( skeletonNode - > state , " walk " , true ) ; <nl> + if ( skeletonNode - > states [ 0 ] - > time > 1 ) <nl> + skeletonNode - > setAnimation ( " walk " , true ) ; <nl> } <nl> } <nl> mmm a / samples / Cpp / TestCpp / Classes / SpineTest / SpineTest . h <nl> ppp b / samples / Cpp / TestCpp / Classes / SpineTest / SpineTest . h <nl> class SpineTestScene : public TestScene <nl> <nl> class SpineTestLayer : public cocos2d : : CCLayer { <nl> private : <nl> - cocos2d : : extension : : CCSkeleton * skeletonNode ; <nl> + cocos2d : : extension : : CCSkeletonAnimation * skeletonNode ; <nl> <nl> public : <nl> <nl> mmm a / samples / Cpp / TestCpp / proj . ios / TestCpp . xcodeproj / project . pbxproj . REMOVED . git - id <nl> ppp b / samples / Cpp / TestCpp / proj . ios / TestCpp . xcodeproj / project . pbxproj . REMOVED . git - id <nl> @ @ - 1 + 1 @ @ <nl> - dd49000446b982d873b507d09499340ea5735400 <nl> \ No newline at end of file <nl> + 98559cffb0522f553d9f6cf6a4d990b1b9fb2e83 <nl> \ No newline at end of file <nl> mmm a / samples / Cpp / TestCpp / proj . mac / TestCpp . xcodeproj / project . pbxproj . REMOVED . git - id <nl> ppp b / samples / Cpp / TestCpp / proj . mac / TestCpp . xcodeproj / project . pbxproj . REMOVED . git - id <nl> @ @ - 1 + 1 @ @ <nl> - 647c786f7e749b31546a70420232f01ca7c89f6f <nl> \ No newline at end of file <nl> + bd72399cc616a35a20111d4a5bedc5c473e93e37 <nl> \ No newline at end of file <nl> | Merge pull request from dumganhar / update - spine - runtime - develop | cocos2d/cocos2d-x | b27524adfec6deebe5f19afd38f362fba32d1f18 | 2013-06-04T07:39:49Z |
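The cocos2d-x row above records the move from raw spine-c calls (AnimationStateData_setMixByName, AnimationState_setAnimationByName) to the new CCSkeletonAnimation wrapper, plus the texture -> rendererObject rename and premultiplied-alpha handling. A minimal usage sketch of the wrapper, mirroring the SpineTest.cpp hunks in the diff and assuming the same spineboy sample assets:

```cpp
// Sketch only; mirrors the SpineTest.cpp hunks recorded above and assumes a
// cocos2d-x 2.x project context with the spine extension built in.
#include <spine/spine-cocos2dx.h>  // now also pulls in CCSkeleton.h / CCSkeletonAnimation.h

using namespace cocos2d::extension;

void setupSpineboy(cocos2d::CCNode *parent) {
    CCSkeletonAnimation *node =
        CCSkeletonAnimation::createWithFile("spine/spineboy.json", "spine/spineboy.atlas");
    node->setMix("walk", "jump", 0.4f);  // replaces AnimationStateData_setMixByName
    node->setMix("jump", "walk", 0.4f);
    node->setAnimation("walk", true);    // replaces AnimationState_setAnimationByName
    node->timeScale = 0.3f;              // public field, as used by the test
    node->debugBones = true;
    parent->addChild(node);
}
```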
mmm a / editor / editor_node . cpp <nl> ppp b / editor / editor_node . cpp <nl> EditorNode : : EditorNode ( ) { <nl> bottom_panel - > add_child ( bottom_panel_vb ) ; <nl> <nl> bottom_panel_hb = memnew ( HBoxContainer ) ; <nl> - bottom_panel_hb - > set_custom_minimum_size ( Size2 ( 0 , 24 ) ) ; / / Adjust for the height of the " Expand Bottom Dock " icon . <nl> + bottom_panel_hb - > set_custom_minimum_size ( Size2 ( 0 , 24 * EDSCALE ) ) ; / / Adjust for the height of the " Expand Bottom Dock " icon . <nl> bottom_panel_vb - > add_child ( bottom_panel_hb ) ; <nl> <nl> bottom_panel_hb_editors = memnew ( HBoxContainer ) ; <nl> mmm a / scene / gui / color_picker . cpp <nl> ppp b / scene / gui / color_picker . cpp <nl> <nl> # include " core / os / os . h " <nl> <nl> # ifdef TOOLS_ENABLED <nl> + # include " editor_scale . h " <nl> # include " editor_settings . h " <nl> # endif <nl> <nl> void ColorPicker : : _value_changed ( double ) { <nl> <nl> void ColorPicker : : _html_entered ( const String & p_html ) { <nl> <nl> - if ( updating ) <nl> + if ( updating | | text_is_constructor | | ! c_text - > is_visible ( ) ) <nl> return ; <nl> <nl> float last_alpha = color . a ; <nl> void ColorPicker : : _update_presets ( ) { <nl> } <nl> <nl> void ColorPicker : : _text_type_toggled ( ) { <nl> - if ( ! Engine : : get_singleton ( ) - > is_editor_hint ( ) ) <nl> - return ; <nl> + <nl> text_is_constructor = ! text_is_constructor ; <nl> if ( text_is_constructor ) { <nl> text_type - > set_text ( " " ) ; <nl> text_type - > set_icon ( get_icon ( " Script " , " EditorIcons " ) ) ; <nl> + <nl> + c_text - > set_editable ( false ) ; <nl> } else { <nl> text_type - > set_text ( " # " ) ; <nl> text_type - > set_icon ( NULL ) ; <nl> + <nl> + c_text - > set_editable ( true ) ; <nl> } <nl> _update_color ( ) ; <nl> } <nl> bool ColorPicker : : is_deferred_mode ( ) const { <nl> void ColorPicker : : _update_text_value ( ) { <nl> bool visible = true ; <nl> if ( text_is_constructor ) { <nl> - String t = " Color ( " + String : : num ( color . r ) + " , " + String : : num ( color . g ) + " , " + String : : num ( color . b ) ; <nl> + String t = " Color ( " + String : : num ( color . r ) + " , " + String : : num ( color . g ) + " , " + String : : num ( color . b ) ; <nl> if ( edit_alpha & & color . a < 1 ) <nl> - t + = ( " , " + String : : num ( color . a ) + " ) " ) ; <nl> + t + = " , " + String : : num ( color . a ) + " ) " ; <nl> else <nl> t + = " ) " ; <nl> c_text - > set_text ( t ) ; <nl> - } else { <nl> - if ( color . r > 1 | | color . g > 1 | | color . b > 1 | | color . r < 0 | | color . g < 0 | | color . b < 0 ) { <nl> - visible = false ; <nl> - } else { <nl> - c_text - > set_text ( color . to_html ( edit_alpha & & color . a < 1 ) ) ; <nl> - } <nl> } <nl> + <nl> + if ( color . r > 1 | | color . g > 1 | | color . b > 1 | | color . r < 0 | | color . g < 0 | | color . b < 0 ) { <nl> + visible = false ; <nl> + } else if ( ! text_is_constructor ) { <nl> + c_text - > set_text ( color . to_html ( edit_alpha & & color . 
a < 1 ) ) ; <nl> + } <nl> + <nl> + text_type - > set_visible ( visible ) ; <nl> c_text - > set_visible ( visible ) ; <nl> } <nl> <nl> ColorPicker : : ColorPicker ( ) : <nl> screen = NULL ; <nl> <nl> HBoxContainer * hb_smpl = memnew ( HBoxContainer ) ; <nl> - btn_pick = memnew ( ToolButton ) ; <nl> - btn_pick - > connect ( " pressed " , this , " _screen_pick_pressed " ) ; <nl> + add_child ( hb_smpl ) ; <nl> <nl> sample = memnew ( TextureRect ) ; <nl> + hb_smpl - > add_child ( sample ) ; <nl> sample - > set_h_size_flags ( SIZE_EXPAND_FILL ) ; <nl> sample - > connect ( " draw " , this , " _sample_draw " ) ; <nl> <nl> - hb_smpl - > add_child ( sample ) ; <nl> + btn_pick = memnew ( ToolButton ) ; <nl> hb_smpl - > add_child ( btn_pick ) ; <nl> - add_child ( hb_smpl ) ; <nl> + btn_pick - > set_toggle_mode ( true ) ; <nl> + btn_pick - > set_tooltip ( TTR ( " Pick a color from the screen . " ) ) ; <nl> + btn_pick - > connect ( " pressed " , this , " _screen_pick_pressed " ) ; <nl> <nl> HBoxContainer * hb_edit = memnew ( HBoxContainer ) ; <nl> + add_child ( hb_edit ) ; <nl> hb_edit - > set_v_size_flags ( SIZE_EXPAND_FILL ) ; <nl> <nl> uv_edit = memnew ( Control ) ; <nl> - <nl> + hb_edit - > add_child ( uv_edit ) ; <nl> uv_edit - > connect ( " gui_input " , this , " _uv_input " ) ; <nl> uv_edit - > set_mouse_filter ( MOUSE_FILTER_PASS ) ; <nl> uv_edit - > set_h_size_flags ( SIZE_EXPAND_FILL ) ; <nl> ColorPicker : : ColorPicker ( ) : <nl> uv_edit - > set_custom_minimum_size ( Size2 ( get_constant ( " sv_width " ) , get_constant ( " sv_height " ) ) ) ; <nl> uv_edit - > connect ( " draw " , this , " _hsv_draw " , make_binds ( 0 , uv_edit ) ) ; <nl> <nl> - add_child ( hb_edit ) ; <nl> - <nl> w_edit = memnew ( Control ) ; <nl> + hb_edit - > add_child ( w_edit ) ; <nl> w_edit - > set_custom_minimum_size ( Size2 ( get_constant ( " h_width " ) , 0 ) ) ; <nl> w_edit - > set_h_size_flags ( SIZE_FILL ) ; <nl> w_edit - > set_v_size_flags ( SIZE_EXPAND_FILL ) ; <nl> w_edit - > connect ( " gui_input " , this , " _w_input " ) ; <nl> w_edit - > connect ( " draw " , this , " _hsv_draw " , make_binds ( 1 , w_edit ) ) ; <nl> <nl> - hb_edit - > add_child ( uv_edit ) ; <nl> - hb_edit - > add_child ( memnew ( VSeparator ) ) ; <nl> - hb_edit - > add_child ( w_edit ) ; <nl> - <nl> VBoxContainer * vbl = memnew ( VBoxContainer ) ; <nl> add_child ( vbl ) ; <nl> <nl> ColorPicker : : ColorPicker ( ) : <nl> } <nl> <nl> HBoxContainer * hhb = memnew ( HBoxContainer ) ; <nl> + vbr - > add_child ( hhb ) ; <nl> <nl> btn_mode = memnew ( CheckButton ) ; <nl> + hhb - > add_child ( btn_mode ) ; <nl> btn_mode - > set_text ( TTR ( " Raw Mode " ) ) ; <nl> btn_mode - > connect ( " toggled " , this , " set_raw_mode " ) ; <nl> - hhb - > add_child ( btn_mode ) ; <nl> - vbr - > add_child ( hhb ) ; <nl> + <nl> text_type = memnew ( Button ) ; <nl> - text_type - > set_flat ( true ) ; <nl> - text_type - > connect ( " pressed " , this , " _text_type_toggled " ) ; <nl> hhb - > add_child ( text_type ) ; <nl> + text_type - > set_text ( " # " ) ; <nl> + text_type - > set_tooltip ( TTR ( " Switch between hexadecimal and code values . " ) ) ; <nl> + if ( Engine : : get_singleton ( ) - > is_editor_hint ( ) ) { <nl> + <nl> + # ifdef TOOLS_ENABLED <nl> + text_type - > set_custom_minimum_size ( Size2 ( 28 * EDSCALE , 0 ) ) ; / / Adjust for the width of the " Script " icon . 
<nl> + # endif <nl> + text_type - > connect ( " pressed " , this , " _text_type_toggled " ) ; <nl> + } else { <nl> + <nl> + text_type - > set_flat ( true ) ; <nl> + text_type - > set_mouse_filter ( MOUSE_FILTER_IGNORE ) ; <nl> + } <nl> <nl> c_text = memnew ( LineEdit ) ; <nl> hhb - > add_child ( c_text ) ; <nl> + c_text - > set_h_size_flags ( SIZE_EXPAND_FILL ) ; <nl> c_text - > connect ( " text_entered " , this , " _html_entered " ) ; <nl> c_text - > connect ( " focus_entered " , this , " _focus_enter " ) ; <nl> c_text - > connect ( " focus_exited " , this , " _html_focus_exit " ) ; <nl> <nl> - text_type - > set_text ( " # " ) ; <nl> - c_text - > set_h_size_flags ( SIZE_EXPAND_FILL ) ; <nl> - <nl> _update_controls ( ) ; <nl> updating = false ; <nl> <nl> set_pick_color ( Color ( 1 , 1 , 1 ) ) ; <nl> <nl> + add_child ( memnew ( HSeparator ) ) ; <nl> + <nl> HBoxContainer * bbc = memnew ( HBoxContainer ) ; <nl> add_child ( bbc ) ; <nl> <nl> ColorPicker : : ColorPicker ( ) : <nl> preset - > connect ( " draw " , this , " _update_presets " ) ; <nl> <nl> bt_add_preset = memnew ( Button ) ; <nl> + bbc - > add_child ( bt_add_preset ) ; <nl> bt_add_preset - > set_tooltip ( TTR ( " Add current color as a preset " ) ) ; <nl> bt_add_preset - > connect ( " pressed " , this , " _add_preset_pressed " ) ; <nl> - bbc - > add_child ( bt_add_preset ) ; <nl> } <nl> <nl> / / / / / / / / / / / / / / / / / <nl> void ColorPickerButton : : _update_picker ( ) { <nl> add_child ( popup ) ; <nl> picker - > connect ( " color_changed " , this , " _color_changed " ) ; <nl> popup - > connect ( " modal_closed " , this , " _modal_closed " ) ; <nl> + popup - > connect ( " about_to_show " , this , " set_pressed " , varray ( true ) ) ; <nl> + popup - > connect ( " popup_hide " , this , " set_pressed " , varray ( false ) ) ; <nl> picker - > set_pick_color ( color ) ; <nl> picker - > set_edit_alpha ( edit_alpha ) ; <nl> } <nl> void ColorPickerButton : : _bind_methods ( ) { <nl> <nl> ColorPickerButton : : ColorPickerButton ( ) { <nl> <nl> - / / Initialization is now done deferred <nl> - / / this improves performance in the inspector as the color picker <nl> - / / can be expensive to initialize <nl> + / / Initialization is now done deferred , <nl> + / / this improves performance in the inspector as the color picker <nl> + / / can be expensive to initialize . <nl> picker = NULL ; <nl> popup = NULL ; <nl> edit_alpha = true ; <nl> + <nl> + set_toggle_mode ( true ) ; <nl> } <nl> | Merge pull request from YeldhamDev / color_picker_changes | godotengine/godot | 4cfce9995548c876c496c7382b3985fd1217685d | 2019-01-27T17:37:48Z |
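One detail worth calling out in the ColorPicker row above: the button's pressed state is now driven by the popup's lifecycle signals instead of being managed by hand. The pattern, as a sketch against the Godot 3.x signal API the diff itself uses (MyPickerButton is a hypothetical stand-in for ColorPickerButton, not code from the commit):

```cpp
// Sketch of the signal binding added in _update_picker(); not standalone code,
// it assumes a Godot 3.x module context like the diff's.
void MyPickerButton::_update_picker() {
    set_toggle_mode(true);  // done once in the constructor in the actual change
    popup->connect("about_to_show", this, "set_pressed", varray(true));   // pressed while open
    popup->connect("popup_hide", this, "set_pressed", varray(false));     // released on close
}
```

Together with the 24 * EDSCALE and 28 * EDSCALE minimum sizes, this keeps the picker consistent at non-default editor display scales.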
mmm a / src / builtins / array - of . tq <nl> ppp b / src / builtins / array - of . tq <nl> module array { <nl> } <nl> <nl> / / 8 . Perform ? Set ( A , " length " , len , true ) . <nl> - SetProperty ( a , kLengthString , len ) ; <nl> + SetPropertyLength ( a , len ) ; <nl> <nl> / / 9 . Return A . <nl> return a ; <nl> mmm a / src / builtins / array . tq <nl> ppp b / src / builtins / array . tq <nl> module array { <nl> StoreArrayHole ( newElements , to ) ; <nl> } <nl> } <nl> + <nl> + extern macro SetPropertyLength ( implicit context : Context ) ( Object , Number ) ; <nl> } <nl> mmm a / src / builtins / builtins - array - gen . cc <nl> ppp b / src / builtins / builtins - array - gen . cc <nl> class ArrayPopulatorAssembler : public CodeStubAssembler { <nl> BIND ( & done ) ; <nl> return array . value ( ) ; <nl> } <nl> - <nl> - void GenerateSetLength ( TNode < Context > context , TNode < Object > array , <nl> - TNode < Number > length ) { <nl> - Label fast ( this ) , runtime ( this ) , done ( this ) ; <nl> - / / There ' s no need to set the length , if <nl> - / / 1 ) the array is a fast JS array and <nl> - / / 2 ) the new length is equal to the old length . <nl> - / / as the set is not observable . Otherwise fall back to the run - time . <nl> - <nl> - / / 1 ) Check that the array has fast elements . <nl> - / / TODO ( delphick ) : Consider changing this since it does an an unnecessary <nl> - / / check for SMIs . <nl> - / / TODO ( delphick ) : Also we could hoist this to after the array construction <nl> - / / and copy the args into array in the same way as the Array constructor . <nl> - BranchIfFastJSArray ( array , context , & fast , & runtime ) ; <nl> - <nl> - BIND ( & fast ) ; <nl> - { <nl> - TNode < JSArray > fast_array = CAST ( array ) ; <nl> - <nl> - TNode < Smi > length_smi = CAST ( length ) ; <nl> - TNode < Smi > old_length = LoadFastJSArrayLength ( fast_array ) ; <nl> - CSA_ASSERT ( this , TaggedIsPositiveSmi ( old_length ) ) ; <nl> - <nl> - / / 2 ) If the created array ' s length matches the required length , then <nl> - / / there ' s nothing else to do . Otherwise use the runtime to set the <nl> - / / property as that will insert holes into excess elements or shrink <nl> - / / the backing store as appropriate . <nl> - Branch ( SmiNotEqual ( length_smi , old_length ) , & runtime , & done ) ; <nl> - } <nl> - <nl> - BIND ( & runtime ) ; <nl> - { <nl> - SetPropertyStrict ( context , array , <nl> - CodeStubAssembler : : LengthStringConstant ( ) , length ) ; <nl> - Goto ( & done ) ; <nl> - } <nl> - <nl> - BIND ( & done ) ; <nl> - } <nl> } ; <nl> <nl> / / ES # sec - array . from <nl> TF_BUILTIN ( ArrayFrom , ArrayPopulatorAssembler ) { <nl> BIND ( & finished ) ; <nl> <nl> / / Finally set the length on the output and return it . <nl> - GenerateSetLength ( context , array . value ( ) , length . value ( ) ) ; <nl> + SetPropertyLength ( context , array . value ( ) , length . value ( ) ) ; <nl> args . PopAndReturn ( array . value ( ) ) ; <nl> } <nl> <nl> mmm a / src / code - stub - assembler . cc <nl> ppp b / src / code - stub - assembler . cc <nl> TNode < JSArray > CodeStubAssembler : : ArrayCreate ( TNode < Context > context , <nl> return array . 
value ( ) ; <nl> } <nl> <nl> + void CodeStubAssembler : : SetPropertyLength ( TNode < Context > context , <nl> + TNode < Object > array , <nl> + TNode < Number > length ) { <nl> + Label fast ( this ) , runtime ( this ) , done ( this ) ; <nl> + / / There ' s no need to set the length , if <nl> + / / 1 ) the array is a fast JS array and <nl> + / / 2 ) the new length is equal to the old length . <nl> + / / as the set is not observable . Otherwise fall back to the run - time . <nl> + <nl> + / / 1 ) Check that the array has fast elements . <nl> + / / TODO ( delphick ) : Consider changing this since it does an an unnecessary <nl> + / / check for SMIs . <nl> + / / TODO ( delphick ) : Also we could hoist this to after the array construction <nl> + / / and copy the args into array in the same way as the Array constructor . <nl> + BranchIfFastJSArray ( array , context , & fast , & runtime ) ; <nl> + <nl> + BIND ( & fast ) ; <nl> + { <nl> + TNode < JSArray > fast_array = CAST ( array ) ; <nl> + <nl> + TNode < Smi > length_smi = CAST ( length ) ; <nl> + TNode < Smi > old_length = LoadFastJSArrayLength ( fast_array ) ; <nl> + CSA_ASSERT ( this , TaggedIsPositiveSmi ( old_length ) ) ; <nl> + <nl> + / / 2 ) If the created array ' s length matches the required length , then <nl> + / / there ' s nothing else to do . Otherwise use the runtime to set the <nl> + / / property as that will insert holes into excess elements or shrink <nl> + / / the backing store as appropriate . <nl> + Branch ( SmiNotEqual ( length_smi , old_length ) , & runtime , & done ) ; <nl> + } <nl> + <nl> + BIND ( & runtime ) ; <nl> + { <nl> + SetPropertyStrict ( context , array , CodeStubAssembler : : LengthStringConstant ( ) , <nl> + length ) ; <nl> + Goto ( & done ) ; <nl> + } <nl> + <nl> + BIND ( & done ) ; <nl> + } <nl> + <nl> } / / namespace internal <nl> } / / namespace v8 <nl> mmm a / src / code - stub - assembler . h <nl> ppp b / src / code - stub - assembler . h <nl> class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler : : CodeAssembler { <nl> <nl> void PerformStackCheck ( TNode < Context > context ) ; <nl> <nl> + void SetPropertyLength ( TNode < Context > context , TNode < Object > array , <nl> + TNode < Number > length ) ; <nl> + <nl> protected : <nl> / / Implements DescriptorArray : : Search ( ) . <nl> void DescriptorLookup ( SloppyTNode < Name > unique_name , <nl> | [ builtins ] fix Array . of performance regression | v8/v8 | d63fb52095980d28b5d9faa8388bbde8497c5663 | 2018-10-31T20:56:28Z |
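The v8 row above hoists the former ArrayPopulatorAssembler::GenerateSetLength into CodeStubAssembler::SetPropertyLength so that Torque code (array.tq) can reach it as an extern macro. The core optimization, restated as a standalone C++ sketch (Array here is a hypothetical stand-in for a fast JSArray, not a V8 type):

```cpp
#include <functional>

// Sketch of the fast-path test SetPropertyLength performs: skip the "length"
// store entirely when it would be unobservable; otherwise take the strict
// runtime path, which may insert holes or shrink the backing store.
struct Array {
    int length = 0;
    bool is_fast_js_array = true;
    std::function<void(int)> set_length_strict;  // stands in for SetPropertyStrict
};

void SetPropertyLengthSketch(Array &a, int new_length) {
    if (a.is_fast_js_array && a.length == new_length)
        return;                       // fast path: nothing observable to do
    a.set_length_strict(new_length);  // slow path via the runtime
}
```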
mmm a / ios / sdk / WeexSDK / Sources / Component / WXTextComponent . m <nl> ppp b / ios / sdk / WeexSDK / Sources / Component / WXTextComponent . m <nl> - ( void ) dealloc <nl> if ( _needsRemoveObserver ) { <nl> [ [ NSNotificationCenter defaultCenter ] removeObserver : self name : WX_ICONFONT_DOWNLOAD_NOTIFICATION object : nil ] ; <nl> } <nl> - _ctAttributedString = nil ; <nl> pthread_mutex_destroy ( & _ctAttributedStringMutex ) ; <nl> pthread_mutexattr_destroy ( & _propertMutexAttr ) ; <nl> } <nl> - ( void ) fillAttributes : ( NSDictionary * ) attributes <nl> - ( void ) setNeedsRepaint <nl> { <nl> _textStorage = nil ; <nl> + <nl> + pthread_mutex_lock ( & ( _ctAttributedStringMutex ) ) ; <nl> _ctAttributedString = nil ; <nl> + pthread_mutex_unlock ( & ( _ctAttributedStringMutex ) ) ; <nl> + <nl> } <nl> <nl> # pragma mark - Subclass <nl> | * [ ios ] try to fix attributedString crash | apache/incubator-weex | 99086f301c9a782ba50df4be9ea3fe22ad070581 | 2017-08-10T06:30:09Z |
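The weex row above is a small data race fix: _ctAttributedString was cleared in setNeedsRepaint without taking the mutex that readers hold, and dealloc cleared it again right before destroying that mutex. A C++ restatement of the pattern (illustrative only; the original is Objective-C, and TextComponent is a hypothetical stand-in for WXTextComponent):

```cpp
#include <pthread.h>

struct TextComponent {
    pthread_mutex_t mutex;
    const void *attributedString = nullptr;  // stands in for _ctAttributedString

    TextComponent()  { pthread_mutex_init(&mutex, nullptr); }
    ~TextComponent() { pthread_mutex_destroy(&mutex); }  // destroy last; no guarded writes here

    void setNeedsRepaint() {
        pthread_mutex_lock(&mutex);
        attributedString = nullptr;  // the write now happens under the same lock readers take
        pthread_mutex_unlock(&mutex);
    }
};
```

Note the diff also drops the unguarded nil assignment from dealloc, so nothing touches the guarded state around mutex destruction.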
mmm a / addons / resource . language . en_gb / resources / strings . po <nl> ppp b / addons / resource . language . en_gb / resources / strings . po <nl> msgctxt " # 19188 " <nl> msgid " All your guide data will be cleared . Are you sure ? " <nl> msgstr " " <nl> <nl> - # . pvr settings " automatically start last played channel after Kodi startup " setting label <nl> - # : system / settings / settings . xml <nl> - msgctxt " # 19189 " <nl> - msgid " Continue last channel on startup " <nl> - msgstr " " <nl> - <nl> - # . pvr settings " automatically start last played channel after Kodi startup " setting value <nl> - # : system / settings / settings . xml <nl> - msgctxt " # 19190 " <nl> - msgid " Background " <nl> - msgstr " " <nl> + # empty string from id 19189 to 19190 <nl> <nl> # : addons / skin . estuary / xml / SettingsSystemInfo . xml <nl> msgctxt " # 19191 " <nl> msgctxt " # 19287 " <nl> msgid " All channels " <nl> msgstr " " <nl> <nl> - # . pvr settings ' continue last channel on startup ' setting value label . <nl> - # : system / settings / settings . xml <nl> - msgctxt " # 19288 " <nl> - msgid " Foreground " <nl> - msgstr " " <nl> + # empty string with id 19288 <nl> <nl> # . Label for button to hide a group in the group manager <nl> # : addons / skin . estuary / xml / DialogPVRGroupManager . xml <nl> msgctxt " # 36160 " <nl> msgid " Enable VTB hardware decoding of video files . " <nl> msgstr " " <nl> <nl> - # empty string with id 36161 <nl> + # . Description of setting with label # 508 " Startup action " <nl> + # : system / settings / settings . xml <nl> + msgctxt " # 36161 " <nl> + msgid " Select an action Kodi will perform on startup . " <nl> + msgstr " " <nl> <nl> # . Description of setting with label # 13432 " Allow hardware acceleration ( VideoToolbox ) " <nl> # : system / settings / settings . xml <nl> msgctxt " # 36227 " <nl> msgid " Switch to full screen display when starting playback of channels and recordings . " <nl> msgstr " " <nl> <nl> - # : system / settings / settings . xml <nl> - msgctxt " # 36228 " <nl> - msgid " Continue with the last viewed channel on startup . " <nl> - msgstr " " <nl> + # empty string with id 36228 <nl> <nl> # : system / settings / settings . xml <nl> msgctxt " # 36229 " <nl> msgstr " " <nl> msgctxt " # 39024 " <nl> msgid " Dependencies " <nl> msgstr " " <nl> + <nl> + # empty strings from id 39025 to 39099 <nl> + <nl> + # : system / settings / settings . xml <nl> + msgctxt " # 39100 " <nl> + msgid " Perform on startup " <nl> + msgstr " " <nl> + <nl> + # . Startup action option label <nl> + # : system / settings / settings . xml <nl> + msgctxt " # 39101 " <nl> + msgid " Play TV " <nl> + msgstr " " <nl> + <nl> + # : system / settings / settings . xml <nl> + msgctxt " # 39102 " <nl> + msgid " Play Radio " <nl> + msgstr " " <nl> mmm a / system / settings / settings . xml <nl> ppp b / system / settings / settings . xml <nl> <nl> < default > true < / default > <nl> < control type = " toggle " / > <nl> < / setting > <nl> - < setting id = " pvrplayback . startlast " type = " integer " label = " 19189 " help = " 36228 " > <nl> - < level > 1 < / level > <nl> - < default > 0 < / default > < ! - - CONTINUE_LAST_CHANNEL_OFF - - > <nl> - < constraints > <nl> - < options > <nl> - < option label = " 106 " > 0 < / option > < ! - - CONTINUE_LAST_CHANNEL_OFF - - > <nl> - < option label = " 19190 " > 1 < / option > < ! - - CONTINUE_LAST_CHANNEL_IN_BACKGROUND - - > <nl> - < option label = " 19288 " > 2 < / option > < ! 
- - CONTINUE_LAST_CHANNEL_IN_FOREGROUND - - > <nl> - < / options > <nl> - < / constraints > <nl> - < control type = " list " format = " string " / > <nl> - < / setting > <nl> < setting id = " pvrplayback . signalquality " type = " boolean " label = " 19037 " help = " 36229 " > <nl> < level > 2 < / level > <nl> < default > true < / default > <nl> <nl> < / category > <nl> < category id = " other " label = " 14224 " help = " 36292 " > <nl> < group id = " 2 " label = " 14227 " > <nl> + < setting id = " lookandfeel . startupaction " type = " integer " label = " 39100 " help = " 36161 " > <nl> + < level > 0 < / level > <nl> + < default > 0 < / default > < ! - - STARTUP_ACTION_NONE - - > <nl> + < constraints > <nl> + < options > <nl> + < option label = " 231 " > 0 < / option > < ! - - STARTUP_ACTION_NONE - - > <nl> + < option label = " 39101 " > 1 < / option > < ! - - STARTUP_ACTION_PLAY_TV - - > <nl> + < option label = " 39102 " > 2 < / option > < ! - - STARTUP_ACTION_PLAY_RADIO - - > <nl> + < / options > <nl> + < / constraints > <nl> + < control type = " list " format = " string " / > <nl> + < / setting > <nl> < setting id = " lookandfeel . startupwindow " type = " integer " label = " 512 " help = " 36109 " > <nl> < level > 0 < / level > <nl> < default > 10000 < / default > < ! - - WINDOW_HOME - - > <nl> mmm a / xbmc / Application . h <nl> ppp b / xbmc / Application . h <nl> struct ReplayGainSettings <nl> bool bAvoidClipping ; <nl> } ; <nl> <nl> + enum StartupAction <nl> + { <nl> + STARTUP_ACTION_NONE = 0 , <nl> + STARTUP_ACTION_PLAY_TV , <nl> + STARTUP_ACTION_PLAY_RADIO <nl> + } ; <nl> + <nl> class CApplication : public CXBApplicationEx , public IPlayerCallback , public IMsgTargetCallback , <nl> public ISettingCallback , public ISettingsHandler , public ISubSettings , <nl> public KODI : : MESSAGING : : IMessageTarget <nl> mmm a / xbmc / pvr / PVRGUIActions . cpp <nl> ppp b / xbmc / pvr / PVRGUIActions . cpp <nl> namespace PVR <nl> CPVRGUIActions : : CPVRGUIActions ( ) <nl> : m_bChannelScanRunning ( false ) , <nl> m_settings ( { <nl> + CSettings : : SETTING_LOOKANDFEEL_STARTUPACTION , <nl> CSettings : : SETTING_PVRRECORD_INSTANTRECORDTIME , <nl> CSettings : : SETTING_PVRRECORD_INSTANTRECORDACTION , <nl> CSettings : : SETTING_PVRPLAYBACK_SWITCHTOFULLSCREEN , <nl> - CSettings : : SETTING_PVRPLAYBACK_STARTLAST , <nl> CSettings : : SETTING_PVRPARENTAL_PIN , <nl> CSettings : : SETTING_PVRPARENTAL_ENABLED <nl> } ) <nl> namespace PVR <nl> return false ; <nl> } <nl> <nl> - bool CPVRGUIActions : : ContinueLastPlayedChannel ( ) const <nl> + bool CPVRGUIActions : : PlayChannelOnStartup ( ) const <nl> { <nl> - const CFileItemPtr item ( CServiceBroker : : GetPVRManager ( ) . ChannelGroups ( ) - > GetLastPlayedChannel ( ) ) ; <nl> - const CPVRChannelPtr channel ( item ? item - > GetPVRChannelInfoTag ( ) : CPVRChannelPtr ( ) ) ; <nl> - bool bWasPlaying = false ; <nl> - if ( channel ) <nl> - { <nl> - / / Obtain previous ' was playing on last app quit ' flag and reset it , then . <nl> - channel - > SetWasPlayingOnLastQuit ( false , bWasPlaying ) ; <nl> - } <nl> - <nl> - int iPlayMode = m_settings . GetIntValue ( CSettings : : SETTING_PVRPLAYBACK_STARTLAST ) ; <nl> - if ( iPlayMode = = CONTINUE_LAST_CHANNEL_OFF ) <nl> + int iAction = m_settings . GetIntValue ( CSettings : : SETTING_LOOKANDFEEL_STARTUPACTION ) ; <nl> + if ( iAction ! = STARTUP_ACTION_PLAY_TV & & <nl> + iAction ! = STARTUP_ACTION_PLAY_RADIO ) <nl> return false ; <nl> <nl> - / / Only switch to the channel if it was playing on last app quit . 
<nl> - if ( bWasPlaying ) <nl> + bool playTV = iAction = = STARTUP_ACTION_PLAY_TV ; <nl> + const CPVRChannelGroupsContainerPtr groups ( CServiceBroker : : GetPVRManager ( ) . ChannelGroups ( ) ) ; <nl> + CPVRChannelGroupPtr group = playTV ? groups - > GetGroupAllTV ( ) : groups - > GetGroupAllRadio ( ) ; <nl> + <nl> + / / get the last played channel or fallback to first channel <nl> + CFileItemPtr item ( group - > GetLastPlayedChannel ( ) ) ; <nl> + if ( item - > HasPVRChannelInfoTag ( ) ) <nl> + { <nl> + group = groups - > GetLastPlayedGroup ( item - > GetPVRChannelInfoTag ( ) - > ChannelID ( ) ) ; <nl> + } <nl> + else <nl> { <nl> - CLog : : Log ( LOGNOTICE , " PVRGUIActions - % s - continue playback on channel ' % s ' " , __FUNCTION__ , channel - > ChannelName ( ) . c_str ( ) ) ; <nl> - CServiceBroker : : GetPVRManager ( ) . SetPlayingGroup ( CServiceBroker : : GetPVRManager ( ) . ChannelGroups ( ) - > GetLastPlayedGroup ( channel - > ChannelID ( ) ) ) ; <nl> - return SwitchToChannel ( item , true , iPlayMode = = CONTINUE_LAST_CHANNEL_IN_FOREGROUND ) ; <nl> + / / fallback to first channel <nl> + auto channels ( group - > GetMembers ( ) ) ; <nl> + if ( channels . empty ( ) ) <nl> + return false ; <nl> + <nl> + item = std : : make_shared < CFileItem > ( channels . front ( ) . channel ) ; <nl> } <nl> <nl> - return false ; <nl> + CLog : : Log ( LOGNOTICE , " PVRGUIActions - % s - start playback of channel ' % s ' " , __FUNCTION__ , item - > GetPVRChannelInfoTag ( ) - > ChannelName ( ) . c_str ( ) ) ; <nl> + CServiceBroker : : GetPVRManager ( ) . SetPlayingGroup ( group ) ; <nl> + return SwitchToChannel ( item , true , true ) ; <nl> } <nl> <nl> bool CPVRGUIActions : : PlayMedia ( const CFileItemPtr & item ) const <nl> mmm a / xbmc / pvr / PVRGUIActions . h <nl> ppp b / xbmc / pvr / PVRGUIActions . h <nl> namespace PVR <nl> bool SwitchToChannel ( PlaybackType type ) const ; <nl> <nl> / * ! <nl> - * @ brief Continue playback of the last played channel . <nl> - * @ return True if playback was continued , false otherwise . <nl> + * @ brief Plays the last played channel or the first channel of TV or Radio on startup . <nl> + * @ return True if playback was started , false otherwise . <nl> * / <nl> - bool ContinueLastPlayedChannel ( ) const ; <nl> + bool PlayChannelOnStartup ( ) const ; <nl> <nl> / * ! <nl> * @ brief Hide a channel , always showing a confirmation dialog . <nl> mmm a / xbmc / pvr / PVRJobs . cpp <nl> ppp b / xbmc / pvr / PVRJobs . cpp <nl> bool CPVRChannelEntryTimeoutJob : : DoWork ( ) <nl> return false ; <nl> } <nl> <nl> - bool CPVRContinueLastChannelJob : : DoWork ( ) <nl> + bool CPVRPlayChannelOnStartupJob : : DoWork ( ) <nl> { <nl> - return CServiceBroker : : GetPVRManager ( ) . GUIActions ( ) - > ContinueLastPlayedChannel ( ) ; <nl> + return CServiceBroker : : GetPVRManager ( ) . GUIActions ( ) - > PlayChannelOnStartup ( ) ; <nl> } <nl> <nl> CPVREventlogJob : : CPVREventlogJob ( bool bNotifyUser , bool bError , const std : : string & label , const std : : string & msg , const std : : string & icon ) <nl> mmm a / xbmc / pvr / PVRJobs . h <nl> ppp b / xbmc / pvr / PVRJobs . 
h <nl> namespace PVR <nl> bool m_bOnOff ; <nl> } ; <nl> <nl> - class CPVRContinueLastChannelJob : public CJob <nl> + class CPVRPlayChannelOnStartupJob : public CJob <nl> { <nl> public : <nl> - CPVRContinueLastChannelJob ( ) = default ; <nl> - ~ CPVRContinueLastChannelJob ( ) override = default ; <nl> - const char * GetType ( ) const override { return " pvr - continue - last - channel - job " ; } <nl> + CPVRPlayChannelOnStartupJob ( ) = default ; <nl> + ~ CPVRPlayChannelOnStartupJob ( ) override = default ; <nl> + const char * GetType ( ) const override { return " pvr - play - channel - on - startup " ; } <nl> <nl> bool DoWork ( ) override ; <nl> } ; <nl> mmm a / xbmc / pvr / PVRManager . cpp <nl> ppp b / xbmc / pvr / PVRManager . cpp <nl> void CPVRManager : : Process ( void ) <nl> / * start job to search for missing channel icons * / <nl> TriggerSearchMissingChannelIcons ( ) ; <nl> <nl> - / * try to continue last watched channel * / <nl> - TriggerContinueLastChannel ( ) ; <nl> + / * try to play channel on startup * / <nl> + TriggerPlayChannelOnStartup ( ) ; <nl> } <nl> / * execute the next pending jobs if there are any * / <nl> try <nl> void CPVRManager : : OnWake ( ) <nl> / * start job to search for missing channel icons * / <nl> TriggerSearchMissingChannelIcons ( ) ; <nl> <nl> - / * continue last watched channel * / <nl> - TriggerContinueLastChannel ( ) ; <nl> + / * try to play channel on startup * / <nl> + TriggerPlayChannelOnStartup ( ) ; <nl> <nl> / * trigger PVR data updates * / <nl> TriggerChannelGroupsUpdate ( ) ; <nl> CGUIDialogProgressBarHandle * CPVRManager : : ShowProgressDialog ( const std : : string & <nl> } <nl> <nl> <nl> - void CPVRManager : : TriggerContinueLastChannel ( void ) <nl> + void CPVRManager : : TriggerPlayChannelOnStartup ( void ) <nl> { <nl> if ( IsStarted ( ) ) <nl> - CJobManager : : GetInstance ( ) . AddJob ( new CPVRContinueLastChannelJob ( ) , nullptr ) ; <nl> + CJobManager : : GetInstance ( ) . AddJob ( new CPVRPlayChannelOnStartupJob ( ) , nullptr ) ; <nl> } <nl> <nl> bool CPVRManager : : IsPlaying ( void ) const <nl> mmm a / xbmc / pvr / PVRManager . h <nl> ppp b / xbmc / pvr / PVRManager . h <nl> namespace PVR <nl> class CPVRClient ; <nl> class CPVRGUIInfo ; <nl> <nl> - enum ContinueLastChannelOnStartup <nl> - { <nl> - CONTINUE_LAST_CHANNEL_OFF = 0 , <nl> - CONTINUE_LAST_CHANNEL_IN_BACKGROUND , <nl> - CONTINUE_LAST_CHANNEL_IN_FOREGROUND <nl> - } ; <nl> - <nl> class CPVRManagerJobQueue <nl> { <nl> public : <nl> namespace PVR <nl> / * ! <nl> * @ brief Continue playback on the last played channel . <nl> * / <nl> - void TriggerContinueLastChannel ( void ) ; <nl> + void TriggerPlayChannelOnStartup ( void ) ; <nl> <nl> enum ManagerState <nl> { <nl> mmm a / xbmc / settings / Settings . cpp <nl> ppp b / xbmc / settings / Settings . cpp <nl> const std : : string CSettings : : SETTING_LOOKANDFEEL_SKINTHEME = " lookandfeel . skinth <nl> const std : : string CSettings : : SETTING_LOOKANDFEEL_SKINCOLORS = " lookandfeel . skincolors " ; <nl> const std : : string CSettings : : SETTING_LOOKANDFEEL_FONT = " lookandfeel . font " ; <nl> const std : : string CSettings : : SETTING_LOOKANDFEEL_SKINZOOM = " lookandfeel . skinzoom " ; <nl> + const std : : string CSettings : : SETTING_LOOKANDFEEL_STARTUPACTION = " lookandfeel . startupaction " ; <nl> const std : : string CSettings : : SETTING_LOOKANDFEEL_STARTUPWINDOW = " lookandfeel . startupwindow " ; <nl> const std : : string CSettings : : SETTING_LOOKANDFEEL_SOUNDSKIN = " lookandfeel . 
soundskin " ; <nl> const std : : string CSettings : : SETTING_LOOKANDFEEL_ENABLERSSFEEDS = " lookandfeel . enablerssfeeds " ; <nl> const std : : string CSettings : : SETTING_EPG_PREVENTUPDATESWHILEPLAYINGTV = " epg . pre <nl> const std : : string CSettings : : SETTING_EPG_IGNOREDBFORCLIENT = " epg . ignoredbforclient " ; <nl> const std : : string CSettings : : SETTING_EPG_RESETEPG = " epg . resetepg " ; <nl> const std : : string CSettings : : SETTING_PVRPLAYBACK_SWITCHTOFULLSCREEN = " pvrplayback . switchtofullscreen " ; <nl> - const std : : string CSettings : : SETTING_PVRPLAYBACK_STARTLAST = " pvrplayback . startlast " ; <nl> const std : : string CSettings : : SETTING_PVRPLAYBACK_SIGNALQUALITY = " pvrplayback . signalquality " ; <nl> const std : : string CSettings : : SETTING_PVRPLAYBACK_SCANTIME = " pvrplayback . scantime " ; <nl> const std : : string CSettings : : SETTING_PVRPLAYBACK_CONFIRMCHANNELSWITCH = " pvrplayback . confirmchannelswitch " ; <nl> mmm a / xbmc / settings / Settings . h <nl> ppp b / xbmc / settings / Settings . h <nl> class CSettings : public CSettingsBase , public CSettingCreator , public CSettingC <nl> static const std : : string SETTING_LOOKANDFEEL_SKINCOLORS ; <nl> static const std : : string SETTING_LOOKANDFEEL_FONT ; <nl> static const std : : string SETTING_LOOKANDFEEL_SKINZOOM ; <nl> + static const std : : string SETTING_LOOKANDFEEL_STARTUPACTION ; <nl> static const std : : string SETTING_LOOKANDFEEL_STARTUPWINDOW ; <nl> static const std : : string SETTING_LOOKANDFEEL_SOUNDSKIN ; <nl> static const std : : string SETTING_LOOKANDFEEL_ENABLERSSFEEDS ; <nl> class CSettings : public CSettingsBase , public CSettingCreator , public CSettingC <nl> static const std : : string SETTING_EPG_IGNOREDBFORCLIENT ; <nl> static const std : : string SETTING_EPG_RESETEPG ; <nl> static const std : : string SETTING_PVRPLAYBACK_SWITCHTOFULLSCREEN ; <nl> - static const std : : string SETTING_PVRPLAYBACK_STARTLAST ; <nl> static const std : : string SETTING_PVRPLAYBACK_SIGNALQUALITY ; <nl> static const std : : string SETTING_PVRPLAYBACK_SCANTIME ; <nl> static const std : : string SETTING_PVRPLAYBACK_CONFIRMCHANNELSWITCH ; <nl> | [ settings ] introduce new setting ' Startup action ' and add ' Play TV / Radio ' | xbmc/xbmc | a96ed68ef596954c6a9357ec2bda0a6d6965cca1 | 2017-07-24T08:02:04Z |
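The xbmc row above retires the PVR-specific "continue last channel" option in favor of a general lookandfeel.startupaction setting with a TV/radio fallback to the first channel. A condensed, readable restatement (a sketch, not the literal method) of the new CPVRGUIActions::PlayChannelOnStartup flow, using only symbols that appear in the hunks above:

```cpp
// Sketch; real signatures and members are in the PVRGUIActions hunks above.
// "settings" stands for the m_settings member the diff initializes.
bool PlayChannelOnStartupSketch() {
    int action = settings.GetIntValue(CSettings::SETTING_LOOKANDFEEL_STARTUPACTION);
    if (action != STARTUP_ACTION_PLAY_TV && action != STARTUP_ACTION_PLAY_RADIO)
        return false;  // STARTUP_ACTION_NONE: nothing to play

    bool playTV = (action == STARTUP_ACTION_PLAY_TV);
    auto groups = CServiceBroker::GetPVRManager().ChannelGroups();
    auto group = playTV ? groups->GetGroupAllTV() : groups->GetGroupAllRadio();

    CFileItemPtr item(group->GetLastPlayedChannel());
    if (!item->HasPVRChannelInfoTag()) {  // no history: fall back to the first channel
        auto channels = group->GetMembers();
        if (channels.empty())
            return false;
        item = std::make_shared<CFileItem>(channels.front().channel);
    }
    return SwitchToChannel(item, true, true);  // always starts in the foreground now
}
```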
mmm a / tests / string_test . cpp <nl> ppp b / tests / string_test . cpp <nl> void test_strings ( ) { <nl> ok ( ! strcmp ( " foo " , foo ) , " foo matches " ) ; <nl> } <nl> <nl> - ok ( w_string ( ) . empty ( ) , " nullptr string is empty " ) ; <nl> - ok ( w_string : : build ( " " ) . empty ( ) , " empty string is empty " ) ; <nl> + { <nl> + w_string defaultStr ; <nl> + ok ( defaultStr . empty ( ) , " default constructed string should be empty " ) ; <nl> + <nl> + w_string nullStr ( nullptr ) ; <nl> + ok ( nullStr . empty ( ) , " nullptr string should be empty " ) ; <nl> + <nl> + ok ( w_string_piece ( ) . empty ( ) , <nl> + " default constructed string piece shouldbe empty " ) ; <nl> + <nl> + ok ( w_string_piece ( nullptr ) . empty ( ) , <nl> + " nullptr string piece shouldbe empty " ) ; <nl> + <nl> + ok ( w_string : : build ( " " ) . empty ( ) , " empty string is empty " ) ; <nl> + } <nl> } <nl> <nl> void test_pointers ( ) { <nl> void test_path_equal ( ) { <nl> <nl> int main ( int , char * * ) { <nl> plan_tests ( <nl> - 83 <nl> + 86 <nl> # ifdef _WIN32 <nl> / / extra basename tests <nl> + 6 <nl> mmm a / watchman_string . h <nl> ppp b / watchman_string . h <nl> class w_string_piece { <nl> } <nl> <nl> inline bool empty ( ) const { <nl> - return size ( ) = = 0 ; <nl> + return e_ = = s_ ; <nl> } <nl> <nl> inline size_t size ( ) const { <nl> | add w_string : : empty and use it estimateSizeNeeded / toAppend | facebook/watchman | f35c1ff8e257ad5fe333646f0815a272668551c6 | 2017-06-19T21:06:57Z |
mmm a / tools / run_tests / run_xds_tests . py <nl> ppp b / tools / run_tests / run_xds_tests . py <nl> def __init__ ( self , compute , alpha_compute , project ) : <nl> server_uri = service_host_name + ' : ' + str ( gcp . service_port ) <nl> if args . xds_v3_support : <nl> client_env [ ' GRPC_XDS_EXPERIMENTAL_V3_SUPPORT ' ] = ' true ' <nl> - bootstrap_server_features . append ( ' xds_v3 ' ) <nl> + bootstrap_server_features . append ( ' xds_v3 ' ) <nl> if args . bootstrap_file : <nl> bootstrap_path = os . path . abspath ( args . bootstrap_file ) <nl> else : <nl> | fix format | grpc/grpc | a13f304b9f47c3865c0649ab2b14c25e12cc4b4b | 2020-08-14T19:46:29Z |
mmm a / xbmc / addons / AddonDatabase . cpp <nl> ppp b / xbmc / addons / AddonDatabase . cpp <nl> bool CAddonDatabase : : BreakAddon ( const std : : string & addonID , const std : : string & r <nl> addonID . c_str ( ) , reason . c_str ( ) ) ) ; <nl> } <nl> <nl> - bool CAddonDatabase : : HasAddon ( const std : : string & addonID ) <nl> - { <nl> - std : : string strWhereClause = PrepareSQL ( " addonID = ' % s ' " , addonID . c_str ( ) ) ; <nl> - std : : string strHasAddon = GetSingleValue ( " addon " , " id " , strWhereClause ) ; <nl> - <nl> - return ! strHasAddon . empty ( ) ; <nl> - } <nl> - <nl> bool CAddonDatabase : : IsAddonDisabled ( const std : : string & addonID ) <nl> { <nl> try <nl> bool CAddonDatabase : : GetBlacklisted ( std : : vector < std : : string > & addons ) <nl> return false ; <nl> } <nl> <nl> - bool CAddonDatabase : : IsSystemPVRAddonEnabled ( const std : : string & addonID ) <nl> - { <nl> - std : : string strWhereClause = PrepareSQL ( " addonID = ' % s ' " , addonID . c_str ( ) ) ; <nl> - std : : string strEnabled = GetSingleValue ( " pvrenabled " , " id " , strWhereClause ) ; <nl> - <nl> - return ! strEnabled . empty ( ) ; <nl> - } <nl> - <nl> std : : string CAddonDatabase : : IsAddonBroken ( const std : : string & addonID ) <nl> { <nl> return GetSingleValue ( PrepareSQL ( " SELECT reason FROM broken WHERE addonID = ' % s ' " , addonID . c_str ( ) ) ) ; <nl> } <nl> <nl> - bool CAddonDatabase : : HasDisabledAddons ( ) <nl> - { <nl> - try <nl> - { <nl> - if ( NULL = = m_pDB . get ( ) ) return false ; <nl> - if ( NULL = = m_pDS . get ( ) ) return false ; <nl> - <nl> - m_pDS - > query ( " select count ( id ) from disabled " ) ; <nl> - bool ret = ! m_pDS - > eof ( ) & & m_pDS - > fv ( 0 ) . get_asInt ( ) > 0 ; / / have rows - > have disabled addons <nl> - m_pDS - > close ( ) ; <nl> - return ret ; <nl> - } <nl> - catch ( . . . ) <nl> - { <nl> - CLog : : Log ( LOGERROR , " % s failed " , __FUNCTION__ ) ; <nl> - } <nl> - return false ; <nl> - } <nl> - <nl> bool CAddonDatabase : : BlacklistAddon ( const std : : string & addonID ) <nl> { <nl> try <nl> mmm a / xbmc / addons / AddonDatabase . h <nl> ppp b / xbmc / addons / AddonDatabase . h <nl> class CAddonDatabase : public CDatabase <nl> \ sa IsAddonDisabled , HasDisabledAddons * / <nl> bool DisableAddon ( const std : : string & addonID , bool disable = true ) ; <nl> <nl> - / * ! \ brief Checks if an addon is in the database . <nl> - \ param addonID id of the addon to be checked <nl> - \ return true if addon is in database , false if addon is not in database yet * / <nl> - bool HasAddon ( const std : : string & addonID ) ; <nl> - <nl> / * ! \ brief Check whether an addon has been disabled via DisableAddon . <nl> \ param addonID id of the addon to check <nl> \ return true if the addon is disabled , false otherwise <nl> \ sa DisableAddon , HasDisabledAddons * / <nl> bool IsAddonDisabled ( const std : : string & addonID ) ; <nl> <nl> - / * ! \ brief Check whether we have disabled addons . <nl> - \ return true if we have disabled addons , false otherwise <nl> - \ sa DisableAddon , IsAddonDisabled * / <nl> - bool HasDisabledAddons ( ) ; <nl> - <nl> - / * ! @ deprecated only here to allow clean upgrades from earlier pvr versions <nl> - * / <nl> - bool IsSystemPVRAddonEnabled ( const std : : string & addonID ) ; <nl> - <nl> / * ! \ brief Mark an addon as broken <nl> Sets a flag that this addon has been marked as broken in the repository . 
<nl> \ param addonID id of the addon to mark as broken <nl> | [ addons ] remove unused methods | xbmc/xbmc | b5802e728c0ea4938219e3a2d6493e484b772067 | 2015-11-10T21:00:40Z |
mmm a / tools / run - tests . py <nl> ppp b / tools / run - tests . py <nl> <nl> } <nl> <nl> TIMEOUT_DEFAULT = 60 <nl> - TIMEOUT_SCALEFACTOR = { " debug " : 4 , <nl> - " release " : 1 } <nl> <nl> VARIANTS = [ " default " , " stress " , " turbofan " , " nocrankshaft " ] <nl> <nl> - MODE_FLAGS = { <nl> - " debug " : [ " - - nohard - abort " , " - - nodead - code - elimination " , <nl> - " - - nofold - constants " , " - - enable - slow - asserts " , <nl> - " - - debug - code " , " - - verify - heap " ] , <nl> - " release " : [ " - - nohard - abort " , " - - nodead - code - elimination " , <nl> - " - - nofold - constants " ] } <nl> + DEBUG_FLAGS = [ " - - nohard - abort " , " - - nodead - code - elimination " , <nl> + " - - nofold - constants " , " - - enable - slow - asserts " , <nl> + " - - debug - code " , " - - verify - heap " ] <nl> + RELEASE_FLAGS = [ " - - nohard - abort " , " - - nodead - code - elimination " , <nl> + " - - nofold - constants " ] <nl> + <nl> + MODES = { <nl> + " debug " : { <nl> + " flags " : DEBUG_FLAGS , <nl> + " timeout_scalefactor " : 4 , <nl> + " status_mode " : " debug " , <nl> + " execution_mode " : " debug " , <nl> + " output_folder " : " debug " , <nl> + } , <nl> + " optdebug " : { <nl> + " flags " : DEBUG_FLAGS , <nl> + " timeout_scalefactor " : 4 , <nl> + " status_mode " : " debug " , <nl> + " execution_mode " : " debug " , <nl> + " output_folder " : " optdebug " , <nl> + } , <nl> + " release " : { <nl> + " flags " : RELEASE_FLAGS , <nl> + " timeout_scalefactor " : 1 , <nl> + " status_mode " : " release " , <nl> + " execution_mode " : " release " , <nl> + " output_folder " : " release " , <nl> + } , <nl> + # This mode requires v8 to be compiled with dchecks and slow dchecks . <nl> + " tryrelease " : { <nl> + " flags " : RELEASE_FLAGS + [ " - - enable - slow - asserts " ] , <nl> + " timeout_scalefactor " : 2 , <nl> + " status_mode " : " debug " , <nl> + " execution_mode " : " release " , <nl> + " output_folder " : " release " , <nl> + } , <nl> + } <nl> <nl> GC_STRESS_FLAGS = [ " - - gc - interval = 500 " , " - - stress - compaction " , <nl> " - - concurrent - recompilation - queue - length = 64 " , <nl> def ProcessOptions ( options ) : <nl> options . mode = " , " . join ( [ tokens [ 1 ] for tokens in options . arch_and_mode ] ) <nl> options . mode = options . mode . split ( " , " ) <nl> for mode in options . mode : <nl> - if not mode . lower ( ) in [ " debug " , " release " , " optdebug " ] : <nl> + if not mode . lower ( ) in MODES : <nl> print " Unknown mode % s " % mode <nl> return False <nl> if options . arch in [ " auto " , " native " ] : <nl> def Execute ( arch , mode , args , options , suites , workspace ) : <nl> shell_dir = options . shell_dir <nl> if not shell_dir : <nl> if options . buildbot : <nl> + # TODO ( machenbach ) : Get rid of different output folder location on <nl> + # buildbot . Currently this is capitalized Release and Debug . <nl> shell_dir = os . path . join ( workspace , options . outdir , mode ) <nl> mode = mode . lower ( ) <nl> else : <nl> - shell_dir = os . path . join ( workspace , options . outdir , <nl> - " % s . % s " % ( arch , mode ) ) <nl> + shell_dir = os . path . join ( <nl> + workspace , <nl> + options . outdir , <nl> + " % s . % s " % ( arch , MODES [ mode ] [ " output_folder " ] ) , <nl> + ) <nl> shell_dir = os . path . relpath ( shell_dir ) <nl> <nl> - if mode = = " optdebug " : <nl> - mode = " debug " # " optdebug " is just an alias . <nl> - <nl> # Populate context object . 
<nl> - mode_flags = MODE_FLAGS [ mode ] <nl> + mode_flags = MODES [ mode ] [ " flags " ] <nl> timeout = options . timeout <nl> if timeout = = - 1 : <nl> # Simulators are slow , therefore allow a longer default timeout . <nl> def Execute ( arch , mode , args , options , suites , workspace ) : <nl> else : <nl> timeout = TIMEOUT_DEFAULT ; <nl> <nl> - timeout * = TIMEOUT_SCALEFACTOR [ mode ] <nl> + timeout * = MODES [ mode ] [ " timeout_scalefactor " ] <nl> <nl> if options . predictable : <nl> # Predictable mode is slower . <nl> timeout * = 2 <nl> <nl> - ctx = context . Context ( arch , mode , shell_dir , <nl> + ctx = context . Context ( arch , MODES [ mode ] [ " execution_mode " ] , shell_dir , <nl> mode_flags , options . verbose , <nl> timeout , options . isolates , <nl> options . command_prefix , <nl> def Execute ( arch , mode , args , options , suites , workspace ) : <nl> " deopt_fuzzer " : False , <nl> " gc_stress " : options . gc_stress , <nl> " isolates " : options . isolates , <nl> - " mode " : mode , <nl> + " mode " : MODES [ mode ] [ " status_mode " ] , <nl> " no_i18n " : options . no_i18n , <nl> " no_snap " : options . no_snap , <nl> " simulator_run " : simulator_run , <nl> def Execute ( arch , mode , args , options , suites , workspace ) : <nl> progress_indicator , options . junitout , options . junittestsuite ) <nl> if options . json_test_results : <nl> progress_indicator = progress . JsonTestProgressIndicator ( <nl> - progress_indicator , options . json_test_results , arch , mode ) <nl> + progress_indicator , options . json_test_results , arch , <nl> + MODES [ mode ] [ " execution_mode " ] ) <nl> <nl> run_networked = not options . no_network <nl> if not run_networked : <nl> | Add try release mode to test runner . | v8/v8 | 7a8d0c02a5e3e9996ebfdabfb953fd787e4dd3d2 | 2015-01-29T10:22:57Z |
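The v8 test-runner row above replaces the scattered per-mode constants (MODE_FLAGS, TIMEOUT_SCALEFACTOR, the "optdebug" alias) with a single MODES table, which is what lets the new "tryrelease" mode land as one table entry. The same table-driven design, transcribed from the diff's Python into a C++ analogue for illustration (the values come from the MODES dict above; the C++ framing itself is an assumption for the sketch):

```cpp
#include <map>
#include <string>
#include <vector>

struct ModeConfig {
    std::vector<std::string> flags;  // V8 flags passed to every test
    int timeout_scalefactor;         // multiplier on the base timeout
    std::string status_mode;         // which status-file expectations apply
    std::string execution_mode;      // how the binary is expected to behave
    std::string output_folder;       // where the built shell lives
};

const std::map<std::string, ModeConfig> MODES = {
    {"release",    {{"--nohard-abort", "--nodead-code-elimination", "--nofold-constants"},
                    1, "release", "release", "release"}},
    // tryrelease runs release binaries but evaluates debug status expectations,
    // so it requires a build with dchecks and slow dchecks enabled.
    {"tryrelease", {{"--nohard-abort", "--nodead-code-elimination", "--nofold-constants",
                     "--enable-slow-asserts"},
                    2, "debug", "release", "release"}},
};
```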
mmm a / editor / animation_track_editor . cpp <nl> ppp b / editor / animation_track_editor . cpp <nl> void AnimationTrackEdit : : _notification ( int p_what ) { <nl> } <nl> text_color . a * = 0 . 7 ; <nl> } else if ( node ) { <nl> - Ref < Texture > icon ; <nl> - if ( has_icon ( node - > get_class ( ) , " EditorIcons " ) ) { <nl> - icon = get_icon ( node - > get_class ( ) , " EditorIcons " ) ; <nl> - } else { <nl> - icon = get_icon ( " Node " , " EditorIcons " ) ; <nl> - } <nl> + Ref < Texture > icon = EditorNode : : get_singleton ( ) - > get_object_icon ( node , " Node " ) ; <nl> <nl> draw_texture ( icon , Point2 ( ofs , int ( get_size ( ) . height - icon - > get_height ( ) ) / 2 ) ) ; <nl> icon_cache = icon ; <nl> void AnimationTrackEditor : : _update_tracks ( ) { <nl> if ( root & & root - > has_node ( base_path ) ) { <nl> Node * n = root - > get_node ( base_path ) ; <nl> if ( n ) { <nl> - if ( has_icon ( n - > get_class ( ) , " EditorIcons " ) ) { <nl> - icon = get_icon ( n - > get_class ( ) , " EditorIcons " ) ; <nl> - } <nl> + icon = EditorNode : : get_singleton ( ) - > get_object_icon ( n , " Node " ) ; <nl> name = n - > get_name ( ) ; <nl> tooltip = root - > get_path_to ( n ) ; <nl> } <nl> mmm a / editor / editor_data . cpp <nl> ppp b / editor / editor_data . cpp <nl> Object * EditorData : : instance_custom_type ( const String & p_type , const String & p_i <nl> <nl> for ( int i = 0 ; i < get_custom_types ( ) [ p_inherits ] . size ( ) ; i + + ) { <nl> if ( get_custom_types ( ) [ p_inherits ] [ i ] . name = = p_type ) { <nl> - Ref < Texture > icon = get_custom_types ( ) [ p_inherits ] [ i ] . icon ; <nl> Ref < Script > script = get_custom_types ( ) [ p_inherits ] [ i ] . script ; <nl> <nl> Object * ob = ClassDB : : instance ( p_inherits ) ; <nl> Object * EditorData : : instance_custom_type ( const String & p_type , const String & p_i <nl> ob - > call ( " set_name " , p_type ) ; <nl> } <nl> ob - > set_script ( script . get_ref_ptr ( ) ) ; <nl> - if ( icon . is_valid ( ) ) <nl> - ob - > set_meta ( " _editor_icon " , icon ) ; <nl> return ob ; <nl> } <nl> } <nl> mmm a / editor / editor_node . cpp <nl> ppp b / editor / editor_node . cpp <nl> void EditorNode : : stop_child_process ( ) { <nl> _menu_option_confirm ( RUN_STOP , false ) ; <nl> } <nl> <nl> + Ref < Script > EditorNode : : get_object_custom_type_base ( const Object * p_object ) const { <nl> + ERR_FAIL_COND_V ( ! p_object , NULL ) ; <nl> + <nl> + Ref < Script > script = p_object - > get_script ( ) ; <nl> + <nl> + if ( script . is_valid ( ) ) { <nl> + / / Uncommenting would break things ! Consider adding a parameter if you need it . <nl> + / / StringName name = EditorNode : : get_editor_data ( ) . script_class_get_name ( base_script - > get_path ( ) ) ; <nl> + / / if ( name ! = StringName ( ) ) <nl> + / / return name ; <nl> + <nl> + / / should probably be deprecated in 4 . x <nl> + StringName base = script - > get_instance_base_type ( ) ; <nl> + if ( base ! = StringName ( ) & & EditorNode : : get_editor_data ( ) . get_custom_types ( ) . has ( base ) ) { <nl> + const Vector < EditorData : : CustomType > & types = EditorNode : : get_editor_data ( ) . get_custom_types ( ) [ base ] ; <nl> + <nl> + Ref < Script > base_script = script ; <nl> + while ( base_script . is_valid ( ) ) { <nl> + for ( int i = 0 ; i < types . size ( ) ; + + i ) { <nl> + if ( types [ i ] . script = = base_script ) { <nl> + return types [ i ] . 
script ; <nl> + } <nl> + } <nl> + base_script = base_script - > get_base_script ( ) ; <nl> + } <nl> + } <nl> + } <nl> + <nl> + return NULL ; <nl> + } <nl> + <nl> + StringName EditorNode : : get_object_custom_type_name ( const Object * p_object ) const { <nl> + ERR_FAIL_COND_V ( ! p_object , StringName ( ) ) ; <nl> + <nl> + Ref < Script > script = p_object - > get_script ( ) ; <nl> + if ( script . is_null ( ) & & p_object - > is_class ( " Script " ) ) { <nl> + script = p_object ; <nl> + } <nl> + <nl> + if ( script . is_valid ( ) ) { <nl> + Ref < Script > base_script = script ; <nl> + while ( base_script . is_valid ( ) ) { <nl> + StringName name = EditorNode : : get_editor_data ( ) . script_class_get_name ( base_script - > get_path ( ) ) ; <nl> + if ( name ! = StringName ( ) ) <nl> + return name ; <nl> + <nl> + / / should probably be deprecated in 4 . x <nl> + StringName base = base_script - > get_instance_base_type ( ) ; <nl> + if ( base ! = StringName ( ) & & EditorNode : : get_editor_data ( ) . get_custom_types ( ) . has ( base ) ) { <nl> + const Vector < EditorData : : CustomType > & types = EditorNode : : get_editor_data ( ) . get_custom_types ( ) [ base ] ; <nl> + for ( int i = 0 ; i < types . size ( ) ; + + i ) { <nl> + if ( types [ i ] . script = = base_script ) { <nl> + return types [ i ] . name ; <nl> + } <nl> + } <nl> + } <nl> + base_script = base_script - > get_base_script ( ) ; <nl> + } <nl> + } <nl> + <nl> + return StringName ( ) ; <nl> + } <nl> + <nl> Ref < Texture > EditorNode : : get_object_icon ( const Object * p_object , const String & p_fallback ) const { <nl> ERR_FAIL_COND_V ( ! p_object | | ! gui_base , NULL ) ; <nl> <nl> Ref < Texture > EditorNode : : get_object_icon ( const Object * p_object , const String & p <nl> } <nl> <nl> if ( script . is_valid ( ) ) { <nl> - StringName name = EditorNode : : get_editor_data ( ) . script_class_get_name ( script - > get_path ( ) ) ; <nl> - String icon_path = EditorNode : : get_editor_data ( ) . script_class_get_icon_path ( name ) ; <nl> - if ( icon_path . length ( ) ) <nl> - return ResourceLoader : : load ( icon_path ) ; <nl> - <nl> - / / should probably be deprecated in 4 . x <nl> - StringName base = script - > get_instance_base_type ( ) ; <nl> - if ( base ! = StringName ( ) ) { <nl> - const Map < String , Vector < EditorData : : CustomType > > & p_map = EditorNode : : get_editor_data ( ) . get_custom_types ( ) ; <nl> - for ( const Map < String , Vector < EditorData : : CustomType > > : : Element * E = p_map . front ( ) ; E ; E = E - > next ( ) ) { <nl> - const Vector < EditorData : : CustomType > & ct = E - > value ( ) ; <nl> - for ( int i = 0 ; i < ct . size ( ) ; + + i ) { <nl> - if ( ct [ i ] . name = = base & & ct [ i ] . icon . is_valid ( ) ) { <nl> - return ct [ i ] . icon ; <nl> + Ref < Script > base_script = script ; <nl> + while ( base_script . is_valid ( ) ) { <nl> + StringName name = EditorNode : : get_editor_data ( ) . script_class_get_name ( base_script - > get_path ( ) ) ; <nl> + String icon_path = EditorNode : : get_editor_data ( ) . script_class_get_icon_path ( name ) ; <nl> + if ( icon_path . length ( ) ) <nl> + return ResourceLoader : : load ( icon_path ) ; <nl> + <nl> + / / should probably be deprecated in 4 . x <nl> + StringName base = base_script - > get_instance_base_type ( ) ; <nl> + if ( base ! = StringName ( ) & & EditorNode : : get_editor_data ( ) . get_custom_types ( ) . has ( base ) ) { <nl> + const Vector < EditorData : : CustomType > & types = EditorNode : : get_editor_data ( ) . 
get_custom_types ( ) [ base ] ; <nl> + for ( int i = 0 ; i < types . size ( ) ; + + i ) { <nl> + if ( types [ i ] . script = = base_script & & types [ i ] . icon . is_valid ( ) ) { <nl> + return types [ i ] . icon ; <nl> } <nl> } <nl> } <nl> + base_script = base_script - > get_base_script ( ) ; <nl> } <nl> } <nl> <nl> mmm a / editor / editor_node . h <nl> ppp b / editor / editor_node . h <nl> class EditorNode : public Node { <nl> void stop_child_process ( ) ; <nl> <nl> Ref < Theme > get_editor_theme ( ) const { return theme ; } <nl> + Ref < Script > get_object_custom_type_base ( const Object * p_object ) const ; <nl> + StringName get_object_custom_type_name ( const Object * p_object ) const ; <nl> Ref < Texture > get_object_icon ( const Object * p_object , const String & p_fallback = " Object " ) const ; <nl> Ref < Texture > get_class_icon ( const String & p_class , const String & p_fallback = " Object " ) const ; <nl> <nl> mmm a / editor / scene_tree_dock . cpp <nl> ppp b / editor / scene_tree_dock . cpp <nl> void SceneTreeDock : : _tool_selected ( int p_tool , bool p_confirm_override ) { <nl> <nl> Node * n = Object : : cast_to < Node > ( selection [ i ] ) ; <nl> Ref < Script > existing = n - > get_script ( ) ; <nl> - if ( existing . is_valid ( ) ) { <nl> - const RefPtr empty ; <nl> + Ref < Script > empty = EditorNode : : get_singleton ( ) - > get_object_custom_type_base ( n ) ; <nl> + if ( existing ! = empty ) { <nl> editor_data - > get_undo_redo ( ) . add_do_method ( n , " set_script " , empty ) ; <nl> editor_data - > get_undo_redo ( ) . add_undo_method ( n , " set_script " , existing ) ; <nl> } <nl> void SceneTreeDock : : _tree_rmb ( const Vector2 & p_menu_pos ) { <nl> menu - > clear ( ) ; <nl> <nl> Ref < Script > existing_script ; <nl> + bool exisiting_script_removable = true ; <nl> if ( selection . size ( ) = = 1 ) { <nl> <nl> Node * selected = selection [ 0 ] ; <nl> void SceneTreeDock : : _tree_rmb ( const Vector2 & p_menu_pos ) { <nl> menu - > add_separator ( ) ; <nl> <nl> existing_script = selected - > get_script ( ) ; <nl> + <nl> + if ( EditorNode : : get_singleton ( ) - > get_object_custom_type_base ( selected ) = = existing_script ) { <nl> + exisiting_script_removable = false ; <nl> + } <nl> } <nl> <nl> if ( profile_allow_script_editing ) { <nl> void SceneTreeDock : : _tree_rmb ( const Vector2 & p_menu_pos ) { <nl> menu - > add_icon_shortcut ( get_icon ( " ScriptExtend " , " EditorIcons " ) , ED_GET_SHORTCUT ( " scene_tree / extend_script " ) , TOOL_ATTACH_SCRIPT ) ; <nl> } <nl> } <nl> - if ( selection . size ( ) > 1 | | existing_script . is_valid ( ) ) { <nl> + if ( selection . size ( ) > 1 | | ( existing_script . is_valid ( ) & & exisiting_script_removable ) ) { <nl> menu - > add_icon_shortcut ( get_icon ( " ScriptRemove " , " EditorIcons " ) , ED_GET_SHORTCUT ( " scene_tree / clear_script " ) , TOOL_CLEAR_SCRIPT ) ; <nl> } <nl> menu - > add_separator ( ) ; <nl> mmm a / editor / scene_tree_editor . cpp <nl> ppp b / editor / scene_tree_editor . cpp <nl> bool SceneTreeEditor : : _add_nodes ( Node * p_node , TreeItem * p_parent ) { <nl> Color accent = get_color ( " accent_color " , " Editor " ) ; <nl> <nl> Ref < Script > script = p_node - > get_script ( ) ; <nl> - if ( ! script . is_null ( ) ) { <nl> + if ( ! script . is_null ( ) & & EditorNode : : get_singleton ( ) - > get_object_custom_type_base ( p_node ) ! 
= script ) { <nl> / / has script <nl> item - > add_button ( 0 , get_icon ( " Script " , " EditorIcons " ) , BUTTON_SCRIPT ) ; <nl> } else { <nl> - / / has no script <nl> + / / has no script ( or script is a custom type ) <nl> item - > set_custom_color ( 0 , get_color ( " disabled_font_color " , " Editor " ) ) ; <nl> item - > set_selectable ( 0 , false ) ; <nl> + <nl> + if ( ! script . is_null ( ) ) { / / make sure to mark the script if a custom type <nl> + item - > add_button ( 0 , get_icon ( " Script " , " EditorIcons " ) , BUTTON_SCRIPT ) ; <nl> + item - > set_button_disabled ( 0 , item - > get_button_count ( 0 ) - 1 , true ) ; <nl> + } <nl> + <nl> accent . a * = 0 . 7 ; <nl> } <nl> <nl> bool SceneTreeEditor : : _add_nodes ( Node * p_node , TreeItem * p_parent ) { <nl> item - > add_button ( 0 , get_icon ( " InstanceOptions " , " EditorIcons " ) , BUTTON_SUBSCENE , false , TTR ( " Open in Editor " ) ) ; <nl> item - > set_tooltip ( 0 , TTR ( " Instance : " ) + " " + p_node - > get_filename ( ) + " \ n " + TTR ( " Type : " ) + " " + p_node - > get_class ( ) ) ; <nl> } else { <nl> - item - > set_tooltip ( 0 , String ( p_node - > get_name ( ) ) + " \ n " + TTR ( " Type : " ) + " " + p_node - > get_class ( ) ) ; <nl> + StringName type = EditorNode : : get_singleton ( ) - > get_object_custom_type_name ( p_node ) ; <nl> + if ( type = = StringName ( ) ) <nl> + type = p_node - > get_class ( ) ; <nl> + item - > set_tooltip ( 0 , String ( p_node - > get_name ( ) ) + " \ n " + TTR ( " Type : " ) + " " + type ) ; <nl> } <nl> <nl> if ( can_open_instance & & undo_redo ) { / / Show buttons only when necessary ( SceneTreeDock ) to avoid crashes <nl> bool SceneTreeEditor : : _add_nodes ( Node * p_node , TreeItem * p_parent ) { <nl> Ref < Script > script = p_node - > get_script ( ) ; <nl> if ( ! script . is_null ( ) ) { <nl> item - > add_button ( 0 , get_icon ( " Script " , " EditorIcons " ) , BUTTON_SCRIPT , false , TTR ( " Open Script : " ) + " " + script - > get_path ( ) ) ; <nl> + if ( EditorNode : : get_singleton ( ) - > get_object_custom_type_base ( p_node ) = = script ) { <nl> + item - > set_button_color ( 0 , item - > get_button_count ( 0 ) - 1 , Color ( 1 , 1 , 1 , 0 . 5 ) ) ; <nl> + } <nl> } <nl> <nl> if ( p_node - > is_class ( " CanvasItem " ) ) { <nl> | Merge pull request from bojidar - bg / 6067 - pale - customtype - script | godotengine/godot | 3aeca706599c0c4eff22cd859869e143539e2c99 | 2019-07-20T09:49:52Z |
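The Godot change above repeatedly walks a script's inheritance chain to find the first base registered as a custom type. Here is a stripped-down sketch of that loop, assuming stand-in Script/CustomType records rather than Godot's real classes; the file names in main() are invented.

```cpp
#include <iostream>
#include <memory>
#include <string>
#include <vector>

struct Script {
  std::string name;
  std::shared_ptr<Script> base;  // parent in the inheritance chain
};

struct CustomType {
  std::string type_name;
  std::shared_ptr<Script> script;
};

// Walk script -> base -> ... and report the first registered match.
std::string custom_type_name(std::shared_ptr<Script> script,
                             const std::vector<CustomType>& types) {
  for (auto s = script; s; s = s->base)
    for (const auto& t : types)
      if (t.script == s) return t.type_name;
  return "";  // StringName() in the diff: no custom type found
}

int main() {
  auto base = std::make_shared<Script>(Script{"heart_base.gd", nullptr});
  auto derived = std::make_shared<Script>(Script{"heart.gd", base});
  std::vector<CustomType> types = {{"Heart", base}};
  std::cout << custom_type_name(derived, types) << "\n";  // Heart
}
```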
mmm a / src / futex - emulation . cc <nl> ppp b / src / futex - emulation . cc <nl> base : : LazyInstance < FutexWaitList > : : type FutexEmulation : : wait_list_ = <nl> void FutexWaitListNode : : NotifyWake ( ) { <nl> / / Lock the FutexEmulation mutex before notifying . We know that the mutex <nl> / / will have been unlocked if we are currently waiting on the condition <nl> - / / variable . <nl> - / / <nl> - / / The mutex may also not be locked if the other thread is currently handling <nl> - / / interrupts , or if FutexEmulation : : Wait was just called and the mutex <nl> - / / hasn ' t been locked yet . In either of those cases , we set the interrupted <nl> - / / flag to true , which will be tested after the mutex is re - locked . <nl> + / / variable . The mutex will not be locked if FutexEmulation : : Wait hasn ' t <nl> + / / locked it yet . In that case , we set the interrupted_ <nl> + / / flag to true , which will be tested after the mutex locked by a future wait . <nl> base : : MutexGuard lock_guard ( FutexEmulation : : mutex_ . Pointer ( ) ) ; <nl> - if ( waiting_ ) { <nl> - cond_ . NotifyOne ( ) ; <nl> - interrupted_ = true ; <nl> - } <nl> + / / if not waiting , this will not have any effect . <nl> + cond_ . NotifyOne ( ) ; <nl> + interrupted_ = true ; <nl> } <nl> <nl> <nl> Object * FutexEmulation : : Wait ( Isolate * isolate , <nl> int32_t value , double rel_timeout_ms ) { <nl> DCHECK_LT ( addr , array_buffer - > byte_length ( ) ) ; <nl> <nl> - void * backing_store = array_buffer - > backing_store ( ) ; <nl> - int32_t * p = <nl> - reinterpret_cast < int32_t * > ( static_cast < int8_t * > ( backing_store ) + addr ) ; <nl> - <nl> - FutexWaitListNode * node = isolate - > futex_wait_list_node ( ) ; <nl> - node - > backing_store_ = backing_store ; <nl> - node - > wait_addr_ = addr ; <nl> - node - > waiting_ = true ; <nl> - <nl> bool use_timeout = rel_timeout_ms ! = V8_INFINITY ; <nl> <nl> base : : TimeDelta rel_timeout ; <nl> Object * FutexEmulation : : Wait ( Isolate * isolate , <nl> addr , value , rel_timeout_ms , & stop_handle ) ; <nl> <nl> if ( isolate - > has_scheduled_exception ( ) ) { <nl> - node - > waiting_ = false ; <nl> return isolate - > PromoteScheduledException ( ) ; <nl> } <nl> <nl> Object * FutexEmulation : : Wait ( Isolate * isolate , <nl> <nl> do { / / Not really a loop , just makes it easier to break out early . <nl> base : : MutexGuard lock_guard ( mutex_ . Pointer ( ) ) ; <nl> + void * backing_store = array_buffer - > backing_store ( ) ; <nl> + <nl> + FutexWaitListNode * node = isolate - > futex_wait_list_node ( ) ; <nl> + node - > backing_store_ = backing_store ; <nl> + node - > wait_addr_ = addr ; <nl> + node - > waiting_ = true ; <nl> + <nl> / / Reset node - > waiting_ = false when leaving this scope ( but while <nl> / / still holding the lock ) . <nl> ResetWaitingOnScopeExit reset_waiting ( node ) ; <nl> <nl> + int32_t * p = <nl> + reinterpret_cast < int32_t * > ( static_cast < int8_t * > ( backing_store ) + addr ) ; <nl> if ( * p ! = value ) { <nl> result = Smi : : FromInt ( WaitReturnValue : : kNotEqual ) ; <nl> callback_result = AtomicsWaitEvent : : kNotEqual ; <nl> | [ wasm ] fix data race in futex - emulation wait | v8/v8 | 46186c50af46c8211c426d45fba476b2c97acab8 | 2018-11-28T00:54:14Z |
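The futex fix above is an instance of the standard condition-variable rule: any state a wake depends on must be written and read under the same mutex the condition variable uses, and a notification must leave a persistent flag so a waiter that has not locked yet still observes it. A self-contained sketch of that discipline follows, with generic names rather than v8's actual FutexWaitListNode.

```cpp
#include <condition_variable>
#include <iostream>
#include <mutex>
#include <thread>

std::mutex mu;
std::condition_variable cond;
bool interrupted = false;  // analogous to node->interrupted_

void notify_wake() {
  std::lock_guard<std::mutex> lock(mu);  // lock before notifying
  interrupted = true;  // effect persists even if nobody is waiting yet
  cond.notify_one();
}

void wait() {
  std::unique_lock<std::mutex> lock(mu);  // set up waiter state under the lock
  cond.wait(lock, [] { return interrupted; });  // predicate re-checked on wake
  std::cout << "woken without losing the notification\n";
}

int main() {
  std::thread waiter(wait);
  std::thread waker(notify_wake);
  waiter.join();
  waker.join();
}
```

Because the predicate is checked while holding the mutex, the wake cannot be lost even when notify_wake() runs before the waiter reaches cond.wait().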
mmm a / Source / CNTKv2LibraryDll / proto / onnx / CNTKToONNX . h <nl> ppp b / Source / CNTKv2LibraryDll / proto / onnx / CNTKToONNX . h <nl> <nl> # define CNTK_ONNX_MODEL_VERSION 1 <nl> # define MACRO_TO_STRING ( s ) # s <nl> const std : : string CNTK_ONNX_PRODUCER_NAME = " CNTK " ; <nl> + # ifdef _WIN32 <nl> + const std : : string CNTK_ONNX_PRODUCER_VERSION = CNTK_VERSION ; <nl> + # else <nl> const std : : string CNTK_ONNX_PRODUCER_VERSION = MACRO_TO_STRING ( CNTK_VERSION ) ; <nl> + # endif <nl> + <nl> <nl> namespace ONNXIR <nl> { <nl> mmm a / Source / CNTKv2LibraryDll / proto / onnx / core / graph . cpp <nl> ppp b / Source / CNTKv2LibraryDll / proto / onnx / core / graph . cpp <nl> namespace ONNXIR <nl> m_graph - > m_graphProtoSyncNeeded = true ; \ <nl> AttributeProto a ; \ <nl> a . set_name ( p_attrName ) ; \ <nl> - a . set_type ( enumType ) ; \ <nl> + a . set_type ( enumType ) ; \ <nl> a . set_ # # field ( p_value ) ; \ <nl> m_attributes . emplace ( p_attrName , a ) ; \ <nl> return true ; \ <nl> namespace ONNXIR <nl> m_graph - > m_graphProtoSyncNeeded = true ; \ <nl> AttributeProto a ; \ <nl> a . set_name ( p_attrName ) ; \ <nl> - a . set_type ( enumType ) ; \ <nl> + a . set_type ( enumType ) ; \ <nl> * ( a . mutable_ # # field ( ) ) = p_value ; \ <nl> m_attributes . emplace ( p_attrName , a ) ; \ <nl> return true ; \ <nl> namespace ONNXIR <nl> m_graph - > m_graphProtoSyncNeeded = true ; \ <nl> AttributeProto a ; \ <nl> a . set_name ( p_attrName ) ; \ <nl> - a . set_type ( enumType ) ; \ <nl> + a . set_type ( enumType ) ; \ <nl> for ( const auto & val : p_values ) \ <nl> { \ <nl> * ( a . mutable_ # # field ( ) - > Add ( ) ) = val ; \ <nl> | Fixing ONNX producer_version bug . | microsoft/CNTK | 0b7908593577105dac0c5a06580554067cd1b6bf | 2018-03-15T07:21:47Z |
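For context on the stringification dance above: with a single-level `#define MACRO_TO_STRING(s) #s`, the `#` operator suppresses macro expansion of its argument, so a version macro is stringified as its literal spelling; the usual remedy is a two-level helper, and a macro that already expands to a quoted string (as the Windows path here suggests for CNTK_VERSION) needs no stringification at all. A hedged demo of the two-level idiom, with invented version macros:

```cpp
#include <iostream>
#include <string>

#define MACRO_TO_STRING_HELPER(s) #s  // '#' blocks further expansion of s
#define MACRO_TO_STRING(s) MACRO_TO_STRING_HELPER(s)  // expand, then stringify

#define VERSION_TOKEN 2.4     // bare token: needs the two-level idiom
#define VERSION_STRING "2.4"  // already a string literal: usable directly

int main() {
  const std::string from_token = MACRO_TO_STRING(VERSION_TOKEN);
  const std::string from_string = VERSION_STRING;
  std::cout << from_token << " / " << from_string << "\n";  // 2.4 / 2.4
}
```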
mmm a / tensorflow / python / autograph / converters / call_trees_test . py <nl> ppp b / tensorflow / python / autograph / converters / call_trees_test . py <nl> def test_fn ( f ) : <nl> converter_testing . RESULT_OF_MOCK_CONVERTED_CALL + 3 ) <nl> self . assertListEqual ( self . dynamic_calls , [ ( ( ) , { } ) ] ) <nl> <nl> - def test_function_with_call_in_argument ( self ) : <nl> + def test_function_with_expression_in_argument ( self ) : <nl> <nl> def test_fn ( f , g ) : <nl> return f ( g ( ) + 7 ) + 3 <nl> def test_fn ( f , g ) : <nl> ( ( converter_testing . RESULT_OF_MOCK_CONVERTED_CALL + 7 , ) , { } ) , <nl> ] ) <nl> <nl> + def test_function_with_call_in_argument ( self ) : <nl> + <nl> + def test_fn ( f , g ) : <nl> + return f ( g ( ) ) + 3 <nl> + <nl> + with self . converted ( test_fn , call_trees , { } ) as result : <nl> + self . assertEqual ( <nl> + result . test_fn ( None , None ) , <nl> + converter_testing . RESULT_OF_MOCK_CONVERTED_CALL + 3 ) <nl> + self . assertListEqual ( self . dynamic_calls , [ <nl> + ( ( ) , { } ) , <nl> + ( ( converter_testing . RESULT_OF_MOCK_CONVERTED_CALL , ) , { } ) , <nl> + ] ) <nl> + <nl> def test_function_with_kwarg ( self ) : <nl> <nl> def test_fn ( f , a , b ) : <nl> | Additional test for function conversions . | tensorflow/tensorflow | 9eb453a230590d49478c716b6bb5ace09d33087c | 2019-03-22T01:47:32Z |
mmm a / xbmc / music / tags / test / TestTagLoaderTagLib . cpp <nl> ppp b / xbmc / music / tags / test / TestTagLoaderTagLib . cpp <nl> TYPED_TEST ( TestTagParser , FooProperties ) { <nl> EXPECT_EQ ( " foo " , tag . GetTitle ( ) ) ; <nl> } <nl> <nl> - class TestCTagLoaderTagLib : public : : testing : : Test , public CTagLoaderTagLib { } ; <nl> - TEST_F ( TestCTagLoaderTagLib , SetGenre ) <nl> + class TestTagLoaderTagLib : public : : testing : : Test , public CTagLoaderTagLib { } ; <nl> + TEST_F ( TestTagLoaderTagLib , SetGenre ) <nl> { <nl> CMusicInfoTag tag , tag2 ; <nl> const char * genre_nr [ ] = { " 0 " , " 2 " , " 4 " } ; <nl> TEST_F ( TestCTagLoaderTagLib , SetGenre ) <nl> <nl> } <nl> <nl> - TEST ( TestTagLoaderTagLib , SplitMBID ) <nl> + TEST_F ( TestTagLoaderTagLib , SplitMBID ) <nl> { <nl> CTagLoaderTagLib lib ; <nl> <nl> | changed : group all taglib tests in the same fixture | xbmc/xbmc | ecee8f3c0bfaef23b8eca28bfcae34d5456305d5 | 2017-12-14T21:22:13Z |
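The xbmc change above converts a free-standing TEST into a TEST_F on the shared fixture so both cases report under one suite. A minimal sketch of the resulting shape, assuming googletest is available; the fixture name and state below are made up:

```cpp
#include <gtest/gtest.h>

class TestTagLoader : public ::testing::Test {
 protected:
  int shared_state = 42;  // state every test in the fixture can share
};

// Both cases are TEST_F on the same fixture, so they group under one suite.
TEST_F(TestTagLoader, SetGenre) { EXPECT_EQ(shared_state, 42); }
TEST_F(TestTagLoader, SplitMBID) { EXPECT_GT(shared_state, 0); }

int main(int argc, char** argv) {
  ::testing::InitGoogleTest(&argc, argv);
  return RUN_ALL_TESTS();
}
```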
mmm a / format . cc <nl> ppp b / format . cc <nl> FMT_FUNC fmt : : internal : : UTF8ToUTF16 : : UTF8ToUTF16 ( fmt : : StringRef s ) { <nl> static const char ERROR_MSG [ ] = " cannot convert string from UTF - 8 to UTF - 16 " ; <nl> if ( length = = 0 ) <nl> FMT_THROW ( WindowsError ( GetLastError ( ) , ERROR_MSG ) ) ; <nl> - buffer_ . resize ( length ) ; <nl> + buffer_ . resize ( length + 1 ) ; <nl> length = MultiByteToWideChar ( <nl> CP_UTF8 , MB_ERR_INVALID_CHARS , s . data ( ) , s . size ( ) , & buffer_ [ 0 ] , length ) ; <nl> if ( length = = 0 ) <nl> FMT_THROW ( WindowsError ( GetLastError ( ) , ERROR_MSG ) ) ; <nl> + buffer_ [ length ] = 0 ; <nl> } <nl> <nl> FMT_FUNC fmt : : internal : : UTF16ToUTF8 : : UTF16ToUTF8 ( fmt : : WStringRef s ) { <nl> FMT_FUNC int fmt : : internal : : UTF16ToUTF8 : : convert ( fmt : : WStringRef s ) { <nl> int length = WideCharToMultiByte ( CP_UTF8 , 0 , s . data ( ) , s . size ( ) , 0 , 0 , 0 , 0 ) ; <nl> if ( length = = 0 ) <nl> return GetLastError ( ) ; <nl> - buffer_ . resize ( length ) ; <nl> + buffer_ . resize ( length + 1 ) ; <nl> length = WideCharToMultiByte ( <nl> CP_UTF8 , 0 , s . data ( ) , s . size ( ) , & buffer_ [ 0 ] , length , 0 , 0 ) ; <nl> if ( length = = 0 ) <nl> return GetLastError ( ) ; <nl> + buffer_ [ length ] = 0 ; <nl> return 0 ; <nl> } <nl> <nl> | Add terminating null character in UTF coverters | fmtlib/fmt | 7154238d452c10cdda132e76b032687f197aa749 | 2015-06-27T16:11:15Z |
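The fmt fix above follows from a Windows API detail: when MultiByteToWideChar / WideCharToMultiByte are given an explicit input length, they do not write a terminating null, so the caller must size the buffer to length + 1 and terminate it by hand. A portable stand-in showing just that buffer discipline:

```cpp
#include <cstddef>
#include <cstring>
#include <iostream>
#include <vector>

// Stand-in for a conversion API that reports a length but writes no
// terminator when given an explicit input size.
std::vector<char> convert(const char* data, std::size_t size) {
  std::size_t length = size;             // length the API would report
  std::vector<char> buffer(length + 1);  // resize(length + 1), as in the fix
  std::memcpy(buffer.data(), data, length);
  buffer[length] = 0;                    // the terminating null the fix adds
  return buffer;
}

int main() {
  std::vector<char> buf = convert("hello", 5);
  std::cout << buf.data() << "\n";  // safe: the buffer is null-terminated
}
```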
mmm a / misc / hooks / pre - commit - clang - format <nl> ppp b / misc / hooks / pre - commit - clang - format <nl> do <nl> if grep - q " thirdparty " < < < $ file ; then <nl> continue ; <nl> fi <nl> + if grep - q " platform / android / java / src / com " < < < $ file ; then <nl> + continue ; <nl> + fi <nl> <nl> # ignore file if we do check for file extensions and the file <nl> # does not match any of the extensions specified in $ FILE_EXTS <nl> mmm a / misc / travis / clang - format . sh <nl> ppp b / misc / travis / clang - format . sh <nl> else <nl> RANGE = HEAD <nl> fi <nl> <nl> - FILES = $ ( git diff - tree - - no - commit - id - - name - only - r $ RANGE | grep - v thirdparty / | grep - E " \ . ( c | h | cpp | hpp | cc | hh | cxx | m | mm | inc | java | glsl ) $ " ) <nl> + FILES = $ ( git diff - tree - - no - commit - id - - name - only - r $ RANGE | grep - v thirdparty / | grep - v platform / android / java / src / com / | grep - E " \ . ( c | h | cpp | hpp | cc | hh | cxx | m | mm | inc | java | glsl ) $ " ) <nl> echo " Checking files : \ n $ FILES " <nl> <nl> # create a random filename to store our generated patch <nl> | Style : Don ' t apply clang - format to platform / android / java / src / com | godotengine/godot | bd9f6c23cbcce1ef2b334124c1cb9d0e3a7696ca | 2019-08-27T12:33:41Z |
mmm a / src / objective - c / NetworkTransitionBehavior . md <nl> ppp b / src / objective - c / NetworkTransitionBehavior . md <nl> The expected gRPC iOS channel and network transition behaviors are : <nl> During the backoff period , any call to the same host will wait until the <nl> first of the following events occur : <nl> * Connection succeeded ; calls will be made using this channel ; <nl> - * Conncetion failed ; calls will be failed and return UNAVAILABLE status code ; <nl> + * Connection failed ; calls will be failed and return UNAVAILABLE status code ; <nl> * The call ' s deadline is reached ; the call will fail and return <nl> DEADLINE \ _EXCEEDED status code . <nl> The length of backoff period of a channel is reset whenever a connection <nl> | Fix spelling error | grpc/grpc | 8ec5886e3938ca2d2d52504137fe857fb7bf5395 | 2020-05-01T14:10:40Z |
mmm a / tensorflow / python / compat / compat . py <nl> ppp b / tensorflow / python / compat / compat . py <nl> <nl> # This value changes every day with an automatic CL . It can be modified in code <nl> # via ` forward_compatibility_horizon ( ) ` or with the environment variable <nl> # TF_FORWARD_COMPATIBILITY_DELTA_DAYS , which is added to the compatibility date . <nl> - _FORWARD_COMPATIBILITY_HORIZON = datetime . date ( 2020 , 5 , 13 ) <nl> + _FORWARD_COMPATIBILITY_HORIZON = datetime . date ( 2020 , 5 , 14 ) <nl> _FORWARD_COMPATIBILITY_DELTA_DAYS_VAR_NAME = " TF_FORWARD_COMPATIBILITY_DELTA_DAYS " <nl> _FORWARD_COMPATIBILITY_DATE_NUMBER = None <nl> <nl> | compat : Update forward compatibility horizon to 2020 - 05 - 14 | tensorflow/tensorflow | 9d0cf955c1e1bd2e653b93bf939c6f1617d67881 | 2020-05-14T09:10:17Z |
mmm a / src / buffer_cache / mirrored / mirrored . cc <nl> ppp b / src / buffer_cache / mirrored / mirrored . cc <nl> bool mc_cache_t : : next_starting_up_step ( ) { <nl> mc_config_block_t * c = reinterpret_cast < mc_config_block_t * > ( c_buf - > data ) ; <nl> c - > magic = c - > expected_magic ; <nl> c - > cache = * static_config ; <nl> + c_buf - > writeback_buf . set_dirty ( ) ; <nl> + c_buf - > writeback_buf . needs_flush = true ; <nl> } <nl> <nl> / * Initialize the diff storage ( needs coro context ) * / <nl> mmm a / src / fsck / checker . cc <nl> ppp b / src / fsck / checker . cc <nl> <nl> # include " btree / leaf_node . hpp " <nl> # include " btree / internal_node . hpp " <nl> # include " buffer_cache / large_buf . hpp " <nl> + # include " buffer_cache / mirrored / mirrored . hpp " <nl> # include " fsck / raw_block . hpp " <nl> <nl> namespace fsck { <nl> | Fix mockio build and initial mc_config block creation | rethinkdb/rethinkdb | c9314eaf83e4a5ab1623c1b7b5e4aca47dbeff58 | 2011-02-10T23:24:09Z |
mmm a / java / demo / src / com / google / phonenumbers / PhoneNumberParserServlet . java <nl> ppp b / java / demo / src / com / google / phonenumbers / PhoneNumberParserServlet . java <nl> <nl> <nl> import java . io . IOException ; <nl> import java . io . InputStream ; <nl> + import java . io . UnsupportedEncodingException ; <nl> + import java . net . URLEncoder ; <nl> import java . util . Locale ; <nl> import java . util . StringTokenizer ; <nl> <nl> private void appendLine ( String title , String data , StringBuilder output ) { <nl> output . append ( " < / TR > " ) ; <nl> } <nl> <nl> + / * * <nl> + * Returns a link to create a new github issue with the relevant information . <nl> + * / <nl> + private String getNewIssueLink ( String phoneNumber , String defaultCountry ) <nl> + throws UnsupportedEncodingException { <nl> + boolean hasDefaultCountry = ! defaultCountry . isEmpty ( ) & & defaultCountry ! = " ZZ " ; <nl> + String issueTitle = " Validation issue with " + phoneNumber <nl> + + ( hasDefaultCountry ? " ( " + defaultCountry + " ) " : " " ) ; <nl> + <nl> + / / Issue template . This must be kept in sync with the template in <nl> + / / https : / / github . com / googlei18n / libphonenumber / blob / master / CONTRIBUTING . md . <nl> + StringBuilder issueTemplate = new StringBuilder ( ) ; <nl> + issueTemplate . append ( " Please read the \ " guidelines for contributing \ " ( linked above ) and fill " <nl> + + " in the template below . \ n \ n " ) ; <nl> + issueTemplate . append ( " Country / region affected ( e . g . , \ " US \ " ) : " ) <nl> + . append ( defaultCountry ) . append ( " \ n \ n " ) ; <nl> + issueTemplate . append ( " Example number ( s ) affected ( \ " + 1 555 555 - 1234 \ " ) : " ) <nl> + . append ( phoneNumber ) . append ( " \ n \ n " ) ; <nl> + issueTemplate . append ( <nl> + " The phone number range ( s ) to which the issue applies ( \ " + 1 555 555 - XXXX \ " ) : \ n \ n " ) ; <nl> + issueTemplate . append ( <nl> + " The type of the number ( s ) ( \ " fixed - line \ " , \ " mobile \ " , \ " short code \ " , etc . ) : \ n \ n " ) ; <nl> + issueTemplate . append ( <nl> + " The cost , if applicable ( \ " toll - free \ " , \ " premium rate \ " , \ " shared cost \ " ) : \ n \ n " ) ; <nl> + issueTemplate . append ( <nl> + " Supporting evidence ( for example , national numbering plan , announcement from mobile " <nl> + + " carrier , news article ) : * * IMPORTANT - anything posted here is made public . " <nl> + + " Read the guidelines first ! * * \ n \ n " ) ; <nl> + return " https : / / github . com / googlei18n / libphonenumber / issues / new ? title = " <nl> + + URLEncoder . encode ( issueTitle , UTF_8 . name ( ) ) + " & body = " <nl> + + URLEncoder . encode ( issueTemplate . toString ( ) , UTF_8 . name ( ) ) ; <nl> + } <nl> + <nl> / * * <nl> * The defaultCountry here is used for parsing phoneNumber . The languageCode and regionCode are <nl> * used to specify the language used for displaying the area descriptions generated from phone <nl> private StringBuilder getOutputForSingleNumber ( <nl> output . append ( " < / DIV > " ) ; <nl> } <nl> } <nl> + <nl> + String newIssueLink = getNewIssueLink ( phoneNumber , defaultCountry ) ; <nl> + String guidelinesLink = <nl> + " https : / / github . com / googlei18n / libphonenumber / blob / master / CONTRIBUTING . md " ; <nl> + output . 
append ( " < b style = \ " color : red \ " > File an issue < / b > : by clicking on " <nl> + + " < a target = \ " _blank \ " href = \ " " + newIssueLink + " \ " > this link < / a > , I confirm that I " <nl> + + " have read the < a target = \ " _blank \ " href = \ " " + guidelinesLink <nl> + + " \ " > contributor ' s guidelines < / a > . " ) ; <nl> + } catch ( UnsupportedEncodingException e ) { <nl> + output . append ( StringEscapeUtils . escapeHtml ( e . toString ( ) ) ) ; <nl> } catch ( NumberParseException e ) { <nl> output . append ( StringEscapeUtils . escapeHtml ( e . toString ( ) ) ) ; <nl> } <nl> | add link to file github issue directly from demo | google/libphonenumber | 223827488e8817edc8c7d89dedb7546a78cec89b | 2016-01-04T12:46:36Z |
mmm a / src / api . cc <nl> ppp b / src / api . cc <nl> MaybeLocal < Value > v8 : : TryCatch : : StackTrace ( Local < Context > context ) const { <nl> i : : Handle < i : : JSObject > obj ( i : : JSObject : : cast ( raw_obj ) , isolate_ ) ; <nl> i : : Handle < i : : String > name = isolate - > factory ( ) - > stack_string ( ) ; <nl> Maybe < bool > maybe = i : : JSReceiver : : HasProperty ( obj , name ) ; <nl> - has_pending_exception = ! maybe . IsJust ( ) ; <nl> + has_pending_exception = maybe . IsNothing ( ) ; <nl> RETURN_ON_FAILED_EXECUTION ( Value ) ; <nl> if ( ! maybe . FromJust ( ) ) return v8 : : Local < Value > ( ) ; <nl> Local < Value > result ; <nl> mmm a / src / builtins / builtins - array . cc <nl> ppp b / src / builtins / builtins - array . cc <nl> bool IterateElementsSlow ( Isolate * isolate , Handle < JSReceiver > receiver , <nl> uint32_t length , ArrayConcatVisitor * visitor ) { <nl> FOR_WITH_HANDLE_SCOPE ( isolate , uint32_t , i = 0 , i , i < length , + + i , { <nl> Maybe < bool > maybe = JSReceiver : : HasElement ( receiver , i ) ; <nl> - if ( ! maybe . IsJust ( ) ) return false ; <nl> + if ( maybe . IsNothing ( ) ) return false ; <nl> if ( maybe . FromJust ( ) ) { <nl> Handle < Object > element_value ; <nl> ASSIGN_RETURN_ON_EXCEPTION_VALUE ( <nl> bool IterateElements ( Isolate * isolate , Handle < JSReceiver > receiver , <nl> if ( ! visitor - > visit ( j , element_value ) ) return false ; <nl> } else { <nl> Maybe < bool > maybe = JSReceiver : : HasElement ( array , j ) ; <nl> - if ( ! maybe . IsJust ( ) ) return false ; <nl> + if ( maybe . IsNothing ( ) ) return false ; <nl> if ( maybe . FromJust ( ) ) { <nl> / / Call GetElement on array , not its prototype , or getters won ' t <nl> / / have the correct receiver . <nl> bool IterateElements ( Isolate * isolate , Handle < JSReceiver > receiver , <nl> if ( ! visitor - > visit ( j , element_value ) ) return false ; <nl> } else { <nl> Maybe < bool > maybe = JSReceiver : : HasElement ( array , j ) ; <nl> - if ( ! maybe . IsJust ( ) ) return false ; <nl> + if ( maybe . IsNothing ( ) ) return false ; <nl> if ( maybe . FromJust ( ) ) { <nl> / / Call GetElement on array , not its prototype , or getters won ' t <nl> / / have the correct receiver . <nl> mmm a / src / builtins / builtins - function . cc <nl> ppp b / src / builtins / builtins - function . cc <nl> Object * DoFunctionBind ( Isolate * isolate , BuiltinArguments args ) { <nl> Handle < Object > length ( Smi : : kZero , isolate ) ; <nl> Maybe < PropertyAttributes > attributes = <nl> JSReceiver : : GetPropertyAttributes ( & length_lookup ) ; <nl> - if ( ! attributes . IsJust ( ) ) return isolate - > heap ( ) - > exception ( ) ; <nl> + if ( attributes . IsNothing ( ) ) return isolate - > heap ( ) - > exception ( ) ; <nl> if ( attributes . FromJust ( ) ! = ABSENT ) { <nl> Handle < Object > target_length ; <nl> ASSIGN_RETURN_FAILURE_ON_EXCEPTION ( isolate , target_length , <nl> mmm a / src / builtins / builtins - object . cc <nl> ppp b / src / builtins / builtins - object . cc <nl> BUILTIN ( ObjectPrototypePropertyIsEnumerable ) { <nl> isolate , object , JSReceiver : : ToObject ( isolate , args . receiver ( ) ) ) ; <nl> Maybe < PropertyAttributes > maybe = <nl> JSReceiver : : GetOwnPropertyAttributes ( object , name ) ; <nl> - if ( ! maybe . IsJust ( ) ) return isolate - > heap ( ) - > exception ( ) ; <nl> + if ( maybe . IsNothing ( ) ) return isolate - > heap ( ) - > exception ( ) ; <nl> if ( maybe . 
FromJust ( ) = = ABSENT ) return isolate - > heap ( ) - > false_value ( ) ; <nl> return isolate - > heap ( ) - > ToBoolean ( ( maybe . FromJust ( ) & DONT_ENUM ) = = 0 ) ; <nl> } <nl> mmm a / src / contexts . cc <nl> ppp b / src / contexts . cc <nl> static Maybe < bool > UnscopableLookup ( LookupIterator * it ) { <nl> Isolate * isolate = it - > isolate ( ) ; <nl> <nl> Maybe < bool > found = JSReceiver : : HasProperty ( it ) ; <nl> - if ( ! found . IsJust ( ) | | ! found . FromJust ( ) ) return found ; <nl> + if ( found . IsNothing ( ) | | ! found . FromJust ( ) ) return found ; <nl> <nl> Handle < Object > unscopables ; <nl> ASSIGN_RETURN_ON_EXCEPTION_VALUE ( <nl> Handle < Object > Context : : Lookup ( Handle < String > name , ContextLookupFlags flags , <nl> maybe = JSReceiver : : GetPropertyAttributes ( object , name ) ; <nl> } <nl> <nl> - if ( ! maybe . IsJust ( ) ) return Handle < Object > ( ) ; <nl> + if ( maybe . IsNothing ( ) ) return Handle < Object > ( ) ; <nl> DCHECK ( ! isolate - > has_pending_exception ( ) ) ; <nl> * attributes = maybe . FromJust ( ) ; <nl> <nl> mmm a / src / inspector / v8 - value - utils . cc <nl> ppp b / src / inspector / v8 - value - utils . cc <nl> protocol : : Response toProtocolValue ( v8 : : Local < v8 : : Context > context , <nl> if ( name - > IsString ( ) ) { <nl> v8 : : Maybe < bool > hasRealNamedProperty = object - > HasRealNamedProperty ( <nl> context , v8 : : Local < v8 : : String > : : Cast ( name ) ) ; <nl> - if ( ! hasRealNamedProperty . IsJust ( ) | | ! hasRealNamedProperty . FromJust ( ) ) <nl> + if ( hasRealNamedProperty . IsNothing ( ) | | <nl> + ! hasRealNamedProperty . FromJust ( ) ) <nl> continue ; <nl> } <nl> v8 : : Local < v8 : : String > propertyName ; <nl> mmm a / src / objects . cc <nl> ppp b / src / objects . cc <nl> Maybe < bool > Object : : SetPropertyInternal ( LookupIterator * it , <nl> } else { <nl> Maybe < PropertyAttributes > maybe_attributes = <nl> JSObject : : GetPropertyAttributesWithInterceptor ( it ) ; <nl> - if ( ! maybe_attributes . IsJust ( ) ) return Nothing < bool > ( ) ; <nl> + if ( maybe_attributes . IsNothing ( ) ) return Nothing < bool > ( ) ; <nl> if ( ( maybe_attributes . FromJust ( ) & READ_ONLY ) ! = 0 ) { <nl> return WriteToReadOnlyProperty ( it , value , should_throw ) ; <nl> } <nl> Maybe < PropertyAttributes > JSReceiver : : GetPropertyAttributes ( <nl> case LookupIterator : : INTERCEPTOR : { <nl> Maybe < PropertyAttributes > result = <nl> JSObject : : GetPropertyAttributesWithInterceptor ( it ) ; <nl> - if ( ! result . IsJust ( ) ) return result ; <nl> + if ( result . IsNothing ( ) ) return result ; <nl> if ( result . FromJust ( ) ! = ABSENT ) return result ; <nl> break ; <nl> } <nl> MaybeHandle < Object > JSReceiver : : DefineProperties ( Isolate * isolate , <nl> isolate , props , next_key , & success , LookupIterator : : OWN ) ; <nl> DCHECK ( success ) ; <nl> Maybe < PropertyAttributes > maybe = JSReceiver : : GetPropertyAttributes ( & it ) ; <nl> - if ( ! maybe . IsJust ( ) ) return MaybeHandle < Object > ( ) ; <nl> + if ( maybe . IsNothing ( ) ) return MaybeHandle < Object > ( ) ; <nl> PropertyAttributes attrs = maybe . FromJust ( ) ; <nl> / / 7c . If propDesc is not undefined and propDesc . [ [ Enumerable ] ] is true : <nl> if ( attrs = = ABSENT ) continue ; <nl> MaybeHandle < Object > JSReceiver : : DefineProperties ( Isolate * isolate , <nl> DefineOwnProperty ( isolate , Handle < JSReceiver > : : cast ( object ) , <nl> desc - > name ( ) , desc , THROW_ON_ERROR ) ; <nl> / / 8d . 
ReturnIfAbrupt ( status ) . <nl> - if ( ! status . IsJust ( ) ) return MaybeHandle < Object > ( ) ; <nl> + if ( status . IsNothing ( ) ) return MaybeHandle < Object > ( ) ; <nl> CHECK ( status . FromJust ( ) ) ; <nl> } <nl> / / 9 . Return o . <nl> mmm a / src / runtime / runtime - object . cc <nl> ppp b / src / runtime / runtime - object . cc <nl> RUNTIME_FUNCTION ( Runtime_ObjectHasOwnProperty ) { <nl> <nl> Maybe < bool > result = <nl> JSReceiver : : HasOwnProperty ( Handle < JSProxy > : : cast ( object ) , key ) ; <nl> - if ( ! result . IsJust ( ) ) return isolate - > heap ( ) - > exception ( ) ; <nl> + if ( result . IsNothing ( ) ) return isolate - > heap ( ) - > exception ( ) ; <nl> return isolate - > heap ( ) - > ToBoolean ( result . FromJust ( ) ) ; <nl> <nl> } else if ( object - > IsString ( ) ) { <nl> RUNTIME_FUNCTION ( Runtime_AddNamedProperty ) { <nl> DCHECK ( ! name - > ToArrayIndex ( & index ) ) ; <nl> LookupIterator it ( object , name , object , LookupIterator : : OWN_SKIP_INTERCEPTOR ) ; <nl> Maybe < PropertyAttributes > maybe = JSReceiver : : GetPropertyAttributes ( & it ) ; <nl> - if ( ! maybe . IsJust ( ) ) return isolate - > heap ( ) - > exception ( ) ; <nl> + if ( maybe . IsNothing ( ) ) return isolate - > heap ( ) - > exception ( ) ; <nl> DCHECK ( ! it . IsFound ( ) ) ; <nl> # endif <nl> <nl> RUNTIME_FUNCTION ( Runtime_AddElement ) { <nl> LookupIterator it ( isolate , object , index , object , <nl> LookupIterator : : OWN_SKIP_INTERCEPTOR ) ; <nl> Maybe < PropertyAttributes > maybe = JSReceiver : : GetPropertyAttributes ( & it ) ; <nl> - if ( ! maybe . IsJust ( ) ) return isolate - > heap ( ) - > exception ( ) ; <nl> + if ( maybe . IsNothing ( ) ) return isolate - > heap ( ) - > exception ( ) ; <nl> DCHECK ( ! it . IsFound ( ) ) ; <nl> <nl> if ( object - > IsJSArray ( ) ) { <nl> RUNTIME_FUNCTION ( Runtime_HasProperty ) { <nl> <nl> / / Lookup the { name } on { receiver } . <nl> Maybe < bool > maybe = JSReceiver : : HasProperty ( receiver , name ) ; <nl> - if ( ! maybe . IsJust ( ) ) return isolate - > heap ( ) - > exception ( ) ; <nl> + if ( maybe . IsNothing ( ) ) return isolate - > heap ( ) - > exception ( ) ; <nl> return isolate - > heap ( ) - > ToBoolean ( maybe . FromJust ( ) ) ; <nl> } <nl> <nl> mmm a / src / runtime / runtime - operators . cc <nl> ppp b / src / runtime / runtime - operators . cc <nl> RUNTIME_FUNCTION ( Runtime_Equal ) { <nl> CONVERT_ARG_HANDLE_CHECKED ( Object , x , 0 ) ; <nl> CONVERT_ARG_HANDLE_CHECKED ( Object , y , 1 ) ; <nl> Maybe < bool > result = Object : : Equals ( x , y ) ; <nl> - if ( ! result . IsJust ( ) ) return isolate - > heap ( ) - > exception ( ) ; <nl> + if ( result . IsNothing ( ) ) return isolate - > heap ( ) - > exception ( ) ; <nl> return isolate - > heap ( ) - > ToBoolean ( result . FromJust ( ) ) ; <nl> } <nl> <nl> RUNTIME_FUNCTION ( Runtime_NotEqual ) { <nl> CONVERT_ARG_HANDLE_CHECKED ( Object , x , 0 ) ; <nl> CONVERT_ARG_HANDLE_CHECKED ( Object , y , 1 ) ; <nl> Maybe < bool > result = Object : : Equals ( x , y ) ; <nl> - if ( ! result . IsJust ( ) ) return isolate - > heap ( ) - > exception ( ) ; <nl> + if ( result . IsNothing ( ) ) return isolate - > heap ( ) - > exception ( ) ; <nl> return isolate - > heap ( ) - > ToBoolean ( ! result . FromJust ( ) ) ; <nl> } <nl> <nl> RUNTIME_FUNCTION ( Runtime_LessThan ) { <nl> CONVERT_ARG_HANDLE_CHECKED ( Object , x , 0 ) ; <nl> CONVERT_ARG_HANDLE_CHECKED ( Object , y , 1 ) ; <nl> Maybe < bool > result = Object : : LessThan ( x , y ) ; <nl> - if ( ! result . 
IsJust ( ) ) return isolate - > heap ( ) - > exception ( ) ; <nl> + if ( result . IsNothing ( ) ) return isolate - > heap ( ) - > exception ( ) ; <nl> return isolate - > heap ( ) - > ToBoolean ( result . FromJust ( ) ) ; <nl> } <nl> <nl> RUNTIME_FUNCTION ( Runtime_GreaterThan ) { <nl> CONVERT_ARG_HANDLE_CHECKED ( Object , x , 0 ) ; <nl> CONVERT_ARG_HANDLE_CHECKED ( Object , y , 1 ) ; <nl> Maybe < bool > result = Object : : GreaterThan ( x , y ) ; <nl> - if ( ! result . IsJust ( ) ) return isolate - > heap ( ) - > exception ( ) ; <nl> + if ( result . IsNothing ( ) ) return isolate - > heap ( ) - > exception ( ) ; <nl> return isolate - > heap ( ) - > ToBoolean ( result . FromJust ( ) ) ; <nl> } <nl> <nl> RUNTIME_FUNCTION ( Runtime_LessThanOrEqual ) { <nl> CONVERT_ARG_HANDLE_CHECKED ( Object , x , 0 ) ; <nl> CONVERT_ARG_HANDLE_CHECKED ( Object , y , 1 ) ; <nl> Maybe < bool > result = Object : : LessThanOrEqual ( x , y ) ; <nl> - if ( ! result . IsJust ( ) ) return isolate - > heap ( ) - > exception ( ) ; <nl> + if ( result . IsNothing ( ) ) return isolate - > heap ( ) - > exception ( ) ; <nl> return isolate - > heap ( ) - > ToBoolean ( result . FromJust ( ) ) ; <nl> } <nl> <nl> RUNTIME_FUNCTION ( Runtime_GreaterThanOrEqual ) { <nl> CONVERT_ARG_HANDLE_CHECKED ( Object , x , 0 ) ; <nl> CONVERT_ARG_HANDLE_CHECKED ( Object , y , 1 ) ; <nl> Maybe < bool > result = Object : : GreaterThanOrEqual ( x , y ) ; <nl> - if ( ! result . IsJust ( ) ) return isolate - > heap ( ) - > exception ( ) ; <nl> + if ( result . IsNothing ( ) ) return isolate - > heap ( ) - > exception ( ) ; <nl> return isolate - > heap ( ) - > ToBoolean ( result . FromJust ( ) ) ; <nl> } <nl> <nl> mmm a / src / runtime / runtime - scopes . cc <nl> ppp b / src / runtime / runtime - scopes . cc <nl> Object * DeclareGlobal ( <nl> } <nl> LookupIterator it ( global , name , global , lookup_config ) ; <nl> Maybe < PropertyAttributes > maybe = JSReceiver : : GetPropertyAttributes ( & it ) ; <nl> - if ( ! maybe . IsJust ( ) ) return isolate - > heap ( ) - > exception ( ) ; <nl> + if ( maybe . IsNothing ( ) ) return isolate - > heap ( ) - > exception ( ) ; <nl> <nl> if ( it . IsFound ( ) ) { <nl> PropertyAttributes old_attributes = maybe . FromJust ( ) ; <nl> static Object * FindNameClash ( Handle < ScopeInfo > scope_info , <nl> LookupIterator it ( global_object , name , global_object , <nl> LookupIterator : : OWN_SKIP_INTERCEPTOR ) ; <nl> Maybe < PropertyAttributes > maybe = JSReceiver : : GetPropertyAttributes ( & it ) ; <nl> - if ( ! maybe . IsJust ( ) ) return isolate - > heap ( ) - > exception ( ) ; <nl> + if ( maybe . IsNothing ( ) ) return isolate - > heap ( ) - > exception ( ) ; <nl> if ( ( maybe . FromJust ( ) & DONT_DELETE ) ! = 0 ) { <nl> / / ES # sec - globaldeclarationinstantiation 5 . a : <nl> / / If envRec . HasVarDeclaration ( name ) is true , throw a SyntaxError <nl> | Refactor ! foo . IsJust to foo . IsNothing ( ) | v8/v8 | 239a58de3ba0e2602409629498166451cc8a8c8a | 2017-10-23T23:25:28Z |
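The v8 refactor above is purely about readability: Maybe<T> exposes both IsJust() and IsNothing(), and the failure check reads better stated positively than as a negation. A toy Maybe<T> with just enough API to show the before/after — not v8's actual class:

```cpp
#include <iostream>

template <typename T>
class Maybe {
 public:
  static Maybe Just(T v) { return Maybe(true, v); }
  static Maybe Nothing() { return Maybe(false, T()); }
  bool IsJust() const { return has_value_; }
  bool IsNothing() const { return !has_value_; }
  T FromJust() const { return value_; }

 private:
  Maybe(bool has, T v) : has_value_(has), value_(v) {}
  bool has_value_;
  T value_;
};

int main() {
  Maybe<bool> result = Maybe<bool>::Nothing();
  // Before: if (!result.IsJust()) ...  // a negation the reader must undo
  if (result.IsNothing()) {             // after: states the failure case
    std::cout << "propagate the pending exception\n";
  }
}
```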
mmm a / aten / src / ATen / native / native_functions . yaml <nl> ppp b / aten / src / ATen / native / native_functions . yaml <nl> <nl> dispatch : <nl> CUDA : _cudnn_rnn_backward <nl> <nl> - - func : _cudnn_init_dropout_state ( double dropout , bool train , int dropout_seed , TensorOptions options ) - > Tensor <nl> + - func : _cudnn_init_dropout_state ( float dropout , bool train , int dropout_seed , TensorOptions options ) - > Tensor <nl> dispatch : <nl> CUDA : _cudnn_init_dropout_state <nl> <nl> <nl> - func : batch_norm ( Tensor input , Tensor ? weight , Tensor ? bias , Tensor ? running_mean , Tensor ? running_var , bool training , float momentum , float eps , bool cudnn_enabled ) - > Tensor <nl> matches_jit_signature : True <nl> <nl> - - func : _batch_norm_impl_index ( Tensor input , Tensor ? weight , Tensor ? bias , Tensor ? running_mean , Tensor ? running_var , bool training , double momentum , double eps , bool cudnn_enabled ) - > ( Tensor , Tensor , Tensor , int64_t ) <nl> + - func : _batch_norm_impl_index ( Tensor input , Tensor ? weight , Tensor ? bias , Tensor ? running_mean , Tensor ? running_var , bool training , float momentum , float eps , bool cudnn_enabled ) - > ( Tensor , Tensor , Tensor , int64_t ) <nl> <nl> - - func : _batch_norm_impl_index_backward ( int64_t impl_index , Tensor input , Tensor grad_output , Tensor ? weight , Tensor ? running_mean , Tensor ? running_var , Tensor ? save_mean , Tensor ? save_var_transform , bool train , double eps , std : : array < bool , 3 > output_mask ) - > ( Tensor , Tensor , Tensor ) <nl> + - func : _batch_norm_impl_index_backward ( int64_t impl_index , Tensor input , Tensor grad_output , Tensor ? weight , Tensor ? running_mean , Tensor ? running_var , Tensor ? save_mean , Tensor ? save_var_transform , bool train , float eps , std : : array < bool , 3 > output_mask ) - > ( Tensor , Tensor , Tensor ) <nl> <nl> # Sample bernoulli with values in ` self ` as probability . <nl> - func : bernoulli ( Tensor self , * , Generator ? generator = None ) - > Tensor <nl> <nl> CUDA : cudnn_grid_sampler_forward <nl> <nl> - func : cudnn_grid_sampler_backward ( Tensor self , Tensor grid , Tensor grad_output ) - > ( Tensor grad_self , Tensor grad_grid ) <nl> + matches_jit_signature : True <nl> dispatch : <nl> CUDA : cudnn_grid_sampler_backward <nl> <nl> <nl> matches_jit_signature : True <nl> <nl> - func : logsumexp_backward ( Tensor grad , Tensor self , Tensor res , int [ 1 ] dim , bool keepdim ) - > Tensor <nl> + matches_jit_signature : True <nl> <nl> - func : margin_ranking_loss ( Tensor input1 , Tensor input2 , Tensor target , float margin = 0 . 
0 , int reduction = Mean ) - > Tensor <nl> matches_jit_signature : True <nl> <nl> matches_jit_signature : True <nl> <nl> - func : sum_backward ( Tensor grad , int [ ] sizes , int [ ] dims , bool keepdim ) - > Tensor <nl> + matches_jit_signature : True <nl> <nl> - func : median ( Tensor self , int dim , bool keepdim = False ) - > ( Tensor values , Tensor indices ) <nl> matches_jit_signature : True <nl> <nl> - func : pairwise_distance ( Tensor x1 , Tensor x2 , float p = 2 , float eps = 1e - 06 , bool keepdim = False ) - > Tensor <nl> matches_jit_signature : True <nl> <nl> - - func : cdist ( Tensor x1 , Tensor x2 , double p = 2 ) - > Tensor <nl> + - func : cdist ( Tensor x1 , Tensor x2 , float p = 2 ) - > Tensor <nl> + matches_jit_signature : True <nl> <nl> - func : pdist ( Tensor self , float p = 2 ) - > Tensor <nl> matches_jit_signature : True <nl> <nl> variants : method # This is method - only to match the previous tensor API . In the future we could make this a function too . <nl> <nl> - func : permute_backwards ( Tensor grad , int [ ] fwd_dims ) - > Tensor <nl> + matches_jit_signature : True <nl> <nl> - func : pixel_shuffle ( Tensor self , int upscale_factor ) - > Tensor <nl> matches_jit_signature : True <nl> <nl> device_guard : False <nl> <nl> - func : unsqueeze_to ( Tensor self , int [ ] sizes ) - > Tensor <nl> + matches_jit_signature : True <nl> <nl> - func : unsqueeze_to ( Tensor self , int64_t dim , int [ ] sizes ) - > Tensor <nl> <nl> <nl> matches_jit_signature : True <nl> <nl> - func : var_backward ( Tensor grad , Tensor self , bool unbiased ) - > Tensor <nl> + matches_jit_signature : True <nl> <nl> - func : var_backward ( Tensor grad , Tensor self , int [ ] dim , bool unbiased , bool keepdim ) - > Tensor <nl> + matches_jit_signature : True <nl> <nl> - func : view_as ( Tensor self , Tensor other ) - > Tensor <nl> matches_jit_signature : True <nl> <nl> matches_jit_signature : True <nl> <nl> # Quantized RNN layers <nl> - - func : quantized_lstm ( Tensor input , Tensor [ ] hx , Tensor [ ] params , bool has_biases , int num_layers , double dropout , bool train , bool bidirectional , bool batch_first ) - > ( Tensor , Tensor , Tensor ) <nl> + - func : quantized_lstm ( Tensor input , Tensor [ ] hx , Tensor [ ] params , bool has_biases , int num_layers , float dropout , bool train , bool bidirectional , bool batch_first ) - > ( Tensor , Tensor , Tensor ) <nl> + matches_jit_signature : True <nl> <nl> # Quantized RNN cells <nl> - func : quantized_lstm_cell ( Tensor input , Tensor [ ] hx , Tensor w_ih , Tensor w_hh , Tensor b_ih , Tensor b_hh , Tensor packed_ih , Tensor packed_hh , Tensor col_offsets_ih , Tensor col_offsets_hh , Scalar scale_ih , Scalar scale_hh , Scalar zero_point_ih , Scalar zero_point_hh ) - > ( Tensor , Tensor ) <nl> <nl> - func : svd ( Tensor self , bool some = True , bool compute_uv = True , * , Tensor ( a ! ) U , Tensor ( b ! ) S , Tensor ( c ! ) V ) - > ( Tensor ( a ! ) U , Tensor ( b ! ) S , Tensor ( c ! ) V ) <nl> <nl> - func : svd ( Tensor self , bool some = True , bool compute_uv = True ) - > ( Tensor U , Tensor S , Tensor V ) <nl> + matches_jit_signature : True <nl> variants : method , function <nl> <nl> - func : cholesky ( Tensor self , bool upper = False , * , Tensor ( a ! ) out ) - > Tensor ( a ! ) <nl> <nl> <nl> - func : normal ( float mean , Tensor std , * , Generator ? generator = None , Tensor ( a ! ) output ) - > Tensor ( a ! ) <nl> <nl> - - func : normal ( double mean , Tensor std , * , Generator ? 
generator = None ) - > Tensor <nl> + - func : normal ( float mean , Tensor std , * , Generator ? generator = None ) - > Tensor <nl> + matches_jit_signature : True <nl> <nl> - func : normal ( Tensor mean , Tensor std , * , Generator ? generator = None , Tensor ( a ! ) output ) - > Tensor ( a ! ) <nl> <nl> <nl> python_module : nn <nl> <nl> - func : multilabel_margin_loss_forward ( Tensor self , Tensor target , int reduction ) - > ( Tensor output , Tensor is_target ) <nl> + matches_jit_signature : True <nl> python_module : nn <nl> <nl> - func : multilabel_margin_loss_backward ( Tensor grad_output , Tensor self , Tensor target , int reduction , Tensor is_target , * , Tensor ( a ! ) grad_input ) - > Tensor ( a ! ) <nl> <nl> python_module : nn <nl> <nl> - func : nll_loss_forward ( Tensor self , Tensor target , Tensor ? weight , int reduction , int ignore_index ) - > ( Tensor output , Tensor total_weight ) <nl> + matches_jit_signature : True <nl> python_module : nn <nl> <nl> - func : nll_loss_backward ( Tensor grad_output , Tensor self , Tensor target , Tensor ? weight , int reduction , int ignore_index , Tensor total_weight , * , Tensor ( a ! ) grad_input ) - > Tensor ( a ! ) <nl> <nl> python_module : nn <nl> <nl> - func : nll_loss2d_forward ( Tensor self , Tensor target , Tensor ? weight , int reduction , int ignore_index ) - > ( Tensor output , Tensor total_weight ) <nl> + matches_jit_signature : True <nl> python_module : nn <nl> <nl> - func : nll_loss2d_backward ( Tensor grad_output , Tensor self , Tensor target , Tensor ? weight , int reduction , int ignore_index , Tensor total_weight , * , Tensor ( a ! ) grad_input ) - > Tensor ( a ! ) <nl> <nl> python_module : nn <nl> <nl> - func : log_sigmoid_forward ( Tensor self ) - > ( Tensor output , Tensor buffer ) <nl> + matches_jit_signature : True <nl> python_module : nn <nl> <nl> - func : log_sigmoid_backward ( Tensor grad_output , Tensor self , Tensor buffer , * , Tensor ( a ! ) grad_input ) - > Tensor ( a ! ) <nl> <nl> python_module : nn <nl> <nl> - func : _adaptive_avg_pool2d ( Tensor self , int [ 2 ] output_size ) - > Tensor <nl> + matches_jit_signature : True <nl> dispatch : <nl> CPU : adaptive_avg_pool2d_cpu <nl> CUDA : adaptive_avg_pool2d_cuda <nl> <nl> <nl> # Return : ( Tensor output , Tensor indices ) <nl> - func : fractional_max_pool3d ( Tensor self , int [ 3 ] kernel_size , int [ 3 ] output_size , Tensor random_samples ) - > ( Tensor , Tensor ) <nl> + matches_jit_signature : True <nl> python_module : nn <nl> dispatch : <nl> CPU : fractional_max_pool3d_cpu <nl> <nl> python_module : nn <nl> <nl> - func : thnn_conv_transpose2d_forward ( Tensor self , Tensor weight , int [ 2 ] kernel_size , Tensor ? bias , int [ 2 ] stride , int [ 2 ] padding , int [ 2 ] output_padding , int [ 2 ] dilation ) - > ( Tensor output , Tensor columns , Tensor ones ) <nl> + matches_jit_signature : True <nl> python_module : nn <nl> <nl> - func : thnn_conv_transpose2d_backward ( Tensor grad_output , Tensor self , Tensor weight , int [ 2 ] kernel_size , int [ 2 ] stride , int [ 2 ] padding , int [ 2 ] output_padding , int [ 2 ] dilation , Tensor columns , Tensor ones , * , Tensor ? ( a ! ) grad_input , Tensor ? ( b ! ) grad_weight , Tensor ? ( c ! ) grad_bias ) - > ( Tensor ( a ! ) , Tensor ( b ! ) , Tensor ( c ! 
) ) <nl> python_module : nn <nl> <nl> - func : thnn_conv_transpose2d_backward ( Tensor grad_output , Tensor self , Tensor weight , int [ 2 ] kernel_size , int [ 2 ] stride , int [ 2 ] padding , int [ 2 ] output_padding , int [ 2 ] dilation , Tensor columns , Tensor ones , bool [ 3 ] output_mask ) - > ( Tensor grad_input , Tensor grad_weight , Tensor grad_bias ) <nl> + matches_jit_signature : True <nl> python_module : nn <nl> <nl> - func : thnn_conv_transpose3d ( Tensor self , Tensor weight , int [ 3 ] kernel_size , Tensor ? bias = None , int [ 3 ] stride = 1 , int [ 3 ] padding = 0 , int [ 3 ] output_padding = 0 , int [ 3 ] dilation = 1 , * , Tensor ( a ! ) output ) - > Tensor ( a ! ) <nl> <nl> python_module : nn <nl> <nl> - func : thnn_conv_transpose3d_forward ( Tensor self , Tensor weight , int [ 3 ] kernel_size , Tensor ? bias , int [ 3 ] stride , int [ 3 ] padding , int [ 3 ] output_padding , int [ 3 ] dilation ) - > ( Tensor output , Tensor finput , Tensor fgrad_input ) <nl> + matches_jit_signature : True <nl> python_module : nn <nl> <nl> - func : thnn_conv_transpose3d_backward ( Tensor grad_output , Tensor self , Tensor weight , int [ 3 ] kernel_size , int [ 3 ] stride , int [ 3 ] padding , int [ 3 ] output_padding , int [ 3 ] dilation , Tensor finput , Tensor fgrad_input , * , Tensor ? ( a ! ) grad_input , Tensor ? ( b ! ) grad_weight , Tensor ? ( c ! ) grad_bias ) - > ( Tensor ( a ! ) , Tensor ( b ! ) , Tensor ( c ! ) ) <nl> python_module : nn <nl> <nl> - func : thnn_conv_transpose3d_backward ( Tensor grad_output , Tensor self , Tensor weight , int [ 3 ] kernel_size , int [ 3 ] stride , int [ 3 ] padding , int [ 3 ] output_padding , int [ 3 ] dilation , Tensor finput , Tensor fgrad_input , bool [ 3 ] output_mask ) - > ( Tensor grad_input , Tensor grad_weight , Tensor grad_bias ) <nl> + matches_jit_signature : True <nl> python_module : nn <nl> <nl> - func : thnn_conv2d ( Tensor self , Tensor weight , int [ 2 ] kernel_size , Tensor ? bias = None , int [ 2 ] stride = 1 , int [ 2 ] padding = 0 , * , Tensor ( a ! ) output ) - > Tensor ( a ! ) <nl> <nl> python_module : nn <nl> <nl> - func : thnn_conv2d_forward ( Tensor self , Tensor weight , int [ 2 ] kernel_size , Tensor ? bias , int [ 2 ] stride , int [ 2 ] padding ) - > ( Tensor output , Tensor finput , Tensor fgrad_input ) <nl> + matches_jit_signature : True <nl> python_module : nn <nl> <nl> - func : thnn_conv2d_backward ( Tensor grad_output , Tensor self , Tensor weight , int [ 2 ] kernel_size , int [ 2 ] stride , int [ 2 ] padding , Tensor finput , Tensor fgrad_input , * , Tensor ? ( a ! ) grad_input , Tensor ? ( b ! ) grad_weight , Tensor ? ( c ! ) grad_bias ) - > ( Tensor ( a ! ) , Tensor ( b ! ) , Tensor ( c ! ) ) <nl> python_module : nn <nl> <nl> - func : thnn_conv2d_backward ( Tensor grad_output , Tensor self , Tensor weight , int [ 2 ] kernel_size , int [ 2 ] stride , int [ 2 ] padding , Tensor finput , Tensor fgrad_input , bool [ 3 ] output_mask ) - > ( Tensor grad_input , Tensor grad_weight , Tensor grad_bias ) <nl> + matches_jit_signature : True <nl> python_module : nn <nl> <nl> - func : thnn_conv_depthwise2d ( Tensor self , Tensor weight , int [ 2 ] kernel_size , Tensor ? bias = None , int [ 2 ] stride = 1 , int [ 2 ] padding = 0 , int [ 2 ] dilation = 1 , * , Tensor ( a ! ) output ) - > Tensor ( a ! 
) <nl> <nl> python_module : nn <nl> <nl> - func : thnn_conv_depthwise2d_backward ( Tensor grad_output , Tensor self , Tensor weight , int [ 2 ] kernel_size , int [ 2 ] stride , int [ 2 ] padding , int [ 2 ] dilation , bool [ 2 ] output_mask ) - > ( Tensor grad_input , Tensor grad_weight ) <nl> + matches_jit_signature : True <nl> python_module : nn <nl> <nl> - func : thnn_conv3d ( Tensor self , Tensor weight , int [ 3 ] kernel_size , Tensor ? bias = None , int [ 3 ] stride = 1 , int [ 3 ] padding = 0 , * , Tensor ( a ! ) output ) - > Tensor ( a ! ) <nl> <nl> python_module : nn <nl> <nl> - func : thnn_conv3d_forward ( Tensor self , Tensor weight , int [ 3 ] kernel_size , Tensor ? bias , int [ 3 ] stride , int [ 3 ] padding ) - > ( Tensor output , Tensor finput , Tensor fgrad_input ) <nl> + matches_jit_signature : True <nl> python_module : nn <nl> <nl> - func : thnn_conv3d_backward ( Tensor grad_output , Tensor self , Tensor weight , int [ 3 ] kernel_size , int [ 3 ] stride , int [ 3 ] padding , Tensor finput , Tensor fgrad_input , * , Tensor ? ( a ! ) grad_input , Tensor ? ( b ! ) grad_weight , Tensor ? ( c ! ) grad_bias ) - > ( Tensor ( a ! ) , Tensor ( b ! ) , Tensor ( c ! ) ) <nl> python_module : nn <nl> <nl> - func : thnn_conv3d_backward ( Tensor grad_output , Tensor self , Tensor weight , int [ 3 ] kernel_size , int [ 3 ] stride , int [ 3 ] padding , Tensor finput , Tensor fgrad_input , bool [ 3 ] output_mask ) - > ( Tensor grad_input , Tensor grad_weight , Tensor grad_bias ) <nl> + matches_jit_signature : True <nl> python_module : nn <nl> <nl> - func : thnn_conv_dilated2d ( Tensor self , Tensor weight , int [ 2 ] kernel_size , Tensor ? bias = None , int [ 2 ] stride = 1 , int [ 2 ] padding = 0 , int [ 2 ] dilation = 1 , * , Tensor ( a ! ) output ) - > Tensor ( a ! ) <nl> <nl> python_module : nn <nl> <nl> - func : thnn_conv_dilated2d_forward ( Tensor self , Tensor weight , int [ 2 ] kernel_size , Tensor ? bias , int [ 2 ] stride , int [ 2 ] padding , int [ 2 ] dilation ) - > ( Tensor output , Tensor columns , Tensor ones ) <nl> + matches_jit_signature : True <nl> python_module : nn <nl> <nl> - func : thnn_conv_dilated2d_backward ( Tensor grad_output , Tensor self , Tensor weight , int [ 2 ] kernel_size , int [ 2 ] stride , int [ 2 ] padding , int [ 2 ] dilation , Tensor columns , Tensor ones , * , Tensor ? ( a ! ) grad_input , Tensor ? ( b ! ) grad_weight , Tensor ? ( c ! ) grad_bias ) - > ( Tensor ( a ! ) , Tensor ( b ! ) , Tensor ( c ! ) ) <nl> python_module : nn <nl> <nl> - func : thnn_conv_dilated2d_backward ( Tensor grad_output , Tensor self , Tensor weight , int [ 2 ] kernel_size , int [ 2 ] stride , int [ 2 ] padding , int [ 2 ] dilation , Tensor columns , Tensor ones , bool [ 3 ] output_mask ) - > ( Tensor grad_input , Tensor grad_weight , Tensor grad_bias ) <nl> + matches_jit_signature : True <nl> python_module : nn <nl> <nl> - func : thnn_conv_dilated3d ( Tensor self , Tensor weight , int [ 3 ] kernel_size , Tensor ? bias = None , int [ 3 ] stride = 1 , int [ 3 ] padding = 0 , int [ 3 ] dilation = 1 , * , Tensor ( a ! ) output ) - > Tensor ( a ! ) <nl> <nl> python_module : nn <nl> <nl> - func : thnn_conv_dilated3d_forward ( Tensor self , Tensor weight , int [ 3 ] kernel_size , Tensor ? 
bias , int [ 3 ] stride , int [ 3 ] padding , int [ 3 ] dilation ) - > ( Tensor output , Tensor columns , Tensor ones ) <nl> + matches_jit_signature : True <nl> python_module : nn <nl> <nl> - func : thnn_conv_dilated3d_backward ( Tensor grad_output , Tensor self , Tensor weight , int [ 3 ] kernel_size , int [ 3 ] stride , int [ 3 ] padding , int [ ] dilation , Tensor columns , Tensor ones , * , Tensor ? ( a ! ) grad_input , Tensor ? ( b ! ) grad_weight , Tensor ? ( c ! ) grad_bias ) - > ( Tensor ( a ! ) , Tensor ( b ! ) , Tensor ( c ! ) ) <nl> python_module : nn <nl> <nl> - func : thnn_conv_dilated3d_backward ( Tensor grad_output , Tensor self , Tensor weight , int [ 3 ] kernel_size , int [ 3 ] stride , int [ 3 ] padding , int [ 3 ] dilation , Tensor columns , Tensor ones , bool [ 3 ] output_mask ) - > ( Tensor grad_input , Tensor grad_weight , Tensor grad_bias ) <nl> + matches_jit_signature : True <nl> python_module : nn <nl> <nl> - func : thnn_col2im ( Tensor self , int [ 2 ] output_size , int [ 2 ] kernel_size , int [ 2 ] dilation , int [ 2 ] padding , int [ 2 ] stride ) - > Tensor <nl> mmm a / aten / src / ATen / native_parse . py <nl> ppp b / aten / src / ATen / native_parse . py <nl> def type_argument_translations ( arg ) : <nl> # Enables str by translating to legacy std : : string . <nl> elif t = = ' str ' : <nl> t = ' std : : string ' <nl> + elif t = = ' double ' : <nl> + raise RuntimeError ( " Please use float and not double . " <nl> + " See [ temp translations ] for details . " ) <nl> # Enables int [ x ] by translating to legacy IntArrayRef [ x ] . See [ temp translations ] <nl> elif re . match ( r ' int \ [ ( \ d + ) \ ] ' , t ) : <nl> match = re . match ( r ' int \ [ ( \ d + ) \ ] ' , t ) <nl> | Revisit some native functions to increase number of jit matches ( ) | pytorch/pytorch | 9101dfc57ccb6b6931b4e80233bbc64d9080d2e8 | 2019-02-21T22:41:06Z |
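The companion native_parse.py hunk above enforces the schema rule the yaml edits follow: declarations must say `float` (which the codegen maps to C++ `double`), and a literal `double` is rejected with an error. A small sketch of that translate-or-reject step, written in C++ to match the other examples here, with an abbreviated, illustrative mapping:

```cpp
#include <iostream>
#include <map>
#include <stdexcept>
#include <string>

std::string translate_type(const std::string& t) {
  if (t == "double")  // the check the diff adds to native_parse.py
    throw std::runtime_error("Please use float and not double.");
  static const std::map<std::string, std::string> kLegacy = {
      {"str", "std::string"},  // schema 'str'   -> C++ std::string
      {"float", "double"},     // schema 'float' -> C++ double
  };
  auto it = kLegacy.find(t);
  return it == kLegacy.end() ? t : it->second;
}

int main() {
  std::cout << translate_type("float") << "\n";  // double
  try {
    translate_type("double");
  } catch (const std::exception& e) {
    std::cout << "rejected: " << e.what() << "\n";
  }
}
```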
mmm a / installer / windows / build . cmd <nl> ppp b / installer / windows / build . cmd <nl> set LIGHT = - sw1104 <nl> <nl> : : Compile & Link <nl> " % WIX % \ bin \ candle . exe " - nologo - pedantic - arch % ARCH % % SQLCIPHER % product . wxs <nl> - " % WIX % \ bin \ light . exe " - nologo - pedantic % LIGHT % % ICE % - ext WixUIExtension - ext WixUtilExtension product . wixobj - out % MSI % . msi <nl> + " % WIX % \ bin \ light . exe " - nologo - pedantic % LIGHT % % ICE % - ext WixUIExtension - ext WixUtilExtension - cultures : en - us - loc strings . wxl product . wixobj - out % MSI % . msi <nl> <nl> : : Cleanup <nl> del product . wixobj <nl> new file mode 100644 <nl> index 000000000 . . ffcdcb0c1 <nl> mmm / dev / null <nl> ppp b / installer / windows / strings . wxl <nl> <nl> + < ? xml version = " 1 . 0 " encoding = " utf - 8 " ? > <nl> + < WixLocalization Culture = " en - us " xmlns = " http : / / schemas . microsoft . com / wix / 2006 / localization " > <nl> + < String Id = " WelcomeDlgDescription " > The Setup Wizard will install [ ProductName ] on your computer . If you have a previous version already installed , this installation process will update it . Click Next to continue or Cancel to exit the Setup Wizard . < / String > <nl> + < / WixLocalization > <nl> | Update the welcome dialog text for Windows Installer | sqlitebrowser/sqlitebrowser | 09c668a1126afa538827595a45547e1e684aa62d | 2018-06-03T20:51:50Z |
mmm a / tensorflow / cc / ops / while_loop . h <nl> ppp b / tensorflow / cc / ops / while_loop . h <nl> limitations under the License . <nl> # ifndef TENSORFLOW_CC_OPS_WHILE_LOOP_H_ <nl> # define TENSORFLOW_CC_OPS_WHILE_LOOP_H_ <nl> <nl> + # include < vector > <nl> + # include < string > <nl> # include " tensorflow / cc / framework / ops . h " <nl> # include " tensorflow / cc / framework / scope . h " <nl> <nl> | cc / ops / const_op . h : Add # include < vector > for vector < > and # include < string > for string | tensorflow/tensorflow | 6e7a1c901eee20babaaaf8097c995ee53801e454 | 2020-09-17T09:12:22Z |
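The change above is a classic include-what-you-use fix: the header's interface mentions std::vector and std::string, so it now includes <vector> and <string> directly instead of relying on whatever its other includes happen to pull in transitively. A minimal sketch of the same idea, using a hypothetical function rather than the TensorFlow API:

#include <sstream>
#include <string>
#include <vector>

// Because this interface names std::vector and std::string, the headers
// above must be included here; a transitive include from another header
// could silently disappear in a refactor and break the build.
std::vector<std::string> SplitWords(const std::string& text) {
  std::vector<std::string> words;
  std::istringstream in(text);
  for (std::string word; in >> word;) {
    words.push_back(word);
  }
  return words;
}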
mmm a / lib / ffmpeg / libavcodec / vdpau_vc1 . c <nl> ppp b / lib / ffmpeg / libavcodec / vdpau_vc1 . c <nl> static int vdpau_vc1_start_frame ( AVCodecContext * avctx , <nl> else <nl> info - > picture_type = s - > pict_type - 1 + s - > pict_type / 3 ; <nl> <nl> - info - > frame_coding_mode = v - > fcm ; <nl> + info - > frame_coding_mode = v - > fcm ? v - > fcm + 1 : 0 ; <nl> info - > postprocflag = v - > postprocflag ; <nl> info - > pulldown = v - > broadcast ; <nl> info - > interlace = v - > interlace ; <nl> | ffmpeg backport : fix vdpau vc1 interlace modes | xbmc/xbmc | d01e7e75f1463a8d3f4279c0bdf202cf82fdc80c | 2013-05-06T19:00:04Z |
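The one-line fix above exists because FFmpeg and VDPAU number the VC-1 frame coding modes differently. Assuming the usual values (FFmpeg's fcm: 0 = progressive, 1 = frame-interlace, 2 = field-interlace; VDPAU's frame_coding_mode: 0, 2, 3 for the same three modes), passing fcm through unchanged selected the wrong VDPAU mode for all interlaced content. Below is a small self-contained sketch of the corrected mapping; the enum values are stated assumptions, not quoted from either header.

#include <cassert>
#include <cstdint>

// Assumed FFmpeg VC-1 frame coding modes (contiguous 0/1/2).
enum Fcm { PROGRESSIVE = 0, ILACE_FRAME = 1, ILACE_FIELD = 2 };

// VDPAU's frame_coding_mode is assumed to skip a value (0/2/3), so every
// non-progressive mode must be shifted up by one -- exactly what the
// patched line "v->fcm ? v->fcm + 1 : 0" does.
static std::uint32_t fcm_to_vdpau(int fcm) {
  return fcm ? static_cast<std::uint32_t>(fcm) + 1 : 0;
}

int main() {
  assert(fcm_to_vdpau(PROGRESSIVE) == 0);  // progressive stays 0
  assert(fcm_to_vdpau(ILACE_FRAME) == 2);  // frame interlace -> 2
  assert(fcm_to_vdpau(ILACE_FIELD) == 3);  // field interlace -> 3
  return 0;
}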
mmm a / README . md <nl> ppp b / README . md <nl> $ make <nl> $ . / json_unit " * " <nl> <nl> = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = <nl> - All tests passed ( 3344416 assertions in 30 test cases ) <nl> + All tests passed ( 3344554 assertions in 31 test cases ) <nl> ` ` ` <nl> <nl> For more information , have a look at the file [ . travis . yml ] ( https : / / github . com / nlohmann / json / blob / master / . travis . yml ) . <nl> new file mode 100644 <nl> index 000000000 . . d81a58dbd <nl> mmm / dev / null <nl> ppp b / doc / examples / diff . cpp <nl> <nl> + # include < json . hpp > <nl> + <nl> + using json = nlohmann : : json ; <nl> + <nl> + int main ( ) <nl> + { <nl> + / / the source document <nl> + json source = R " ( <nl> + { <nl> + " baz " : " qux " , <nl> + " foo " : " bar " <nl> + } <nl> + ) " _json ; <nl> + <nl> + / / the target document <nl> + json target = R " ( <nl> + { <nl> + " baz " : " boo " , <nl> + " hello " : [ <nl> + " world " <nl> + ] <nl> + } <nl> + ) " _json ; <nl> + <nl> + / / create the patch <nl> + json patch = json : : diff ( source , target ) ; <nl> + <nl> + / / roundtrip <nl> + json patched_source = source . patch ( patch ) ; <nl> + <nl> + / / output patch and roundtrip result <nl> + std : : cout < < std : : setw ( 4 ) < < patch < < " \ n \ n " <nl> + < < std : : setw ( 4 ) < < patched_source < < std : : endl ; <nl> + } <nl> new file mode 100644 <nl> index 000000000 . . c3e3fa4d7 <nl> mmm / dev / null <nl> ppp b / doc / examples / diff . link <nl> @ @ - 0 , 0 + 1 @ @ <nl> + < a target = " _blank " href = " http : / / melpon . org / wandbox / permlink / hicmeOK39tBxaluM " > < b > online < / b > < / a > <nl> \ No newline at end of file <nl> new file mode 100644 <nl> index 000000000 . . 7dc79791f <nl> mmm / dev / null <nl> ppp b / doc / examples / diff . output <nl> <nl> + [ <nl> + { <nl> + " op " : " replace " , <nl> + " path " : " / baz " , <nl> + " value " : " boo " <nl> + } , <nl> + { <nl> + " op " : " remove " , <nl> + " path " : " / foo " <nl> + } , <nl> + { <nl> + " op " : " add " , <nl> + " path " : " / hello " , <nl> + " value " : [ <nl> + " world " <nl> + ] <nl> + } <nl> + ] <nl> + <nl> + { <nl> + " baz " : " boo " , <nl> + " hello " : [ <nl> + " world " <nl> + ] <nl> + } <nl> new file mode 100644 <nl> index 000000000 . . 24a52d596 <nl> mmm / dev / null <nl> ppp b / doc / examples / patch . cpp <nl> <nl> + # include < json . hpp > <nl> + <nl> + using json = nlohmann : : json ; <nl> + <nl> + int main ( ) <nl> + { <nl> + / / the original document <nl> + json doc = R " ( <nl> + { <nl> + " baz " : " qux " , <nl> + " foo " : " bar " <nl> + } <nl> + ) " _json ; <nl> + <nl> + / / the patch <nl> + json patch = R " ( <nl> + [ <nl> + { " op " : " replace " , " path " : " / baz " , " value " : " boo " } , <nl> + { " op " : " add " , " path " : " / hello " , " value " : [ " world " ] } , <nl> + { " op " : " remove " , " path " : " / foo " } <nl> + ] <nl> + ) " _json ; <nl> + <nl> + / / apply the patch <nl> + json patched_doc = doc . patch ( patch ) ; <nl> + <nl> + / / output original and patched document <nl> + std : : cout < < std : : setw ( 4 ) < < doc < < " \ n \ n " <nl> + < < std : : setw ( 4 ) < < patched_doc < < std : : endl ; <nl> + } <nl> new file mode 100644 <nl> index 000000000 . . 5d5032b7b <nl> mmm / dev / null <nl> ppp b / doc / examples / patch . link <nl> @ @ - 0 , 0 + 1 @ @ <nl> + < a target = " _blank " href = " http : / / melpon . 
org / wandbox / permlink / lbczW3AzcUbH1Nbo " > < b > online < / b > < / a > <nl> \ No newline at end of file <nl> new file mode 100644 <nl> index 000000000 . . eb558fe25 <nl> mmm / dev / null <nl> ppp b / doc / examples / patch . output <nl> <nl> + { <nl> + " baz " : " qux " , <nl> + " foo " : " bar " <nl> + } <nl> + <nl> + { <nl> + " baz " : " boo " , <nl> + " hello " : [ <nl> + " world " <nl> + ] <nl> + } <nl> mmm a / src / json . hpp <nl> ppp b / src / json . hpp <nl> class basic_json <nl> } <nl> } <nl> <nl> - / * ! <nl> - @ brief access specified element via JSON Pointer <nl> - <nl> - Uses a JSON pointer to retrieve a reference to the respective JSON value . <nl> - No bound checking is performed . Similar to <nl> - @ ref operator [ ] ( const typename object_t : : key_type & ) , ` null ` values <nl> - are created in arrays and objects if necessary . <nl> - <nl> - In particular : <nl> - - If the JSON pointer points to an object key that does not exist , it <nl> - is created an filled with a ` null ` value before a reference to it <nl> - is returned . <nl> - - If the JSON pointer points to an array index that does not exist , it <nl> - is created an filled with a ` null ` value before a reference to it <nl> - is returned . All indices between the current maximum and the given <nl> - index are also filled with ` null ` . <nl> - - The special value ` - ` is treated as a synonym for the index past the <nl> - end . <nl> - <nl> - @ param [ in ] ptr a JSON pointer <nl> - <nl> - @ return reference to the element pointed to by @ a ptr <nl> - <nl> - @ complexity Constant . <nl> - <nl> - @ throw std : : out_of_range if the JSON pointer can not be resolved <nl> - @ throw std : : domain_error if an array index begins with ' 0 ' <nl> - @ throw std : : invalid_argument if an array index was not a number <nl> - <nl> - @ liveexample { The behavior is shown in the example . , operatorjson_pointer } <nl> - <nl> - @ since version 2 . 0 . 0 <nl> - * / <nl> - reference operator [ ] ( const json_pointer & ptr ) <nl> - { <nl> - return ptr . get_unchecked ( this ) ; <nl> - } <nl> - <nl> - / * ! <nl> - @ brief access specified element via JSON Pointer <nl> - <nl> - Uses a JSON pointer to retrieve a reference to the respective JSON value . <nl> - No bound checking is performed . The function does not change the JSON <nl> - value ; no ` null ` values are created . In particular , the the special value <nl> - ` - ` yields an exception . <nl> - <nl> - @ param [ in ] ptr JSON pointer to the desired element <nl> - <nl> - @ return const reference to the element pointed to by @ a ptr <nl> - <nl> - @ complexity Constant . <nl> - <nl> - @ throw std : : out_of_range if the JSON pointer can not be resolved <nl> - @ throw std : : domain_error if an array index begins with ' 0 ' <nl> - @ throw std : : invalid_argument if an array index was not a number <nl> - <nl> - @ liveexample { The behavior is shown in the example . , operatorjson_pointer_const } <nl> - <nl> - @ since version 2 . 0 . 0 <nl> - * / <nl> - const_reference operator [ ] ( const json_pointer & ptr ) const <nl> - { <nl> - return ptr . get_unchecked ( this ) ; <nl> - } <nl> - <nl> - / * ! <nl> - @ brief access specified element via JSON Pointer <nl> - <nl> - Returns a reference to the element at with specified JSON pointer @ a ptr , <nl> - with bounds checking . <nl> - <nl> - @ param [ in ] ptr JSON pointer to the desired element <nl> - <nl> - @ return reference to the element pointed to by @ a ptr <nl> - <nl> - @ complexity Constant . 
<nl> - <nl> - @ throw std : : out_of_range if the JSON pointer can not be resolved <nl> - @ throw std : : domain_error if an array index begins with ' 0 ' <nl> - @ throw std : : invalid_argument if an array index was not a number <nl> - <nl> - @ liveexample { The behavior is shown in the example . , at_json_pointer } <nl> - <nl> - @ since version 2 . 0 . 0 <nl> - * / <nl> - reference at ( const json_pointer & ptr ) <nl> - { <nl> - return ptr . get_checked ( this ) ; <nl> - } <nl> - <nl> - / * ! <nl> - @ brief access specified element via JSON Pointer <nl> - <nl> - Returns a const reference to the element at with specified JSON pointer <nl> - @ a ptr , with bounds checking . <nl> - <nl> - @ param [ in ] ptr JSON pointer to the desired element <nl> - <nl> - @ return reference to the element pointed to by @ a ptr <nl> - <nl> - @ complexity Constant . <nl> - <nl> - @ throw std : : out_of_range if the JSON pointer can not be resolved <nl> - @ throw std : : domain_error if an array index begins with ' 0 ' <nl> - @ throw std : : invalid_argument if an array index was not a number <nl> - <nl> - @ liveexample { The behavior is shown in the example . , at_json_pointer_const } <nl> - <nl> - @ since version 2 . 0 . 0 <nl> - * / <nl> - const_reference at ( const json_pointer & ptr ) const <nl> - { <nl> - return ptr . get_checked ( this ) ; <nl> - } <nl> - <nl> / * ! <nl> @ brief access specified object element with default value <nl> <nl> class basic_json <nl> <nl> @ throw std : : domain_error when called on a type other than JSON array ; <nl> example : ` " cannot use erase ( ) with null " ` <nl> - @ throw std : : out_of_range when ` idx > = size ( ) ` ; example : ` " index out of <nl> - range " ` <nl> + @ throw std : : out_of_range when ` idx > = size ( ) ` ; example : ` " array index 17 <nl> + is out of range " ` <nl> <nl> @ complexity Linear in distance between @ a idx and the end of the container . <nl> <nl> class basic_json <nl> { <nl> if ( idx > = size ( ) ) <nl> { <nl> - throw std : : out_of_range ( " index out of range " ) ; <nl> + throw std : : out_of_range ( " array index " + std : : to_string ( idx ) + " is out of range " ) ; <nl> } <nl> <nl> assert ( m_value . array ! = nullptr ) ; <nl> class basic_json <nl> : reference_tokens ( split ( s ) ) <nl> { } <nl> <nl> + / / / test for inequality <nl> + bool operator ! = ( const json_pointer & rhs ) const <nl> + { <nl> + return reference_tokens ! = rhs . reference_tokens ; <nl> + } <nl> + <nl> + private : <nl> + / / / remove and return last reference pointer <nl> std : : string pop_back ( ) <nl> { <nl> - if ( reference_tokens . empty ( ) ) <nl> + if ( is_root ( ) ) <nl> { <nl> throw std : : domain_error ( " JSON pointer has no parent " ) ; <nl> } <nl> class basic_json <nl> return last ; <nl> } <nl> <nl> - private : <nl> + / / / return whether pointer points to the root document <nl> + bool is_root ( ) const <nl> + { <nl> + return reference_tokens . empty ( ) ; <nl> + } <nl> + <nl> + json_pointer top ( ) const <nl> + { <nl> + if ( is_root ( ) ) <nl> + { <nl> + throw std : : domain_error ( " JSON pointer has no parent " ) ; <nl> + } <nl> + <nl> + json_pointer result = * this ; <nl> + result . reference_tokens = { reference_tokens [ 0 ] } ; <nl> + return result ; <nl> + } <nl> + <nl> / * ! 
<nl> @ brief create and return a reference to the pointed to value <nl> * / <nl> class basic_json <nl> case value_t : : array : <nl> { <nl> / / create an entry in the array <nl> - result = & result - > operator [ ] ( static_cast < size_t > ( std : : stoi ( reference_token ) ) ) ; <nl> + result = & result - > operator [ ] ( static_cast < size_type > ( std : : stoi ( reference_token ) ) ) ; <nl> break ; <nl> } <nl> <nl> class basic_json <nl> else <nl> { <nl> / / convert array index to number ; unchecked access <nl> - ptr = & ptr - > operator [ ] ( static_cast < size_t > ( std : : stoi ( reference_token ) ) ) ; <nl> + ptr = & ptr - > operator [ ] ( static_cast < size_type > ( std : : stoi ( reference_token ) ) ) ; <nl> } <nl> break ; <nl> } <nl> class basic_json <nl> } <nl> <nl> / / note : at performs range check <nl> - ptr = & ptr - > at ( static_cast < size_t > ( std : : stoi ( reference_token ) ) ) ; <nl> + ptr = & ptr - > at ( static_cast < size_type > ( std : : stoi ( reference_token ) ) ) ; <nl> break ; <nl> } <nl> <nl> class basic_json <nl> } <nl> <nl> / / use unchecked array access <nl> - ptr = & ptr - > operator [ ] ( static_cast < size_t > ( std : : stoi ( reference_token ) ) ) ; <nl> + ptr = & ptr - > operator [ ] ( static_cast < size_type > ( std : : stoi ( reference_token ) ) ) ; <nl> break ; <nl> } <nl> <nl> class basic_json <nl> } <nl> <nl> / / note : at performs range check <nl> - ptr = & ptr - > at ( static_cast < size_t > ( std : : stoi ( reference_token ) ) ) ; <nl> + ptr = & ptr - > at ( static_cast < size_type > ( std : : stoi ( reference_token ) ) ) ; <nl> break ; <nl> } <nl> <nl> class basic_json <nl> } <nl> } <nl> <nl> - / / first transform any occurrence of the sequence ' ~ 1 ' to ' / ' <nl> - replace_substring ( reference_token , " ~ 1 " , " / " ) ; <nl> - / / then transform any occurrence of the sequence ' ~ 0 ' to ' ~ ' <nl> - replace_substring ( reference_token , " ~ 0 " , " ~ " ) ; <nl> - <nl> / / finally , store the reference token <nl> + unescape ( reference_token ) ; <nl> result . push_back ( reference_token ) ; <nl> } <nl> <nl> class basic_json <nl> ) ; <nl> } <nl> <nl> + / / / escape tilde and slash <nl> + static std : : string escape ( std : : string s ) <nl> + { <nl> + / / escape " ~ " " to " ~ 0 " and " / " to " ~ 1 " <nl> + replace_substring ( s , " ~ " , " ~ 0 " ) ; <nl> + replace_substring ( s , " / " , " ~ 1 " ) ; <nl> + return s ; <nl> + } <nl> + <nl> + / / / unescape tilde and slash <nl> + static void unescape ( std : : string & s ) <nl> + { <nl> + / / first transform any occurrence of the sequence ' ~ 1 ' to ' / ' <nl> + replace_substring ( s , " ~ 1 " , " / " ) ; <nl> + / / then transform any occurrence of the sequence ' ~ 0 ' to ' ~ ' <nl> + replace_substring ( s , " ~ 0 " , " ~ " ) ; <nl> + } <nl> + <nl> / * ! <nl> @ param [ in ] reference_string the reference string to the current value <nl> @ param [ in ] value the value to consider <nl> class basic_json <nl> <nl> @ note Empty objects or arrays are flattened to ` null ` . <nl> * / <nl> - static void flatten ( const std : : string reference_string , <nl> + static void flatten ( const std : : string & reference_string , <nl> const basic_json & value , <nl> basic_json & result ) <nl> { <nl> class basic_json <nl> / / iterate object and use keys as reference string <nl> for ( const auto & element : * value . m_value . object ) <nl> { <nl> - / / escape " ~ " " to " ~ 0 " and " / " to " ~ 1 " <nl> - std : : string key ( element . 
first ) ; <nl> - replace_substring ( key , " ~ " , " ~ 0 " ) ; <nl> - replace_substring ( key , " / " , " ~ 1 " ) ; <nl> - <nl> - flatten ( reference_string + " / " + key , <nl> + flatten ( reference_string + " / " + escape ( element . first ) , <nl> element . second , result ) ; <nl> } <nl> } <nl> class basic_json <nl> std : : vector < std : : string > reference_tokens { } ; <nl> } ; <nl> <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - / / JSON Pointer functions / / <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> + / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> + / / JSON Pointer support / / <nl> + / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> <nl> / / / @ name JSON Pointer functions <nl> / / / @ { <nl> <nl> + / * ! <nl> + @ brief access specified element via JSON Pointer <nl> + <nl> + Uses a JSON pointer to retrieve a reference to the respective JSON value . <nl> + No bound checking is performed . Similar to <nl> + @ ref operator [ ] ( const typename object_t : : key_type & ) , ` null ` values <nl> + are created in arrays and objects if necessary . <nl> + <nl> + In particular : <nl> + - If the JSON pointer points to an object key that does not exist , it <nl> + is created an filled with a ` null ` value before a reference to it <nl> + is returned . <nl> + - If the JSON pointer points to an array index that does not exist , it <nl> + is created an filled with a ` null ` value before a reference to it <nl> + is returned . All indices between the current maximum and the given <nl> + index are also filled with ` null ` . <nl> + - The special value ` - ` is treated as a synonym for the index past the <nl> + end . <nl> + <nl> + @ param [ in ] ptr a JSON pointer <nl> + <nl> + @ return reference to the element pointed to by @ a ptr <nl> + <nl> + @ complexity Constant . <nl> + <nl> + @ throw std : : out_of_range if the JSON pointer can not be resolved <nl> + @ throw std : : domain_error if an array index begins with ' 0 ' <nl> + @ throw std : : invalid_argument if an array index was not a number <nl> + <nl> + @ liveexample { The behavior is shown in the example . , operatorjson_pointer } <nl> + <nl> + @ since version 2 . 0 . 0 <nl> + * / <nl> + reference operator [ ] ( const json_pointer & ptr ) <nl> + { <nl> + return ptr . get_unchecked ( this ) ; <nl> + } <nl> + <nl> + / * ! <nl> + @ brief access specified element via JSON Pointer <nl> + <nl> + Uses a JSON pointer to retrieve a reference to the respective JSON value . <nl> + No bound checking is performed . The function does not change the JSON <nl> + value ; no ` null ` values are created . In particular , the the special value <nl> + ` - ` yields an exception . <nl> + <nl> + @ param [ in ] ptr JSON pointer to the desired element <nl> + <nl> + @ return const reference to the element pointed to by @ a ptr <nl> + <nl> + @ complexity Constant . <nl> + <nl> + @ throw std : : out_of_range if the JSON pointer can not be resolved <nl> + @ throw std : : domain_error if an array index begins with ' 0 ' <nl> + @ throw std : : invalid_argument if an array index was not a number <nl> + <nl> + @ liveexample { The behavior is shown in the example . , operatorjson_pointer_const } <nl> + <nl> + @ since version 2 . 0 . 0 <nl> + * / <nl> + const_reference operator [ ] ( const json_pointer & ptr ) const <nl> + { <nl> + return ptr . get_unchecked ( this ) ; <nl> + } <nl> + <nl> + / * ! 
<nl> + @ brief access specified element via JSON Pointer <nl> + <nl> + Returns a reference to the element at with specified JSON pointer @ a ptr , <nl> + with bounds checking . <nl> + <nl> + @ param [ in ] ptr JSON pointer to the desired element <nl> + <nl> + @ return reference to the element pointed to by @ a ptr <nl> + <nl> + @ complexity Constant . <nl> + <nl> + @ throw std : : out_of_range if the JSON pointer can not be resolved <nl> + @ throw std : : domain_error if an array index begins with ' 0 ' <nl> + @ throw std : : invalid_argument if an array index was not a number <nl> + <nl> + @ liveexample { The behavior is shown in the example . , at_json_pointer } <nl> + <nl> + @ since version 2 . 0 . 0 <nl> + * / <nl> + reference at ( const json_pointer & ptr ) <nl> + { <nl> + return ptr . get_checked ( this ) ; <nl> + } <nl> + <nl> + / * ! <nl> + @ brief access specified element via JSON Pointer <nl> + <nl> + Returns a const reference to the element at with specified JSON pointer <nl> + @ a ptr , with bounds checking . <nl> + <nl> + @ param [ in ] ptr JSON pointer to the desired element <nl> + <nl> + @ return reference to the element pointed to by @ a ptr <nl> + <nl> + @ complexity Constant . <nl> + <nl> + @ throw std : : out_of_range if the JSON pointer can not be resolved <nl> + @ throw std : : domain_error if an array index begins with ' 0 ' <nl> + @ throw std : : invalid_argument if an array index was not a number <nl> + <nl> + @ liveexample { The behavior is shown in the example . , at_json_pointer_const } <nl> + <nl> + @ since version 2 . 0 . 0 <nl> + * / <nl> + const_reference at ( const json_pointer & ptr ) const <nl> + { <nl> + return ptr . get_checked ( this ) ; <nl> + } <nl> + <nl> / * ! <nl> @ brief return flattened JSON value <nl> <nl> class basic_json <nl> <nl> / / / @ } <nl> <nl> + / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> + / / JSON Patch functions / / <nl> + / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> + <nl> + / / / @ name JSON Patch functions <nl> + / / / @ { <nl> + <nl> / * ! <nl> @ brief applies a JSON patch <nl> <nl> + [ JSON Patch ] ( http : / / jsonpatch . com ) defines a JSON document structure for <nl> + expressing a sequence of operations to apply to a JSON ) document . With <nl> + this funcion , a JSON Patch is applied to the current JSON value by <nl> + executing all operations from the patch . <nl> + <nl> @ param [ in ] patch JSON patch document <nl> @ return patched document <nl> <nl> - @ note The original JSON value is not changed ; that is , the patch is <nl> - applied to a copy of the value . <nl> + @ note The application of a patch is atomic : Either all operations succeed <nl> + and the patched document is returned or an exception is thrown . In <nl> + any case , the original value is not changed : the patch is applied <nl> + to a copy of the value . <nl> + <nl> + @ throw std : : out_of_range if a JSON pointer inside the patch could not <nl> + be resolved successfully in the current JSON value ; example : ` " key baz <nl> + not found " ` <nl> + @ throw invalid_argument if the JSON patch is malformed ( e . g . , mandatory <nl> + attributes are missing ) ; example : ` " operation add must have member path " ` <nl> + <nl> + @ complexity Linear in the size of the JSON value and the length of the <nl> + JSON patch . As usually only a fraction of the JSON value is affected by <nl> + the patch , the complexity can usually be neglected . <nl> <nl> - @ sa [ RFC 6902 ] ( https : / / tools . ietf . 
org / html / rfc6902 ) <nl> + @ liveexample { The following code shows how a JSON patch is applied to a <nl> + value . , patch } <nl> + <nl> + @ sa @ ref diff - - create a JSON patch by comparing two JSON values <nl> + <nl> + @ sa [ RFC 6902 ( JSON Patch ) ] ( https : / / tools . ietf . org / html / rfc6902 ) <nl> + @ sa [ RFC 6901 ( JSON Pointer ) ] ( https : / / tools . ietf . org / html / rfc6901 ) <nl> + <nl> + @ since version 2 . 0 . 0 <nl> * / <nl> - basic_json apply_patch ( const basic_json & patch ) const <nl> + basic_json patch ( const basic_json & patch ) const <nl> { <nl> / / make a working copy to apply the patch to <nl> basic_json result = * this ; <nl> <nl> + / / the valid JSON Patch operations <nl> + enum class patch_operations { add , remove , replace , move , copy , test , invalid } ; <nl> + <nl> + const auto get_op = [ ] ( const std : : string op ) <nl> + { <nl> + if ( op = = " add " ) <nl> + { <nl> + return patch_operations : : add ; <nl> + } <nl> + if ( op = = " remove " ) <nl> + { <nl> + return patch_operations : : remove ; <nl> + } <nl> + if ( op = = " replace " ) <nl> + { <nl> + return patch_operations : : replace ; <nl> + } <nl> + if ( op = = " move " ) <nl> + { <nl> + return patch_operations : : move ; <nl> + } <nl> + if ( op = = " copy " ) <nl> + { <nl> + return patch_operations : : copy ; <nl> + } <nl> + if ( op = = " test " ) <nl> + { <nl> + return patch_operations : : test ; <nl> + } <nl> + <nl> + return patch_operations : : invalid ; <nl> + } ; <nl> + <nl> / / wrapper for " add " operation ; add value at ptr <nl> const auto operation_add = [ & result ] ( json_pointer & ptr , basic_json val ) <nl> { <nl> - / / get reference to parent of JSON pointer ptr <nl> - const auto last_path = ptr . pop_back ( ) ; <nl> - basic_json & parent = result . at ( ptr ) ; <nl> - <nl> - if ( parent . is_object ( ) ) <nl> + / / adding to the root of the target document means replacing it <nl> + if ( ptr . is_root ( ) ) <nl> { <nl> - / / use operator [ ] to add value <nl> - parent [ last_path ] = val ; <nl> + result = val ; <nl> } <nl> - else if ( parent . is_array ( ) ) <nl> + else <nl> { <nl> - if ( last_path = = " - " ) <nl> + / / make sure the top element of the pointer exists <nl> + json_pointer top_pointer = ptr . top ( ) ; <nl> + if ( top_pointer ! = ptr ) <nl> { <nl> - / / special case : append to back <nl> - parent . push_back ( val ) ; <nl> + basic_json & x = result . at ( top_pointer ) ; <nl> } <nl> - else <nl> + <nl> + / / get reference to parent of JSON pointer ptr <nl> + const auto last_path = ptr . pop_back ( ) ; <nl> + basic_json & parent = result [ ptr ] ; <nl> + <nl> + switch ( parent . m_type ) <nl> { <nl> - / / default case : insert add offset <nl> - parent . insert ( parent . begin ( ) + std : : stoi ( last_path ) , val ) ; <nl> + case value_t : : null : <nl> + case value_t : : object : <nl> + { <nl> + / / use operator [ ] to add value <nl> + parent [ last_path ] = val ; <nl> + break ; <nl> + } <nl> + <nl> + case value_t : : array : <nl> + { <nl> + if ( last_path = = " - " ) <nl> + { <nl> + / / special case : append to back <nl> + parent . push_back ( val ) ; <nl> + } <nl> + else <nl> + { <nl> + const auto idx = std : : stoi ( last_path ) ; <nl> + if ( static_cast < size_type > ( idx ) > parent . size ( ) ) <nl> + { <nl> + / / avoid undefined behavior <nl> + throw std : : out_of_range ( " array index " + std : : to_string ( idx ) + " is out of range " ) ; <nl> + } <nl> + else <nl> + { <nl> + / / default case : insert add offset <nl> + parent . insert ( parent . 
begin ( ) + static_cast < difference_type > ( idx ) , val ) ; <nl> + } <nl> + } <nl> + break ; <nl> + } <nl> + <nl> + default : <nl> + { <nl> + throw std : : domain_error ( " unexpected parent type " + parent . type_name ( ) ) ; <nl> + } <nl> } <nl> } <nl> } ; <nl> class basic_json <nl> / / remove child <nl> if ( parent . is_object ( ) ) <nl> { <nl> - parent . erase ( parent . find ( last_path ) ) ; <nl> + / / perform range check <nl> + auto it = parent . find ( last_path ) ; <nl> + if ( it ! = parent . end ( ) ) <nl> + { <nl> + parent . erase ( it ) ; <nl> + } <nl> + else <nl> + { <nl> + throw std : : out_of_range ( " key ' " + last_path + " ' not found " ) ; <nl> + } <nl> } <nl> else if ( parent . is_array ( ) ) <nl> { <nl> - parent . erase ( parent . begin ( ) + std : : stoi ( last_path ) ) ; <nl> + / / note erase performs range check <nl> + parent . erase ( static_cast < size_type > ( std : : stoi ( last_path ) ) ) ; <nl> } <nl> } ; <nl> <nl> class basic_json <nl> if ( not patch . is_array ( ) ) <nl> { <nl> / / a JSON patch must be an array of objects <nl> - throw std : : domain_error ( " JSON patch must be an array of objects " ) ; <nl> + throw std : : invalid_argument ( " JSON patch must be an array of objects " ) ; <nl> } <nl> <nl> / / iterate and apply th eoperations <nl> class basic_json <nl> / / check if desired value is present <nl> if ( it = = val . m_value . object - > end ( ) ) <nl> { <nl> - throw std : : domain_error ( error_msg + " must have member ' " + member + " ' " ) ; <nl> + throw std : : invalid_argument ( error_msg + " must have member ' " + member + " ' " ) ; <nl> } <nl> <nl> / / check if result is of type string <nl> if ( string_type and not it - > second . is_string ( ) ) <nl> { <nl> - throw std : : domain_error ( error_msg + " must have string member ' " + member + " ' " ) ; <nl> + throw std : : invalid_argument ( error_msg + " must have string member ' " + member + " ' " ) ; <nl> } <nl> <nl> / / no error : return value <nl> class basic_json <nl> / / type check <nl> if ( not val . is_object ( ) ) <nl> { <nl> - throw std : : domain_error ( " JSON patch must be an array of objects " ) ; <nl> + throw std : : invalid_argument ( " JSON patch must be an array of objects " ) ; <nl> } <nl> <nl> / / collect mandatory members <nl> class basic_json <nl> const std : : string path = get_value ( op , " path " , true ) ; <nl> json_pointer ptr ( path ) ; <nl> <nl> - if ( op = = " add " ) <nl> - { <nl> - operation_add ( ptr , get_value ( " add " , " value " , false ) ) ; <nl> - } <nl> - else if ( op = = " remove " ) <nl> + switch ( get_op ( op ) ) <nl> { <nl> - operation_remove ( ptr ) ; <nl> + case patch_operations : : add : <nl> + { <nl> + operation_add ( ptr , get_value ( " add " , " value " , false ) ) ; <nl> + break ; <nl> + } <nl> + <nl> + case patch_operations : : remove : <nl> + { <nl> + operation_remove ( ptr ) ; <nl> + break ; <nl> + } <nl> + <nl> + case patch_operations : : replace : <nl> + { <nl> + / / the " path " location must exist - use at ( ) <nl> + result . at ( ptr ) = get_value ( " replace " , " value " , false ) ; <nl> + break ; <nl> + } <nl> + <nl> + case patch_operations : : move : <nl> + { <nl> + const std : : string from_path = get_value ( " move " , " from " , true ) ; <nl> + json_pointer from_ptr ( from_path ) ; <nl> + <nl> + / / the " from " location must exist - use at ( ) <nl> + basic_json v = result . 
at ( from_ptr ) ; <nl> + <nl> + / / The move operation is functionally identical to a <nl> + / / " remove " operation on the " from " location , followed <nl> + / / immediately by an " add " operation at the target <nl> + / / location with the value that was just removed . <nl> + operation_remove ( from_ptr ) ; <nl> + operation_add ( ptr , v ) ; <nl> + break ; <nl> + } <nl> + <nl> + case patch_operations : : copy : <nl> + { <nl> + const std : : string from_path = get_value ( " copy " , " from " , true ) ; ; <nl> + const json_pointer from_ptr ( from_path ) ; <nl> + <nl> + / / the " from " location must exist - use at ( ) <nl> + result [ ptr ] = result . at ( from_ptr ) ; <nl> + break ; <nl> + } <nl> + <nl> + case patch_operations : : test : <nl> + { <nl> + bool success = false ; <nl> + try <nl> + { <nl> + / / check if " value " matches the one at " path " <nl> + / / the " path " location must exist - use at ( ) <nl> + success = ( result . at ( ptr ) = = get_value ( " test " , " value " , false ) ) ; <nl> + } <nl> + catch ( std : : out_of_range & ) <nl> + { <nl> + / / ignore out of range errors : success remains false <nl> + } <nl> + <nl> + / / throw an exception if test fails <nl> + if ( not success ) <nl> + { <nl> + throw std : : domain_error ( " unsuccessful : " + val . dump ( ) ) ; <nl> + } <nl> + <nl> + break ; <nl> + } <nl> + <nl> + case patch_operations : : invalid : <nl> + { <nl> + / / op must be " add " , " remove " , " replace " , " move " , " copy " , or <nl> + / / " test " <nl> + throw std : : invalid_argument ( " operation value ' " + op + " ' is invalid " ) ; <nl> + } <nl> } <nl> - else if ( op = = " replace " ) <nl> + } <nl> + <nl> + return result ; <nl> + } <nl> + <nl> + / * ! <nl> + @ brief creates a diff as a JSON patch <nl> + <nl> + Creates a [ JSON Patch ] ( http : / / jsonpatch . com ) so that value @ a source can <nl> + be changed into the value @ a target by calling @ ref patch function . <nl> + <nl> + @ invariant For two JSON values @ a source and @ a target , the following code <nl> + yields always ` true ` : <nl> + @ code { . cpp } <nl> + source . patch ( diff ( source , target ) ) = = target ; <nl> + @ endcode <nl> + <nl> + @ note Currently , only ` remove ` , ` add ` , and ` replace ` operations are <nl> + generated . <nl> + <nl> + @ param [ in ] source JSON value to copare from <nl> + @ param [ in ] target JSON value to copare against <nl> + @ param [ in ] path helper value to create JSON pointers <nl> + <nl> + @ return a JSON patch to convert the @ a source to @ a target <nl> + <nl> + @ complexity Linear in the lengths of @ a source and @ a target . <nl> + <nl> + @ liveexample { The following code shows how a JSON patch is created as a <nl> + diff for two JSON values . , diff } <nl> + <nl> + @ sa @ ref patch - - apply a JSON patch <nl> + <nl> + @ sa [ RFC 6902 ( JSON Patch ) ] ( https : / / tools . ietf . org / html / rfc6902 ) <nl> + <nl> + @ since version 2 . 0 . 0 <nl> + * / <nl> + static basic_json diff ( const basic_json & source , <nl> + const basic_json & target , <nl> + std : : string path = " " ) noexcept <nl> + { <nl> + / / the patch <nl> + basic_json result ( value_t : : array ) ; <nl> + <nl> + / / if the values are the same , return empty patch <nl> + if ( source = = target ) <nl> + { <nl> + return result ; <nl> + } <nl> + <nl> + if ( source . type ( ) ! = target . type ( ) ) <nl> + { <nl> + / / different types : replace value <nl> + result . push_back ( <nl> { <nl> - result . 
at ( ptr ) = get_value ( " replace " , " value " , false ) ; <nl> - } <nl> - else if ( op = = " move " ) <nl> + { " op " , " replace " } , <nl> + { " path " , path } , <nl> + { " value " , target } <nl> + } ) ; <nl> + } <nl> + else <nl> + { <nl> + switch ( source . type ( ) ) <nl> { <nl> - const std : : string from_path = get_value ( " move " , " from " , true ) ; <nl> - json_pointer from_ptr ( from_path ) ; <nl> - basic_json v = result [ from_ptr ] ; <nl> + case value_t : : array : <nl> + { <nl> + / / first pass : traverse common elements <nl> + size_t i = 0 ; <nl> + while ( i < source . size ( ) and i < target . size ( ) ) <nl> + { <nl> + / / recursive call to compare array values at index i <nl> + auto temp_diff = diff ( source [ i ] , target [ i ] , path + " / " + std : : to_string ( i ) ) ; <nl> + result . insert ( result . end ( ) , temp_diff . begin ( ) , temp_diff . end ( ) ) ; <nl> + + + i ; <nl> + } <nl> <nl> - operation_remove ( from_ptr ) ; <nl> - operation_add ( ptr , v ) ; <nl> - } <nl> - else if ( op = = " copy " ) <nl> - { <nl> - const std : : string from_path = get_value ( " copy " , " from " , true ) ; ; <nl> - const json_pointer from_ptr ( from_path ) ; <nl> + / / i now reached the end of at least one array <nl> + / / in a second pass , traverse the remaining elements <nl> <nl> - result [ ptr ] = result . at ( from_ptr ) ; <nl> - } <nl> - else if ( op = = " test " ) <nl> - { <nl> - if ( result . at ( ptr ) ! = get_value ( " test " , " value " , false ) ) <nl> + / / remove my remaining elements <nl> + while ( i < source . size ( ) ) <nl> + { <nl> + result . push_back ( object ( <nl> + { <nl> + { " op " , " remove " } , <nl> + { " path " , path + " / " + std : : to_string ( i ) } <nl> + } ) ) ; <nl> + + + i ; <nl> + } <nl> + <nl> + / / add other remaining elements <nl> + while ( i < target . size ( ) ) <nl> + { <nl> + result . push_back ( <nl> + { <nl> + { " op " , " add " } , <nl> + { " path " , path + " / " + std : : to_string ( i ) } , <nl> + { " value " , target [ i ] } <nl> + } ) ; <nl> + + + i ; <nl> + } <nl> + <nl> + break ; <nl> + } <nl> + <nl> + case value_t : : object : <nl> { <nl> - throw std : : domain_error ( " unsuccessful : " + val . dump ( ) ) ; <nl> + / / first pass : traverse this object ' s elements <nl> + for ( auto it = source . begin ( ) ; it ! = source . end ( ) ; + + it ) <nl> + { <nl> + / / escape the key name to be used in a JSON patch <nl> + const auto key = json_pointer : : escape ( it . key ( ) ) ; <nl> + <nl> + if ( target . find ( it . key ( ) ) ! = target . end ( ) ) <nl> + { <nl> + / / recursive call to compare object values at key it <nl> + auto temp_diff = diff ( it . value ( ) , target [ it . key ( ) ] , path + " / " + key ) ; <nl> + result . insert ( result . end ( ) , temp_diff . begin ( ) , temp_diff . end ( ) ) ; <nl> + } <nl> + else <nl> + { <nl> + / / found a key that is not in o - > remove it <nl> + result . push_back ( object ( <nl> + { <nl> + { " op " , " remove " } , <nl> + { " path " , path + " / " + key } <nl> + } ) ) ; <nl> + } <nl> + } <nl> + <nl> + / / second pass : traverse other object ' s elements <nl> + for ( auto it = target . begin ( ) ; it ! = target . end ( ) ; + + it ) <nl> + { <nl> + if ( source . find ( it . key ( ) ) = = source . end ( ) ) <nl> + { <nl> + / / found a key that is not in this - > add it <nl> + const auto key = json_pointer : : escape ( it . key ( ) ) ; <nl> + result . push_back ( <nl> + { <nl> + { " op " , " add " } , <nl> + { " path " , path + " / " + key } , <nl> + { " value " , it . 
value ( ) } <nl> + } ) ; <nl> + } <nl> + } <nl> + <nl> + break ; <nl> + } <nl> + <nl> + default : <nl> + { <nl> + / / both primitive type : replace value <nl> + result . push_back ( <nl> + { <nl> + { " op " , " replace " } , <nl> + { " path " , path } , <nl> + { " value " , target } <nl> + } ) ; <nl> + break ; <nl> } <nl> - } <nl> - else <nl> - { <nl> - / / op must be " add " , " remove " , " replace " , " move " , " copy " , or <nl> - / / " test " <nl> - throw std : : domain_error ( " operation value ' " + op + " ' is invalid " ) ; <nl> } <nl> } <nl> <nl> return result ; <nl> } <nl> + <nl> + / / / @ } <nl> } ; <nl> <nl> <nl> using json = basic_json < > ; <nl> } <nl> <nl> <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - / / nonmember functions / / <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / <nl> + / / / / / / / / / / / / / / / / / / / / / / / <nl> + / / nonmember support / / <nl> + / / / / / / / / / / / / / / / / / / / / / / / <nl> <nl> / / specialization of std : : swap , and std : : hash <nl> namespace std <nl> mmm a / src / json . hpp . re2c <nl> ppp b / src / json . hpp . re2c <nl> class basic_json <nl> } <nl> } <nl> <nl> - / * ! <nl> - @ brief access specified element via JSON Pointer <nl> - <nl> - Uses a JSON pointer to retrieve a reference to the respective JSON value . <nl> - No bound checking is performed . Similar to <nl> - @ ref operator [ ] ( const typename object_t : : key_type & ) , ` null ` values <nl> - are created in arrays and objects if necessary . <nl> - <nl> - In particular : <nl> - - If the JSON pointer points to an object key that does not exist , it <nl> - is created an filled with a ` null ` value before a reference to it <nl> - is returned . <nl> - - If the JSON pointer points to an array index that does not exist , it <nl> - is created an filled with a ` null ` value before a reference to it <nl> - is returned . All indices between the current maximum and the given <nl> - index are also filled with ` null ` . <nl> - - The special value ` - ` is treated as a synonym for the index past the <nl> - end . <nl> - <nl> - @ param [ in ] ptr a JSON pointer <nl> - <nl> - @ return reference to the element pointed to by @ a ptr <nl> - <nl> - @ complexity Constant . <nl> - <nl> - @ throw std : : out_of_range if the JSON pointer can not be resolved <nl> - @ throw std : : domain_error if an array index begins with ' 0 ' <nl> - @ throw std : : invalid_argument if an array index was not a number <nl> - <nl> - @ liveexample { The behavior is shown in the example . , operatorjson_pointer } <nl> - <nl> - @ since version 2 . 0 . 0 <nl> - * / <nl> - reference operator [ ] ( const json_pointer & ptr ) <nl> - { <nl> - return ptr . get_unchecked ( this ) ; <nl> - } <nl> - <nl> - / * ! <nl> - @ brief access specified element via JSON Pointer <nl> - <nl> - Uses a JSON pointer to retrieve a reference to the respective JSON value . <nl> - No bound checking is performed . The function does not change the JSON <nl> - value ; no ` null ` values are created . In particular , the the special value <nl> - ` - ` yields an exception . <nl> - <nl> - @ param [ in ] ptr JSON pointer to the desired element <nl> - <nl> - @ return const reference to the element pointed to by @ a ptr <nl> - <nl> - @ complexity Constant . 
<nl> - <nl> - @ throw std : : out_of_range if the JSON pointer can not be resolved <nl> - @ throw std : : domain_error if an array index begins with ' 0 ' <nl> - @ throw std : : invalid_argument if an array index was not a number <nl> - <nl> - @ liveexample { The behavior is shown in the example . , operatorjson_pointer_const } <nl> - <nl> - @ since version 2 . 0 . 0 <nl> - * / <nl> - const_reference operator [ ] ( const json_pointer & ptr ) const <nl> - { <nl> - return ptr . get_unchecked ( this ) ; <nl> - } <nl> - <nl> - / * ! <nl> - @ brief access specified element via JSON Pointer <nl> - <nl> - Returns a reference to the element at with specified JSON pointer @ a ptr , <nl> - with bounds checking . <nl> - <nl> - @ param [ in ] ptr JSON pointer to the desired element <nl> - <nl> - @ return reference to the element pointed to by @ a ptr <nl> - <nl> - @ complexity Constant . <nl> - <nl> - @ throw std : : out_of_range if the JSON pointer can not be resolved <nl> - @ throw std : : domain_error if an array index begins with ' 0 ' <nl> - @ throw std : : invalid_argument if an array index was not a number <nl> - <nl> - @ liveexample { The behavior is shown in the example . , at_json_pointer } <nl> - <nl> - @ since version 2 . 0 . 0 <nl> - * / <nl> - reference at ( const json_pointer & ptr ) <nl> - { <nl> - return ptr . get_checked ( this ) ; <nl> - } <nl> - <nl> - / * ! <nl> - @ brief access specified element via JSON Pointer <nl> - <nl> - Returns a const reference to the element at with specified JSON pointer <nl> - @ a ptr , with bounds checking . <nl> - <nl> - @ param [ in ] ptr JSON pointer to the desired element <nl> - <nl> - @ return reference to the element pointed to by @ a ptr <nl> - <nl> - @ complexity Constant . <nl> - <nl> - @ throw std : : out_of_range if the JSON pointer can not be resolved <nl> - @ throw std : : domain_error if an array index begins with ' 0 ' <nl> - @ throw std : : invalid_argument if an array index was not a number <nl> - <nl> - @ liveexample { The behavior is shown in the example . , at_json_pointer_const } <nl> - <nl> - @ since version 2 . 0 . 0 <nl> - * / <nl> - const_reference at ( const json_pointer & ptr ) const <nl> - { <nl> - return ptr . get_checked ( this ) ; <nl> - } <nl> - <nl> / * ! <nl> @ brief access specified object element with default value <nl> <nl> class basic_json <nl> <nl> @ throw std : : domain_error when called on a type other than JSON array ; <nl> example : ` " cannot use erase ( ) with null " ` <nl> - @ throw std : : out_of_range when ` idx > = size ( ) ` ; example : ` " index out of <nl> - range " ` <nl> + @ throw std : : out_of_range when ` idx > = size ( ) ` ; example : ` " array index 17 <nl> + is out of range " ` <nl> <nl> @ complexity Linear in distance between @ a idx and the end of the container . <nl> <nl> class basic_json <nl> { <nl> if ( idx > = size ( ) ) <nl> { <nl> - throw std : : out_of_range ( " index out of range " ) ; <nl> + throw std : : out_of_range ( " array index " + std : : to_string ( idx ) + " is out of range " ) ; <nl> } <nl> <nl> assert ( m_value . array ! = nullptr ) ; <nl> class basic_json <nl> : reference_tokens ( split ( s ) ) <nl> { } <nl> <nl> + / / / test for inequality <nl> + bool operator ! = ( const json_pointer & rhs ) const <nl> + { <nl> + return reference_tokens ! = rhs . reference_tokens ; <nl> + } <nl> + <nl> + private : <nl> + / / / remove and return last reference pointer <nl> std : : string pop_back ( ) <nl> { <nl> - if ( reference_tokens . 
empty ( ) ) <nl> + if ( is_root ( ) ) <nl> { <nl> throw std : : domain_error ( " JSON pointer has no parent " ) ; <nl> } <nl> class basic_json <nl> return last ; <nl> } <nl> <nl> - private : <nl> + / / / return whether pointer points to the root document <nl> + bool is_root ( ) const <nl> + { <nl> + return reference_tokens . empty ( ) ; <nl> + } <nl> + <nl> + json_pointer top ( ) const <nl> + { <nl> + if ( is_root ( ) ) <nl> + { <nl> + throw std : : domain_error ( " JSON pointer has no parent " ) ; <nl> + } <nl> + <nl> + json_pointer result = * this ; <nl> + result . reference_tokens = { reference_tokens [ 0 ] } ; <nl> + return result ; <nl> + } <nl> + <nl> / * ! <nl> @ brief create and return a reference to the pointed to value <nl> * / <nl> class basic_json <nl> case value_t : : array : <nl> { <nl> / / create an entry in the array <nl> - result = & result - > operator [ ] ( static_cast < size_t > ( std : : stoi ( reference_token ) ) ) ; <nl> + result = & result - > operator [ ] ( static_cast < size_type > ( std : : stoi ( reference_token ) ) ) ; <nl> break ; <nl> } <nl> <nl> class basic_json <nl> else <nl> { <nl> / / convert array index to number ; unchecked access <nl> - ptr = & ptr - > operator [ ] ( static_cast < size_t > ( std : : stoi ( reference_token ) ) ) ; <nl> + ptr = & ptr - > operator [ ] ( static_cast < size_type > ( std : : stoi ( reference_token ) ) ) ; <nl> } <nl> break ; <nl> } <nl> class basic_json <nl> } <nl> <nl> / / note : at performs range check <nl> - ptr = & ptr - > at ( static_cast < size_t > ( std : : stoi ( reference_token ) ) ) ; <nl> + ptr = & ptr - > at ( static_cast < size_type > ( std : : stoi ( reference_token ) ) ) ; <nl> break ; <nl> } <nl> <nl> class basic_json <nl> } <nl> <nl> / / use unchecked array access <nl> - ptr = & ptr - > operator [ ] ( static_cast < size_t > ( std : : stoi ( reference_token ) ) ) ; <nl> + ptr = & ptr - > operator [ ] ( static_cast < size_type > ( std : : stoi ( reference_token ) ) ) ; <nl> break ; <nl> } <nl> <nl> class basic_json <nl> } <nl> <nl> / / note : at performs range check <nl> - ptr = & ptr - > at ( static_cast < size_t > ( std : : stoi ( reference_token ) ) ) ; <nl> + ptr = & ptr - > at ( static_cast < size_type > ( std : : stoi ( reference_token ) ) ) ; <nl> break ; <nl> } <nl> <nl> class basic_json <nl> } <nl> } <nl> <nl> - / / first transform any occurrence of the sequence ' ~ 1 ' to ' / ' <nl> - replace_substring ( reference_token , " ~ 1 " , " / " ) ; <nl> - / / then transform any occurrence of the sequence ' ~ 0 ' to ' ~ ' <nl> - replace_substring ( reference_token , " ~ 0 " , " ~ " ) ; <nl> - <nl> / / finally , store the reference token <nl> + unescape ( reference_token ) ; <nl> result . push_back ( reference_token ) ; <nl> } <nl> <nl> class basic_json <nl> ) ; <nl> } <nl> <nl> + / / / escape tilde and slash <nl> + static std : : string escape ( std : : string s ) <nl> + { <nl> + / / escape " ~ " " to " ~ 0 " and " / " to " ~ 1 " <nl> + replace_substring ( s , " ~ " , " ~ 0 " ) ; <nl> + replace_substring ( s , " / " , " ~ 1 " ) ; <nl> + return s ; <nl> + } <nl> + <nl> + / / / unescape tilde and slash <nl> + static void unescape ( std : : string & s ) <nl> + { <nl> + / / first transform any occurrence of the sequence ' ~ 1 ' to ' / ' <nl> + replace_substring ( s , " ~ 1 " , " / " ) ; <nl> + / / then transform any occurrence of the sequence ' ~ 0 ' to ' ~ ' <nl> + replace_substring ( s , " ~ 0 " , " ~ " ) ; <nl> + } <nl> + <nl> / * ! 
<nl> @ param [ in ] reference_string the reference string to the current value <nl> @ param [ in ] value the value to consider <nl> class basic_json <nl> <nl> @ note Empty objects or arrays are flattened to ` null ` . <nl> * / <nl> - static void flatten ( const std : : string reference_string , <nl> + static void flatten ( const std : : string & reference_string , <nl> const basic_json & value , <nl> basic_json & result ) <nl> { <nl> class basic_json <nl> / / iterate object and use keys as reference string <nl> for ( const auto & element : * value . m_value . object ) <nl> { <nl> - / / escape " ~ " " to " ~ 0 " and " / " to " ~ 1 " <nl> - std : : string key ( element . first ) ; <nl> - replace_substring ( key , " ~ " , " ~ 0 " ) ; <nl> - replace_substring ( key , " / " , " ~ 1 " ) ; <nl> - <nl> - flatten ( reference_string + " / " + key , <nl> + flatten ( reference_string + " / " + escape ( element . first ) , <nl> element . second , result ) ; <nl> } <nl> } <nl> class basic_json <nl> std : : vector < std : : string > reference_tokens { } ; <nl> } ; <nl> <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - / / JSON Pointer functions / / <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> + / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> + / / JSON Pointer support / / <nl> + / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> <nl> / / / @ name JSON Pointer functions <nl> / / / @ { <nl> <nl> + / * ! <nl> + @ brief access specified element via JSON Pointer <nl> + <nl> + Uses a JSON pointer to retrieve a reference to the respective JSON value . <nl> + No bound checking is performed . Similar to <nl> + @ ref operator [ ] ( const typename object_t : : key_type & ) , ` null ` values <nl> + are created in arrays and objects if necessary . <nl> + <nl> + In particular : <nl> + - If the JSON pointer points to an object key that does not exist , it <nl> + is created an filled with a ` null ` value before a reference to it <nl> + is returned . <nl> + - If the JSON pointer points to an array index that does not exist , it <nl> + is created an filled with a ` null ` value before a reference to it <nl> + is returned . All indices between the current maximum and the given <nl> + index are also filled with ` null ` . <nl> + - The special value ` - ` is treated as a synonym for the index past the <nl> + end . <nl> + <nl> + @ param [ in ] ptr a JSON pointer <nl> + <nl> + @ return reference to the element pointed to by @ a ptr <nl> + <nl> + @ complexity Constant . <nl> + <nl> + @ throw std : : out_of_range if the JSON pointer can not be resolved <nl> + @ throw std : : domain_error if an array index begins with ' 0 ' <nl> + @ throw std : : invalid_argument if an array index was not a number <nl> + <nl> + @ liveexample { The behavior is shown in the example . , operatorjson_pointer } <nl> + <nl> + @ since version 2 . 0 . 0 <nl> + * / <nl> + reference operator [ ] ( const json_pointer & ptr ) <nl> + { <nl> + return ptr . get_unchecked ( this ) ; <nl> + } <nl> + <nl> + / * ! <nl> + @ brief access specified element via JSON Pointer <nl> + <nl> + Uses a JSON pointer to retrieve a reference to the respective JSON value . <nl> + No bound checking is performed . The function does not change the JSON <nl> + value ; no ` null ` values are created . In particular , the the special value <nl> + ` - ` yields an exception . 
<nl> + <nl> + @ param [ in ] ptr JSON pointer to the desired element <nl> + <nl> + @ return const reference to the element pointed to by @ a ptr <nl> + <nl> + @ complexity Constant . <nl> + <nl> + @ throw std : : out_of_range if the JSON pointer can not be resolved <nl> + @ throw std : : domain_error if an array index begins with ' 0 ' <nl> + @ throw std : : invalid_argument if an array index was not a number <nl> + <nl> + @ liveexample { The behavior is shown in the example . , operatorjson_pointer_const } <nl> + <nl> + @ since version 2 . 0 . 0 <nl> + * / <nl> + const_reference operator [ ] ( const json_pointer & ptr ) const <nl> + { <nl> + return ptr . get_unchecked ( this ) ; <nl> + } <nl> + <nl> + / * ! <nl> + @ brief access specified element via JSON Pointer <nl> + <nl> + Returns a reference to the element at with specified JSON pointer @ a ptr , <nl> + with bounds checking . <nl> + <nl> + @ param [ in ] ptr JSON pointer to the desired element <nl> + <nl> + @ return reference to the element pointed to by @ a ptr <nl> + <nl> + @ complexity Constant . <nl> + <nl> + @ throw std : : out_of_range if the JSON pointer can not be resolved <nl> + @ throw std : : domain_error if an array index begins with ' 0 ' <nl> + @ throw std : : invalid_argument if an array index was not a number <nl> + <nl> + @ liveexample { The behavior is shown in the example . , at_json_pointer } <nl> + <nl> + @ since version 2 . 0 . 0 <nl> + * / <nl> + reference at ( const json_pointer & ptr ) <nl> + { <nl> + return ptr . get_checked ( this ) ; <nl> + } <nl> + <nl> + / * ! <nl> + @ brief access specified element via JSON Pointer <nl> + <nl> + Returns a const reference to the element at with specified JSON pointer <nl> + @ a ptr , with bounds checking . <nl> + <nl> + @ param [ in ] ptr JSON pointer to the desired element <nl> + <nl> + @ return reference to the element pointed to by @ a ptr <nl> + <nl> + @ complexity Constant . <nl> + <nl> + @ throw std : : out_of_range if the JSON pointer can not be resolved <nl> + @ throw std : : domain_error if an array index begins with ' 0 ' <nl> + @ throw std : : invalid_argument if an array index was not a number <nl> + <nl> + @ liveexample { The behavior is shown in the example . , at_json_pointer_const } <nl> + <nl> + @ since version 2 . 0 . 0 <nl> + * / <nl> + const_reference at ( const json_pointer & ptr ) const <nl> + { <nl> + return ptr . get_checked ( this ) ; <nl> + } <nl> + <nl> / * ! <nl> @ brief return flattened JSON value <nl> <nl> class basic_json <nl> <nl> / / / @ } <nl> <nl> + / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> + / / JSON Patch functions / / <nl> + / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> + <nl> + / / / @ name JSON Patch functions <nl> + / / / @ { <nl> + <nl> / * ! <nl> @ brief applies a JSON patch <nl> <nl> + [ JSON Patch ] ( http : / / jsonpatch . com ) defines a JSON document structure for <nl> + expressing a sequence of operations to apply to a JSON ) document . With <nl> + this funcion , a JSON Patch is applied to the current JSON value by <nl> + executing all operations from the patch . <nl> + <nl> @ param [ in ] patch JSON patch document <nl> @ return patched document <nl> <nl> - @ note The original JSON value is not changed ; that is , the patch is <nl> - applied to a copy of the value . <nl> + @ note The application of a patch is atomic : Either all operations succeed <nl> + and the patched document is returned or an exception is thrown . 
In <nl> + any case , the original value is not changed : the patch is applied <nl> + to a copy of the value . <nl> + <nl> + @ throw std : : out_of_range if a JSON pointer inside the patch could not <nl> + be resolved successfully in the current JSON value ; example : ` " key baz <nl> + not found " ` <nl> + @ throw invalid_argument if the JSON patch is malformed ( e . g . , mandatory <nl> + attributes are missing ) ; example : ` " operation add must have member path " ` <nl> <nl> - @ sa [ RFC 6902 ] ( https : / / tools . ietf . org / html / rfc6902 ) <nl> + @ complexity Linear in the size of the JSON value and the length of the <nl> + JSON patch . As usually only a fraction of the JSON value is affected by <nl> + the patch , the complexity can usually be neglected . <nl> + <nl> + @ liveexample { The following code shows how a JSON patch is applied to a <nl> + value . , patch } <nl> + <nl> + @ sa @ ref diff - - create a JSON patch by comparing two JSON values <nl> + <nl> + @ sa [ RFC 6902 ( JSON Patch ) ] ( https : / / tools . ietf . org / html / rfc6902 ) <nl> + @ sa [ RFC 6901 ( JSON Pointer ) ] ( https : / / tools . ietf . org / html / rfc6901 ) <nl> + <nl> + @ since version 2 . 0 . 0 <nl> * / <nl> - basic_json apply_patch ( const basic_json & patch ) const <nl> + basic_json patch ( const basic_json & patch ) const <nl> { <nl> / / make a working copy to apply the patch to <nl> basic_json result = * this ; <nl> <nl> + / / the valid JSON Patch operations <nl> + enum class patch_operations { add , remove , replace , move , copy , test , invalid } ; <nl> + <nl> + const auto get_op = [ ] ( const std : : string op ) <nl> + { <nl> + if ( op = = " add " ) <nl> + { <nl> + return patch_operations : : add ; <nl> + } <nl> + if ( op = = " remove " ) <nl> + { <nl> + return patch_operations : : remove ; <nl> + } <nl> + if ( op = = " replace " ) <nl> + { <nl> + return patch_operations : : replace ; <nl> + } <nl> + if ( op = = " move " ) <nl> + { <nl> + return patch_operations : : move ; <nl> + } <nl> + if ( op = = " copy " ) <nl> + { <nl> + return patch_operations : : copy ; <nl> + } <nl> + if ( op = = " test " ) <nl> + { <nl> + return patch_operations : : test ; <nl> + } <nl> + <nl> + return patch_operations : : invalid ; <nl> + } ; <nl> + <nl> / / wrapper for " add " operation ; add value at ptr <nl> const auto operation_add = [ & result ] ( json_pointer & ptr , basic_json val ) <nl> { <nl> - / / get reference to parent of JSON pointer ptr <nl> - const auto last_path = ptr . pop_back ( ) ; <nl> - basic_json & parent = result . at ( ptr ) ; <nl> - <nl> - if ( parent . is_object ( ) ) <nl> + / / adding to the root of the target document means replacing it <nl> + if ( ptr . is_root ( ) ) <nl> { <nl> - / / use operator [ ] to add value <nl> - parent [ last_path ] = val ; <nl> + result = val ; <nl> } <nl> - else if ( parent . is_array ( ) ) <nl> + else <nl> { <nl> - if ( last_path = = " - " ) <nl> + / / make sure the top element of the pointer exists <nl> + json_pointer top_pointer = ptr . top ( ) ; <nl> + if ( top_pointer ! = ptr ) <nl> { <nl> - / / special case : append to back <nl> - parent . push_back ( val ) ; <nl> + basic_json & x = result . at ( top_pointer ) ; <nl> } <nl> - else <nl> + <nl> + / / get reference to parent of JSON pointer ptr <nl> + const auto last_path = ptr . pop_back ( ) ; <nl> + basic_json & parent = result [ ptr ] ; <nl> + <nl> + switch ( parent . m_type ) <nl> { <nl> - / / default case : insert add offset <nl> - parent . insert ( parent . 
begin ( ) + std : : stoi ( last_path ) , val ) ; <nl> + case value_t : : null : <nl> + case value_t : : object : <nl> + { <nl> + / / use operator [ ] to add value <nl> + parent [ last_path ] = val ; <nl> + break ; <nl> + } <nl> + <nl> + case value_t : : array : <nl> + { <nl> + if ( last_path = = " - " ) <nl> + { <nl> + / / special case : append to back <nl> + parent . push_back ( val ) ; <nl> + } <nl> + else <nl> + { <nl> + const auto idx = std : : stoi ( last_path ) ; <nl> + if ( static_cast < size_type > ( idx ) > parent . size ( ) ) <nl> + { <nl> + / / avoid undefined behavior <nl> + throw std : : out_of_range ( " array index " + std : : to_string ( idx ) + " is out of range " ) ; <nl> + } <nl> + else <nl> + { <nl> + / / default case : insert at offset <nl> + parent . insert ( parent . begin ( ) + static_cast < difference_type > ( idx ) , val ) ; <nl> + } <nl> + } <nl> + break ; <nl> + } <nl> + <nl> + default : <nl> + { <nl> + throw std : : domain_error ( " unexpected parent type " + parent . type_name ( ) ) ; <nl> + } <nl> } <nl> } <nl> } ; <nl> class basic_json <nl> / / remove child <nl> if ( parent . is_object ( ) ) <nl> { <nl> - parent . erase ( parent . find ( last_path ) ) ; <nl> + / / perform range check <nl> + auto it = parent . find ( last_path ) ; <nl> + if ( it ! = parent . end ( ) ) <nl> + { <nl> + parent . erase ( it ) ; <nl> + } <nl> + else <nl> + { <nl> + throw std : : out_of_range ( " key ' " + last_path + " ' not found " ) ; <nl> + } <nl> } <nl> else if ( parent . is_array ( ) ) <nl> { <nl> - parent . erase ( parent . begin ( ) + std : : stoi ( last_path ) ) ; <nl> + / / note erase performs range check <nl> + parent . erase ( static_cast < size_type > ( std : : stoi ( last_path ) ) ) ; <nl> } <nl> } ; <nl> <nl> class basic_json <nl> if ( not patch . is_array ( ) ) <nl> { <nl> / / a JSON patch must be an array of objects <nl> - throw std : : domain_error ( " JSON patch must be an array of objects " ) ; <nl> + throw std : : invalid_argument ( " JSON patch must be an array of objects " ) ; <nl> } <nl> <nl> / / iterate and apply the operations <nl> class basic_json <nl> / / check if desired value is present <nl> if ( it = = val . m_value . object - > end ( ) ) <nl> { <nl> - throw std : : domain_error ( error_msg + " must have member ' " + member + " ' " ) ; <nl> + throw std : : invalid_argument ( error_msg + " must have member ' " + member + " ' " ) ; <nl> } <nl> <nl> / / check if result is of type string <nl> if ( string_type and not it - > second . is_string ( ) ) <nl> { <nl> - throw std : : domain_error ( error_msg + " must have string member ' " + member + " ' " ) ; <nl> + throw std : : invalid_argument ( error_msg + " must have string member ' " + member + " ' " ) ; <nl> } <nl> <nl> / / no error : return value <nl> class basic_json <nl> / / type check <nl> if ( not val . 
is_object ( ) ) <nl> { <nl> - throw std : : domain_error ( " JSON patch must be an array of objects " ) ; <nl> + throw std : : invalid_argument ( " JSON patch must be an array of objects " ) ; <nl> } <nl> <nl> / / collect mandatory members <nl> class basic_json <nl> const std : : string path = get_value ( op , " path " , true ) ; <nl> json_pointer ptr ( path ) ; <nl> <nl> - if ( op = = " add " ) <nl> + switch ( get_op ( op ) ) <nl> { <nl> - operation_add ( ptr , get_value ( " add " , " value " , false ) ) ; <nl> - } <nl> - else if ( op = = " remove " ) <nl> - { <nl> - operation_remove ( ptr ) ; <nl> + case patch_operations : : add : <nl> + { <nl> + operation_add ( ptr , get_value ( " add " , " value " , false ) ) ; <nl> + break ; <nl> + } <nl> + <nl> + case patch_operations : : remove : <nl> + { <nl> + operation_remove ( ptr ) ; <nl> + break ; <nl> + } <nl> + <nl> + case patch_operations : : replace : <nl> + { <nl> + / / the " path " location must exist - use at ( ) <nl> + result . at ( ptr ) = get_value ( " replace " , " value " , false ) ; <nl> + break ; <nl> + } <nl> + <nl> + case patch_operations : : move : <nl> + { <nl> + const std : : string from_path = get_value ( " move " , " from " , true ) ; <nl> + json_pointer from_ptr ( from_path ) ; <nl> + <nl> + / / the " from " location must exist - use at ( ) <nl> + basic_json v = result . at ( from_ptr ) ; <nl> + <nl> + / / The move operation is functionally identical to a <nl> + / / " remove " operation on the " from " location , followed <nl> + / / immediately by an " add " operation at the target <nl> + / / location with the value that was just removed . <nl> + operation_remove ( from_ptr ) ; <nl> + operation_add ( ptr , v ) ; <nl> + break ; <nl> + } <nl> + <nl> + case patch_operations : : copy : <nl> + { <nl> + const std : : string from_path = get_value ( " copy " , " from " , true ) ; <nl> + const json_pointer from_ptr ( from_path ) ; <nl> + <nl> + / / the " from " location must exist - use at ( ) <nl> + result [ ptr ] = result . at ( from_ptr ) ; <nl> + break ; <nl> + } <nl> + <nl> + case patch_operations : : test : <nl> + { <nl> + bool success = false ; <nl> + try <nl> + { <nl> + / / check if " value " matches the one at " path " <nl> + / / the " path " location must exist - use at ( ) <nl> + success = ( result . at ( ptr ) = = get_value ( " test " , " value " , false ) ) ; <nl> + } <nl> + catch ( std : : out_of_range & ) <nl> + { <nl> + / / ignore out of range errors : success remains false <nl> + } <nl> + <nl> + / / throw an exception if test fails <nl> + if ( not success ) <nl> + { <nl> + throw std : : domain_error ( " unsuccessful : " + val . dump ( ) ) ; <nl> + } <nl> + <nl> + break ; <nl> + } <nl> + <nl> + case patch_operations : : invalid : <nl> + { <nl> + / / op must be " add " , " remove " , " replace " , " move " , " copy " , or <nl> + / / " test " <nl> + throw std : : invalid_argument ( " operation value ' " + op + " ' is invalid " ) ; <nl> + } <nl> } <nl> - else if ( op = = " replace " ) <nl> + } <nl> + <nl> + return result ; <nl> + } <nl> + <nl> + / * ! <nl> + @ brief creates a diff as a JSON patch <nl> + <nl> + Creates a [ JSON Patch ] ( http : / / jsonpatch . com ) so that value @ a source can <nl> + be changed into the value @ a target by calling the @ ref patch function . <nl> + <nl> + @ invariant For two JSON values @ a source and @ a target , the following code <nl> + always yields ` true ` : <nl> + @ code { . cpp } <nl> + source . 
patch ( diff ( source , target ) ) = = target ; <nl> + @ endcode <nl> + <nl> + @ note Currently , only ` remove ` , ` add ` , and ` replace ` operations are <nl> + generated . <nl> + <nl> + @ param [ in ] source JSON value to compare from <nl> + @ param [ in ] target JSON value to compare against <nl> + @ param [ in ] path helper value to create JSON pointers <nl> + <nl> + @ return a JSON patch to convert the @ a source to @ a target <nl> + <nl> + @ complexity Linear in the lengths of @ a source and @ a target . <nl> + <nl> + @ liveexample { The following code shows how a JSON patch is created as a <nl> + diff for two JSON values . , diff } <nl> + <nl> + @ sa @ ref patch - - apply a JSON patch <nl> + <nl> + @ sa [ RFC 6902 ( JSON Patch ) ] ( https : / / tools . ietf . org / html / rfc6902 ) <nl> + <nl> + @ since version 2 . 0 . 0 <nl> + * / <nl> + static basic_json diff ( const basic_json & source , <nl> + const basic_json & target , <nl> + std : : string path = " " ) noexcept <nl> + { <nl> + / / the patch <nl> + basic_json result ( value_t : : array ) ; <nl> + <nl> + / / if the values are the same , return empty patch <nl> + if ( source = = target ) <nl> + { <nl> + return result ; <nl> + } <nl> + <nl> + if ( source . type ( ) ! = target . type ( ) ) <nl> + { <nl> + / / different types : replace value <nl> + result . push_back ( <nl> { <nl> - { " op " , " replace " } , <nl> - { " path " , path } , <nl> - { " value " , target } <nl> - } ) ; <nl> - } <nl> - else if ( op = = " move " ) <nl> + { " op " , " replace " } , <nl> + { " path " , path } , <nl> + { " value " , target } <nl> + } ) ; <nl> + } <nl> + else <nl> + { <nl> + switch ( source . type ( ) ) <nl> { <nl> - const std : : string from_path = get_value ( " move " , " from " , true ) ; <nl> - json_pointer from_ptr ( from_path ) ; <nl> - basic_json v = result [ from_ptr ] ; <nl> + case value_t : : array : <nl> + { <nl> + / / first pass : traverse common elements <nl> + size_t i = 0 ; <nl> + while ( i < source . size ( ) and i < target . size ( ) ) <nl> + { <nl> + / / recursive call to compare array values at index i <nl> + auto temp_diff = diff ( source [ i ] , target [ i ] , path + " / " + std : : to_string ( i ) ) ; <nl> + result . insert ( result . end ( ) , temp_diff . begin ( ) , temp_diff . end ( ) ) ; <nl> + + + i ; <nl> + } <nl> <nl> - operation_remove ( from_ptr ) ; <nl> - operation_add ( ptr , v ) ; <nl> - } <nl> - else if ( op = = " copy " ) <nl> - { <nl> - const std : : string from_path = get_value ( " copy " , " from " , true ) ; ; <nl> - const json_pointer from_ptr ( from_path ) ; <nl> <nl> + / / i now reached the end of at least one array <nl> + / / in a second pass , traverse the remaining elements <nl> <nl> - result [ ptr ] = result . at ( from_ptr ) ; <nl> - } <nl> - else if ( op = = " test " ) <nl> - { <nl> - if ( result . at ( ptr ) ! = get_value ( " test " , " value " , false ) ) <nl> + / / remove remaining elements from source <nl> + while ( i < source . size ( ) ) <nl> + { <nl> + result . push_back ( object ( <nl> + { <nl> + { " op " , " remove " } , <nl> + { " path " , path + " / " + std : : to_string ( i ) } <nl> + } ) ) ; <nl> + + + i ; <nl> + } <nl> + <nl> + / / add remaining elements from target <nl> + while ( i < target . size ( ) ) <nl> + { <nl> + result . 
push_back ( <nl> + { <nl> + { " op " , " add " } , <nl> + { " path " , path + " / " + std : : to_string ( i ) } , <nl> + { " value " , target [ i ] } <nl> + } ) ; <nl> + + + i ; <nl> + } <nl> + <nl> + break ; <nl> + } <nl> + <nl> + case value_t : : object : <nl> { <nl> - throw std : : domain_error ( " unsuccessful : " + val . dump ( ) ) ; <nl> + / / first pass : traverse the source object ' s elements <nl> + for ( auto it = source . begin ( ) ; it ! = source . end ( ) ; + + it ) <nl> + { <nl> + / / escape the key name to be used in a JSON patch <nl> + const auto key = json_pointer : : escape ( it . key ( ) ) ; <nl> + <nl> + if ( target . find ( it . key ( ) ) ! = target . end ( ) ) <nl> + { <nl> + / / recursive call to compare object values at key it <nl> + auto temp_diff = diff ( it . value ( ) , target [ it . key ( ) ] , path + " / " + key ) ; <nl> + result . insert ( result . end ( ) , temp_diff . begin ( ) , temp_diff . end ( ) ) ; <nl> + } <nl> + else <nl> + { <nl> + / / found a key that is not in target - > remove it <nl> + result . push_back ( object ( <nl> + { <nl> + { " op " , " remove " } , <nl> + { " path " , path + " / " + key } <nl> + } ) ) ; <nl> + } <nl> + } <nl> + <nl> + / / second pass : traverse the target object ' s elements <nl> + for ( auto it = target . begin ( ) ; it ! = target . end ( ) ; + + it ) <nl> + { <nl> + if ( source . find ( it . key ( ) ) = = source . end ( ) ) <nl> + { <nl> + / / found a key that is not in source - > add it <nl> + const auto key = json_pointer : : escape ( it . key ( ) ) ; <nl> + result . push_back ( <nl> + { <nl> + { " op " , " add " } , <nl> + { " path " , path + " / " + key } , <nl> + { " value " , it . value ( ) } <nl> + } ) ; <nl> + } <nl> + } <nl> + <nl> + break ; <nl> + } <nl> + <nl> + default : <nl> + { <nl> + / / both primitive type : replace value <nl> + result . push_back ( <nl> + { <nl> + { " op " , " replace " } , <nl> + { " path " , path } , <nl> + { " value " , target } <nl> + } ) ; <nl> + break ; <nl> } <nl> - } <nl> - else <nl> - { <nl> - / / op must be " add " , " remove " , " replace " , " move " , " copy " , or <nl> - / / " test " <nl> - throw std : : domain_error ( " operation value ' " + op + " ' is invalid " ) ; <nl> } <nl> } <nl> <nl> return result ; <nl> } <nl> + <nl> + / / / @ } <nl> } ; <nl> <nl> <nl> using json = basic_json < > ; <nl> } <nl> <nl> <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - / / nonmember functions / / <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / <nl> + / / / / / / / / / / / / / / / / / / / / / / / <nl> + / / nonmember support / / <nl> + / / / / / / / / / / / / / / / / / / / / / / / <nl> <nl> / / specialization of std : : swap , and std : : hash <nl> namespace std <nl> mmm a / test / unit . cpp <nl> ppp b / test / unit . cpp <nl> TEST_CASE ( " element access " ) <nl> { <nl> json jarray = { 1 , 1u , true , nullptr , " string " , 42 . 23 , json : : object ( ) , { 1 , 2 , 3 } } ; <nl> CHECK_THROWS_AS ( jarray . erase ( 8 ) , std : : out_of_range ) ; <nl> - CHECK_THROWS_WITH ( jarray . erase ( 8 ) , " index out of range " ) ; <nl> + CHECK_THROWS_WITH ( jarray . erase ( 8 ) , " array index 8 is out of range " ) ; <nl> } <nl> } <nl> <nl> TEST_CASE ( " JSON patch " ) <nl> { <nl> SECTION ( " examples from RFC 6902 " ) <nl> { <nl> - SECTION ( " example A . 1 - Adding an Object Member " ) <nl> + SECTION ( " 4 . 
Operations " ) <nl> + { <nl> + / / the ordering of members in JSON objects is not significant : <nl> + json op1 = R " ( { " op " : " add " , " path " : " / a / b / c " , " value " : " foo " } ) " _json ; <nl> + json op2 = R " ( { " path " : " / a / b / c " , " op " : " add " , " value " : " foo " } ) " _json ; <nl> + json op3 = R " ( { " value " : " foo " , " path " : " / a / b / c " , " op " : " add " } ) " _json ; <nl> + <nl> + / / check if the operation objects are equivalent <nl> + CHECK ( op1 = = op2 ) ; <nl> + CHECK ( op1 = = op3 ) ; <nl> + } <nl> + <nl> + SECTION ( " 4 . 1 add " ) <nl> + { <nl> + json patch = R " ( [ { " op " : " add " , " path " : " / a / b / c " , " value " : [ " foo " , " bar " ] } ] ) " _json ; <nl> + <nl> + / / However , the object itself or an array containing it does need <nl> + / / to exist , and it remains an error for that not to be the case . <nl> + / / For example , an " add " with a target location of " / a / b " starting <nl> + / / with this document <nl> + json doc1 = R " ( { " a " : { " foo " : 1 } } ) " _json ; <nl> + <nl> + / / is not an error , because " a " exists , and " b " will be added to <nl> + / / its value . <nl> + CHECK_NOTHROW ( doc1 . patch ( patch ) ) ; <nl> + CHECK ( doc1 . patch ( patch ) = = R " ( <nl> + { <nl> + " a " : { <nl> + " foo " : 1 , <nl> + " b " : { <nl> + " c " : [ " foo " , " bar " ] <nl> + } <nl> + } <nl> + } <nl> + ) " _json ) ; <nl> + <nl> + / / It is an error in this document : <nl> + json doc2 = R " ( { " q " : { " bar " : 2 } } ) " _json ; <nl> + <nl> + / / because " a " does not exist . <nl> + CHECK_THROWS_AS ( doc2 . patch ( patch ) , std : : out_of_range ) ; <nl> + CHECK_THROWS_WITH ( doc2 . patch ( patch ) , " key ' a ' not found " ) ; <nl> + } <nl> + <nl> + SECTION ( " 4 . 2 remove " ) <nl> + { <nl> + / / If removing an element from an array , any elements above the <nl> + / / specified index are shifted one position to the left . <nl> + json doc = { 1 , 2 , 3 , 4 } ; <nl> + json patch = { { { " op " , " remove " } , { " path " , " / 1 " } } } ; <nl> + CHECK ( doc . patch ( patch ) = = json ( { 1 , 3 , 4 } ) ) ; <nl> + } <nl> + <nl> + SECTION ( " A . 1 . Adding an Object Member " ) <nl> { <nl> / / An example target JSON document : <nl> json doc = R " ( <nl> - { " foo " : " bar " } <nl> - ) " _json ; <nl> + { " foo " : " bar " } <nl> + ) " _json ; <nl> <nl> / / A JSON Patch document : <nl> json patch = R " ( <nl> - [ <nl> - { " op " : " add " , " path " : " / baz " , " value " : " qux " } <nl> - ] <nl> - ) " _json ; <nl> + [ <nl> + { " op " : " add " , " path " : " / baz " , " value " : " qux " } <nl> + ] <nl> + ) " _json ; <nl> <nl> / / The resulting JSON document : <nl> json expected = R " ( <nl> - { <nl> - " baz " : " qux " , <nl> - " foo " : " bar " <nl> - } <nl> - ) " _json ; <nl> + { <nl> + " baz " : " qux " , <nl> + " foo " : " bar " <nl> + } <nl> + ) " _json ; <nl> <nl> / / check if patched value is as expected <nl> - CHECK ( doc . apply_patch ( patch ) = = expected ) ; <nl> + CHECK ( doc . patch ( patch ) = = expected ) ; <nl> + <nl> + / / check roundtrip <nl> + CHECK ( doc . patch ( json : : diff ( doc , expected ) ) = = expected ) ; <nl> } <nl> <nl> - SECTION ( " example A . 2 - Adding an Array Element " ) <nl> + SECTION ( " A . 2 . 
Adding an Array Element " ) <nl> { <nl> / / An example target JSON document : <nl> json doc = R " ( <nl> - { " foo " : [ " bar " , " baz " ] } <nl> - ) " _json ; <nl> + { " foo " : [ " bar " , " baz " ] } <nl> + ) " _json ; <nl> <nl> / / A JSON Patch document : <nl> json patch = R " ( <nl> - [ <nl> - { " op " : " add " , " path " : " / foo / 1 " , " value " : " qux " } <nl> - ] <nl> - ) " _json ; <nl> + [ <nl> + { " op " : " add " , " path " : " / foo / 1 " , " value " : " qux " } <nl> + ] <nl> + ) " _json ; <nl> <nl> / / The resulting JSON document : <nl> json expected = R " ( <nl> - { " foo " : [ " bar " , " qux " , " baz " ] } <nl> - ) " _json ; <nl> + { " foo " : [ " bar " , " qux " , " baz " ] } <nl> + ) " _json ; <nl> <nl> / / check if patched value is as expected <nl> - CHECK ( doc . apply_patch ( patch ) = = expected ) ; <nl> + CHECK ( doc . patch ( patch ) = = expected ) ; <nl> + <nl> + / / check roundtrip <nl> + CHECK ( doc . patch ( json : : diff ( doc , expected ) ) = = expected ) ; <nl> } <nl> <nl> - SECTION ( " example A . 3 - Removing an Object Member " ) <nl> + SECTION ( " A . 3 . Removing an Object Member " ) <nl> { <nl> / / An example target JSON document : <nl> json doc = R " ( <nl> - { <nl> - " baz " : " qux " , <nl> - " foo " : " bar " <nl> - } <nl> - ) " _json ; <nl> + { <nl> + " baz " : " qux " , <nl> + " foo " : " bar " <nl> + } <nl> + ) " _json ; <nl> <nl> / / A JSON Patch document : <nl> json patch = R " ( <nl> - [ <nl> - { " op " : " remove " , " path " : " / baz " } <nl> - ] <nl> - ) " _json ; <nl> + [ <nl> + { " op " : " remove " , " path " : " / baz " } <nl> + ] <nl> + ) " _json ; <nl> <nl> / / The resulting JSON document : <nl> json expected = R " ( <nl> - { " foo " : " bar " } <nl> - ) " _json ; <nl> + { " foo " : " bar " } <nl> + ) " _json ; <nl> <nl> / / check if patched value is as expected <nl> - CHECK ( doc . apply_patch ( patch ) = = expected ) ; <nl> + CHECK ( doc . patch ( patch ) = = expected ) ; <nl> + <nl> + / / check roundtrip <nl> + CHECK ( doc . patch ( json : : diff ( doc , expected ) ) = = expected ) ; <nl> } <nl> <nl> - SECTION ( " example A . 4 - Removing an Array Element " ) <nl> + SECTION ( " A . 4 . Removing an Array Element " ) <nl> { <nl> / / An example target JSON document : <nl> json doc = R " ( <nl> - { " foo " : [ " bar " , " qux " , " baz " ] } <nl> - ) " _json ; <nl> + { " foo " : [ " bar " , " qux " , " baz " ] } <nl> + ) " _json ; <nl> <nl> / / A JSON Patch document : <nl> json patch = R " ( <nl> - [ <nl> - { " op " : " remove " , " path " : " / foo / 1 " } <nl> - ] <nl> - ) " _json ; <nl> + [ <nl> + { " op " : " remove " , " path " : " / foo / 1 " } <nl> + ] <nl> + ) " _json ; <nl> <nl> / / The resulting JSON document : <nl> json expected = R " ( <nl> - { " foo " : [ " bar " , " baz " ] } <nl> - ) " _json ; <nl> + { " foo " : [ " bar " , " baz " ] } <nl> + ) " _json ; <nl> <nl> / / check if patched value is as expected <nl> - CHECK ( doc . apply_patch ( patch ) = = expected ) ; <nl> + CHECK ( doc . patch ( patch ) = = expected ) ; <nl> + <nl> + / / check roundtrip <nl> + CHECK ( doc . patch ( json : : diff ( doc , expected ) ) = = expected ) ; <nl> } <nl> <nl> - SECTION ( " example A . 5 - Replacing a Value " ) <nl> + SECTION ( " A . 5 . 
Replacing a Value " ) <nl> { <nl> / / An example target JSON document : <nl> json doc = R " ( <nl> - { <nl> - " baz " : " qux " , <nl> - " foo " : " bar " <nl> - } <nl> - ) " _json ; <nl> + { <nl> + " baz " : " qux " , <nl> + " foo " : " bar " <nl> + } <nl> + ) " _json ; <nl> <nl> / / A JSON Patch document : <nl> json patch = R " ( <nl> - [ <nl> - { " op " : " replace " , " path " : " / baz " , " value " : " boo " } <nl> - ] <nl> - ) " _json ; <nl> + [ <nl> + { " op " : " replace " , " path " : " / baz " , " value " : " boo " } <nl> + ] <nl> + ) " _json ; <nl> <nl> json expected = R " ( <nl> - { <nl> - " baz " : " boo " , <nl> - " foo " : " bar " <nl> - } <nl> - ) " _json ; <nl> + { <nl> + " baz " : " boo " , <nl> + " foo " : " bar " <nl> + } <nl> + ) " _json ; <nl> <nl> / / check if patched value is as expected <nl> - CHECK ( doc . apply_patch ( patch ) = = expected ) ; <nl> + CHECK ( doc . patch ( patch ) = = expected ) ; <nl> + <nl> + / / check roundtrip <nl> + CHECK ( doc . patch ( json : : diff ( doc , expected ) ) = = expected ) ; <nl> } <nl> <nl> - SECTION ( " example A . 6 - Moving a Value " ) <nl> + SECTION ( " A . 6 . Moving a Value " ) <nl> { <nl> / / An example target JSON document : <nl> json doc = R " ( <nl> - { <nl> - " foo " : { <nl> - " bar " : " baz " , <nl> - " waldo " : " fred " <nl> - } , <nl> - " qux " : { <nl> - " corge " : " grault " <nl> + { <nl> + " foo " : { <nl> + " bar " : " baz " , <nl> + " waldo " : " fred " <nl> + } , <nl> + " qux " : { <nl> + " corge " : " grault " <nl> + } <nl> } <nl> - } <nl> - ) " _json ; <nl> + ) " _json ; <nl> <nl> / / A JSON Patch document : <nl> json patch = R " ( <nl> - [ <nl> - { " op " : " move " , " from " : " / foo / waldo " , " path " : " / qux / thud " } <nl> - ] <nl> - ) " _json ; <nl> + [ <nl> + { " op " : " move " , " from " : " / foo / waldo " , " path " : " / qux / thud " } <nl> + ] <nl> + ) " _json ; <nl> <nl> / / The resulting JSON document : <nl> json expected = R " ( <nl> - { <nl> - " foo " : { <nl> - " bar " : " baz " <nl> - } , <nl> - " qux " : { <nl> - " corge " : " grault " , <nl> - " thud " : " fred " <nl> + { <nl> + " foo " : { <nl> + " bar " : " baz " <nl> + } , <nl> + " qux " : { <nl> + " corge " : " grault " , <nl> + " thud " : " fred " <nl> + } <nl> } <nl> - } <nl> - ) " _json ; <nl> + ) " _json ; <nl> <nl> / / check if patched value is as expected <nl> - CHECK ( doc . apply_patch ( patch ) = = expected ) ; <nl> + CHECK ( doc . patch ( patch ) = = expected ) ; <nl> + <nl> + / / check roundtrip <nl> + CHECK ( doc . patch ( json : : diff ( doc , expected ) ) = = expected ) ; <nl> } <nl> <nl> - SECTION ( " example A . 7 - Moving a Value " ) <nl> + SECTION ( " A . 7 . 
Moving a Value " ) <nl> { <nl> / / An example target JSON document : <nl> json doc = R " ( <nl> - { " foo " : [ " all " , " grass " , " cows " , " eat " ] } <nl> - ) " _json ; <nl> + { " foo " : [ " all " , " grass " , " cows " , " eat " ] } <nl> + ) " _json ; <nl> <nl> / / A JSON Patch document : <nl> json patch = R " ( <nl> - [ <nl> - { " op " : " move " , " from " : " / foo / 1 " , " path " : " / foo / 3 " } <nl> - ] <nl> - ) " _json ; <nl> + [ <nl> + { " op " : " move " , " from " : " / foo / 1 " , " path " : " / foo / 3 " } <nl> + ] <nl> + ) " _json ; <nl> <nl> / / The resulting JSON document : <nl> json expected = R " ( <nl> - { " foo " : [ " all " , " cows " , " eat " , " grass " ] } <nl> - ) " _json ; <nl> + { " foo " : [ " all " , " cows " , " eat " , " grass " ] } <nl> + ) " _json ; <nl> <nl> / / check if patched value is as expected <nl> - CHECK ( doc . apply_patch ( patch ) = = expected ) ; <nl> + CHECK ( doc . patch ( patch ) = = expected ) ; <nl> + <nl> + / / check roundtrip <nl> + CHECK ( doc . patch ( json : : diff ( doc , expected ) ) = = expected ) ; <nl> } <nl> <nl> - SECTION ( " example A . 8 - Testing a Value : Success " ) <nl> + SECTION ( " A . 8 . Testing a Value : Success " ) <nl> { <nl> / / An example target JSON document : <nl> json doc = R " ( <nl> - { <nl> - " baz " : " qux " , <nl> - " foo " : [ " a " , 2 , " c " ] <nl> - } <nl> - ) " _json ; <nl> + { <nl> + " baz " : " qux " , <nl> + " foo " : [ " a " , 2 , " c " ] <nl> + } <nl> + ) " _json ; <nl> <nl> / / A JSON Patch document that will result in successful evaluation : <nl> json patch = R " ( <nl> - [ <nl> - { " op " : " test " , " path " : " / baz " , " value " : " qux " } , <nl> - { " op " : " test " , " path " : " / foo / 1 " , " value " : 2 } <nl> - ] <nl> - ) " _json ; <nl> + [ <nl> + { " op " : " test " , " path " : " / baz " , " value " : " qux " } , <nl> + { " op " : " test " , " path " : " / foo / 1 " , " value " : 2 } <nl> + ] <nl> + ) " _json ; <nl> <nl> / / check if evaluation does not throw <nl> - CHECK_NOTHROW ( doc . apply_patch ( patch ) ) ; <nl> + CHECK_NOTHROW ( doc . patch ( patch ) ) ; <nl> / / check if patched document is unchanged <nl> - CHECK ( doc . apply_patch ( patch ) = = doc ) ; <nl> + CHECK ( doc . patch ( patch ) = = doc ) ; <nl> } <nl> <nl> - SECTION ( " example A . 9 - Testing a Value : Error " ) <nl> + SECTION ( " A . 9 . Testing a Value : Error " ) <nl> { <nl> / / An example target JSON document : <nl> json doc = R " ( <nl> - { " baz " : " qux " } <nl> - ) " _json ; <nl> + { " baz " : " qux " } <nl> + ) " _json ; <nl> <nl> / / A JSON Patch document that will result in an error condition : <nl> json patch = R " ( <nl> - [ <nl> - { " op " : " test " , " path " : " / baz " , " value " : " bar " } <nl> - ] <nl> - ) " _json ; <nl> + [ <nl> + { " op " : " test " , " path " : " / baz " , " value " : " bar " } <nl> + ] <nl> + ) " _json ; <nl> <nl> / / check that evaluation throws <nl> - CHECK_THROWS_AS ( doc . apply_patch ( patch ) , std : : domain_error ) ; <nl> - CHECK_THROWS_WITH ( doc . apply_patch ( patch ) , " unsuccessful : " + patch [ 0 ] . dump ( ) ) ; <nl> + CHECK_THROWS_AS ( doc . patch ( patch ) , std : : domain_error ) ; <nl> + CHECK_THROWS_WITH ( doc . patch ( patch ) , " unsuccessful : " + patch [ 0 ] . dump ( ) ) ; <nl> } <nl> <nl> - SECTION ( " example A . 10 - Adding a Nested Member Object " ) <nl> + SECTION ( " A . 10 . 
Adding a Nested Member Object " ) <nl> { <nl> / / An example target JSON document : <nl> json doc = R " ( <nl> - { " foo " : " bar " } <nl> - ) " _json ; <nl> + { " foo " : " bar " } <nl> + ) " _json ; <nl> <nl> / / A JSON Patch document : <nl> json patch = R " ( <nl> - [ <nl> - { " op " : " add " , " path " : " / child " , " value " : { " grandchild " : { } } } <nl> - ] <nl> - ) " _json ; <nl> + [ <nl> + { " op " : " add " , " path " : " / child " , " value " : { " grandchild " : { } } } <nl> + ] <nl> + ) " _json ; <nl> <nl> / / The resulting JSON document : <nl> json expected = R " ( <nl> - { <nl> - " foo " : " bar " , <nl> - " child " : { <nl> - " grandchild " : { <nl> + { <nl> + " foo " : " bar " , <nl> + " child " : { <nl> + " grandchild " : { <nl> + } <nl> } <nl> } <nl> - } <nl> - ) " _json ; <nl> + ) " _json ; <nl> <nl> / / check if patched value is as expected <nl> - CHECK ( doc . apply_patch ( patch ) = = expected ) ; <nl> + CHECK ( doc . patch ( patch ) = = expected ) ; <nl> + <nl> + / / check roundtrip <nl> + CHECK ( doc . patch ( json : : diff ( doc , expected ) ) = = expected ) ; <nl> } <nl> <nl> - SECTION ( " example A . 11 - Ignoring Unrecognized Elements " ) <nl> + SECTION ( " A . 11 . Ignoring Unrecognized Elements " ) <nl> { <nl> / / An example target JSON document : <nl> json doc = R " ( <nl> - { " foo " : " bar " } <nl> - ) " _json ; <nl> + { " foo " : " bar " } <nl> + ) " _json ; <nl> <nl> / / A JSON Patch document : <nl> json patch = R " ( <nl> - [ <nl> - { " op " : " add " , " path " : " / baz " , " value " : " qux " , " xyz " : 123 } <nl> - ] <nl> - ) " _json ; <nl> + [ <nl> + { " op " : " add " , " path " : " / baz " , " value " : " qux " , " xyz " : 123 } <nl> + ] <nl> + ) " _json ; <nl> <nl> json expected = R " ( <nl> - { <nl> - " foo " : " bar " , <nl> - " baz " : " qux " <nl> - } <nl> - ) " _json ; <nl> + { <nl> + " foo " : " bar " , <nl> + " baz " : " qux " <nl> + } <nl> + ) " _json ; <nl> <nl> / / check if patched value is as expected <nl> - CHECK ( doc . apply_patch ( patch ) = = expected ) ; <nl> + CHECK ( doc . patch ( patch ) = = expected ) ; <nl> + <nl> + / / check roundtrip <nl> + CHECK ( doc . patch ( json : : diff ( doc , expected ) ) = = expected ) ; <nl> } <nl> <nl> - SECTION ( " example A . 12 - Adding to a Nonexistent Target " ) <nl> + SECTION ( " A . 12 . Adding to a Nonexistent Target " ) <nl> { <nl> / / An example target JSON document : <nl> json doc = R " ( <nl> - { " foo " : " bar " } <nl> - ) " _json ; <nl> + { " foo " : " bar " } <nl> + ) " _json ; <nl> <nl> / / A JSON Patch document : <nl> json patch = R " ( <nl> - [ <nl> - { " op " : " add " , " path " : " / baz / bat " , " value " : " qux " } <nl> - ] <nl> - ) " _json ; <nl> + [ <nl> + { " op " : " add " , " path " : " / baz / bat " , " value " : " qux " } <nl> + ] <nl> + ) " _json ; <nl> <nl> / / This JSON Patch document , applied to the target JSON document <nl> / / above , would result in an error ( therefore , it would not be <nl> TEST_CASE ( " JSON patch " ) <nl> / / references neither the root of the document , nor a member of <nl> / / an existing object , nor a member of an existing array . <nl> <nl> - CHECK_THROWS_AS ( doc . apply_patch ( patch ) , std : : out_of_range ) ; <nl> - CHECK_THROWS_WITH ( doc . apply_patch ( patch ) , " key ' baz ' not found " ) ; <nl> + CHECK_THROWS_AS ( doc . patch ( patch ) , std : : out_of_range ) ; <nl> + CHECK_THROWS_WITH ( doc . patch ( patch ) , " key ' baz ' not found " ) ; <nl> } <nl> <nl> - / / A . 13 . 
Invalid JSON Patch Document <nl> + / / A . 13 . Invalid JSON Patch Document <nl> / / not applicable <nl> <nl> - SECTION ( " example A . 14 - Escape Ordering " ) <nl> + SECTION ( " A . 14 . Escape Ordering " ) <nl> { <nl> / / An example target JSON document : <nl> json doc = R " ( <nl> - { <nl> - " / " : 9 , <nl> - " ~ 1 " : 10 <nl> - } <nl> - ) " _json ; <nl> + { <nl> + " / " : 9 , <nl> + " ~ 1 " : 10 <nl> + } <nl> + ) " _json ; <nl> <nl> / / A JSON Patch document : <nl> json patch = R " ( <nl> - [ <nl> - { " op " : " test " , " path " : " / ~ 01 " , " value " : 10 } <nl> - ] <nl> - ) " _json ; <nl> + [ <nl> + { " op " : " test " , " path " : " / ~ 01 " , " value " : 10 } <nl> + ] <nl> + ) " _json ; <nl> <nl> json expected = R " ( <nl> - { <nl> - " / " : 9 , <nl> - " ~ 1 " : 10 <nl> - } <nl> - ) " _json ; <nl> + { <nl> + " / " : 9 , <nl> + " ~ 1 " : 10 <nl> + } <nl> + ) " _json ; <nl> <nl> / / check if patched value is as expected <nl> - CHECK ( doc . apply_patch ( patch ) = = expected ) ; <nl> + CHECK ( doc . patch ( patch ) = = expected ) ; <nl> + <nl> + / / check roundtrip <nl> + CHECK ( doc . patch ( json : : diff ( doc , expected ) ) = = expected ) ; <nl> } <nl> <nl> - SECTION ( " example A . 15 - Comparing Strings and Numbers " ) <nl> + SECTION ( " A . 15 . Comparing Strings and Numbers " ) <nl> { <nl> / / An example target JSON document : <nl> json doc = R " ( <nl> - { <nl> - " / " : 9 , <nl> - " ~ 1 " : 10 <nl> - } <nl> - ) " _json ; <nl> + { <nl> + " / " : 9 , <nl> + " ~ 1 " : 10 <nl> + } <nl> + ) " _json ; <nl> <nl> / / A JSON Patch document that will result in an error condition : <nl> json patch = R " ( <nl> - [ <nl> - { " op " : " test " , " path " : " / ~ 01 " , " value " : " 10 " } <nl> - ] <nl> - ) " _json ; <nl> + [ <nl> + { " op " : " test " , " path " : " / ~ 01 " , " value " : " 10 " } <nl> + ] <nl> + ) " _json ; <nl> <nl> / / check that evaluation throws <nl> - CHECK_THROWS_AS ( doc . apply_patch ( patch ) , std : : domain_error ) ; <nl> - CHECK_THROWS_WITH ( doc . apply_patch ( patch ) , " unsuccessful : " + patch [ 0 ] . dump ( ) ) ; <nl> + CHECK_THROWS_AS ( doc . patch ( patch ) , std : : domain_error ) ; <nl> + CHECK_THROWS_WITH ( doc . patch ( patch ) , " unsuccessful : " + patch [ 0 ] . dump ( ) ) ; <nl> } <nl> <nl> - SECTION ( " example A . 16 - Adding an Array Value " ) <nl> + SECTION ( " A . 16 . Adding an Array Value " ) <nl> { <nl> / / An example target JSON document : <nl> json doc = R " ( <nl> - { " foo " : [ " bar " ] } <nl> - ) " _json ; <nl> + { " foo " : [ " bar " ] } <nl> + ) " _json ; <nl> <nl> / / A JSON Patch document : <nl> json patch = R " ( <nl> - [ <nl> - { " op " : " add " , " path " : " / foo / - " , " value " : [ " abc " , " def " ] } <nl> - ] <nl> - ) " _json ; <nl> + [ <nl> + { " op " : " add " , " path " : " / foo / - " , " value " : [ " abc " , " def " ] } <nl> + ] <nl> + ) " _json ; <nl> <nl> / / The resulting JSON document : <nl> json expected = R " ( <nl> - { " foo " : [ " bar " , [ " abc " , " def " ] ] } <nl> - ) " _json ; <nl> + { " foo " : [ " bar " , [ " abc " , " def " ] ] } <nl> + ) " _json ; <nl> <nl> / / check if patched value is as expected <nl> - CHECK ( doc . apply_patch ( patch ) = = expected ) ; <nl> + CHECK ( doc . patch ( patch ) = = expected ) ; <nl> + <nl> + / / check roundtrip <nl> + CHECK ( doc . 
patch ( json : : diff ( doc , expected ) ) = = expected ) ; <nl> } <nl> } <nl> <nl> SECTION ( " own examples " ) <nl> { <nl> + SECTION ( " add " ) <nl> + { <nl> + SECTION ( " add to the root element " ) <nl> + { <nl> + / / If the path is the root of the target document - the <nl> + / / specified value becomes the entire content of the target <nl> + / / document . <nl> + <nl> + / / An example target JSON document : <nl> + json doc = 17 ; <nl> + <nl> + / / A JSON Patch document : <nl> + json patch = R " ( <nl> + [ <nl> + { " op " : " add " , " path " : " " , " value " : [ 1 , 2 , 3 ] } <nl> + ] <nl> + ) " _json ; <nl> + <nl> + / / The resulting JSON document : <nl> + json expected = { 1 , 2 , 3 } ; <nl> + <nl> + / / check if patched value is as expected <nl> + CHECK ( doc . patch ( patch ) = = expected ) ; <nl> + <nl> + / / check roundtrip <nl> + CHECK ( doc . patch ( json : : diff ( doc , expected ) ) = = expected ) ; <nl> + } <nl> + <nl> + SECTION ( " add to end of the array " ) <nl> + { <nl> + / / The specified index MUST NOT be greater than the number of <nl> + / / elements in the array . The example below uses an index of <nl> + / / exactly the number of elements in the array , which is legal . <nl> + <nl> + / / An example target JSON document : <nl> + json doc = { 0 , 1 , 2 } ; <nl> + <nl> + / / A JSON Patch document : <nl> + json patch = R " ( <nl> + [ <nl> + { " op " : " add " , " path " : " / 3 " , " value " : 3 } <nl> + ] <nl> + ) " _json ; <nl> + <nl> + / / The resulting JSON document : <nl> + json expected = { 0 , 1 , 2 , 3 } ; <nl> + <nl> + / / check if patched value is as expected <nl> + CHECK ( doc . patch ( patch ) = = expected ) ; <nl> + <nl> + / / check roundtrip <nl> + CHECK ( doc . patch ( json : : diff ( doc , expected ) ) = = expected ) ; <nl> + } <nl> + } <nl> + <nl> SECTION ( " copy " ) <nl> { <nl> / / An example target JSON document : <nl> TEST_CASE ( " JSON patch " ) <nl> ] <nl> ) " _json ; <nl> <nl> + / / The resulting JSON document : <nl> json expected = R " ( <nl> { <nl> " foo " : { <nl> TEST_CASE ( " JSON patch " ) <nl> ) " _json ; <nl> <nl> / / check if patched value is as expected <nl> - CHECK ( doc . apply_patch ( patch ) = = expected ) ; <nl> + CHECK ( doc . patch ( patch ) = = expected ) ; <nl> + <nl> + / / check roundtrip <nl> + CHECK ( doc . patch ( json : : diff ( doc , expected ) ) = = expected ) ; <nl> + } <nl> + <nl> + SECTION ( " replace " ) <nl> + { <nl> + json j = " string " ; <nl> + json patch = { { { " op " , " replace " } , { " path " , " " } , { " value " , 1 } } } ; <nl> + CHECK ( j . patch ( patch ) = = json ( 1 ) ) ; <nl> + } <nl> + <nl> + SECTION ( " documentation GIF " ) <nl> + { <nl> + { <nl> + / / a JSON patch <nl> + json p1 = R " ( <nl> + [ { " op " : " add " , " path " : " / GB " , " value " : " London " } ] <nl> + ) " _json ; <nl> + <nl> + / / a JSON value <nl> + json source = R " ( <nl> + { " D " : " Berlin " , " F " : " Paris " } <nl> + ) " _json ; <nl> + <nl> + / / apply the patch <nl> + json target = source . 
patch ( p1 ) ; <nl> + / / target = { " D " : " Berlin " , " F " : " Paris " , " GB " : " London " } <nl> + CHECK ( target = = R " ( { " D " : " Berlin " , " F " : " Paris " , " GB " : " London " } ) " _json ) ; <nl> + <nl> + / / create a diff from two JSONs <nl> + json p2 = json : : diff ( target , source ) ; <nl> + / / p2 = [ { " op " : " remove " , " path " : " / GB " } ] <nl> + CHECK ( p2 = = R " ( [ { " op " : " remove " , " path " : " / GB " } ] ) " _json ) ; <nl> + } <nl> + { <nl> + / / a JSON value <nl> + json j = { " good " , " bad " , " ugly " } ; <nl> + <nl> + / / a JSON pointer <nl> + auto ptr = json : : json_pointer ( " / 2 " ) ; <nl> + <nl> + / / use to access elements <nl> + j [ ptr ] = { { " it " , " cattivo " } } ; <nl> + CHECK ( j = = R " ( [ " good " , " bad " , { " it " : " cattivo " } ] ) " _json ) ; <nl> + <nl> + / / use user - defined string literal <nl> + j [ " / 2 / en " _json_pointer ] = " ugly " ; <nl> + CHECK ( j = = R " ( [ " good " , " bad " , { " en " : " ugly " , " it " : " cattivo " } ] ) " _json ) ; <nl> + <nl> + json flat = j . flatten ( ) ; <nl> + CHECK ( flat = = R " ( { " / 0 " : " good " , " / 1 " : " bad " , " / 2 / en " : " ugly " , " / 2 / it " : " cattivo " } ) " _json ) ; <nl> + } <nl> } <nl> } <nl> <nl> TEST_CASE ( " JSON patch " ) <nl> { <nl> SECTION ( " unknown operation " ) <nl> { <nl> + SECTION ( " not an array " ) <nl> + { <nl> + json j ; <nl> + json patch = { { " op " , " add " } , { " path " , " " } , { " value " , 1 } } ; <nl> + CHECK_THROWS_AS ( j . patch ( patch ) , std : : invalid_argument ) ; <nl> + CHECK_THROWS_WITH ( j . patch ( patch ) , " JSON patch must be an array of objects " ) ; <nl> + } <nl> + <nl> + SECTION ( " not an array of objects " ) <nl> + { <nl> + json j ; <nl> + json patch = { " op " , " add " , " path " , " " , " value " , 1 } ; <nl> + CHECK_THROWS_AS ( j . patch ( patch ) , std : : invalid_argument ) ; <nl> + CHECK_THROWS_WITH ( j . patch ( patch ) , " JSON patch must be an array of objects " ) ; <nl> + } <nl> + <nl> SECTION ( " missing ' op ' " ) <nl> { <nl> json j ; <nl> json patch = { { { " foo " , " bar " } } } ; <nl> - CHECK_THROWS_AS ( j . apply_patch ( patch ) , std : : domain_error ) ; <nl> - CHECK_THROWS_WITH ( j . apply_patch ( patch ) , " operation must have member ' op ' " ) ; <nl> + CHECK_THROWS_AS ( j . patch ( patch ) , std : : invalid_argument ) ; <nl> + CHECK_THROWS_WITH ( j . patch ( patch ) , " operation must have member ' op ' " ) ; <nl> } <nl> <nl> SECTION ( " non - string ' op ' " ) <nl> { <nl> json j ; <nl> json patch = { { { " op " , 1 } } } ; <nl> - CHECK_THROWS_AS ( j . apply_patch ( patch ) , std : : domain_error ) ; <nl> - CHECK_THROWS_WITH ( j . apply_patch ( patch ) , " operation must have string member ' op ' " ) ; <nl> + CHECK_THROWS_AS ( j . patch ( patch ) , std : : invalid_argument ) ; <nl> + CHECK_THROWS_WITH ( j . patch ( patch ) , " operation must have string member ' op ' " ) ; <nl> + } <nl> + <nl> + SECTION ( " invalid operation " ) <nl> + { <nl> + json j ; <nl> + json patch = { { { " op " , " foo " } , { " path " , " " } } } ; <nl> + CHECK_THROWS_AS ( j . patch ( patch ) , std : : invalid_argument ) ; <nl> + CHECK_THROWS_WITH ( j . patch ( patch ) , " operation value ' foo ' is invalid " ) ; <nl> + } <nl> } <nl> <nl> TEST_CASE ( " JSON patch " ) <nl> { <nl> json j ; <nl> json patch = { { { " op " , " add " } } } ; <nl> - CHECK_THROWS_AS ( j . apply_patch ( patch ) , std : : domain_error ) ; <nl> - CHECK_THROWS_WITH ( j . 
apply_patch ( patch ) , " operation ' add ' must have member ' path ' " ) ; <nl> + CHECK_THROWS_AS ( j . patch ( patch ) , std : : invalid_argument ) ; <nl> + CHECK_THROWS_WITH ( j . patch ( patch ) , " operation ' add ' must have member ' path ' " ) ; <nl> } <nl> <nl> SECTION ( " non - string ' path ' " ) <nl> { <nl> json j ; <nl> json patch = { { { " op " , " add " } , { " path " , 1 } } } ; <nl> - CHECK_THROWS_AS ( j . apply_patch ( patch ) , std : : domain_error ) ; <nl> - CHECK_THROWS_WITH ( j . apply_patch ( patch ) , " operation ' add ' must have string member ' path ' " ) ; <nl> + CHECK_THROWS_AS ( j . patch ( patch ) , std : : invalid_argument ) ; <nl> + CHECK_THROWS_WITH ( j . patch ( patch ) , " operation ' add ' must have string member ' path ' " ) ; <nl> } <nl> <nl> SECTION ( " missing ' value ' " ) <nl> { <nl> json j ; <nl> json patch = { { { " op " , " add " } , { " path " , " " } } } ; <nl> - CHECK_THROWS_AS ( j . apply_patch ( patch ) , std : : domain_error ) ; <nl> - CHECK_THROWS_WITH ( j . apply_patch ( patch ) , " operation ' add ' must have member ' value ' " ) ; <nl> + CHECK_THROWS_AS ( j . patch ( patch ) , std : : invalid_argument ) ; <nl> + CHECK_THROWS_WITH ( j . patch ( patch ) , " operation ' add ' must have member ' value ' " ) ; <nl> + } <nl> + <nl> + SECTION ( " invalid array index " ) <nl> + { <nl> + json j = { 1 , 2 } ; <nl> + json patch = { { { " op " , " add " } , { " path " , " / 4 " } , { " value " , 4 } } } ; <nl> + CHECK_THROWS_AS ( j . patch ( patch ) , std : : out_of_range ) ; <nl> + CHECK_THROWS_WITH ( j . patch ( patch ) , " array index 4 is out of range " ) ; <nl> } <nl> } <nl> <nl> TEST_CASE ( " JSON patch " ) <nl> { <nl> json j ; <nl> json patch = { { { " op " , " remove " } } } ; <nl> - CHECK_THROWS_AS ( j . apply_patch ( patch ) , std : : domain_error ) ; <nl> - CHECK_THROWS_WITH ( j . apply_patch ( patch ) , " operation ' remove ' must have member ' path ' " ) ; <nl> + CHECK_THROWS_AS ( j . patch ( patch ) , std : : invalid_argument ) ; <nl> + CHECK_THROWS_WITH ( j . patch ( patch ) , " operation ' remove ' must have member ' path ' " ) ; <nl> } <nl> <nl> SECTION ( " non - string ' path ' " ) <nl> { <nl> json j ; <nl> json patch = { { { " op " , " remove " } , { " path " , 1 } } } ; <nl> - CHECK_THROWS_AS ( j . apply_patch ( patch ) , std : : domain_error ) ; <nl> - CHECK_THROWS_WITH ( j . apply_patch ( patch ) , " operation ' remove ' must have string member ' path ' " ) ; <nl> + CHECK_THROWS_AS ( j . patch ( patch ) , std : : invalid_argument ) ; <nl> + CHECK_THROWS_WITH ( j . patch ( patch ) , " operation ' remove ' must have string member ' path ' " ) ; <nl> + } <nl> + <nl> + SECTION ( " nonexisting target location ( array ) " ) <nl> + { <nl> + json j = { 1 , 2 , 3 } ; <nl> + json patch = { { { " op " , " remove " } , { " path " , " / 17 " } } } ; <nl> + CHECK_THROWS_AS ( j . patch ( patch ) , std : : out_of_range ) ; <nl> + CHECK_THROWS_WITH ( j . patch ( patch ) , " array index 17 is out of range " ) ; <nl> + } <nl> + <nl> + SECTION ( " nonexisting target location ( object ) " ) <nl> + { <nl> + json j = { { " foo " , 1 } , { " bar " , 2 } } ; <nl> + json patch = { { { " op " , " remove " } , { " path " , " / baz " } } } ; <nl> + CHECK_THROWS_AS ( j . patch ( patch ) , std : : out_of_range ) ; <nl> + CHECK_THROWS_WITH ( j . 
patch ( patch ) , " key ' baz ' not found " ) ; <nl> + } <nl> + <nl> + SECTION ( " root element as target location " ) <nl> + { <nl> + json j = " string " ; <nl> + json patch = { { { " op " , " remove " } , { " path " , " " } } } ; <nl> + CHECK_THROWS_AS ( j . patch ( patch ) , std : : domain_error ) ; <nl> + CHECK_THROWS_WITH ( j . patch ( patch ) , " JSON pointer has no parent " ) ; <nl> } <nl> } <nl> <nl> TEST_CASE ( " JSON patch " ) <nl> { <nl> json j ; <nl> json patch = { { { " op " , " replace " } } } ; <nl> - CHECK_THROWS_AS ( j . apply_patch ( patch ) , std : : domain_error ) ; <nl> - CHECK_THROWS_WITH ( j . apply_patch ( patch ) , " operation ' replace ' must have member ' path ' " ) ; <nl> + CHECK_THROWS_AS ( j . patch ( patch ) , std : : invalid_argument ) ; <nl> + CHECK_THROWS_WITH ( j . patch ( patch ) , " operation ' replace ' must have member ' path ' " ) ; <nl> } <nl> <nl> SECTION ( " non - string ' path ' " ) <nl> { <nl> json j ; <nl> json patch = { { { " op " , " replace " } , { " path " , 1 } } } ; <nl> - CHECK_THROWS_AS ( j . apply_patch ( patch ) , std : : domain_error ) ; <nl> - CHECK_THROWS_WITH ( j . apply_patch ( patch ) , " operation ' replace ' must have string member ' path ' " ) ; <nl> + CHECK_THROWS_AS ( j . patch ( patch ) , std : : invalid_argument ) ; <nl> + CHECK_THROWS_WITH ( j . patch ( patch ) , " operation ' replace ' must have string member ' path ' " ) ; <nl> } <nl> <nl> SECTION ( " missing ' value ' " ) <nl> { <nl> json j ; <nl> json patch = { { { " op " , " replace " } , { " path " , " " } } } ; <nl> - CHECK_THROWS_AS ( j . apply_patch ( patch ) , std : : domain_error ) ; <nl> - CHECK_THROWS_WITH ( j . apply_patch ( patch ) , " operation ' replace ' must have member ' value ' " ) ; <nl> + CHECK_THROWS_AS ( j . patch ( patch ) , std : : invalid_argument ) ; <nl> + CHECK_THROWS_WITH ( j . patch ( patch ) , " operation ' replace ' must have member ' value ' " ) ; <nl> + } <nl> + <nl> + SECTION ( " nonexisting target location ( array ) " ) <nl> + { <nl> + json j = { 1 , 2 , 3 } ; <nl> + json patch = { { { " op " , " replace " } , { " path " , " / 17 " } , { " value " , 19 } } } ; <nl> + CHECK_THROWS_AS ( j . patch ( patch ) , std : : out_of_range ) ; <nl> + CHECK_THROWS_WITH ( j . patch ( patch ) , " array index 17 is out of range " ) ; <nl> + } <nl> + <nl> + SECTION ( " nonexisting target location ( object ) " ) <nl> + { <nl> + json j = { { " foo " , 1 } , { " bar " , 2 } } ; <nl> + json patch = { { { " op " , " replace " } , { " path " , " / baz " } , { " value " , 3 } } } ; <nl> + CHECK_THROWS_AS ( j . patch ( patch ) , std : : out_of_range ) ; <nl> + CHECK_THROWS_WITH ( j . patch ( patch ) , " key ' baz ' not found " ) ; <nl> } <nl> } <nl> <nl> TEST_CASE ( " JSON patch " ) <nl> { <nl> json j ; <nl> json patch = { { { " op " , " move " } } } ; <nl> - CHECK_THROWS_AS ( j . apply_patch ( patch ) , std : : domain_error ) ; <nl> - CHECK_THROWS_WITH ( j . apply_patch ( patch ) , " operation ' move ' must have member ' path ' " ) ; <nl> + CHECK_THROWS_AS ( j . patch ( patch ) , std : : invalid_argument ) ; <nl> + CHECK_THROWS_WITH ( j . patch ( patch ) , " operation ' move ' must have member ' path ' " ) ; <nl> } <nl> <nl> SECTION ( " non - string ' path ' " ) <nl> { <nl> json j ; <nl> json patch = { { { " op " , " move " } , { " path " , 1 } } } ; <nl> - CHECK_THROWS_AS ( j . apply_patch ( patch ) , std : : domain_error ) ; <nl> - CHECK_THROWS_WITH ( j . 
apply_patch ( patch ) , " operation ' move ' must have string member ' path ' " ) ; <nl> + CHECK_THROWS_AS ( j . patch ( patch ) , std : : invalid_argument ) ; <nl> + CHECK_THROWS_WITH ( j . patch ( patch ) , " operation ' move ' must have string member ' path ' " ) ; <nl> } <nl> <nl> SECTION ( " missing ' from ' " ) <nl> { <nl> json j ; <nl> json patch = { { { " op " , " move " } , { " path " , " " } } } ; <nl> - CHECK_THROWS_AS ( j . apply_patch ( patch ) , std : : domain_error ) ; <nl> - CHECK_THROWS_WITH ( j . apply_patch ( patch ) , " operation ' move ' must have member ' from ' " ) ; <nl> + CHECK_THROWS_AS ( j . patch ( patch ) , std : : invalid_argument ) ; <nl> + CHECK_THROWS_WITH ( j . patch ( patch ) , " operation ' move ' must have member ' from ' " ) ; <nl> } <nl> <nl> SECTION ( " non - string ' from ' " ) <nl> { <nl> json j ; <nl> json patch = { { { " op " , " move " } , { " path " , " " } , { " from " , 1 } } } ; <nl> - CHECK_THROWS_AS ( j . apply_patch ( patch ) , std : : domain_error ) ; <nl> - CHECK_THROWS_WITH ( j . apply_patch ( patch ) , " operation ' move ' must have string member ' from ' " ) ; <nl> + CHECK_THROWS_AS ( j . patch ( patch ) , std : : invalid_argument ) ; <nl> + CHECK_THROWS_WITH ( j . patch ( patch ) , " operation ' move ' must have string member ' from ' " ) ; <nl> + } <nl> + <nl> + SECTION ( " nonexisting from location ( array ) " ) <nl> + { <nl> + json j = { 1 , 2 , 3 } ; <nl> + json patch = { { { " op " , " move " } , { " path " , " / 0 " } , { " from " , " / 5 " } } } ; <nl> + CHECK_THROWS_AS ( j . patch ( patch ) , std : : out_of_range ) ; <nl> + CHECK_THROWS_WITH ( j . patch ( patch ) , " array index 5 is out of range " ) ; <nl> + } <nl> + <nl> + SECTION ( " nonexisting from location ( object ) " ) <nl> + { <nl> + json j = { { " foo " , 1 } , { " bar " , 2 } } ; <nl> + json patch = { { { " op " , " move " } , { " path " , " / baz " } , { " from " , " / baz " } } } ; <nl> + CHECK_THROWS_AS ( j . patch ( patch ) , std : : out_of_range ) ; <nl> + CHECK_THROWS_WITH ( j . patch ( patch ) , " key ' baz ' not found " ) ; <nl> } <nl> } <nl> <nl> TEST_CASE ( " JSON patch " ) <nl> { <nl> json j ; <nl> json patch = { { { " op " , " copy " } } } ; <nl> - CHECK_THROWS_AS ( j . apply_patch ( patch ) , std : : domain_error ) ; <nl> - CHECK_THROWS_WITH ( j . apply_patch ( patch ) , " operation ' copy ' must have member ' path ' " ) ; <nl> + CHECK_THROWS_AS ( j . patch ( patch ) , std : : invalid_argument ) ; <nl> + CHECK_THROWS_WITH ( j . patch ( patch ) , " operation ' copy ' must have member ' path ' " ) ; <nl> } <nl> <nl> SECTION ( " non - string ' path ' " ) <nl> { <nl> json j ; <nl> json patch = { { { " op " , " copy " } , { " path " , 1 } } } ; <nl> - CHECK_THROWS_AS ( j . apply_patch ( patch ) , std : : domain_error ) ; <nl> - CHECK_THROWS_WITH ( j . apply_patch ( patch ) , " operation ' copy ' must have string member ' path ' " ) ; <nl> + CHECK_THROWS_AS ( j . patch ( patch ) , std : : invalid_argument ) ; <nl> + CHECK_THROWS_WITH ( j . patch ( patch ) , " operation ' copy ' must have string member ' path ' " ) ; <nl> } <nl> <nl> SECTION ( " missing ' from ' " ) <nl> { <nl> json j ; <nl> json patch = { { { " op " , " copy " } , { " path " , " " } } } ; <nl> - CHECK_THROWS_AS ( j . apply_patch ( patch ) , std : : domain_error ) ; <nl> - CHECK_THROWS_WITH ( j . apply_patch ( patch ) , " operation ' copy ' must have member ' from ' " ) ; <nl> + CHECK_THROWS_AS ( j . patch ( patch ) , std : : invalid_argument ) ; <nl> + CHECK_THROWS_WITH ( j . 
patch ( patch ) , " operation ' copy ' must have member ' from ' " ) ; <nl> } <nl> <nl> SECTION ( " non - string ' from ' " ) <nl> { <nl> json j ; <nl> json patch = { { { " op " , " copy " } , { " path " , " " } , { " from " , 1 } } } ; <nl> - CHECK_THROWS_AS ( j . apply_patch ( patch ) , std : : domain_error ) ; <nl> - CHECK_THROWS_WITH ( j . apply_patch ( patch ) , " operation ' copy ' must have string member ' from ' " ) ; <nl> + CHECK_THROWS_AS ( j . patch ( patch ) , std : : invalid_argument ) ; <nl> + CHECK_THROWS_WITH ( j . patch ( patch ) , " operation ' copy ' must have string member ' from ' " ) ; <nl> + } <nl> + <nl> + SECTION ( " nonexisting from location ( array ) " ) <nl> + { <nl> + json j = { 1 , 2 , 3 } ; <nl> + json patch = { { { " op " , " copy " } , { " path " , " / 0 " } , { " from " , " / 5 " } } } ; <nl> + CHECK_THROWS_AS ( j . patch ( patch ) , std : : out_of_range ) ; <nl> + CHECK_THROWS_WITH ( j . patch ( patch ) , " array index 5 is out of range " ) ; <nl> + } <nl> + <nl> + SECTION ( " nonexisting from location ( object ) " ) <nl> + { <nl> + json j = { { " foo " , 1 } , { " bar " , 2 } } ; <nl> + json patch = { { { " op " , " copy " } , { " path " , " / fob " } , { " from " , " / baz " } } } ; <nl> + CHECK_THROWS_AS ( j . patch ( patch ) , std : : out_of_range ) ; <nl> + CHECK_THROWS_WITH ( j . patch ( patch ) , " key ' baz ' not found " ) ; <nl> } <nl> } <nl> <nl> TEST_CASE ( " JSON patch " ) <nl> { <nl> json j ; <nl> json patch = { { { " op " , " test " } } } ; <nl> - CHECK_THROWS_AS ( j . apply_patch ( patch ) , std : : domain_error ) ; <nl> - CHECK_THROWS_WITH ( j . apply_patch ( patch ) , " operation ' test ' must have member ' path ' " ) ; <nl> + CHECK_THROWS_AS ( j . patch ( patch ) , std : : invalid_argument ) ; <nl> + CHECK_THROWS_WITH ( j . patch ( patch ) , " operation ' test ' must have member ' path ' " ) ; <nl> } <nl> <nl> SECTION ( " non - string ' path ' " ) <nl> { <nl> json j ; <nl> json patch = { { { " op " , " test " } , { " path " , 1 } } } ; <nl> - CHECK_THROWS_AS ( j . apply_patch ( patch ) , std : : domain_error ) ; <nl> - CHECK_THROWS_WITH ( j . apply_patch ( patch ) , " operation ' test ' must have string member ' path ' " ) ; <nl> + CHECK_THROWS_AS ( j . patch ( patch ) , std : : invalid_argument ) ; <nl> + CHECK_THROWS_WITH ( j . patch ( patch ) , " operation ' test ' must have string member ' path ' " ) ; <nl> } <nl> <nl> SECTION ( " missing ' value ' " ) <nl> { <nl> json j ; <nl> json patch = { { { " op " , " test " } , { " path " , " " } } } ; <nl> - CHECK_THROWS_AS ( j . apply_patch ( patch ) , std : : domain_error ) ; <nl> - CHECK_THROWS_WITH ( j . apply_patch ( patch ) , " operation ' test ' must have member ' value ' " ) ; <nl> + CHECK_THROWS_AS ( j . patch ( patch ) , std : : invalid_argument ) ; <nl> + CHECK_THROWS_WITH ( j . patch ( patch ) , " operation ' test ' must have member ' value ' " ) ; <nl> } <nl> } <nl> } <nl> + <nl> + SECTION ( " Examples from jsonpatch . 
com " ) <nl> + { <nl> + SECTION ( " Simple Example " ) <nl> + { <nl> + / / The original document <nl> + json doc = R " ( <nl> + { <nl> + " baz " : " qux " , <nl> + " foo " : " bar " <nl> + } <nl> + ) " _json ; <nl> + <nl> + / / The patch <nl> + json patch = R " ( <nl> + [ <nl> + { " op " : " replace " , " path " : " / baz " , " value " : " boo " } , <nl> + { " op " : " add " , " path " : " / hello " , " value " : [ " world " ] } , <nl> + { " op " : " remove " , " path " : " / foo " } <nl> + ] <nl> + ) " _json ; <nl> + <nl> + / / The result <nl> + json result = R " ( <nl> + { <nl> + " baz " : " boo " , <nl> + " hello " : [ " world " ] <nl> + } <nl> + ) " _json ; <nl> + <nl> + / / check if patched value is as expected <nl> + CHECK ( doc . patch ( patch ) = = result ) ; <nl> + <nl> + / / check roundtrip <nl> + CHECK ( doc . patch ( json : : diff ( doc , result ) ) = = result ) ; <nl> + } <nl> + <nl> + SECTION ( " Operations " ) <nl> + { <nl> + / / The original document <nl> + json doc = R " ( <nl> + { <nl> + " biscuits " : [ <nl> + { " name " : " Digestive " } , <nl> + { " name " : " Choco Liebniz " } <nl> + ] <nl> + } <nl> + ) " _json ; <nl> + <nl> + SECTION ( " add " ) <nl> + { <nl> + / / The patch <nl> + json patch = R " ( <nl> + [ <nl> + { " op " : " add " , " path " : " / biscuits / 1 " , " value " : { " name " : " Ginger Nut " } } <nl> + ] <nl> + ) " _json ; <nl> + <nl> + / / The result <nl> + json result = R " ( <nl> + { <nl> + " biscuits " : [ <nl> + { " name " : " Digestive " } , <nl> + { " name " : " Ginger Nut " } , <nl> + { " name " : " Choco Liebniz " } <nl> + ] <nl> + } <nl> + ) " _json ; <nl> + <nl> + / / check if patched value is as expected <nl> + CHECK ( doc . patch ( patch ) = = result ) ; <nl> + <nl> + / / check roundtrip <nl> + CHECK ( doc . patch ( json : : diff ( doc , result ) ) = = result ) ; <nl> + } <nl> + <nl> + SECTION ( " remove " ) <nl> + { <nl> + / / The patch <nl> + json patch = R " ( <nl> + [ <nl> + { " op " : " remove " , " path " : " / biscuits " } <nl> + ] <nl> + ) " _json ; <nl> + <nl> + / / The result <nl> + json result = R " ( <nl> + { } <nl> + ) " _json ; <nl> + <nl> + / / check if patched value is as expected <nl> + CHECK ( doc . patch ( patch ) = = result ) ; <nl> + <nl> + / / check roundtrip <nl> + CHECK ( doc . patch ( json : : diff ( doc , result ) ) = = result ) ; <nl> + } <nl> + <nl> + SECTION ( " replace " ) <nl> + { <nl> + / / The patch <nl> + json patch = R " ( <nl> + [ <nl> + { " op " : " replace " , " path " : " / biscuits / 0 / name " , " value " : " Chocolate Digestive " } <nl> + ] <nl> + ) " _json ; <nl> + <nl> + / / The result <nl> + json result = R " ( <nl> + { <nl> + " biscuits " : [ <nl> + { " name " : " Chocolate Digestive " } , <nl> + { " name " : " Choco Liebniz " } <nl> + ] <nl> + } <nl> + ) " _json ; <nl> + <nl> + / / check if patched value is as expected <nl> + CHECK ( doc . patch ( patch ) = = result ) ; <nl> + <nl> + / / check roundtrip <nl> + CHECK ( doc . 
patch ( json : : diff ( doc , result ) ) = = result ) ; <nl> + } <nl> + <nl> + SECTION ( " copy " ) <nl> + { <nl> + / / The patch <nl> + json patch = R " ( <nl> + [ <nl> + { " op " : " copy " , " from " : " / biscuits / 0 " , " path " : " / best_biscuit " } <nl> + ] <nl> + ) " _json ; <nl> + <nl> + / / The result <nl> + json result = R " ( <nl> + { <nl> + " biscuits " : [ <nl> + { " name " : " Digestive " } , <nl> + { " name " : " Choco Liebniz " } <nl> + ] , <nl> + " best_biscuit " : { <nl> + " name " : " Digestive " <nl> + } <nl> + } <nl> + ) " _json ; <nl> + <nl> + / / check if patched value is as expected <nl> + CHECK ( doc . patch ( patch ) = = result ) ; <nl> + <nl> + / / check roundtrip <nl> + CHECK ( doc . patch ( json : : diff ( doc , result ) ) = = result ) ; <nl> + } <nl> + <nl> + SECTION ( " move " ) <nl> + { <nl> + / / The patch <nl> + json patch = R " ( <nl> + [ <nl> + { " op " : " move " , " from " : " / biscuits " , " path " : " / cookies " } <nl> + ] <nl> + ) " _json ; <nl> + <nl> + / / The result <nl> + json result = R " ( <nl> + { <nl> + " cookies " : [ <nl> + { " name " : " Digestive " } , <nl> + { " name " : " Choco Liebniz " } <nl> + ] <nl> + } <nl> + ) " _json ; <nl> + <nl> + / / check if patched value is as expected <nl> + CHECK ( doc . patch ( patch ) = = result ) ; <nl> + <nl> + / / check roundtrip <nl> + CHECK ( doc . patch ( json : : diff ( doc , result ) ) = = result ) ; <nl> + } <nl> + <nl> + SECTION ( " test " ) <nl> + { <nl> + / / The patch <nl> + json patch = R " ( <nl> + [ <nl> + { " op " : " test " , " path " : " / best_biscuit / name " , " value " : " Choco Liebniz " } <nl> + ] <nl> + ) " _json ; <nl> + <nl> + / / the test will fail <nl> + CHECK_THROWS_AS ( doc . patch ( patch ) , std : : domain_error ) ; <nl> + CHECK_THROWS_WITH ( doc . patch ( patch ) , " unsuccessful : " + patch [ 0 ] . dump ( ) ) ; <nl> + } <nl> + } <nl> + } <nl> + <nl> + SECTION ( " Examples from bruth . github . io / jsonpatch - js " ) <nl> + { <nl> + SECTION ( " add " ) <nl> + { <nl> + CHECK ( R " ( { } ) " _json . patch ( <nl> + R " ( [ { " op " : " add " , " path " : " / foo " , " value " : " bar " } ] ) " _json <nl> + ) = = R " ( { " foo " : " bar " } ) " _json ) ; <nl> + <nl> + CHECK ( R " ( { " foo " : [ 1 , 3 ] } ) " _json . patch ( <nl> + R " ( [ { " op " : " add " , " path " : " / foo " , " value " : " bar " } ] ) " _json <nl> + ) = = R " ( { " foo " : " bar " } ) " _json ) ; <nl> + <nl> + CHECK ( R " ( { " foo " : [ { } ] } ) " _json . patch ( <nl> + R " ( [ { " op " : " add " , " path " : " / foo / 0 / bar " , " value " : " baz " } ] ) " _json <nl> + ) = = R " ( { " foo " : [ { " bar " : " baz " } ] } ) " _json ) ; <nl> + } <nl> + <nl> + SECTION ( " remove " ) <nl> + { <nl> + CHECK ( R " ( { " foo " : " bar " } ) " _json . patch ( <nl> + R " ( [ { " op " : " remove " , " path " : " / foo " } ] ) " _json <nl> + ) = = R " ( { } ) " _json ) ; <nl> + <nl> + CHECK ( R " ( { " foo " : [ 1 , 2 , 3 ] } ) " _json . patch ( <nl> + R " ( [ { " op " : " remove " , " path " : " / foo / 1 " } ] ) " _json <nl> + ) = = R " ( { " foo " : [ 1 , 3 ] } ) " _json ) ; <nl> + <nl> + CHECK ( R " ( { " foo " : [ { " bar " : " baz " } ] } ) " _json . patch ( <nl> + R " ( [ { " op " : " remove " , " path " : " / foo / 0 / bar " } ] ) " _json <nl> + ) = = R " ( { " foo " : [ { } ] } ) " _json ) ; <nl> + } <nl> + <nl> + SECTION ( " replace " ) <nl> + { <nl> + CHECK ( R " ( { " foo " : " bar " } ) " _json . 
patch ( <nl> + R " ( [ { " op " : " replace " , " path " : " / foo " , " value " : 1 } ] ) " _json <nl> + ) = = R " ( { " foo " : 1 } ) " _json ) ; <nl> + <nl> + CHECK ( R " ( { " foo " : [ 1 , 2 , 3 ] } ) " _json . patch ( <nl> + R " ( [ { " op " : " replace " , " path " : " / foo / 1 " , " value " : 4 } ] ) " _json <nl> + ) = = R " ( { " foo " : [ 1 , 4 , 3 ] } ) " _json ) ; <nl> + <nl> + CHECK ( R " ( { " foo " : [ { " bar " : " baz " } ] } ) " _json . patch ( <nl> + R " ( [ { " op " : " replace " , " path " : " / foo / 0 / bar " , " value " : 1 } ] ) " _json <nl> + ) = = R " ( { " foo " : [ { " bar " : 1 } ] } ) " _json ) ; <nl> + } <nl> + <nl> + SECTION ( " move " ) <nl> + { <nl> + CHECK ( R " ( { " foo " : [ 1 , 2 , 3 ] } ) " _json . patch ( <nl> + R " ( [ { " op " : " move " , " from " : " / foo " , " path " : " / bar " } ] ) " _json <nl> + ) = = R " ( { " bar " : [ 1 , 2 , 3 ] } ) " _json ) ; <nl> + } <nl> + <nl> + SECTION ( " copy " ) <nl> + { <nl> + CHECK ( R " ( { " foo " : [ 1 , 2 , 3 ] } ) " _json . patch ( <nl> + R " ( [ { " op " : " copy " , " from " : " / foo / 1 " , " path " : " / bar " } ] ) " _json <nl> + ) = = R " ( { " foo " : [ 1 , 2 , 3 ] , " bar " : 2 } ) " _json ) ; <nl> + } <nl> + <nl> + SECTION ( " copy " ) <nl> + { <nl> + CHECK_NOTHROW ( R " ( { " foo " : " bar " } ) " _json . patch ( <nl> + R " ( [ { " op " : " test " , " path " : " / foo " , " value " : " bar " } ] ) " _json ) ) ; <nl> + } <nl> + } <nl> } <nl> <nl> TEST_CASE ( " regression tests " ) <nl> | cleanup , test , and diff | nlohmann/json | 5e0bf75d6056eee48976f9c4e86ed52461d289aa | 2016-04-25T21:17:04Z |
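The nlohmann/json tests above exercise the RFC 6902 JSON Patch support through basic_json::patch() and the static json::diff(). Below is a minimal standalone sketch of the patch/diff roundtrip those tests check, assuming only the public API visible in the diff (the _json user-defined literal, patch(), diff()); the <nlohmann/json.hpp> include path is the modern single-header location and may differ for the library revision the tests were written against.

    #include <cassert>
    #include <iostream>
    #include <nlohmann/json.hpp>

    using nlohmann::json;

    int main()
    {
        // The original document (same RFC 6902 example as in the tests).
        json doc = R"({ "baz": "qux", "foo": "bar" })"_json;

        // A patch is an array of operation objects applied in order.
        json patch = R"([
            { "op": "replace", "path": "/baz", "value": "boo" },
            { "op": "add", "path": "/hello", "value": ["world"] },
            { "op": "remove", "path": "/foo" }
        ])"_json;

        // patch() returns the patched copy; doc itself is unchanged.
        json result = doc.patch(patch);
        assert(result == R"({ "baz": "boo", "hello": ["world"] })"_json);

        // diff() computes a patch transforming doc into result, so applying
        // it reproduces result -- the roundtrip asserted by the tests above.
        json computed = json::diff(doc, result);
        assert(doc.patch(computed) == result);

        std::cout << computed.dump(2) << std::endl;
        return 0;
    }

A failed "test" operation raises an exception (std::domain_error in the library version shown above), which is why the tests wrap that case in CHECK_THROWS_AS rather than comparing a result.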
new file mode 100644 <nl> index 000000000000 . . e1a180840855 <nl> mmm / dev / null <nl> ppp b / validation - test / compiler_crashers / 28467 - child - source - range - not - contained - within - its - parent - guard - stmt . swift <nl> <nl> + / / This source file is part of the Swift . org open source project <nl> + / / Copyright ( c ) 2014 - 2016 Apple Inc . and the Swift project authors <nl> + / / Licensed under Apache License v2 . 0 with Runtime Library Exception <nl> + / / <nl> + / / See http : / / swift . org / LICENSE . txt for license information <nl> + / / See http : / / swift . org / CONTRIBUTORS . txt for the list of Swift project authors <nl> + <nl> + / / RUN : not - - crash % target - swift - frontend % s - emit - ir <nl> + func a { guard { guard let a <nl> | [ swiftc ( 98 vs . 5180 ) ] Add crasher in ? | apple/swift | 69866e942b3a263dae9fc990b5d47fc33ed7a43c | 2016-11-03T14:32:47Z |
mmm a / src / torque / declarations . cc <nl> ppp b / src / torque / declarations . cc <nl> const Type * Declarations : : LookupType ( const Identifier * name ) { <nl> return alias - > type ( ) ; <nl> } <nl> <nl> - const Type * Declarations : : LookupType ( std : : string name ) { <nl> - return LookupType ( QualifiedName ( std : : move ( name ) ) ) ; <nl> - } <nl> - <nl> const Type * Declarations : : LookupGlobalType ( const std : : string & name ) { <nl> TypeAlias * declaration = EnsureUnique ( <nl> FilterDeclarables < TypeAlias > ( LookupGlobalScope ( name ) ) , name , " type " ) ; <nl> mmm a / src / torque / declarations . h <nl> ppp b / src / torque / declarations . h <nl> class Declarations { <nl> static const TypeAlias * LookupTypeAlias ( const QualifiedName & name ) ; <nl> static const Type * LookupType ( const QualifiedName & name ) ; <nl> static const Type * LookupType ( const Identifier * identifier ) ; <nl> - static const Type * LookupType ( std : : string name ) ; <nl> static const Type * LookupGlobalType ( const std : : string & name ) ; <nl> <nl> static Builtin * FindSomeInternalBuiltinWithType ( <nl> mmm a / src / torque / implementation - visitor . cc <nl> ppp b / src / torque / implementation - visitor . cc <nl> VisitResult ImplementationVisitor : : Visit ( NumberLiteralExpression * expr ) { <nl> / / TODO ( tebbi ) : Do not silently loose precision ; support 64bit literals . <nl> double d = std : : stod ( expr - > number . c_str ( ) ) ; <nl> int32_t i = static_cast < int32_t > ( d ) ; <nl> - const Type * result_type = Declarations : : LookupType ( CONST_FLOAT64_TYPE_STRING ) ; <nl> + const Type * result_type = TypeOracle : : GetConstFloat64Type ( ) ; <nl> if ( i = = d ) { <nl> if ( ( i > > 30 ) = = ( i > > 31 ) ) { <nl> - result_type = Declarations : : LookupType ( CONST_INT31_TYPE_STRING ) ; <nl> + result_type = TypeOracle : : GetConstInt31Type ( ) ; <nl> } else { <nl> - result_type = Declarations : : LookupType ( CONST_INT32_TYPE_STRING ) ; <nl> + result_type = TypeOracle : : GetConstInt32Type ( ) ; <nl> } <nl> } <nl> return VisitResult { result_type , expr - > number } ; <nl> mmm a / src / torque / type - oracle . h <nl> ppp b / src / torque / type - oracle . h <nl> class TypeOracle : public ContextualClass < TypeOracle > { <nl> return Get ( ) . GetBuiltinType ( FLOAT64_TYPE_STRING ) ; <nl> } <nl> <nl> + static const Type * GetConstFloat64Type ( ) { <nl> + return Get ( ) . GetBuiltinType ( CONST_FLOAT64_TYPE_STRING ) ; <nl> + } <nl> + <nl> static const Type * GetNeverType ( ) { <nl> return Get ( ) . GetBuiltinType ( NEVER_TYPE_STRING ) ; <nl> } <nl> class TypeOracle : public ContextualClass < TypeOracle > { <nl> return Get ( ) . GetBuiltinType ( CONST_INT31_TYPE_STRING ) ; <nl> } <nl> <nl> + static const Type * GetConstInt32Type ( ) { <nl> + return Get ( ) . GetBuiltinType ( CONST_INT32_TYPE_STRING ) ; <nl> + } <nl> + <nl> static bool IsImplicitlyConvertableFrom ( const Type * to , const Type * from ) { <nl> for ( Generic * from_constexpr : <nl> Declarations : : LookupGeneric ( kFromConstexprMacroName ) ) { <nl> | [ torque ] Remove LookupType ( std : : string & ) | v8/v8 | 41897941c90a0e3c04c2ee122955e63d3fd92db0 | 2019-05-14T09:35:53Z |
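The NumberLiteralExpression change in this commit types an integer literal as const_int31 when ( i >> 30 ) == ( i >> 31 ), that is, when it fits in 31 signed bits (V8's Smi payload range on 32-bit tagging), and as const_int32 otherwise. A standalone sketch of that range check follows; it is not V8 code, and it assumes arithmetic right shift of negative values, which is implementation-defined before C++20 but universal on mainstream compilers.

    #include <cassert>
    #include <cstdint>

    // True when i is representable in 31 signed bits, i.e. in [-2^30, 2^30 - 1].
    // Bits 30 and 31 of a two's-complement int32 agree exactly in that range.
    static bool FitsInInt31(int32_t i)
    {
        return (i >> 30) == (i >> 31);
    }

    int main()
    {
        assert(FitsInInt31(0));
        assert(FitsInInt31((1 << 30) - 1));   //  2^30 - 1, the largest 31-bit value
        assert(FitsInInt31(-(1 << 30)));      // -2^30, the smallest 31-bit value
        assert(!FitsInInt31(1 << 30));        //  2^30 needs a full 32 bits
        assert(!FitsInInt31(INT32_MIN));
        return 0;
    }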
mmm a / src / video_core / command_classes / codecs / vp9 . cpp <nl> ppp b / src / video_core / command_classes / codecs / vp9 . cpp <nl> Vp9FrameContainer VP9 : : GetCurrentFrame ( const NvdecCommon : : NvdecRegisters & state ) <nl> frame . info . bitstream_size ) ; <nl> } <nl> / / Buffer two frames , saving the last show frame info <nl> - if ( next_next_frame . bit_stream . size ( ) ! = 0 ) { <nl> + if ( ! next_next_frame . bit_stream . empty ( ) ) { <nl> Vp9FrameContainer temp { <nl> . info = frame . info , <nl> . bit_stream = frame . bit_stream , <nl> Vp9FrameContainer VP9 : : GetCurrentFrame ( const NvdecCommon : : NvdecRegisters & state ) <nl> frame . bit_stream = next_next_frame . bit_stream ; <nl> next_next_frame = std : : move ( temp ) ; <nl> <nl> - if ( next_frame . bit_stream . size ( ) ! = 0 ) { <nl> - Vp9FrameContainer temp { <nl> + if ( ! next_frame . bit_stream . empty ( ) ) { <nl> + Vp9FrameContainer temp2 { <nl> . info = frame . info , <nl> . bit_stream = frame . bit_stream , <nl> } ; <nl> next_frame . info . show_frame = frame . info . last_frame_shown ; <nl> frame . info = next_frame . info ; <nl> frame . bit_stream = next_frame . bit_stream ; <nl> - next_frame = std : : move ( temp ) ; <nl> + next_frame = std : : move ( temp2 ) ; <nl> } else { <nl> next_frame . info = frame . info ; <nl> next_frame . bit_stream = frame . bit_stream ; <nl> | vp9 : Resolve variable shadowing | yuzu-emu/yuzu | dcc26c54a52eb6fe2fc5fcb7a4b34bc1a4ad4789 | 2020-10-27T06:20:17Z |
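The fix above renames an inner temp to temp2: the inner declaration shadowed the outer temp, which compilers flag under -Wshadow because it makes it easy to touch the wrong (possibly already moved-from) object. A reduced, hypothetical sketch of the hazard follows; the Frame type and RotateBuffers are illustrative, not yuzu code.

    #include <string>
    #include <utility>

    struct Frame {
        std::string bits;
    };

    // Rotates buffered frames the way the vp9 code above does, using a
    // distinct name for each scratch copy so neither scope hides the other.
    void RotateBuffers(Frame& current, Frame& next, Frame& next_next)
    {
        if (!next_next.bits.empty()) {
            Frame temp{current.bits};          // outer scratch copy
            current.bits = next_next.bits;
            next_next = std::move(temp);       // temp is now moved-from

            if (!next.bits.empty()) {
                // Before the fix this was also named `temp`, shadowing the
                // moved-from outer variable -- legal, but a bug magnet.
                Frame temp2{current.bits};
                current.bits = next.bits;
                next = std::move(temp2);
            }
        }
    }

    int main()
    {
        Frame a{"a"}, b{"b"}, c{"c"};
        RotateBuffers(a, b, c);
        return 0;
    }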
mmm a / script / cpplint . py <nl> ppp b / script / cpplint . py <nl> def find_files ( roots , test ) : <nl> <nl> <nl> def is_cpp_file ( filename ) : <nl> - return filename . endswith ( ' . cc ' ) or filename . endswith ( ' . h ' ) <nl> + return filename . endswith ( ' . cc ' ) or filename . endswith ( ' . h ' ) <nl> <nl> <nl> def find_changed_files ( ) : <nl> | fix inconsistent indentation | electron/electron | 2f88e69ed4d26fc52d09d2656b8a8a75117f9c58 | 2017-12-19T22:23:27Z |
mmm a / documentation / sphinx / source / downloads . rst <nl> ppp b / documentation / sphinx / source / downloads . rst <nl> macOS <nl> <nl> The macOS installation package is supported on macOS 10 . 7 + . It includes the client and ( optionally ) the server . <nl> <nl> - * ` FoundationDB - 6 . 3 . 2 . pkg < https : / / www . foundationdb . org / downloads / 6 . 3 . 2 / macOS / installers / FoundationDB - 6 . 3 . 2 . pkg > ` _ <nl> + * ` FoundationDB - 6 . 3 . 3 . pkg < https : / / www . foundationdb . org / downloads / 6 . 3 . 3 / macOS / installers / FoundationDB - 6 . 3 . 3 . pkg > ` _ <nl> <nl> Ubuntu <nl> mmmmmm <nl> <nl> The Ubuntu packages are supported on 64 - bit Ubuntu 12 . 04 + , but beware of the Linux kernel bug in Ubuntu 12 . x . <nl> <nl> - * ` foundationdb - clients - 6 . 3 . 2 - 1_amd64 . deb < https : / / www . foundationdb . org / downloads / 6 . 3 . 2 / ubuntu / installers / foundationdb - clients_6 . 3 . 2 - 1_amd64 . deb > ` _ <nl> - * ` foundationdb - server - 6 . 3 . 2 - 1_amd64 . deb < https : / / www . foundationdb . org / downloads / 6 . 3 . 2 / ubuntu / installers / foundationdb - server_6 . 3 . 2 - 1_amd64 . deb > ` _ ( depends on the clients package ) <nl> + * ` foundationdb - clients - 6 . 3 . 3 - 1_amd64 . deb < https : / / www . foundationdb . org / downloads / 6 . 3 . 3 / ubuntu / installers / foundationdb - clients_6 . 3 . 3 - 1_amd64 . deb > ` _ <nl> + * ` foundationdb - server - 6 . 3 . 3 - 1_amd64 . deb < https : / / www . foundationdb . org / downloads / 6 . 3 . 3 / ubuntu / installers / foundationdb - server_6 . 3 . 3 - 1_amd64 . deb > ` _ ( depends on the clients package ) <nl> <nl> RHEL / CentOS EL6 <nl> mmmmmmmmmmmmmmm <nl> <nl> The RHEL / CentOS EL6 packages are supported on 64 - bit RHEL / CentOS 6 . x . <nl> <nl> - * ` foundationdb - clients - 6 . 3 . 2 - 1 . el6 . x86_64 . rpm < https : / / www . foundationdb . org / downloads / 6 . 3 . 2 / rhel6 / installers / foundationdb - clients - 6 . 3 . 2 - 1 . el6 . x86_64 . rpm > ` _ <nl> - * ` foundationdb - server - 6 . 3 . 2 - 1 . el6 . x86_64 . rpm < https : / / www . foundationdb . org / downloads / 6 . 3 . 2 / rhel6 / installers / foundationdb - server - 6 . 3 . 2 - 1 . el6 . x86_64 . rpm > ` _ ( depends on the clients package ) <nl> + * ` foundationdb - clients - 6 . 3 . 3 - 1 . el6 . x86_64 . rpm < https : / / www . foundationdb . org / downloads / 6 . 3 . 3 / rhel6 / installers / foundationdb - clients - 6 . 3 . 3 - 1 . el6 . x86_64 . rpm > ` _ <nl> + * ` foundationdb - server - 6 . 3 . 3 - 1 . el6 . x86_64 . rpm < https : / / www . foundationdb . org / downloads / 6 . 3 . 3 / rhel6 / installers / foundationdb - server - 6 . 3 . 3 - 1 . el6 . x86_64 . rpm > ` _ ( depends on the clients package ) <nl> <nl> RHEL / CentOS EL7 <nl> mmmmmmmmmmmmmmm <nl> <nl> The RHEL / CentOS EL7 packages are supported on 64 - bit RHEL / CentOS 7 . x . <nl> <nl> - * ` foundationdb - clients - 6 . 3 . 2 - 1 . el7 . x86_64 . rpm < https : / / www . foundationdb . org / downloads / 6 . 3 . 2 / rhel7 / installers / foundationdb - clients - 6 . 3 . 2 - 1 . el7 . x86_64 . rpm > ` _ <nl> - * ` foundationdb - server - 6 . 3 . 2 - 1 . el7 . x86_64 . rpm < https : / / www . foundationdb . org / downloads / 6 . 3 . 2 / rhel7 / installers / foundationdb - server - 6 . 3 . 2 - 1 . el7 . x86_64 . rpm > ` _ ( depends on the clients package ) <nl> + * ` foundationdb - clients - 6 . 3 . 3 - 1 . el7 . x86_64 . rpm < https : / / www . foundationdb . org / downloads / 6 . 3 . 3 / rhel7 / installers / foundationdb - clients - 6 . 3 . 
3 - 1 . el7 . x86_64 . rpm > ` _ <nl> + * ` foundationdb - server - 6 . 3 . 3 - 1 . el7 . x86_64 . rpm < https : / / www . foundationdb . org / downloads / 6 . 3 . 3 / rhel7 / installers / foundationdb - server - 6 . 3 . 3 - 1 . el7 . x86_64 . rpm > ` _ ( depends on the clients package ) <nl> <nl> Windows <nl> mmmmmm - <nl> <nl> The Windows installer is supported on 64 - bit Windows XP and later . It includes the client and ( optionally ) the server . <nl> <nl> - * ` foundationdb - 6 . 3 . 2 - x64 . msi < https : / / www . foundationdb . org / downloads / 6 . 3 . 2 / windows / installers / foundationdb - 6 . 3 . 2 - x64 . msi > ` _ <nl> + * ` foundationdb - 6 . 3 . 3 - x64 . msi < https : / / www . foundationdb . org / downloads / 6 . 3 . 3 / windows / installers / foundationdb - 6 . 3 . 3 - x64 . msi > ` _ <nl> <nl> API Language Bindings <nl> = = = = = = = = = = = = = = = = = = = = = <nl> On macOS and Windows , the FoundationDB Python API bindings are installed as part <nl> <nl> If you need to use the FoundationDB Python API from other Python installations or paths , use the Python package manager ` ` pip ` ` ( ` ` pip install foundationdb ` ` ) or download the Python package : <nl> <nl> - * ` foundationdb - 6 . 3 . 2 . tar . gz < https : / / www . foundationdb . org / downloads / 6 . 3 . 2 / bindings / python / foundationdb - 6 . 3 . 2 . tar . gz > ` _ <nl> + * ` foundationdb - 6 . 3 . 3 . tar . gz < https : / / www . foundationdb . org / downloads / 6 . 3 . 3 / bindings / python / foundationdb - 6 . 3 . 3 . tar . gz > ` _ <nl> <nl> Ruby 1 . 9 . 3 / 2 . 0 . 0 + <nl> mmmmmmmmmmmmmmm - - <nl> <nl> - * ` fdb - 6 . 3 . 2 . gem < https : / / www . foundationdb . org / downloads / 6 . 3 . 2 / bindings / ruby / fdb - 6 . 3 . 2 . gem > ` _ <nl> + * ` fdb - 6 . 3 . 3 . gem < https : / / www . foundationdb . org / downloads / 6 . 3 . 3 / bindings / ruby / fdb - 6 . 3 . 3 . gem > ` _ <nl> <nl> Java 8 + <nl> mmmmmm - <nl> <nl> - * ` fdb - java - 6 . 3 . 2 . jar < https : / / www . foundationdb . org / downloads / 6 . 3 . 2 / bindings / java / fdb - java - 6 . 3 . 2 . jar > ` _ <nl> - * ` fdb - java - 6 . 3 . 2 - javadoc . jar < https : / / www . foundationdb . org / downloads / 6 . 3 . 2 / bindings / java / fdb - java - 6 . 3 . 2 - javadoc . jar > ` _ <nl> + * ` fdb - java - 6 . 3 . 3 . jar < https : / / www . foundationdb . org / downloads / 6 . 3 . 3 / bindings / java / fdb - java - 6 . 3 . 3 . jar > ` _ <nl> + * ` fdb - java - 6 . 3 . 3 - javadoc . jar < https : / / www . foundationdb . org / downloads / 6 . 3 . 3 / bindings / java / fdb - java - 6 . 3 . 3 - javadoc . jar > ` _ <nl> <nl> Go 1 . 11 + <nl> mmmmmm - - <nl> mmm a / documentation / sphinx / source / release - notes / release - notes - 630 . rst <nl> ppp b / documentation / sphinx / source / release - notes / release - notes - 630 . rst <nl> <nl> Release Notes <nl> # # # # # # # # # # # # # <nl> <nl> - 6 . 3 . 2 <nl> + 6 . 3 . 3 <nl> = = = = = <nl> <nl> Features <nl> Fixes from previous versions <nl> mmmmmmmmmmmmmmmmmmmmmmmmmmm - <nl> <nl> * The 6 . 3 . 1 patch release includes all fixes from the patch releases 6 . 2 . 21 and 6 . 2 . 22 . : doc : ` ( 6 . 2 Release Notes ) < / release - notes / release - notes - 620 > ` <nl> + * The 6 . 3 . 3 patch release includes all fixes from the patch release 6 . 2 . 23 . : doc : ` ( 6 . 2 Release Notes ) < / release - notes / release - notes - 620 > ` <nl> <nl> Fixes only impacting 6 . 3 . 0 + <nl> mmmmmmmmmmmmmmmmmmmmmmmmmmm <nl> <nl> + * Clients did not properly balance requests to the proxies .
[ 6 . 3 . 3 ] ` ( PR # 3377 ) < https : / / github . com / apple / foundationdb / pull / 3377 > ` _ <nl> * Renamed ` ` MIN_DELAY_STORAGE_CANDIDACY_SECONDS ` ` knob to ` ` MIN_DELAY_CC_WORST_FIT_CANDIDACY_SECONDS ` ` . [ 6 . 3 . 2 ] ` ( PR # 3327 ) < https : / / github . com / apple / foundationdb / pull / 3327 > ` _ <nl> * Refreshing TLS certificates could cause crashes . [ 6 . 3 . 2 ] ` ( PR # 3352 ) < https : / / github . com / apple / foundationdb / pull / 3352 > ` _ <nl> * All storage class processes attempted to connect to the same coordinator . [ 6 . 3 . 2 ] ` ( PR # 3361 ) < https : / / github . com / apple / foundationdb / pull / 3361 > ` _ <nl> | updated documentation for 6 . 3 . 3 | apple/foundationdb | c9c80d6dec4304982684da9ffcda3c9eddeba35d | 2020-07-13T18:08:53Z |
mmm a / tensorflow / python / training / moving_averages . py <nl> ppp b / tensorflow / python / training / moving_averages . py <nl> def apply ( self , var_list = None ) : <nl> avg = slot_creator . create_zeros_slot ( <nl> var , <nl> self . _name , <nl> - colocate_with_primary = ( var . op . type in [ " Variable " , " VariableV2 " ] ) ) <nl> + colocate_with_primary = ( var . op . type in [ " Variable " , <nl> + " VariableV2 " , <nl> + " VarHandleOp " ] ) ) <nl> if self . _zero_debias : <nl> zero_debias_true . add ( avg ) <nl> self . _averages [ var ] = avg <nl> | Support resource variables in moving averages . | tensorflow/tensorflow | 7b2e60382bad9cf73b9a68c99c0d32e28fe17485 | 2018-02-06T20:34:22Z |
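The one-line change above extends the check that decides whether a moving-average slot is colocated with its primary variable, so resource variables ("VarHandleOp") are treated like the ref-variable ops "Variable" and "VariableV2". A hedged sketch, not TensorFlow source, of keeping such an op-type whitelist in one named set so the next addition stays a one-line change:

    #include <cassert>
    #include <string>
    #include <unordered_set>

    // True when op_type denotes a variable whose slot should be colocated
    // with it; the set mirrors the three types named in the diff above.
    static bool IsVariableOpType(const std::string& op_type)
    {
        static const std::unordered_set<std::string> kVariableOps{
            "Variable", "VariableV2", "VarHandleOp"};
        return kVariableOps.count(op_type) != 0;
    }

    int main()
    {
        assert(IsVariableOpType("VarHandleOp"));  // resource variables qualify
        assert(!IsVariableOpType("Identity"));    // other ops fall through
        return 0;
    }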
mmm a / tensorflow / compiler / jit / xla_device_ops . cc <nl> ppp b / tensorflow / compiler / jit / xla_device_ops . cc <nl> limitations under the License . <nl> <nl> namespace tensorflow { <nl> <nl> - void XlaDeviceAssignOp : : Copy ( OpKernelContext * context , Tensor * lhs , <nl> - const Tensor & rhs ) { <nl> - std : : shared_ptr < xla : : GlobalData > gd = <nl> - XlaTransferManager : : GetTensorGlobalData ( rhs ) ; <nl> - XlaTransferManager : : SetTensorGlobalData ( std : : move ( gd ) , lhs ) ; <nl> - } <nl> - <nl> XlaDeviceDummyOp : : XlaDeviceDummyOp ( OpKernelConstruction * ctx ) : OpKernel ( ctx ) { } <nl> <nl> void XlaDeviceDummyOp : : Compute ( OpKernelContext * ctx ) { <nl> mmm a / tensorflow / compiler / jit / xla_device_ops . h <nl> ppp b / tensorflow / compiler / jit / xla_device_ops . h <nl> limitations under the License . <nl> <nl> # include " tensorflow / core / framework / op_kernel . h " <nl> # include " tensorflow / core / framework / resource_mgr . h " <nl> - # include " tensorflow / core / kernels / assign_op . h " <nl> # include " tensorflow / core / kernels / constant_op . h " <nl> # include " tensorflow / core / kernels / control_flow_ops . h " <nl> # include " tensorflow / core / kernels / identity_op . h " <nl> limitations under the License . <nl> <nl> namespace tensorflow { <nl> <nl> - / / Implementation of Assign for XLA devices . <nl> - class XlaDeviceAssignOp : public AssignOp { <nl> - public : <nl> - using AssignOp : : AssignOp ; <nl> - <nl> - void Copy ( OpKernelContext * context , Tensor * lhs , const Tensor & rhs ) override ; <nl> - } ; <nl> - <nl> / / Dummy OpKernel , used for kernels assigned to an XLA device that should be <nl> / / compiled . Should never be called at runtime since such ops should be <nl> / / rewritten to a _XlaLaunch op . If it is called , it means the placer placed an <nl> class XlaDeviceDummyOp : public OpKernel { <nl> REGISTER_KERNEL_BUILDER ( Name ( " PlaceholderV2 " ) . Device ( DEVICE ) , \ <nl> PlaceholderOp ) ; \ <nl> \ <nl> - REGISTER_KERNEL_BUILDER ( \ <nl> - Name ( " Variable " ) . Device ( DEVICE ) . TypeConstraint ( " dtype " , TYPES ) , \ <nl> - VariableOp ) ; \ <nl> - REGISTER_KERNEL_BUILDER ( \ <nl> - Name ( " VariableV2 " ) . Device ( DEVICE ) . TypeConstraint ( " dtype " , TYPES ) , \ <nl> - VariableOp ) ; \ <nl> - REGISTER_KERNEL_BUILDER ( \ <nl> - Name ( " TemporaryVariable " ) . Device ( DEVICE ) . TypeConstraint ( " dtype " , TYPES ) , \ <nl> - TemporaryVariableOp ) ; \ <nl> - REGISTER_KERNEL_BUILDER ( Name ( " DestroyTemporaryVariable " ) \ <nl> - . Device ( DEVICE ) \ <nl> - . TypeConstraint ( " T " , TYPES ) , \ <nl> - DestroyTemporaryVariableOp ) ; \ <nl> - REGISTER_KERNEL_BUILDER ( Name ( " IsVariableInitialized " ) \ <nl> - . Device ( DEVICE ) \ <nl> - . TypeConstraint ( " dtype " , TYPES ) \ <nl> - . HostMemory ( " is_initialized " ) , \ <nl> - IsVariableInitializedOp ) ; \ <nl> - REGISTER_KERNEL_BUILDER ( \ <nl> - Name ( " Assign " ) . Device ( DEVICE ) . TypeConstraint ( " T " , TYPES ) , \ <nl> - XlaDeviceAssignOp ) ; \ <nl> - \ <nl> REGISTER_KERNEL_BUILDER ( Name ( " ControlTrigger " ) . Device ( DEVICE ) , \ <nl> ControlTriggerOp ) ; \ <nl> REGISTER_KERNEL_BUILDER ( Name ( " Enter " ) . Device ( DEVICE ) , EnterOp ) ; \ <nl> | [ TF : XLA ] Remove registrations for non - resource variable ops on XLA_ * devices . | tensorflow/tensorflow | 9b30c6a4be3cf87c6a284d622150f9e2748bb403 | 2017-03-28T16:46:11Z |
mmm a / src / runtime / ext / ext_string . cpp <nl> ppp b / src / runtime / ext / ext_string . cpp <nl> Variant f_str_word_count ( CStrRef str , int64 format / * = 0 * / , <nl> if ( ! format ) { <nl> return word_count ; <nl> } <nl> - return ret ; <nl> + return ret . isNull ( ) ? Array : : Create ( ) : ret ; <nl> } <nl> <nl> Variant f_strtr ( CStrRef str , CVarRef from , CVarRef to / * = null_variant * / ) { <nl> mmm a / src / test / test_ext_string . cpp <nl> ppp b / src / test / test_ext_string . cpp <nl> bool TestExtString : : test_count_chars ( ) { <nl> <nl> bool TestExtString : : test_str_word_count ( ) { <nl> VS ( f_str_word_count ( " Two Ts and one F . " ) , 5 ) ; <nl> + VS ( f_str_word_count ( " " , 2 ) , Array : : Create ( ) ) ; <nl> + VS ( f_str_word_count ( 1 , 2 ) , Array : : Create ( ) ) ; <nl> + VS ( f_str_word_count ( " 1 2 " , 2 ) , Array : : Create ( ) ) ; <nl> return Count ( true ) ; <nl> } <nl> <nl> | Fix behavior of str_word_count ( ) ( format = = 1 | | 2 ) where no words found . | facebook/hhvm | 14c0e15cbe71f76d464d633e9bfd5e46dd72ac58 | 2012-07-24T01:09:54Z |
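The HHVM fix above makes f_str_word_count() return an empty array instead of null when format is 1 or 2 and the input yields no words, matching PHP's str_word_count(). A hypothetical C++ helper (not HHVM internals) illustrating the same empty-collection-instead-of-null contract, mirroring the inputs the new tests add; the word rules here are a simplification of PHP's (letters, plus embedded ' and -).

    #include <cassert>
    #include <cctype>
    #include <string>
    #include <vector>

    // Splits s into words. An input with no words yields an *empty* vector,
    // never a null sentinel the caller would have to special-case.
    static std::vector<std::string> SplitWords(const std::string& s)
    {
        std::vector<std::string> words;
        std::string cur;
        for (unsigned char ch : s) {
            if (std::isalpha(ch) || (!cur.empty() && (ch == '\'' || ch == '-'))) {
                cur.push_back(static_cast<char>(ch));
            } else if (!cur.empty()) {
                words.push_back(cur);
                cur.clear();
            }
        }
        if (!cur.empty())
            words.push_back(cur);
        return words;
    }

    int main()
    {
        assert(SplitWords("Two Ts and one F.").size() == 5);
        assert(SplitWords("1 2").empty());  // digits are not words
        assert(SplitWords("").empty());     // empty result, not null
        return 0;
    }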
mmm a / Source / CNTKv2LibraryDll / API / CNTKLibrary . h <nl> ppp b / Source / CNTKv2LibraryDll / API / CNTKLibrary . h <nl> namespace CNTK <nl> DictionaryIterator end ( ) const { return m_dictionaryData - > end ( ) ; } <nl> ConstDictionaryIterator cend ( ) const { return m_dictionaryData - > cend ( ) ; } <nl> <nl> - size_t Size ( ) { return m_dictionaryData - > size ( ) ; } <nl> + size_t Size ( ) const { return m_dictionaryData - > size ( ) ; } <nl> + <nl> + std : : unordered_set < std : : wstring > Keys ( ) <nl> + { <nl> + std : : unordered_set < std : : wstring > keys ; <nl> + for ( const auto & kv : * m_dictionaryData ) <nl> + keys . insert ( kv . first ) ; <nl> + return keys ; <nl> + } <nl> <nl> friend CNTK_API std : : istream & operator > > ( std : : istream & stream , Dictionary & us ) ; <nl> friend CNTK_API std : : ostream & operator < < ( std : : ostream & stream , const Dictionary & us ) ; <nl> namespace CNTK <nl> / / / <nl> / / / Create an instance of the random_sample operation on specified sampling weights input vector <nl> / / / <nl> - / / TODO : The initial random seed should be specifiable <nl> - CNTK_API FunctionPtr RandomSample ( const Variable & operand , size_t numSamples , bool allowDuplicates , const std : : wstring & name / * = L " " * / ) ; <nl> + CNTK_API FunctionPtr RandomSample ( const Variable & operand , size_t numSamples , bool allowDuplicates , unsigned long seed = SentinelValueForAutoSelectRandomSeed , const std : : wstring & name = L " " ) ; <nl> <nl> / / / <nl> / / / Create an instance of the random_sample_inclusion_frequency operation on specified sampling weights input vector <nl> / / / <nl> - / / TODO : The initial random seed should be specifiable <nl> - CNTK_API FunctionPtr RandomSampleInclusionFrequency ( const Variable & operand , size_t numSamples , bool allowDuplicates , const std : : wstring & name / * = L " " * / ) ; <nl> + CNTK_API FunctionPtr RandomSampleInclusionFrequency ( const Variable & operand , size_t numSamples , bool allowDuplicates , unsigned long seed = SentinelValueForAutoSelectRandomSeed , const std : : wstring & name = L " " ) ; <nl> <nl> / / / <nl> / / / Create an instance of the dropout operation on specified tensor input operand <nl> / / / <nl> - / / TODO : The initial random seed should be specifiable <nl> - CNTK_API FunctionPtr Dropout ( const Variable & operand , double dropoutRate , const std : : wstring & name = L " " ) ; <nl> + CNTK_API FunctionPtr Dropout ( const Variable & operand , double dropoutRate , unsigned long seed = SentinelValueForAutoSelectRandomSeed , const std : : wstring & name = L " " ) ; <nl> <nl> / / / <nl> / / / Create an instance of the reshape operation on specified tensor input operand <nl> namespace CNTK <nl> bool TrainLocalMinibatch ( const std : : unordered_map < Variable , ValuePtr > & arguments , std : : unordered_map < Variable , ValuePtr > & outputsToFetch , bool sweepEnd , const DeviceDescriptor & computeDevice ) ; <nl> bool TrainDistributedMinibatch ( const std : : unordered_map < Variable , ValuePtr > & arguments , std : : unordered_map < Variable , ValuePtr > & outputsToFetch , bool sweepEnd , const DeviceDescriptor & computeDevice ) ; <nl> <nl> - void Save ( const std : : wstring & modelFilePath , const std : : vector < DictionaryValue > & learnerState , const Dictionary & externalState ) ; <nl> + void Save ( const std : : wstring & modelFilePath , const std : : vector < DictionaryValue > & learnerState , <nl> + const Dictionary & externalState , const Dictionary & distributedState = { 
} ) ; <nl> <nl> void UpdateTrainingProgress ( size_t numSamples , const ValuePtr & loss , const ValuePtr & evalCriterion , const DeviceDescriptor & computeDevice ) ; <nl> void AddProgressWriters ( const std : : vector < ProgressWriterPtr > & progressWriters ) ; <nl> mmm a / Source / CNTKv2LibraryDll / API / CNTKLibraryInternals . h <nl> ppp b / Source / CNTKv2LibraryDll / API / CNTKLibraryInternals . h <nl> namespace CNTK <nl> <nl> CNTK_API size_t NewUniqueId ( ) ; <nl> <nl> + CNTK_API size_t GenerateRandomSeed ( ) ; <nl> + <nl> / / Internal hooks for testing and higher - level bindings <nl> / / These should not be directly called by C + + API users <nl> CNTK_API void EnableReversingTensorShapesInErrorMessages ( ) ; <nl> mmm a / Source / CNTKv2LibraryDll / BlockFunction . h <nl> ppp b / Source / CNTKv2LibraryDll / BlockFunction . h <nl> <nl> # include " stdafx . h " <nl> # include " CNTKLibrary . h " <nl> # include " PrimitiveFunction . h " <nl> + # include " Utils . h " <nl> + # include " Variable . h " <nl> <nl> namespace CNTK <nl> { <nl> mmm a / Source / CNTKv2LibraryDll / CNTKv2LibraryDll . vcxproj <nl> ppp b / Source / CNTKv2LibraryDll / CNTKv2LibraryDll . vcxproj <nl> <nl> < Warning Condition = " ! $ ( HasProtobuf ) " Text = " CNTKv2LibraryDll requires Protocol Buffers to build . Please see https : / / github . com / Microsoft / CNTK / wiki / Setup - CNTK - on - Windows # protobuf for installation instructions . " / > <nl> < Error Condition = " ! $ ( HasBoost ) " Text = " CNTKv2LibraryDll requires the Boost library to build . Please see https : / / github . com / Microsoft / CNTK / wiki / Setup - CNTK - on - Windows # boost for installation instructions . " / > <nl> < / Target > <nl> - < / Project > <nl> + < / Project > <nl> \ No newline at end of file <nl> mmm a / Source / CNTKv2LibraryDll / Common . cpp <nl> ppp b / Source / CNTKv2LibraryDll / Common . cpp <nl> namespace CNTK <nl> { <nl> namespace Internal <nl> { <nl> - static std : : atomic < unsigned long long > s_nextUniqueId ( 0 ) ; <nl> + static std : : atomic_ullong s_nextUniqueId = ATOMIC_VAR_INIT ( 0 ) ; <nl> size_t NewUniqueId ( ) <nl> { <nl> return s_nextUniqueId + + ; <nl> } <nl> <nl> + static std : : atomic_ullong s_currentRandomSeed = ATOMIC_VAR_INIT ( 0 ) ; <nl> + <nl> + / / This is used to generate a default seed for stateful nodes ( dropout , and both <nl> + / / flavors of random sample ) . As a result , in distributed environment , each worker <nl> + / / ends up having a different seed . <nl> + <nl> + size_t GenerateRandomSeed ( ) <nl> + { <nl> + static size_t numWorkers = 1 , rank = 0 ; <nl> + static bool initialized = false ; <nl> + if ( MPIWrapper : : GetTotalNumberOfMPINodes ( ) ! = 0 & & ! initialized ) <nl> + { <nl> + DistributedCommunicatorPtr communicator = MPICommunicator ( ) ; <nl> + numWorkers = communicator - > Workers ( ) . size ( ) ; <nl> + rank = communicator - > CurrentWorker ( ) . m_globalRank ; <nl> + <nl> + if ( numWorkers < 1 ) <nl> + numWorkers = 1 ; <nl> + } <nl> + <nl> + initialized = true ; <nl> + return ( numWorkers * s_currentRandomSeed + + ) + rank ; <nl> + } <nl> + <nl> std : : atomic < bool > s_reverseTensorShapesInErrorMessages ( false ) ; <nl> void EnableReversingTensorShapesInErrorMessages ( ) <nl> { <nl> mmm a / Source / CNTKv2LibraryDll / CompositeFunction . cpp <nl> ppp b / Source / CNTKv2LibraryDll / CompositeFunction . 
cpp <nl> namespace CNTK <nl> return dict ; <nl> } <nl> <nl> + / / Copy the internal state from the network into the function graph , <nl> + / / specifically from RngUser nodes into the attributes dictionaries of <nl> + / / the corresponding stateful primitive functions . <nl> + void CompositeFunction : : UpdateInternalState ( ) const <nl> + { <nl> + if ( ! m_computationNetwork ) <nl> + return ; <nl> + <nl> + for ( auto & function : m_allPrimitiveFunctions ) <nl> + { <nl> + auto primitiveFunction = dynamic_cast < PrimitiveFunction * > ( function . get ( ) ) ; <nl> + if ( ! primitiveFunction - > IsStateful ( ) ) <nl> + continue ; <nl> + <nl> + / / TODO : same for BatchNorm <nl> + <nl> + auto & outputs = primitiveFunction - > RawOutputs ( ) ; <nl> + if ( outputs . size ( ) ! = 1 ) <nl> + LogicError ( " Function ' % S ' UpdateInternalState : a stateful primitive function must have a single output . " , AsString ( ) . c_str ( ) ) ; <nl> + <nl> + const auto & rng = m_variableToNodeMap . at ( outputs [ 0 ] ) - > As < RngUser > ( ) ; <nl> + <nl> + Dictionary state ; <nl> + state [ PrimitiveFunction : : AttributeNameRngSeed ] = static_cast < size_t > ( rng - > GetRngSeed ( ) ) ; <nl> + state [ PrimitiveFunction : : AttributeNameRngOffset ] = static_cast < size_t > ( rng - > GetRngOffset ( ) ) ; <nl> + primitiveFunction - > SetState ( state ) ; <nl> + } <nl> + } <nl> + <nl> + / / Generate a dictionary representing the internal ( local ) state of the function graph . <nl> + Dictionary CompositeFunction : : GetInternalState ( ) const <nl> + { <nl> + UpdateInternalState ( ) ; <nl> + <nl> + Dictionary stateDictionary ; <nl> + for ( auto & function : m_allPrimitiveFunctions ) <nl> + { <nl> + auto primitiveFunction = dynamic_cast < const PrimitiveFunction * > ( function . get ( ) ) ; <nl> + if ( ! primitiveFunction - > IsStateful ( ) ) <nl> + continue ; <nl> + <nl> + / / TODO : same for BatchNorm <nl> + <nl> + stateDictionary [ primitiveFunction - > Uid ( ) ] = primitiveFunction - > GetState ( ) ; <nl> + } <nl> + return stateDictionary ; <nl> + } <nl> + <nl> / * virtual * / Dictionary CompositeFunction : : Serialize ( ) const <nl> { <nl> + UpdateInternalState ( ) ; <nl> + <nl> Dictionary dict = SerializeBlockComposite ( ) ; <nl> <nl> / / Find cycles in the graph and " break " them by inserting placeholders . <nl> namespace CNTK <nl> } <nl> <nl> dict [ functionsKey ] = std : : move ( functionDictionaries ) ; <nl> - <nl> - / / Now , collect and store the internal state for all non - pure ( stateful ) functions in the graph <nl> - / / ( with the corresponding nodes that subclass from RngUser : Dropout , RandomSample , etc ) . <nl> - Dictionary stateDictionary ; <nl> - for ( const auto & kv : m_variableToNodeMap ) <nl> - { <nl> - if ( kv . second - > Is < RngUser > ( ) & & kv . first . IsOutput ( ) ) <nl> - { <nl> - / / The RNG state should be associated with the actual function that the computation node <nl> - / / corresponds to , and not the block primitives that wrap the actual function <nl> - auto ownerFunction = kv . first . Owner ( ) . get ( ) ; <nl> - if ( ! ownerFunction - > IsBlock ( ) ) <nl> - { <nl> - auto rng = kv . 
second - > As < RngUser > ( ) ; <nl> - Dictionary state ; <nl> - state [ rngSeedKey ] = static_cast < size_t > ( rng - > GetRngSeed ( ) ) ; <nl> - state [ rngOffsetKey ] = static_cast < size_t > ( rng - > GetRngOffset ( ) ) ; <nl> - stateDictionary [ ownerFunction - > Uid ( ) ] = state ; <nl> - } <nl> - } <nl> - } <nl> - <nl> - dict [ stateKey ] = std : : move ( stateDictionary ) ; <nl> <nl> return dict ; <nl> } <nl> namespace CNTK <nl> uidToInputMap [ inputVar . Uid ( ) ] = inputVar ; <nl> } <nl> <nl> - Dictionary stateDictionary ; <nl> - if ( dict . Contains ( stateKey ) ) <nl> - stateDictionary = dict [ stateKey ] . Value < Dictionary > ( ) ; <nl> - <nl> const auto & functions = dict [ functionsKey ] . Value < vector < DictionaryValue > > ( ) ; <nl> <nl> std : : unordered_map < Variable , Variable > allPlaceholderReplacements ; <nl> namespace CNTK <nl> if ( opType = = PrimitiveOpType : : Combine ) <nl> continue ; <nl> <nl> - if ( primitiveFunction - > IsStateful ( ) ) <nl> - { <nl> - if ( stateDictionary . Contains ( primitiveFunction - > Uid ( ) ) ) <nl> - { <nl> - auto state = stateDictionary [ primitiveFunction - > Uid ( ) ] . Value < Dictionary > ( ) ; <nl> - auto seed = state [ rngSeedKey ] . Value < size_t > ( ) ; <nl> - auto offset = state [ rngOffsetKey ] . Value < size_t > ( ) ; <nl> - primitiveFunction - > m_attributes [ PrimitiveFunction : : AttributeNameRngSeed ] = seed ; <nl> - primitiveFunction - > m_attributes [ PrimitiveFunction : : AttributeNameRngOffset ] = offset ; <nl> - } <nl> - else if ( Internal : : GetComputationNetworkTraceLevel ( ) > 0 ) <nl> - { <nl> - / / TODO : all logging functionality should be refactored to live in a logging utility class . <nl> - fprintf ( stderr , " WARNING : no state information found for the stateful function ( % ls ) " <nl> - " when deserializing from a dictionary ( version = % zu ) . " <nl> - " Reproducibility not guaranteed . " , primitiveFunction - > OpName ( ) . c_str ( ) , version ) ; <nl> - } <nl> - } <nl> - <nl> for ( const auto & output : root - > RawOutputs ( ) ) <nl> { <nl> const auto & it = uidToInputMap . find ( output . Uid ( ) ) ; <nl> namespace CNTK <nl> } <nl> } <nl> <nl> + <nl> + / / starting with the serialization version = 3 , the state is preserved inside the attribute dictionaries of the <nl> + / / corresponding primitive functions . Earlier versions have a dedicated key - value pair in the composite function dict . <nl> + if ( version < 3 ) <nl> + RestoreStatefulFunctions ( version , dict , allPrimitiveFunctions ) ; <nl> + <nl> return DeserializeBlockComposite ( dict , allPrimitiveFunctions , allPlaceholderReplacements , device ) ; <nl> } <nl> <nl> - void CompositeFunction : : CopyState ( const CompositeFunction & source ) <nl> + void CompositeFunction : : RestoreStatefulFunctions ( size_t version , const Dictionary & dict , std : : unordered_set < FunctionPtr > functions ) <nl> { <nl> - / / Create a map with all non - pure ( stateful ) functions in the function graph . <nl> - auto collectStatefulFunctions = [ ] ( const std : : unordered_set < FunctionPtr > & allPrimitiveFunctions ) - > std : : map < std : : wstring , FunctionPtr > { <nl> - std : : map < std : : wstring , FunctionPtr > functionMap ; <nl> - for ( auto funcPtr : allPrimitiveFunctions ) <nl> + Dictionary stateDictionary ; <nl> + if ( dict . Contains ( stateKey ) ) <nl> + stateDictionary = dict [ stateKey ] . 
Value < Dictionary > ( ) ; <nl> + <nl> + for ( auto & function : functions ) <nl> + { <nl> + auto primitiveFunction = dynamic_cast < PrimitiveFunction * > ( function . get ( ) ) ; <nl> + if ( ! primitiveFunction - > IsStateful ( ) ) <nl> + continue ; <nl> + <nl> + if ( stateDictionary . Contains ( primitiveFunction - > Uid ( ) ) ) <nl> + { <nl> + auto state = stateDictionary [ primitiveFunction - > Uid ( ) ] . Value < Dictionary > ( ) ; <nl> + / / Add key - value pairs expected by the SetState method to the state dictionary . <nl> + state [ PrimitiveFunction : : AttributeNameRngSeed ] = state [ rngSeedKey ] . Value < size_t > ( ) ; <nl> + state [ PrimitiveFunction : : AttributeNameRngOffset ] = state [ rngOffsetKey ] . Value < size_t > ( ) ; <nl> + primitiveFunction - > SetState ( state ) ; <nl> + } <nl> + else <nl> + { <nl> + if ( Internal : : GetComputationNetworkTraceLevel ( ) > 0 ) { <nl> + / / TODO : all logging functionality should be refactored to live in a logging utility class . <nl> + fprintf ( stderr , " WARNING : no state information found for the stateful function ( % ls ) " <nl> + " when deserializing from a dictionary ( version = % zu ) . " <nl> + " Reproducibility not guaranteed . " , primitiveFunction - > OpName ( ) . c_str ( ) , version ) ; <nl> + } <nl> + <nl> + / / Create state from scratch , so that function attributes contain all the required key - value pairs . <nl> + Dictionary state ; <nl> + state [ PrimitiveFunction : : AttributeNameRngSeed ] = Internal : : GenerateRandomSeed ( ) ; <nl> + state [ PrimitiveFunction : : AttributeNameRngOffset ] = 0 ; <nl> + primitiveFunction - > SetState ( state ) ; <nl> + } <nl> + } <nl> + } <nl> + <nl> + void CompositeFunction : : CopyState ( const CompositeFunction & source ) <nl> + { <nl> + / / Collect a vector of stateful function uids using a pre - order traversal of a function graph . <nl> + auto collectStatefulFunctionUIDs = [ ] ( const Function & function ) - > vector < wstring > { <nl> + vector < wstring > uids ; <nl> + PreorderTraverseFunctions ( function . RootFunction ( ) , [ & uids ] ( const FunctionPtr & funcPtr ) { <nl> auto primitiveFunction = dynamic_cast < const PrimitiveFunction * > ( funcPtr . get ( ) ) ; <nl> - if ( primitiveFunction - > IsStateful ( ) ) <nl> + if ( primitiveFunction - > IsStateful ( ) ) <nl> { <nl> - functionMap [ primitiveFunction - > Uid ( ) ] = funcPtr ; <nl> + uids . push_back ( funcPtr - > Uid ( ) ) ; <nl> } <nl> - } <nl> - return functionMap ; <nl> + } , true ) ; <nl> + <nl> + return uids ; <nl> } ; <nl> <nl> - std : : map < std : : wstring , FunctionPtr > statefulFunctionsTo = collectStatefulFunctions ( m_allPrimitiveFunctions ) ; <nl> - std : : map < std : : wstring , FunctionPtr > statefulFunctionsFrom = collectStatefulFunctions ( source . m_allPrimitiveFunctions ) ; <nl> <nl> - assert ( statefulFunctionsTo . size ( ) = = statefulFunctionsFrom . size ( ) ) ; <nl> - if ( statefulFunctionsFrom . size ( ) = = 0 ) <nl> - { <nl> - return ; <nl> - } <nl> + auto theirUIDs = collectStatefulFunctionUIDs ( source ) ; <nl> + auto ourUIDs = collectStatefulFunctionUIDs ( * this ) ; <nl> <nl> + if ( theirUIDs . size ( ) ! = ourUIDs . size ( ) ) <nl> + CNTK : : LogicError ( " Cannot copy internal state , the source and the destination contain different number of stateful functions . " ) ; <nl> + <nl> + auto state = source . GetInternalState ( ) ; <nl> <nl> - / / Copy state captured in the attributes dictionaries .
<nl> - for ( const auto & kv : statefulFunctionsFrom ) <nl> + if ( theirUIDs = = ourUIDs ) <nl> { <nl> - statefulFunctionsTo [ kv . first ] - > m_attributes = kv . second - > Attributes ( ) ; <nl> + / / uids are identical , no need to remap . <nl> + SetInternalState ( state ) ; <nl> + return ; <nl> } <nl> + <nl> + / / build a map of source function to the destination ( this ) function UIDs . <nl> + map < wstring , wstring > uidMap ; <nl> + for ( auto i = 0 ; i < theirUIDs . size ( ) ; i + + ) <nl> + uidMap [ theirUIDs [ i ] ] = ourUIDs [ i ] ; <nl> + <nl> + Dictionary remappedState ; <nl> + for ( auto & kv : state ) <nl> + remappedState [ uidMap [ kv . first ] ] = kv . second ; <nl> <nl> - UpdateInternalNetworkState ( ) ; <nl> + SetInternalState ( remappedState ) ; <nl> } <nl> <nl> - void CompositeFunction : : UpdateInternalNetworkState ( ) <nl> + void CompositeFunction : : SetInternalState ( const Dictionary & state ) <nl> { <nl> - if ( ! m_computationNetwork ) <nl> - { <nl> + if ( state . Size ( ) = = 0 ) <nl> return ; <nl> - } <nl> <nl> for ( const auto & function : m_allPrimitiveFunctions ) <nl> { <nl> - auto primitiveFunction = dynamic_cast < const PrimitiveFunction * > ( function . get ( ) ) ; <nl> - if ( primitiveFunction - > IsStateful ( ) ) <nl> + auto primitiveFunction = dynamic_cast < PrimitiveFunction * > ( function . get ( ) ) ; <nl> + if ( ! primitiveFunction - > IsStateful ( ) ) <nl> + continue ; <nl> + <nl> + auto functionState = state [ primitiveFunction - > Uid ( ) ] . Value < Dictionary > ( ) ; <nl> + <nl> + primitiveFunction - > SetState ( functionState ) ; <nl> + <nl> + if ( ! m_computationNetwork ) <nl> + continue ; <nl> + <nl> + auto seed = functionState [ PrimitiveFunction : : AttributeNameRngSeed ] . Value < size_t > ( ) ; <nl> + auto offset = functionState [ PrimitiveFunction : : AttributeNameRngOffset ] . Value < size_t > ( ) ; <nl> + <nl> + / / copy the state directly into the network <nl> + for ( const auto & output : function - > RawOutputs ( ) ) <nl> {
Value < size_t > ( ) ; <nl> + computationNodePtr - > As < RngUser > ( ) - > SetRngState ( seed , offset ) ; <nl> } <nl> } <nl> else <nl> mmm a / Source / CNTKv2LibraryDll / CompositeFunction . h <nl> ppp b / Source / CNTKv2LibraryDll / CompositeFunction . h <nl> namespace CNTK <nl> Dictionary SerializeBlockComposite ( ) const ; <nl> <nl> virtual Dictionary Serialize ( ) const override ; <nl> - <nl> + <nl> virtual size_t CurrentVersion ( ) const override { return s_serializationVersion ; } <nl> <nl> static FunctionPtr DeserializeBlockComposite ( const Dictionary & dict , <nl> namespace CNTK <nl> return inputs ; <nl> } <nl> <nl> - / / If the network is already created , copy internal state over from the functions in the graph into the underlying network . <nl> - void UpdateInternalNetworkState ( ) ; <nl> <nl> - / / Copy state info from source function graph into ' this ' function graph . <nl> + / / Copy the internal state from the network into the function graph . <nl> + void UpdateInternalState ( ) const ; <nl> + <nl> + / / Generate a dictionary representing the internal ( local ) state of the function graph . <nl> + Dictionary GetInternalState ( ) const ; <nl> + <nl> + / / Update the internal state using the provided dictionary . <nl> + / / If the network is already created , directly update its state . Otherwise , copy the state from the <nl> + / / dictionary into the function graph . <nl> + void SetInternalState ( const Dictionary & state ) ; <nl> + <nl> + / / Copy state info from source function graph into ' this ' function graph . <nl> + / / Both graphs must be equivalent . <nl> void CopyState ( const CompositeFunction & source ) ; <nl> <nl> + / / This function is only needed for backwards compatibility to support deserializing composite funcitions that <nl> + / / stored the internal state inside a dedicated value in the dictionary . <nl> + static void RestoreStatefulFunctions ( size_t version , const Dictionary & dict , std : : unordered_set < FunctionPtr > PrimitiveFunctions ) ; <nl> + <nl> static Variable GetMappingForNoOpOutput ( const Variable & variable , bool recursive = false ) ; <nl> static Variable GetMappingVariable ( const Variable & variable , bool recursive = false ) ; <nl> <nl> namespace CNTK <nl> / / Version history : <nl> / / 1 - - initial version . <nl> / / 2 - - add support for stateful functions ( with corresponding nodes inheriting from RngUser ) . <nl> - static const size_t s_serializationVersion = 2 ; <nl> + / / 3 - - store internal function state directly in the attributes dictionary . <nl> + static const size_t s_serializationVersion = 3 ; <nl> } ; <nl> } <nl> mmm a / Source / CNTKv2LibraryDll / Function . cpp <nl> ppp b / Source / CNTKv2LibraryDll / Function . cpp <nl> <nl> # include " PrimitiveFunction . h " <nl> # include " CompositeFunction . h " <nl> # include " BlockFunction . h " <nl> + # include " Utils . h " <nl> <nl> using namespace Microsoft : : MSR : : CNTK ; <nl> <nl> namespace CNTK <nl> LogicError ( " Slice : Invalid axis argument provided . Slice along the dynamic batch axis is currently unsupported . To slice a sequence along its ordered dynamic axis use Sequence : : Slice . 
" ) ; <nl> } <nl> <nl> - FunctionPtr RandomSample ( const Variable & operand , size_t numSamples , bool allowDuplicates , const std : : wstring & name ) <nl> + FunctionPtr RandomSample ( const Variable & operand , size_t numSamples , bool allowDuplicates , unsigned long seed , const std : : wstring & name ) <nl> { <nl> auto additionalProperties = Dictionary ( ) ; <nl> additionalProperties [ PrimitiveFunction : : AttributeNameNumSamples ] = numSamples ; <nl> additionalProperties [ PrimitiveFunction : : AttributeNameAllowDuplicates ] = allowDuplicates ; <nl> <nl> + if ( seed = = SentinelValueForAutoSelectRandomSeed ) <nl> + seed = Internal : : GenerateRandomSeed ( ) ; <nl> + <nl> + additionalProperties [ PrimitiveFunction : : AttributeNameRngSeed ] = size_t ( seed ) ; <nl> + additionalProperties [ PrimitiveFunction : : AttributeNameRngOffset ] = size_t ( 0 ) ; <nl> + <nl> return UnaryOp ( PrimitiveOpType : : RandomSample , operand , std : : move ( additionalProperties ) , name ) ; <nl> } <nl> <nl> - FunctionPtr RandomSampleInclusionFrequency ( const Variable & operand , size_t numSamples , bool allowDuplicates , const std : : wstring & name ) <nl> + FunctionPtr RandomSampleInclusionFrequency ( const Variable & operand , size_t numSamples , bool allowDuplicates , unsigned long seed , const std : : wstring & name ) <nl> { <nl> auto additionalProperties = Dictionary ( ) ; <nl> additionalProperties [ PrimitiveFunction : : AttributeNameNumSamples ] = numSamples ; <nl> additionalProperties [ PrimitiveFunction : : AttributeNameAllowDuplicates ] = allowDuplicates ; <nl> <nl> + if ( seed = = SentinelValueForAutoSelectRandomSeed ) <nl> + seed = Internal : : GenerateRandomSeed ( ) ; <nl> + <nl> + additionalProperties [ PrimitiveFunction : : AttributeNameRngSeed ] = size_t ( seed ) ; <nl> + additionalProperties [ PrimitiveFunction : : AttributeNameRngOffset ] = size_t ( 0 ) ; <nl> + <nl> return UnaryOp ( PrimitiveOpType : : RandomSampleInclusionFrequency , operand , std : : move ( additionalProperties ) , name ) ; <nl> } <nl> <nl> - FunctionPtr Dropout ( const Variable & operand , double dropoutRate , const std : : wstring & name ) <nl> + FunctionPtr Dropout ( const Variable & operand , double dropoutRate , unsigned long seed , const std : : wstring & name ) <nl> { <nl> auto additionalProperties = Dictionary ( ) ; <nl> additionalProperties [ PrimitiveFunction : : AttributeNameDropoutRate ] = dropoutRate ; <nl> <nl> + if ( seed = = SentinelValueForAutoSelectRandomSeed ) <nl> + seed = Internal : : GenerateRandomSeed ( ) ; <nl> + <nl> + additionalProperties [ PrimitiveFunction : : AttributeNameRngSeed ] = size_t ( seed ) ; <nl> + additionalProperties [ PrimitiveFunction : : AttributeNameRngOffset ] = size_t ( 0 ) ; <nl> + <nl> return UnaryOp ( PrimitiveOpType : : Dropout , operand , std : : move ( additionalProperties ) , name ) ; <nl> } <nl> <nl> mmm a / Source / CNTKv2LibraryDll / PrimitiveFunction . cpp <nl> ppp b / Source / CNTKv2LibraryDll / PrimitiveFunction . cpp <nl> <nl> # include " BlockFunction . h " <nl> # include " CompositeFunction . h " <nl> # include " SpecialPurposeNodes . h " <nl> + # include " ConvolveGeometry . h " <nl> + # include " ConvolutionalNodes . h " <nl> + # include " Variable . 
h " <nl> <nl> using namespace Microsoft : : MSR : : CNTK ; <nl> <nl> namespace CNTK <nl> <nl> / / Names of the various attributes of CNTK primitive Functions <nl> / * static * / const std : : wstring PrimitiveFunction : : AttributeNameAxis = L " axis " ; <nl> - / * static * / const std : : wstring PrimitiveFunction : : AttributeNameAxisVec = L " axisVec " ; <nl> + / * static * / const std : : wstring PrimitiveFunction : : AttributeNameAxisVec = L " axisVec " ; <nl> / * static * / const std : : wstring PrimitiveFunction : : AttributeNameAxis1 = L " axis1 " ; <nl> / * static * / const std : : wstring PrimitiveFunction : : AttributeNameAxis2 = L " axis2 " ; <nl> / * static * / const std : : wstring PrimitiveFunction : : AttributeNameAllowDuplicates = L " allowDuplicates " ; <nl> namespace CNTK <nl> return std : : shared_ptr < PrimitiveFunction > ( new PrimitiveFunction ( op , inputs , std : : move ( attributes ) , name , uid ) , <nl> [ ] ( PrimitiveFunction * ptr ) { delete ptr ; } ) ; <nl> } <nl> + <nl> + static const vector < wstring > s_stateAttributes = { PrimitiveFunction : : AttributeNameRngSeed , PrimitiveFunction : : AttributeNameRngOffset } ; <nl> + <nl> + Dictionary PrimitiveFunction : : GetState ( ) const <nl> + { <nl> + if ( ! IsStateful ( ) ) <nl> + LogicError ( " Function ' % S ' is not stateful . " , AsString ( ) . c_str ( ) ) ; <nl> + <nl> + Dictionary state ; <nl> + for ( auto & key : s_stateAttributes ) <nl> + { <nl> + state [ key ] = m_attributes [ key ] ; <nl> + } <nl> + <nl> + return state ; <nl> + } <nl> + <nl> + void PrimitiveFunction : : SetState ( const Dictionary & state ) <nl> + { <nl> + if ( ! IsStateful ( ) ) <nl> + LogicError ( " Function ' % S ' is not stateful . " , AsString ( ) . c_str ( ) ) ; <nl> + <nl> + for ( auto & key : s_stateAttributes ) <nl> + { <nl> + m_attributes [ key ] = state [ key ] ; <nl> + } <nl> + } <nl> + <nl> + / * static * / void PrimitiveFunction : : FixNDShape ( size_t filterRank , size_t inputRank , NDShape & shape , size_t deflt , const NDShape & from / * = NDShape ( ) * / ) <nl> + { <nl> + auto dims = shape . Dimensions ( ) ; <nl> + Microsoft : : MSR : : CNTK : : ConvolutionNodeBase < float > : : FixVectorShape ( filterRank , inputRank , dims , deflt , from . Dimensions ( ) ) ; <nl> + shape = NDShape ( dims ) ; <nl> + } <nl> + <nl> + NDShape PrimitiveFunction : : ConvolutionOpOutputShape ( PrimitiveOpType op , const NDShape & operandShape , NDShape & kernelShape , NDShape & outputMapCount , NDShape & strides , <nl> + std : : vector < bool > & sharing , std : : vector < bool > & autoPad , NDShape & lowerPad , NDShape & upperPad , <nl> + bool transpose , bool inferDimensions , bool ceilOutputDim / * = false * / ) const <nl> + { <nl> + if ( inferDimensions ) <nl> + { <nl> + size_t inputRank = operandShape . Rank ( ) ; <nl> + <nl> + / / Unknown kernel shape valid only for pooling , however , the shape should have expanded before <nl> + / / this call . <nl> + if ( kernelShape = = NDShape : : Unknown ) <nl> + RuntimeError ( " Convolution : Kernel shape can ' t be Unknown . " ) ; <nl> + <nl> + / / infer reduction dimensions if not given <nl> + / / If kernel has a lower rank than the input then the remaining dimensions are to be reduced over . <nl> + size_t filterRank = kernelShape . 
Rank ( ) ; <nl> + <nl> + / / If the trailing axis dimensionality of the kernel shape is NDShape : : InferredDimension , we reduce over it by <nl> + / / picking the corresponding operand shape dimensionality <nl> + / / This is done by shrinking the filter rank and let the dimensions be inferred from the operand ' s shape <nl> + / / TODO : Should we do this for all of the axes in kernelShape that have a dimensionailty of NDShape : : InferredDimension ? <nl> + if ( kernelShape [ filterRank - 1 ] = = NDShape : : InferredDimension ) <nl> + { <nl> + filterRank - - ; <nl> + kernelShape = kernelShape . SubShape ( 0 , filterRank ) ; <nl> + } <nl> + <nl> + NDShape fromShape ; <nl> + if ( op = = PrimitiveOpType : : Convolution ) <nl> + fromShape = operandShape ; <nl> + <nl> + size_t fillRank = ( ! transpose ) ? filterRank : filterRank - 1 ; <nl> + FixNDShape ( fillRank , inputRank , kernelShape , 1 , fromShape ) ; / / convolve over red dim ; pool over 1 <nl> + FixNDShape ( fillRank , inputRank , strides , 1 , fromShape ) ; / / stride for reduction dims is red dim or 1 <nl> + FixNDShape ( fillRank , inputRank , lowerPad , 0 ) ; <nl> + FixNDShape ( fillRank , inputRank , upperPad , 0 ) ; <nl> + Microsoft : : MSR : : CNTK : : ConvolutionNodeBase < float > : : FixVectorShape ( fillRank , inputRank , sharing , true ) ; <nl> + Microsoft : : MSR : : CNTK : : ConvolutionNodeBase < float > : : FixVectorShape ( fillRank , inputRank , autoPad , false ) ; / / no padding for reduction dims <nl> + } <nl> + <nl> + decltype ( & Microsoft : : MSR : : CNTK : : ConvolveGeometry : : ComputeOutputShape ) computeOutputShapeFunc ; <nl> + if ( ! transpose ) <nl> + computeOutputShapeFunc = & Microsoft : : MSR : : CNTK : : ConvolveGeometry : : ComputeOutputShape ; <nl> + else <nl> + computeOutputShapeFunc = & Microsoft : : MSR : : CNTK : : ConvolveGeometry : : ComputeInputShape ; <nl> + <nl> + return AsNDShape ( computeOutputShapeFunc ( AsTensorShape ( operandShape ) , AsTensorShape ( kernelShape ) , AsTensorShape ( outputMapCount ) , AsTensorShape ( strides ) , sharing , autoPad , AsTensorShape ( lowerPad ) , AsTensorShape ( upperPad ) , ceilOutputDim ) ) ; <nl> + } <nl> + <nl> + / * static * / bool PrimitiveFunction : : UpdateOperandShapes ( std : : vector < std : : pair < Variable , NDShape > > & newOperandShapes ) <nl> + { <nl> + bool anyParameterOperandDimsInferred = false ; <nl> + auto updateOperandShapeFunc = [ ] ( Variable & operand , const NDShape & newOperandShape ) { <nl> + if ( ( operand . IsParameter ( ) | | operand . IsConstant ( ) ) & & ( operand . Shape ( ) ! = newOperandShape ) ) <nl> + { <nl> + operand . m_dataFields - > m_shape = newOperandShape ; <nl> + return true ; <nl> + } <nl> + <nl> + return false ; <nl> + } ; <nl> + <nl> + for ( auto & newOperandShapePair : newOperandShapes ) <nl> + anyParameterOperandDimsInferred = updateOperandShapeFunc ( newOperandShapePair . first , newOperandShapePair . second ) | | anyParameterOperandDimsInferred ; <nl> + <nl> + return anyParameterOperandDimsInferred ; <nl> + } <nl> + <nl> + NDShape PrimitiveFunction : : NaryElementwiseOpOutputShape ( PrimitiveOpType op , std : : vector < Variable > & operands , bool broadcastAllowed , bool inferInputDimensions ) const <nl> + { <nl> + assert ( operands . size ( ) > 1 ) ; <nl> + <nl> + / / TODO : Is this logic of transitively constructing the output shape from the operands correct ? <nl> + Variable dummyOutputVariable = PlaceholderVariable ( NDShape ( ) ) ; <nl> + for ( auto & operand : operands ) <nl> + dummyOutputVariable . 
m_dataFields - > m_shape = BinaryElementwiseOpOutputShape ( op , dummyOutputVariable , operand , broadcastAllowed , inferInputDimensions ) ; <nl> + <nl> + return dummyOutputVariable . Shape ( ) ; <nl> + } <nl> } <nl> mmm a / Source / CNTKv2LibraryDll / PrimitiveFunction . h <nl> ppp b / Source / CNTKv2LibraryDll / PrimitiveFunction . h <nl> <nl> # include " stdafx . h " <nl> # include " CNTKLibrary . h " <nl> # include " PrimitiveOpType . h " <nl> - # include " Utils . h " <nl> - # include " ConvolveGeometry . h " <nl> - # include " ConvolutionalNodes . h " <nl> - # include " Variable . h " <nl> <nl> namespace std <nl> { <nl> namespace CNTK <nl> ( OpType ( ) = = PrimitiveOpType : : RandomSampleInclusionFrequency ) ; <nl> } <nl> <nl> + Dictionary GetState ( ) const ; <nl> + <nl> + void SetState ( const Dictionary & state ) ; <nl> + <nl> private : <nl> <nl> / / The following helper functions are used to determine the output shape for different <nl> namespace CNTK <nl> } <nl> <nl> / / Returns a boolean indicating if any operand shape was updated <nl> - static bool UpdateOperandShapes ( std : : vector < std : : pair < Variable , NDShape > > & newOperandShapes ) <nl> - { <nl> - bool anyParameterOperandDimsInferred = false ; <nl> - auto updateOperandShapeFunc = [ ] ( Variable & operand , const NDShape & newOperandShape ) { <nl> - if ( ( operand . IsParameter ( ) | | operand . IsConstant ( ) ) & & ( operand . Shape ( ) ! = newOperandShape ) ) <nl> - { <nl> - operand . m_dataFields - > m_shape = newOperandShape ; <nl> - return true ; <nl> - } <nl> - <nl> - return false ; <nl> - } ; <nl> - <nl> - for ( auto & newOperandShapePair : newOperandShapes ) <nl> - anyParameterOperandDimsInferred = updateOperandShapeFunc ( newOperandShapePair . first , newOperandShapePair . second ) | | anyParameterOperandDimsInferred ; <nl> - <nl> - return anyParameterOperandDimsInferred ; <nl> - } <nl> + static bool UpdateOperandShapes ( std : : vector < std : : pair < Variable , NDShape > > & newOperandShapes ) ; <nl> <nl> / / Returns a pair comprising of the output shape and boolean indicating if any input operand shape was modified <nl> / * static * / NDShape BinaryElementwiseOpOutputShape ( PrimitiveOpType op , Variable & leftOperand , Variable & rightOperand , bool broadcastAllowed , bool inferInputDimensions ) const <nl> namespace CNTK <nl> } <nl> } <nl> <nl> + <nl> + UNUSED ( broadcastAllowed ) ; <nl> + / / BUGBUG : if ( broadcastAllowed ) is missing here ? <nl> + <nl> / / Broadcast in remaining axes <nl> for ( size_t i = shapeWithSmallerNumAxes . Rank ( ) ; i < numOutputAxes ; + + i ) <nl> outputDims [ i ] = shapeWithLargerNumAxes [ i ] ; <nl> namespace CNTK <nl> return NDShape ( std : : move ( outputDims ) ) ; <nl> } <nl> <nl> - / * static * / NDShape NaryElementwiseOpOutputShape ( PrimitiveOpType op , std : : vector < Variable > & operands , bool broadcastAllowed , bool inferInputDimensions ) const <nl> - { <nl> - assert ( operands . size ( ) > 1 ) ; <nl> - <nl> - / / TODO : Is this logic of transitively constructing the output shape from the operands correct ? <nl> - Variable dummyOutputVariable = PlaceholderVariable ( NDShape ( ) ) ; <nl> - for ( auto & operand : operands ) <nl> - dummyOutputVariable . m_dataFields - > m_shape = BinaryElementwiseOpOutputShape ( op , dummyOutputVariable , operand , broadcastAllowed , inferInputDimensions ) ; <nl> - <nl> - return dummyOutputVariable . 
Shape ( ) ; <nl> - } <nl> + / * static * / NDShape NaryElementwiseOpOutputShape ( PrimitiveOpType op , std : : vector < Variable > & operands , bool broadcastAllowed , bool inferInputDimensions ) const ; <nl> <nl> / / Returns a pair comprising of the output shape and boolean indicating if any input operand shape was modified <nl> / * static * / NDShape TimesOpOutputShape ( Variable & leftOperand , Variable & rightOperand , size_t outputRank , int inferInputRankToMap , bool inferInputDimensions ) const <nl> namespace CNTK <nl> return NDShape ( std : : move ( outputDims ) ) ; <nl> } <nl> <nl> - static void FixNDShape ( size_t filterRank , size_t inputRank , NDShape & shape , size_t deflt , const NDShape & from = NDShape ( ) ) <nl> - { <nl> - auto dims = shape . Dimensions ( ) ; <nl> - Microsoft : : MSR : : CNTK : : ConvolutionNodeBase < float > : : FixVectorShape ( filterRank , inputRank , dims , deflt , from . Dimensions ( ) ) ; <nl> - shape = NDShape ( dims ) ; <nl> - } <nl> + static void FixNDShape ( size_t filterRank , size_t inputRank , NDShape & shape , size_t deflt , const NDShape & from = NDShape ( ) ) ; <nl> <nl> / * static * / NDShape ConvolutionOpOutputShape ( PrimitiveOpType op , const NDShape & operandShape , NDShape & kernelShape , NDShape & outputMapCount , NDShape & strides , <nl> - std : : vector < bool > & sharing , std : : vector < bool > & autoPad , NDShape & lowerPad , NDShape & upperPad , <nl> - bool transpose , bool inferDimensions , bool ceilOutputDim = false ) const <nl> - { <nl> - if ( inferDimensions ) <nl> - { <nl> - size_t inputRank = operandShape . Rank ( ) ; <nl> - <nl> - / / Unknown kernel shape valid only for pooling , however , the shape should have expanded before <nl> - / / this call . <nl> - if ( kernelShape = = NDShape : : Unknown ) <nl> - RuntimeError ( " Convolution : Kernel shape can ' t be Unknown . " ) ; <nl> - <nl> - / / infer reduction dimensions if not given <nl> - / / If kernel has a lower rank than the input then the remaining dimensions are to be reduced over . <nl> - size_t filterRank = kernelShape . Rank ( ) ; <nl> - <nl> - / / If the trailing axis dimensionality of the kernel shape is NDShape : : InferredDimension , we reduce over it by <nl> - / / picking the corresponding operand shape dimensionality <nl> - / / This is done by shrinking the filter rank and let the dimensions be inferred from the operand ' s shape <nl> - / / TODO : Should we do this for all of the axes in kernelShape that have a dimensionailty of NDShape : : InferredDimension ? <nl> - if ( kernelShape [ filterRank - 1 ] = = NDShape : : InferredDimension ) <nl> - { <nl> - filterRank - - ; <nl> - kernelShape = kernelShape . SubShape ( 0 , filterRank ) ; <nl> - } <nl> - <nl> - NDShape fromShape ; <nl> - if ( op = = PrimitiveOpType : : Convolution ) <nl> - fromShape = operandShape ; <nl> - <nl> - size_t fillRank = ( ! transpose ) ? 
filterRank : filterRank - 1 ; <nl> - FixNDShape ( fillRank , inputRank , kernelShape , 1 , fromShape ) ; / / convolve over red dim ; pool over 1 <nl> - FixNDShape ( fillRank , inputRank , strides , 1 , fromShape ) ; / / stride for reduction dims is red dim or 1 <nl> - FixNDShape ( fillRank , inputRank , lowerPad , 0 ) ; <nl> - FixNDShape ( fillRank , inputRank , upperPad , 0 ) ; <nl> - Microsoft : : MSR : : CNTK : : ConvolutionNodeBase < float > : : FixVectorShape ( fillRank , inputRank , sharing , true ) ; <nl> - Microsoft : : MSR : : CNTK : : ConvolutionNodeBase < float > : : FixVectorShape ( fillRank , inputRank , autoPad , false ) ; / / no padding for reduction dims <nl> - } <nl> - <nl> - decltype ( & Microsoft : : MSR : : CNTK : : ConvolveGeometry : : ComputeOutputShape ) computeOutputShapeFunc ; <nl> - if ( ! transpose ) <nl> - computeOutputShapeFunc = & Microsoft : : MSR : : CNTK : : ConvolveGeometry : : ComputeOutputShape ; <nl> - else <nl> - computeOutputShapeFunc = & Microsoft : : MSR : : CNTK : : ConvolveGeometry : : ComputeInputShape ; <nl> - <nl> - return AsNDShape ( computeOutputShapeFunc ( AsTensorShape ( operandShape ) , AsTensorShape ( kernelShape ) , AsTensorShape ( outputMapCount ) , AsTensorShape ( strides ) , sharing , autoPad , AsTensorShape ( lowerPad ) , AsTensorShape ( upperPad ) , ceilOutputDim ) ) ; <nl> - } <nl> + std : : vector < bool > & sharing , std : : vector < bool > & autoPad , NDShape & lowerPad , NDShape & upperPad , <nl> + bool transpose , bool inferDimensions , bool ceilOutputDim = false ) const ; <nl> <nl> / * static * / NDShape BatchNormalizationOutputShape ( std : : vector < Variable > & operands , bool spatial , bool inferDimensions ) const <nl> { <nl> mmm a / Source / CNTKv2LibraryDll / Serialization . h <nl> ppp b / Source / CNTKv2LibraryDll / Serialization . h <nl> namespace CNTK <nl> const std : : wstring blockFunctionOpNameKey = L " block_function_op_name " ; <nl> const std : : wstring blockFunctionCompositeArgumentsMapKeysKey = L " block_function_composite_arguments_map_keys " ; <nl> const std : : wstring blockFunctionCompositeArgumentsMapValuesKey = L " block_function_composite_arguments_map_values " ; <nl> + const std : : wstring internalWorkerStateKey = L " internal_worker_state " ; <nl> + const std : : wstring externalWorkerStateKey = L " external_worker_state " ; <nl> <nl> template < typename T > <nl> inline std : : string GetVersionsString ( size_t currentVersion , size_t dictVersion ) <nl> mmm a / Source / CNTKv2LibraryDll / Trainer . cpp <nl> ppp b / Source / CNTKv2LibraryDll / Trainer . cpp <nl> <nl> # include " Utils . h " <nl> # include " Learner . h " <nl> # include " PerformanceProfiler . h " <nl> + # include " CompositeFunction . h " <nl> + # include " Serialization . h " <nl> <nl> namespace <nl> { <nl> + const std : : wstring versionPropertyName = L " Version " ; <nl> const std : : wstring learnersPropertyName = L " Learners " ; <nl> const std : : wstring externalStatePropertyName = L " ExternalState " ; <nl> + const std : : wstring distributedStatePropertyName = L " DistributedState " ; <nl> + <nl> + / / Version history : <nl> + / / 0 - - a version number before the versioning was introduced for the trainer ' s checkpoints . <nl> + / / 1 - - initial version : added a key - value pair for the checkpoint version info , added <nl> + / / distributed state key to save all local state collected from distributed workers . 
<nl> + static const size_t trainerCheckpointVersion = 1 ; <nl> } <nl> <nl> namespace CNTK <nl> namespace CNTK <nl> void Trainer : : SaveCheckpoint ( const std : : wstring & modelFilePath , Dictionary externalState ) <nl> { <nl> auto learnersState = m_parameterLearners - > CreateCheckpoint ( ) ; <nl> + <nl> if ( ! m_distributed ) <nl> return Save ( modelFilePath , learnersState , externalState ) ; <nl> <nl> + auto compositeFunction = dynamic_cast < CompositeFunction * > ( m_combinedTrainingFunction . get ( ) ) ; <nl> + <nl> + Dictionary state ; <nl> + state [ internalWorkerStateKey ] = compositeFunction - > GetInternalState ( ) ; / / this is the local worker ' s state . <nl> + state [ externalWorkerStateKey ] = externalState ; <nl> + <nl> / / Collect distrbuted external state . <nl> DistributedCommunicatorPtr communicator = MPICommunicator ( ) ; <nl> communicator - > Barrier ( ) ; <nl> <nl> std : : vector < DictionaryPtr > remoteState ; <nl> - communicator - > Gather ( externalState , remoteState , communicator - > Workers ( ) ) ; <nl> + communicator - > Gather ( state , remoteState , communicator - > Workers ( ) ) ; <nl> <nl> Dictionary aggregatedState ; <nl> for ( const auto & w : communicator - > Workers ( ) ) <nl> namespace CNTK <nl> } <nl> <nl> if ( communicator - > CurrentWorker ( ) . IsMain ( ) ) <nl> - Save ( modelFilePath , learnersState , aggregatedState ) ; <nl> + Save ( modelFilePath , learnersState , externalState , aggregatedState ) ; <nl> <nl> / / all workers need to sync up after saving model to avoid read - after - write hazard <nl> / / i . e . one worker is in the middle of write while another tries to read <nl> communicator - > Barrier ( ) ; <nl> } <nl> <nl> - void Trainer : : Save ( const std : : wstring & modelFilePath , const std : : vector < DictionaryValue > & learnerState , const Dictionary & externalState ) <nl> + void Trainer : : Save ( const std : : wstring & modelFilePath , const std : : vector < DictionaryValue > & learnerState , const Dictionary & externalState , const Dictionary & distributedState ) <nl> { <nl> std : : wstring tempModelFile = modelFilePath + L " . tmp " ; <nl> Dictionary state ; <nl> + state [ versionPropertyName ] = trainerCheckpointVersion ; <nl> state [ learnersPropertyName ] = learnerState ; <nl> state [ externalStatePropertyName ] = externalState ; <nl> + state [ distributedStatePropertyName ] = distributedState ; <nl> <nl> m_combinedTrainingFunction - > SaveModel ( tempModelFile ) ; <nl> std : : wstring trainerStateCheckpointFilePath = GetTrainerStateCheckpointFilePath ( modelFilePath ) ; <nl> namespace CNTK <nl> <nl> Dictionary checkpoint = Dictionary : : Load ( GetTrainerStateCheckpointFilePath ( modelFilePath ) ) ; <nl> <nl> + size_t version = 0 ; <nl> + <nl> + if ( checkpoint . Contains ( versionPropertyName ) ) <nl> + version = checkpoint [ versionPropertyName ] . Value < size_t > ( ) ; <nl> + <nl> auto learnerState = checkpoint [ learnersPropertyName ] . Value < std : : vector < DictionaryValue > > ( ) ; <nl> auto externalState = checkpoint [ externalStatePropertyName ] . Value < Dictionary > ( ) ; <nl> <nl> + m_parameterLearners - > RestoreFromCheckpoint ( learnerState ) ; <nl> + <nl> if ( ! 
m_distributed ) <nl> { <nl> - m_parameterLearners - > RestoreFromCheckpoint ( learnerState ) ; <nl> return externalState ; <nl> } <nl> <nl> - m_parameterLearners - > RestoreFromCheckpoint ( learnerState ) ; <nl> + / / this ensures that nobody will start writing to the model / checkpoint files , until <nl> + / / everybody is done reading them . <nl> DistributedCommunicatorPtr communicator = MPICommunicator ( ) ; <nl> communicator - > Barrier ( ) ; <nl> <nl> - auto key = std : : to_wstring ( communicator - > CurrentWorker ( ) . m_globalRank ) ; <nl> + auto mainWorkerId = std : : to_wstring ( 0 ) ; <nl> + auto localWorkerId = std : : to_wstring ( communicator - > CurrentWorker ( ) . m_globalRank ) ; <nl> <nl> - if ( externalState . Contains ( key ) ) <nl> + / / before version 1 , there was no distributed state per se . Instead , the external state <nl> + / / contained a dictionary of worker - specific external states . <nl> + if ( version = = 0 ) <nl> + { <nl> + auto key = externalState . Contains ( localWorkerId ) ? localWorkerId : mainWorkerId ; <nl> return externalState [ key ] . Value < Dictionary > ( ) ; <nl> - else <nl> - return externalState [ std : : to_wstring ( 0 ) ] . Value < Dictionary > ( ) ; <nl> + } <nl> + <nl> + Dictionary distributedState = checkpoint [ distributedStatePropertyName ] . Value < Dictionary > ( ) ; <nl> + <nl> + if ( communicator - > CurrentWorker ( ) . IsMain ( ) | | ! distributedState . Contains ( localWorkerId ) ) <nl> + { <nl> + return externalState ; <nl> + } <nl> + <nl> + / / the checkpoint contains internal state for this worker . <nl> + Dictionary localState = distributedState [ localWorkerId ] . Value < Dictionary > ( ) ; <nl> + <nl> + auto internalState = localState [ internalWorkerStateKey ] . Value < Dictionary > ( ) ; <nl> + auto compositeFunction = std : : dynamic_pointer_cast < CompositeFunction > ( m_combinedTrainingFunction ) ; <nl> + if ( compositeFunction = = nullptr ) <nl> + RuntimeError ( " Combined training function is not a CompositeFunction . " ) ; <nl> + <nl> + / / this assumes the compositeFunction ( restored from a checkpoint made by the main node ) and <nl> + / / the internal worker state both have identical UIDs . <nl> + compositeFunction - > SetInternalState ( internalState ) ; <nl> + <nl> + return localState [ externalWorkerStateKey ] . Value < Dictionary > ( ) ; <nl> + } <nl> <nl> double Trainer : : PreviousMinibatchLossAverage ( ) const <nl> mmm a / Source / CNTKv2LibraryDll / Variable . h <nl> ppp b / Source / CNTKv2LibraryDll / Variable . h <nl> <nl> # include " stdafx . h " <nl> # include " CNTKLibrary . h " <nl> # include < fstream > <nl> - # include " Utils . h " <nl> <nl> namespace CNTK <nl> { <nl> mmm a / Source / Math / CPURNGHandle . cpp <nl> ppp b / Source / Math / CPURNGHandle . cpp <nl> <nl> namespace Microsoft { namespace MSR { namespace CNTK { <nl> <nl> CPURNGHandle : : CPURNGHandle ( int deviceId , uint64_t seed , uint64_t offset ) <nl> - : RNGHandle ( deviceId ) <nl> + : RNGHandle ( deviceId ) , <nl> + m_generator ( seed ) <nl> { <nl> - m_generator . reset ( new std : : mt19937_64 ( seed ) ) ; <nl> - m_generator - > discard ( offset ) ; <nl> + m_generator . discard ( offset ) ; <nl> } <nl> <nl> } } } <nl> mmm a / Source / Math / CPURNGHandle . h <nl> ppp b / Source / Math / CPURNGHandle . 
h <nl> class CPURNGHandle : public RNGHandle <nl> <nl> std : : mt19937_64 & Generator ( ) <nl> { <nl> - return * m_generator ; <nl> + return m_generator ; <nl> } <nl> <nl> private : <nl> - std : : unique_ptr < std : : mt19937_64 > m_generator ; <nl> - / / TODO : why is this a ptr ? <nl> + std : : mt19937_64 m_generator ; <nl> } ; <nl> <nl> } } } <nl> mmm a / Tests / EndToEndTests / CNTKv2Library / EndToEndTests / FrameMode . cpp <nl> ppp b / Tests / EndToEndTests / CNTKv2Library / EndToEndTests / FrameMode . cpp <nl> <nl> # include " Common . h " <nl> <nl> using namespace CNTK ; <nl> + using namespace std ; <nl> using namespace std : : placeholders ; <nl> <nl> extern bool Is1bitSGDAvailable ( ) ; <nl> namespace <nl> const size_t numMinibatchesToTrain = ( numSamplesPerSweep * numSweepsToTrainWith ) / minibatchSize ; <nl> const size_t totalNumberOfSamples = numSamplesPerSweep * numSweepsToTrainWith ; <nl> <nl> + <nl> + const std : : wstring g_attributeNameRngSeed = L " rngSeed " ; <nl> + const std : : wstring g_attributeNameRngOffset = L " rngOffset " ; <nl> + <nl> + inline MinibatchSourcePtr GetMinibatchSource ( const FeedForwardClassifier & classifier ) <nl> + { <nl> + return TextFormatMinibatchSource ( g_inputFile , <nl> + { { g_featureStreamName , classifier . inputDim } , <nl> + { g_labelsStreamName , classifier . ouputDim } } , <nl> + totalNumberOfSamples , true ) ; <nl> + } <nl> + <nl> void LoopBasedOnSamples ( const std : : wstring & name , const DeviceDescriptor & device , std : : function < DistributedLearnerPtr ( LearnerPtr ) > factory , const FeedForwardClassifier & classifier ) <nl> { <nl> printf ( " Training loop thru samples with % ls . \ n " , name . c_str ( ) ) ; <nl> <nl> - auto minibatchSource = TextFormatMinibatchSource ( g_inputFile , <nl> - { { g_featureStreamName , classifier . inputDim } , { g_labelsStreamName , classifier . ouputDim } } , <nl> - totalNumberOfSamples , <nl> - true ) ; <nl> + auto minibatchSource = GetMinibatchSource ( classifier ) ; <nl> <nl> auto featureStreamInfo = minibatchSource - > StreamInfo ( g_featureStreamName ) ; <nl> auto labelStreamInfo = minibatchSource - > StreamInfo ( g_labelsStreamName ) ; <nl> void TestFrameMode ( ) <nl> } <nl> sync - > Barrier ( ) ; <nl> } <nl> + <nl> + <nl> + void TestDistributedCheckpointing ( ) <nl> + { <nl> + std : : vector < DeviceDescriptor > devices ; <nl> + if ( ShouldRunOnCpu ( ) ) <nl> + devices . push_back ( DeviceDescriptor : : CPUDevice ( ) ) ; <nl> + if ( ShouldRunOnGpu ( ) ) <nl> + devices . push_back ( DeviceDescriptor : : GPUDevice ( 0 ) ) ; <nl> + <nl> + auto sync = MPICommunicator ( ) ; <nl> + <nl> + auto numWorkers = sync - > Workers ( ) . size ( ) ; <nl> + auto workerRank = sync - > CurrentWorker ( ) . m_globalRank ; <nl> + <nl> + for ( auto device : devices ) <nl> + { <nl> + <nl> + auto ff = BuildFeedForwardClassifier ( device ) ; <nl> + ff . output = Dropout ( ff . output , 0 . 5 ) ; <nl> + ff . trainingLoss = CNTK : : CrossEntropyWithSoftmax ( ff . output , ff . labels , L " lossFunction " ) ; <nl> + ff . prediction = CNTK : : ClassificationError ( ff . output , ff . labels , L " classificationError " ) ; <nl> + <nl> + { <nl> + auto & attributes = ff . output - > RootFunction ( ) - > Attributes ( ) ; <nl> + size_t seed = attributes [ g_attributeNameRngSeed ] . Value < size_t > ( ) ; <nl> + / / Check that ( 1 ) the seed is in the attributes dictionary and <nl> + / / ( 2 ) the auto - generated seed value reflects the workerRank . <nl> + if ( numWorkers > 1 & & seed % numWorkers ! 
= workerRank ) <nl> + ReportFailure ( " Unexpected seed value " ) ; <nl> + } <nl> + <nl> + auto learner = SGDLearner ( ff . output - > Parameters ( ) , LearningRatePerSampleSchedule ( 0 . 02 ) ) ; <nl> + auto distributedLearner = CreateDataParallelDistributedLearner ( MPICommunicator ( ) , learner , 0 ) ; <nl> + auto trainer = CreateTrainer ( ff . output , ff . trainingLoss , ff . prediction , { distributedLearner } ) ; <nl> + <nl> + auto minibatchSource = GetMinibatchSource ( ff ) ; <nl> + <nl> + auto featureStreamInfo = minibatchSource - > StreamInfo ( g_featureStreamName ) ; <nl> + auto labelStreamInfo = minibatchSource - > StreamInfo ( g_labelsStreamName ) ; <nl> + <nl> + vector < double > expectedLoss ( 100 ) ; <nl> + for ( int i = 0 ; i < 100 ; i + + ) <nl> + { <nl> + if ( i % 10 = = 0 ) <nl> + { <nl> + auto checkpoint = minibatchSource - > GetCheckpointState ( ) ; <nl> + trainer - > SaveCheckpoint ( L " distributed_checkpoint_test . " + to_wstring ( i ) , checkpoint ) ; <nl> + } <nl> + <nl> + auto minibatchData = minibatchSource - > GetNextMinibatch ( minibatchSize , device ) ; <nl> + unordered_map < Variable , MinibatchData > minibatch = { { ff . features , minibatchData [ featureStreamInfo ] } , { ff . labels , minibatchData [ labelStreamInfo ] } } ; <nl> + <nl> + trainer - > TrainMinibatch ( minibatch , device ) ; <nl> + expectedLoss [ i ] = trainer - > PreviousMinibatchLossAverage ( ) ; <nl> + } <nl> + <nl> + for ( int i = 0 ; i < 100 ; i + + ) <nl> + { <nl> + if ( i % 10 = = 0 ) <nl> + { <nl> + auto checkpoint = trainer - > RestoreFromCheckpoint ( L " distributed_checkpoint_test . " + to_wstring ( i ) ) ; <nl> + minibatchSource - > RestoreFromCheckpoint ( checkpoint ) ; <nl> + <nl> + auto & attributes = ff . output - > RootFunction ( ) - > Attributes ( ) ; <nl> + size_t seed = attributes [ g_attributeNameRngSeed ] . Value < size_t > ( ) ; <nl> + size_t offset = attributes [ g_attributeNameRngOffset ] . Value < size_t > ( ) ; <nl> + <nl> + / / Check that the worker - specific seed value was properly restored from the checkpoint . <nl> + if ( numWorkers > 1 & & seed % numWorkers ! = workerRank ) <nl> + ReportFailure ( " Unexpected seed value " ) ; <nl> + / / Check the offset and verify that it changes depending on the number of processed minibatches . <nl> + if ( offset ! = i * minibatchSize * ff . inputDim ) <nl> + ReportFailure ( " Unexpected offset value " ) ; <nl> + } <nl> + <nl> + auto minibatchData = minibatchSource - > GetNextMinibatch ( minibatchSize , device ) ; <nl> + unordered_map < Variable , MinibatchData > minibatch = { { ff . features , minibatchData [ featureStreamInfo ] } , { ff . labels , minibatchData [ labelStreamInfo ] } } ; <nl> + <nl> + trainer - > TrainMinibatch ( minibatch , device ) ; <nl> + auto loss = trainer - > PreviousMinibatchLossAverage ( ) ; <nl> + <nl> + FloatingPointCompare ( loss , expectedLoss [ i ] , " Post checkpoint restoration training loss does not match expectation " ) ; <nl> + <nl> + } <nl> + } <nl> + <nl> + sync - > Barrier ( ) ; <nl> + } <nl> mmm a / Tests / EndToEndTests / CNTKv2Library / EndToEndTests / Main . cpp <nl> ppp b / Tests / EndToEndTests / CNTKv2Library / EndToEndTests / Main . 
cpp <nl> void MNISTClassifierTests ( ) ; <nl> void TrainSequenceToSequenceTranslator ( ) ; <nl> void TrainTruncatedLSTMAcousticModelClassifier ( ) ; <nl> void TestFrameMode ( ) ; <nl> + void TestDistributedCheckpointing ( ) ; <nl> <nl> int main ( int argc , char * argv [ ] ) <nl> { <nl> int main ( int argc , char * argv [ ] ) <nl> <nl> TestFrameMode ( ) ; <nl> <nl> + TestDistributedCheckpointing ( ) ; <nl> + <nl> std : : string testsPassedMsg = " \ nCNTKv2Library - Distribution tests : Passed \ n " ; <nl> <nl> printf ( " % s " , testsPassedMsg . c_str ( ) ) ; <nl> mmm a / Tests / EndToEndTests / UnitTests / CNTKv2Library / baseline . txt <nl> ppp b / Tests / EndToEndTests / UnitTests / CNTKv2Library / baseline . txt <nl> Test module " V2LibraryTests " has passed with : <nl> <nl> Test case " SerializationSuite / CheckpointingWithStatefulNodesInGPU " has passed <nl> <nl> + Test case " SerializationSuite / CheckpointingWithStatefulNodesAndExplicitSeedsOnCPU " has passed <nl> + <nl> + Test case " SerializationSuite / CheckpointingWithStatefulNodesAndExplicitSeedsOnGPU " has passed <nl> + <nl> Test suite " FeedForwardSuite " has passed with : <nl> 6 test cases out of 6 passed <nl> 8 assertions out of 8 passed <nl> mmm a / Tests / UnitTests / V2LibraryTests / SerializationTests . cpp <nl> ppp b / Tests / UnitTests / V2LibraryTests / SerializationTests . cpp <nl> void TestCheckpointingWithStatefulNodes ( const DeviceDescriptor & device ) <nl> } <nl> } <nl> <nl> + <nl> + void TestCheckpointingWithStatefulNodesAndExplicitSeeds ( const DeviceDescriptor & device ) <nl> + { <nl> + auto featureStreamName = L " features " ; <nl> + auto labelsStreamName = L " labels " ; <nl> + <nl> + size_t inputDim = 784 ; <nl> + size_t numOutputClasses = 10 ; <nl> + auto features = InputVariable ( { inputDim } , false / * isSparse * / , DataType : : Float , featureStreamName ) ; <nl> + auto labels = InputVariable ( { numOutputClasses } , DataType : : Float , labelsStreamName ) ; <nl> + <nl> + auto net1 = BuildFFClassifierNet ( features , numOutputClasses , device , 1 ) ; <nl> + auto net2 = net1 - > Clone ( ParameterCloningMethod : : Clone , { { features , features } } ) ; <nl> + auto net3 = net1 - > Clone ( ParameterCloningMethod : : Clone , { { features , features } } ) ; <nl> + <nl> + auto trainer1 = BuildTrainer ( Dropout ( net1 , 0 . 5 , 123 ) , labels ) ; <nl> + auto trainer2 = BuildTrainer ( Dropout ( net2 , 0 . 5 , 123 ) , labels ) ; <nl> + auto trainer3 = BuildTrainer ( Dropout ( net3 , 0 . 5 , 321 ) , labels ) ; <nl> + <nl> + const size_t minibatchSize = 50 ; <nl> + const size_t maxSamples = 150 ; <nl> + auto minibatchSource = TextFormatMinibatchSource ( L " Train - 28x28_cntk_text . 
txt " , { { featureStreamName , inputDim } , { labelsStreamName , numOutputClasses } } , 2 * maxSamples , false ) ; <nl> + <nl> + auto featureStreamInfo = minibatchSource - > StreamInfo ( features ) ; <nl> + auto labelStreamInfo = minibatchSource - > StreamInfo ( labels ) ; <nl> + <nl> + for ( int i = 0 ; i < maxSamples ; i + = minibatchSize ) <nl> + { <nl> + auto minibatchData = minibatchSource - > GetNextMinibatch ( minibatchSize , device ) ; <nl> + unordered_map < Variable , MinibatchData > minibatch = { { features , minibatchData [ featureStreamInfo ] } , { labels , minibatchData [ labelStreamInfo ] } } ; <nl> + <nl> + trainer1 - > TrainMinibatch ( minibatch , device ) ; <nl> + trainer2 - > TrainMinibatch ( minibatch , device ) ; <nl> + trainer3 - > TrainMinibatch ( minibatch , device ) ; <nl> + auto loss1 = trainer1 - > PreviousMinibatchLossAverage ( ) ; <nl> + auto loss2 = trainer2 - > PreviousMinibatchLossAverage ( ) ; <nl> + auto loss3 = trainer3 - > PreviousMinibatchLossAverage ( ) ; <nl> + FloatingPointCompare ( loss1 , loss2 , " Training loss does not match expectation " ) ; <nl> + BOOST_TEST ( ( abs ( loss1 - loss2 ) < = abs ( loss2 - loss3 ) ) ) ; <nl> + } <nl> + <nl> + trainer1 - > SaveCheckpoint ( L " seeded_stateful_nodes . model " ) ; <nl> + auto state = minibatchSource - > GetCheckpointState ( ) ; <nl> + <nl> + vector < double > expectedLoss ; <nl> + for ( int i = 0 ; i < maxSamples ; i + = minibatchSize ) <nl> + { <nl> + auto minibatchData = minibatchSource - > GetNextMinibatch ( minibatchSize , device ) ; <nl> + unordered_map < Variable , MinibatchData > minibatch = { { features , minibatchData [ featureStreamInfo ] } , { labels , minibatchData [ labelStreamInfo ] } } ; <nl> + <nl> + trainer1 - > TrainMinibatch ( minibatch , device ) ; <nl> + expectedLoss . push_back ( trainer1 - > PreviousMinibatchLossAverage ( ) ) ; <nl> + } <nl> + <nl> + trainer1 - > RestoreFromCheckpoint ( L " seeded_stateful_nodes . model " ) ; <nl> + minibatchSource - > RestoreFromCheckpoint ( state ) ; <nl> + <nl> + for ( int i = 0 ; i * minibatchSize < maxSamples ; i + + ) <nl> + { <nl> + auto minibatchData = minibatchSource - > GetNextMinibatch ( minibatchSize , device ) ; <nl> + unordered_map < Variable , MinibatchData > minibatch = { { features , minibatchData [ featureStreamInfo ] } , { labels , minibatchData [ labelStreamInfo ] } } ; <nl> + <nl> + trainer1 - > TrainMinibatch ( minibatch , device ) ; <nl> + double loss = trainer1 - > PreviousMinibatchLossAverage ( ) ; <nl> + FloatingPointCompare ( loss , expectedLoss [ i ] , " Post checkpoint restoration training loss does not match expectation " ) ; <nl> + } <nl> + } <nl> + <nl> void TestLoadingModelFromMemoryBuffer ( ) <nl> { <nl> ifstream modelFileStream ( " batch . norm . no . sample . count . v2 . 
bin " , ifstream : : binary ) ; <nl> BOOST_AUTO_TEST_CASE ( CheckpointingWithStatefulNodesInGPU ) <nl> TestCheckpointingWithStatefulNodes ( DeviceDescriptor : : GPUDevice ( 0 ) ) ; <nl> } <nl> <nl> + <nl> + BOOST_AUTO_TEST_CASE ( CheckpointingWithStatefulNodesAndExplicitSeedsOnCPU ) <nl> + { <nl> + TestCheckpointingWithStatefulNodesAndExplicitSeeds ( DeviceDescriptor : : CPUDevice ( ) ) ; <nl> + } <nl> + <nl> + BOOST_AUTO_TEST_CASE ( CheckpointingWithStatefulNodesAndExplicitSeedsOnGPU ) <nl> + { <nl> + if ( ShouldRunOnGpu ( ) ) <nl> + TestCheckpointingWithStatefulNodesAndExplicitSeeds ( DeviceDescriptor : : GPUDevice ( 0 ) ) ; <nl> + } <nl> + <nl> BOOST_AUTO_TEST_SUITE_END ( ) <nl> <nl> } } <nl> mmm a / Tutorials / CNTK_106A_LSTM_Timeseries_with_Simulated_Data . ipynb <nl> ppp b / Tutorials / CNTK_106A_LSTM_Timeseries_with_Simulated_Data . ipynb <nl> <nl> " with C . layers . default_options ( initial_state = 0 . 1 ) : \ n " , <nl> " m = C . layers . Recurrence ( C . layers . LSTM ( N ) ) ( x ) \ n " , <nl> " m = C . ops . sequence . last ( m ) \ n " , <nl> - " m = C . layers . Dropout ( 0 . 2 ) ( m ) \ n " , <nl> + " m = C . layers . Dropout ( 0 . 2 , seed = 1 ) ( m ) \ n " , <nl> " m = cntk . layers . Dense ( 1 ) ( m ) \ n " , <nl> " return m " <nl> ] <nl> mmm a / bindings / python / cntk / cntk_py . i <nl> ppp b / bindings / python / cntk / cntk_py . i <nl> public : <nl> } <nl> } <nl> <nl> + <nl> + % ignore CNTK : : Dictionary : : Keys ; <nl> + <nl> % extend CNTK : : Dictionary { <nl> PyObject * __getitem__ ( const wchar_t * key ) { <nl> PyObject * DictionaryValueToPy ( const CNTK : : DictionaryValue & ) ; <nl> mmm a / bindings / python / cntk / layers / layers . py <nl> ppp b / bindings / python / cntk / layers / layers . py <nl> <nl> from . . ops import parameter , input , placeholder , combine <nl> from . . ops import times , element_times , convolution , convolution_transpose , pooling , unpooling , batch_normalization , dropout , splice , reshape , sequence , softmax , tanh , reduce_sum , reduce_mean , sqrt <nl> from cntk . internal import _as_tuple <nl> + from cntk . cntk_py import sentinel_value_for_auto_select_random_seed as SentinelValueForAutoSelectRandomSeed <nl> from . blocks import * <nl> from . higher_order_layers import * <nl> from . blocks import _initializer_for , _get_initial_state_or_default , _INFERRED # helpers <nl> def maxunpool ( x , y ) : <nl> <nl> <nl> # TODO : should the rate ( s ) be default_options ? <nl> - def Dropout ( dropout_rate = None , keep_prob = None , name = ' ' ) : <nl> + def Dropout ( dropout_rate = None , <nl> + keep_prob = None , <nl> + seed = SentinelValueForAutoSelectRandomSeed , <nl> + name = ' ' ) : <nl> ' ' ' <nl> Layer factory function to create a drop - out layer . <nl> <nl> def Dropout ( dropout_rate = None , keep_prob = None , name = ' ' ) : <nl> Args : <nl> dropout_rate ( float ) : probability of dropping out an element , mutually exclusive with ` ` keep_prob ` ` <nl> keep_prob ( float ) : probability of keeping an element , mutually exclusive with ` ` dropout_rate ` ` <nl> + seed ( int ) : random seed . 
<nl> name ( str , defaults to ' ' ) : the name of the function instance in the network <nl> <nl> Returns : <nl> def Dropout ( dropout_rate = None , keep_prob = None , name = ' ' ) : <nl> dropout_rate = 1 - keep_prob <nl> @ BlockFunction ( ' Dropout ' , name ) <nl> def dropout_f ( x ) : <nl> - return dropout ( x , dropout_rate = dropout_rate ) <nl> + return dropout ( x , dropout_rate = dropout_rate , seed = seed ) <nl> return dropout_f <nl> <nl> <nl> mmm a / bindings / python / cntk / ops / __init__ . py <nl> ppp b / bindings / python / cntk / ops / __init__ . py <nl> <nl> from cntk . internal . utils import get_data_type <nl> from . . axis import Axis <nl> from . . import cntk_py <nl> + from . . cntk_py import sentinel_value_for_auto_select_random_seed as SentinelValueForAutoSelectRandomSeed <nl> from . . default_options import get_default_override , default_override_or <nl> <nl> TIMES_NO_INFERRED_INPUT_RANK = cntk_py . TimesNoInferredInputRank <nl> def argmin ( x , axis = None , name = ' ' ) : <nl> # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # <nl> <nl> @ typemap <nl> - def random_sample ( weights , num_samples , allow_duplicates , name = ' ' ) : <nl> + def random_sample ( <nl> + weights , <nl> + num_samples , <nl> + allow_duplicates , <nl> + seed = SentinelValueForAutoSelectRandomSeed , <nl> + name = ' ' ) : <nl> ' ' ' <nl> Estimates inclusion frequencies for random sampling with or without <nl> replacement . <nl> def random_sample ( weights , num_samples , allow_duplicates , name = ' ' ) : <nl> num_samples ( int ) : number of expected samples <nl> allow_duplicates ( bool ) : If sampling is done <nl> with replacement ( ` True ` ) or without ( ` False ` ) . <nl> + seed ( int ) : random seed . <nl> + name ( : class : ` str ` , optional ) : the name of the Function instance in the network . <nl> <nl> Returns : <nl> : class : ` ~ cntk . ops . functions . Function ` <nl> def random_sample ( weights , num_samples , allow_duplicates , name = ' ' ) : <nl> from cntk . cntk_py import random_sample <nl> weights = sanitize_input ( weights ) <nl> <nl> - return random_sample ( weights , num_samples , allow_duplicates , name ) <nl> + return random_sample ( weights , num_samples , allow_duplicates , seed , name ) <nl> <nl> <nl> @ typemap <nl> def random_sample_inclusion_frequency ( <nl> weights , <nl> num_samples , <nl> - allow_duplicates , <nl> + allow_duplicates , <nl> + seed = SentinelValueForAutoSelectRandomSeed , <nl> name = ' ' ) : <nl> ' ' ' <nl> For weighted sampling with the specifed sample size ( ` num_samples ` ) <nl> def random_sample_inclusion_frequency ( <nl> num_samples ( int ) : number of expected samples <nl> allow_duplicates ( bool ) : If sampling is done <nl> with replacement ( ` True ` ) or without ( ` False ` ) . <nl> + seed ( int ) : random seed . <nl> + name ( : class : ` str ` , optional ) : the name of the Function instance in the network . <nl> <nl> Example : <nl> > > > import numpy as np <nl> def random_sample_inclusion_frequency ( <nl> weights , <nl> num_samples , <nl> allow_duplicates , <nl> + seed , <nl> name ) <nl> <nl> <nl> @ typemap <nl> - def dropout ( x , dropout_rate = 0 . 0 , name = ' ' ) : <nl> + def dropout ( x , dropout_rate = 0 . 
0 , seed = SentinelValueForAutoSelectRandomSeed , name = ' ' ) : <nl> ' ' ' <nl> Each element of the input is independently set to 0 with probabily ` ` dropout_rate ` ` <nl> or to 1 / ( 1 - ` ` dropout_rate ` ` ) times its original value ( with probability 1 - ` ` dropout_rate ` ` ) . <nl> def dropout ( x , dropout_rate = 0 . 0 , name = ' ' ) : <nl> Args : <nl> x : input tensor <nl> dropout_rate ( float , [ 0 , 1 ) ) : probability that an element of ` ` x ` ` will be set to zero <nl> + seed ( int ) : random seed . <nl> name ( : class : ` str ` , optional ) : the name of the Function instance in the network <nl> <nl> Returns : <nl> def dropout ( x , dropout_rate = 0 . 0 , name = ' ' ) : <nl> from cntk . cntk_py import dropout <nl> x = sanitize_input ( x ) <nl> <nl> - return dropout ( x , dropout_rate , name ) <nl> + return dropout ( x , dropout_rate , seed , name ) <nl> <nl> # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # <nl> # variables_and_parameters ops <nl> mmm a / bindings / python / cntk / ops / tests / non_linear_test . py <nl> ppp b / bindings / python / cntk / ops / tests / non_linear_test . py <nl> def test_op_dropout ( shape , dropout_rate , device_id , precision ) : <nl> assert ( abs ( resulted_non_zeros - expected_non_zeros ) < <nl> max_off ) <nl> <nl> + def test_op_dropout_with_explicit_seed ( device_id , precision ) : <nl> + from cntk import combine , dropout , input <nl> + <nl> + value = np . ones ( shape = ( 10 , 10 ) , dtype = PRECISION_TO_TYPE [ precision ] ) <nl> + <nl> + a = input ( shape = value . shape , <nl> + dtype = sanitize_dtype_cntk ( PRECISION_TO_TYPE [ precision ] ) , <nl> + needs_gradient = True , <nl> + name = ' a ' ) <nl> + <nl> + seed = 123 ; <nl> + <nl> + dropout_nodes = [ <nl> + dropout ( a , dropout_rate = 0 . 5 , seed = seed ) , <nl> + dropout ( a , dropout_rate = 0 . 5 , seed = seed ) , <nl> + dropout ( a , dropout_rate = 0 . 5 , seed = seed + 1 ) , <nl> + dropout ( a , dropout_rate = 0 . 5 ) <nl> + ] <nl> + <nl> + value . shape = ( 1 , 1 ) + value . shape <nl> + forward_input = { a : value } <nl> + results = [ ] <nl> + for node in dropout_nodes : <nl> + forward , backward = cntk_eval ( node , <nl> + forward_input , <nl> + precision , <nl> + cntk_device ( device_id ) , <nl> + backward_pass = True ) <nl> + <nl> + results . append ( forward [ node . output ] ) <nl> + <nl> + assert np . allclose ( results [ 0 ] , results [ 1 ] ) <nl> + assert not np . allclose ( results [ 0 ] , results [ 2 ] ) <nl> + assert not np . allclose ( results [ 0 ] , results [ 3 ] ) <nl> + <nl> <nl> @ pytest . mark . parametrize ( " dropout_rate " , [ - 0 . 1 , 1 . 0 , 100 ] ) <nl> def test_op_dropout_bad_input ( dropout_rate ) : <nl> mmm a / bindings / python / cntk / ops / tests / random_sample_test . py <nl> ppp b / bindings / python / cntk / ops / tests / random_sample_test . py <nl> def test_random_sample_without_replacement ( weights , num_samples , expected_count , <nl> denseResult = times ( result , identity ) <nl> observed_count = np . sum ( denseResult . eval ( ) , 0 ) <nl> assert np . allclose ( observed_count , expected_count , atol = tolerance ) <nl> + <nl> + def test_random_sample_with_explicit_seed ( device_id , precision ) : <nl> + weights = AA ( [ x for x in range ( 0 , 10 ) ] , precision ) <nl> + identity = np . identity ( weights . 
size ) <nl> + allow_duplicates = False # sample without replacement <nl> + num_samples = 5 ; <nl> + seed = 123 <nl> + to_dense = lambda x : times ( x , identity ) . eval ( ) <nl> + result1 = to_dense ( random_sample ( weights , num_samples , allow_duplicates , seed ) ) <nl> + result2 = to_dense ( random_sample ( weights , num_samples , allow_duplicates , seed ) ) <nl> + result3 = to_dense ( random_sample ( weights , num_samples , allow_duplicates , seed + 1 ) ) <nl> + result4 = to_dense ( random_sample ( weights , num_samples , allow_duplicates ) ) <nl> + assert np . allclose ( result1 , result2 ) <nl> + assert not np . allclose ( result1 , result3 ) <nl> + assert not np . allclose ( result1 , result4 ) <nl> mmm a / bindings / python / cntk / tests / attributes_test . py <nl> ppp b / bindings / python / cntk / tests / attributes_test . py <nl> def test_convolution_transpose_attributes ( ) : <nl> <nl> def test_dropout_attributes ( ) : <nl> x = C . input ( ( 1 , 5 , 5 ) ) <nl> - f = C . dropout ( x , 0 . 5 ) <nl> + f = C . dropout ( x , 0 . 5 , 42 ) <nl> d = f . root_function . attributes <nl> - expected = { ' dropoutRate ' : 0 . 5 } <nl> + expected = { ' dropoutRate ' : 0 . 5 , ' rngSeed ' : 42 , ' rngOffset ' : 0 } <nl> _check ( expected , d ) <nl> <nl> def test_slice_attributes ( ) : <nl> | Integrate alrezni / v2_dropout into master | microsoft/CNTK | 94993f3c81a9f07dd01141b3b1925a59298e459f | 2017-04-04T16:16:31Z |
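The commit above threads an explicit seed through cntk.ops.dropout, random_sample, and the Dropout layer, so stochastic ops become reproducible. A minimal sketch of the behavior the new test_op_dropout_with_explicit_seed asserts, condensed from that test (the CNTK 2.x-era API is used exactly as in the diff; shapes and the batch/sequence reshape are illustrative):

import numpy as np
from cntk import dropout, input  # same imports as the test above

value = np.ones((10, 10), dtype=np.float32)
a = input(shape=value.shape, needs_gradient=True, name='a')

d_same_1 = dropout(a, dropout_rate=0.5, seed=123)  # explicit seed
d_same_2 = dropout(a, dropout_rate=0.5, seed=123)  # same seed -> same mask
d_other = dropout(a, dropout_rate=0.5, seed=321)   # different seed -> different mask
d_auto = dropout(a, dropout_rate=0.5)              # seed auto-selected per op

batch = value.reshape((1, 1) + value.shape)        # prepend batch and sequence axes
r1, r2, r3, r4 = (d.eval({a: batch}) for d in (d_same_1, d_same_2, d_other, d_auto))

assert np.allclose(r1, r2)
assert not np.allclose(r1, r3)
assert not np.allclose(r1, r4)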
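The same commit also versions the trainer checkpoint (trainerCheckpointVersion = 1) and stores each worker's RNG state under a new DistributedState key. A hedged Python transcription of the restore branching in Trainer::RestoreFromCheckpoint — a sketch of the control flow only, not a real CNTK API; the key names come from Serialization.h and Trainer.cpp above, and apply_internal_state stands in for CompositeFunction::SetInternalState:

def restore_from_checkpoint(checkpoint, worker_rank, is_main, apply_internal_state):
    # sketch: mirrors the C++ logic above for a single distributed worker
    external_state = checkpoint['ExternalState']
    version = checkpoint.get('Version', 0)  # pre-versioning checkpoints lack the key

    if version == 0:
        # old layout: the external state held one dictionary per worker rank,
        # falling back to the main worker's entry
        key = str(worker_rank) if str(worker_rank) in external_state else '0'
        return external_state[key]

    distributed_state = checkpoint['DistributedState']
    if is_main or str(worker_rank) not in distributed_state:
        return external_state

    # restore this worker's internal state (e.g. RNG seeds/offsets) before
    # handing back its own external state
    local_state = distributed_state[str(worker_rank)]
    apply_internal_state(local_state['internal_worker_state'])
    return local_state['external_worker_state']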
mmm a / src / compiler / OWNERS <nl> ppp b / src / compiler / OWNERS <nl> mstarzinger @ chromium . org <nl> mtrofin @ chromium . org <nl> titzer @ chromium . org <nl> danno @ chromium . org <nl> + tebbi @ chromium . org <nl> <nl> per - file wasm - * = ahaas @ chromium . org <nl> per - file wasm - * = clemensh @ chromium . org <nl> | [ Turbofan ] Add tebbi @ chromium . org to src / compiler / OWNERS . | v8/v8 | 8e18765d698ec96489f16d04d3529c5f77ccf27f | 2017-03-14T09:21:17Z |
mmm a / code / dynamic_programming / src / longest_palindromic_substring / longest_palindromic_substring . py <nl> ppp b / code / dynamic_programming / src / longest_palindromic_substring / longest_palindromic_substring . py <nl> <nl> def longest_pal ( test_num ) : <nl> num = str ( test_num ) <nl> - # booleanArray with [ start ] [ end ] <nl> - pal_boolean_array = [ [ False for e in range ( len ( num ) ) ] for s in range ( len ( num ) ) ] <nl> - # all one - letter substrings are palindromes <nl> - for s in range ( len ( num ) ) : # length one substrings are all palindromes <nl> + # booleanArray with [ start ] [ end ] <nl> + pal_boolean_array = [ <nl> + [ False for e in range ( len ( num ) ) ] for s in range ( len ( num ) ) ] <nl> + # all one - letter substrings are palindromes <nl> + for s in range ( len ( num ) ) : # length one substrings are all palindromes <nl> pal_boolean_array [ s ] [ s ] = True <nl> longest = 1 <nl> <nl> - for s in range ( len ( num ) - 1 ) : # check substrings of length 2 <nl> - palindrome_boolean = num [ s ] = = num [ s + 1 ] <nl> - pal_boolean_array [ s ] [ s + 1 ] = palindrome_boolean <nl> + for s in range ( len ( num ) - 1 ) : # check substrings of length 2 <nl> + palindrome_boolean = num [ s ] = = num [ s + 1 ] <nl> + pal_boolean_array [ s ] [ s + 1 ] = palindrome_boolean <nl> if palindrome_boolean : <nl> longest = 2 <nl> <nl> - for lengths_to_check in range ( 3 , len ( num ) + 1 ) : # lengths greater than 2 <nl> + for lengths_to_check in range ( 3 , len ( num ) + 1 ) : # lengths greater than 2 <nl> for s in range ( len ( num ) - lengths_to_check + 1 ) : <nl> - palindrome_boolean = num [ s ] = = num [ s + lengths_to_check - 1 ] and pal_boolean_array [ s + 1 ] [ s + lengths_to_check - 2 ] <nl> - pal_boolean_array [ s ] [ s + lengths_to_check - 1 ] = palindrome_boolean <nl> + other_characters_symmetry = pal_boolean_array [ s + <nl> + 1 ] [ s + lengths_to_check - 2 ] <nl> + palindrome_boolean = num [ s ] = = num [ s + <nl> + lengths_to_check - <nl> + 1 ] and other_characters_symmetry <nl> + pal_boolean_array [ s ] [ s + lengths_to_check - 1 ] = palindrome_boolean <nl> if palindrome_boolean : <nl> longest = max ( longest , lengths_to_check ) <nl> return longest <nl> | Used Pep8 Automatic Formatter | OpenGenus/cosmos | fb9f4c36cbab95c1f9e3d82dc7065e56a67eb934 | 2019-03-13T01:28:42Z |
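For reference, a few hand-checked calls to the reformatted function (digits are treated as a string; the DP table costs O(n^2) time and space in the number of digits):

assert longest_pal(102) == 1      # no palindromic substring longer than one digit
assert longest_pal(1221) == 4     # "1221" reads the same both ways
assert longest_pal(1234321) == 7  # the whole number is a palindrome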
mmm a / addons / resource . language . en_gb / resources / strings . po <nl> ppp b / addons / resource . language . en_gb / resources / strings . po <nl> msgid " Add " <nl> msgstr " " <nl> <nl> # : addons / skin . estuary / 1080i / Custom_1106_VideoOSDSettings . xml <nl> + # : addons / skin . estuary / 1080i / Custom_1105_MusicOSDSettings . xml <nl> msgctxt " # 15020 " <nl> msgid " Audio DSP manager " <nl> msgstr " " <nl> msgstr " " <nl> # : xbmc / pvr / windows / GUIWindowPVRChannels . cpp <nl> # : xbmc / pvr / windows / GUIWindowPVRRecordings . cpp <nl> # : xbmc / music / windows / GUIWindowMusicBase . cpp <nl> + # : addons / skin . estuary / 1080i / Custom_1105_MusicOSDSettings . xml <nl> msgctxt " # 15047 " <nl> msgid " Audio DSP settings " <nl> msgstr " " <nl> mmm a / addons / skin . estuary / 1080i / Custom_1105_MusicOSDSettings . xml <nl> ppp b / addons / skin . estuary / 1080i / Custom_1105_MusicOSDSettings . xml <nl> <nl> < param name = " align " value = " center " / > <nl> < / include > <nl> < content > <nl> + < item > <nl> + < label > $ LOCALIZE [ 15047 ] < / label > <nl> + < onclick > ActivateWindow ( OSDAudioDSPSettings ) < / onclick > <nl> + < visible > system . getbool ( audiooutput . dspaddonsenabled ) < / visible > <nl> + < / item > <nl> + < item > <nl> + < label > $ LOCALIZE [ 15020 ] < / label > <nl> + < onclick > ActivateWindow ( AudioDSPManager ) < / onclick > <nl> + < visible > system . getbool ( audiooutput . dspaddonsenabled ) < / visible > <nl> + < / item > <nl> < item > <nl> < label > $ LOCALIZE [ 250 ] : [ COLOR button_focus ] $ INFO [ Visualisation . Name ] [ / COLOR ] < / label > <nl> < onclick > Dialog . Close ( all ) < / onclick > <nl> mmm a / xbmc / guilib / GUISliderControl . cpp <nl> ppp b / xbmc / guilib / GUISliderControl . cpp <nl> void CGUISliderControl : : Process ( unsigned int currentTime , CDirtyRegionList & dirt <nl> SetIntValue ( val ) ; <nl> } <nl> <nl> - float fScale ; <nl> - if ( m_orientation = = HORIZONTAL ) <nl> - fScale = m_height = = 0 ? 1 . 0f : m_height / m_guiBackground . GetTextureHeight ( ) ; <nl> - else <nl> - fScale = m_width = = 0 ? 1 . 0f : m_width / m_guiBackground . GetTextureWidth ( ) ; <nl> <nl> dirty | = m_guiBackground . SetHeight ( m_height ) ; <nl> dirty | = m_guiBackground . SetWidth ( m_width ) ; <nl> dirty | = m_guiBackground . Process ( currentTime ) ; <nl> <nl> CGUITexture & nibLower = ( m_bHasFocus & & ! IsDisabled ( ) & & m_currentSelector = = RangeSelectorLower ) ? m_guiSelectorLowerFocus : m_guiSelectorLower ; <nl> + float fScale ; <nl> + if ( m_orientation = = HORIZONTAL ) <nl> + fScale = m_height = = 0 ? 1 . 0f : m_height / m_guiBackground . GetTextureHeight ( ) ; <nl> + else <nl> + fScale = m_width = = 0 ? 1 . 0f : m_width / nibLower . GetTextureWidth ( ) ; <nl> + <nl> dirty | = ProcessSelector ( nibLower , currentTime , fScale , RangeSelectorLower ) ; <nl> if ( m_rangeSelection ) <nl> { <nl> CGUITexture & nibUpper = ( m_bHasFocus & & ! IsDisabled ( ) & & m_currentSelector = = RangeSelectorUpper ) ? m_guiSelectorUpperFocus : m_guiSelectorUpper ; <nl> + if ( m_orientation = = HORIZONTAL ) <nl> + fScale = m_height = = 0 ? 1 . 0f : m_height / m_guiBackground . GetTextureHeight ( ) ; <nl> + else <nl> + fScale = m_width = = 0 ? 1 . 0f : m_width / nibUpper . 
GetTextureWidth ( ) ; <nl> dirty | = ProcessSelector ( nibUpper , currentTime , fScale , RangeSelectorUpper ) ; <nl> } <nl> <nl> void CGUISliderControl : : SetFromPosition ( const CPoint & point , bool guessSelector <nl> if ( m_orientation = = HORIZONTAL ) <nl> fPercent = ( point . x - m_guiBackground . GetXPosition ( ) ) / m_guiBackground . GetWidth ( ) ; <nl> else <nl> - fPercent = ( point . y - m_guiBackground . GetYPosition ( ) ) / m_guiBackground . GetHeight ( ) ; <nl> + fPercent = ( m_guiBackground . GetYPosition ( ) + m_guiBackground . GetHeight ( ) - point . y ) / m_guiBackground . GetHeight ( ) ; <nl> + <nl> if ( fPercent < 0 ) fPercent = 0 ; <nl> if ( fPercent > 1 ) fPercent = 1 ; <nl>
| Merge pull request from AchimTuran / skinning_improvements | xbmc/xbmc | d92453ec88ef13445ffdb216a0643499201285d2 | 2016-05-28T11:12:39Z |
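The GUISliderControl hunk flips the vertical percentage so the top of a vertical slider maps to 100%, and scales each nib by its own texture width. A small Python sketch of the new vertical mapping (names mirror the C++ above; screen coordinates grow downward, as in the GUI):

def percent_from_position(point_y, background_y, background_height):
    # vertical slider: values grow toward the top, matching the patched formula
    percent = (background_y + background_height - point_y) / background_height
    return min(max(percent, 0.0), 1.0)

# clicking the bottom edge yields 0.0, the top edge 1.0
assert percent_from_position(point_y=300.0, background_y=100.0, background_height=200.0) == 0.0
assert percent_from_position(point_y=100.0, background_y=100.0, background_height=200.0) == 1.0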
mmm a / Marlin / Configuration . h <nl> ppp b / Marlin / Configuration . h <nl> <nl> / / = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = <nl> / / = = = = = = = = = = = = = = = = = = = = = = = = = = = = = DELTA Printer = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = <nl> / / = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = <nl> - / / For a Delta printer rplace the configuration files wilth the files in the <nl> + / / For a Delta printer replace the configuration files with the files in the <nl> / / example_configurations / delta directory . <nl> / / <nl> <nl> <nl> / / 702 = Minitronics v1 . 0 <nl> / / 90 = Alpha OMCA board <nl> / / 91 = Final OMCA board <nl> - / / 301 = Rambo <nl> + / / 301 = Rambo <nl> / / 21 = Elefu Ra Board ( v3 ) <nl> <nl> # ifndef MOTHERBOARD <nl> <nl> <nl> # define POWER_SUPPLY 1 <nl> <nl> - / / Define this to have the electronics keep the powersupply off on startup . If you don ' t know what this is leave it . <nl> + / / Define this to have the electronics keep the power supply off on startup . If you don ' t know what this is leave it . <nl> / / # define PS_DEFAULT_OFF <nl> <nl> / / = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = <nl> <nl> / / 0 is not used <nl> / / 1 is 100k thermistor - best choice for EPCOS 100k ( 4 . 7k pullup ) <nl> / / 2 is 200k thermistor - ATC Semitec 204GT - 2 ( 4 . 7k pullup ) <nl> - / / 3 is mendel - parts thermistor ( 4 . 7k pullup ) <nl> + / / 3 is Mendel - parts thermistor ( 4 . 7k pullup ) <nl> / / 4 is 10k thermistor ! ! do not use it for a hotend . It gives bad resolution at high temp . ! ! <nl> / / 5 is 100K thermistor - ATC Semitec 104GT - 2 ( Used in ParCan & J - Head ) ( 4 . 7k pullup ) <nl> / / 6 is 100k EPCOS - Not as accurate as table 1 ( created using a fluke thermocouple ) ( 4 . 7k pullup ) <nl> <nl> / / 9 is 100k GE Sensing AL03006 - 58 . 2K - 97 - G1 ( 4 . 7k pullup ) <nl> / / 10 is 100k RS thermistor 198 - 961 ( 4 . 7k pullup ) <nl> / / 20 is the PT100 circuit found in the Ultimainboard V2 . x <nl> - / / 60 is 100k Maker ' s Tool Works Kapton Bed Thermister <nl> + / / 60 is 100k Maker ' s Tool Works Kapton Bed Thermistor <nl> / / <nl> / / 1k ohm pullup tables - This is not normal , you would have to have changed out your 4 . 7k for 1k <nl> / / ( but gives greater accuracy and more stable PID ) <nl> <nl> # define K1 0 . 95 / / smoothing factor within the PID <nl> # define PID_dT ( ( OVERSAMPLENR * 8 . 0 ) / ( F_CPU / 64 . 0 / 256 . 0 ) ) / / sampling period of the temperature routine <nl> <nl> - / / If you are using a preconfigured hotend then you can use one of the value sets by uncommenting it <nl> + / / If you are using a pre - configured hotend then you can use one of the value sets by uncommenting it <nl> / / Ultimaker <nl> # define DEFAULT_Kp 22 . 2 <nl> # define DEFAULT_Ki 1 . 08 <nl> # define DEFAULT_Kd 114 <nl> <nl> - / / Makergear <nl> + / / MakerGear <nl> / / # define DEFAULT_Kp 7 . 0 <nl> / / # define DEFAULT_Ki 0 . 1 <nl> / / # define DEFAULT_Kd 12 <nl> <nl> # define ENDSTOPPULLUPS / / Comment this out ( using / / at the start of the line ) to disable the endstop pullup resistors <nl> <nl> # ifndef ENDSTOPPULLUPS <nl> - / / fine Enstop settings : Individual Pullups . 
will be ignored if ENDSTOPPULLUPS is defined <nl> + / / fine endstop settings : Individual pullups . will be ignored if ENDSTOPPULLUPS is defined <nl> / / # define ENDSTOPPULLUP_XMAX <nl> / / # define ENDSTOPPULLUP_YMAX <nl> / / # define ENDSTOPPULLUP_ZMAX <nl> const bool Z_MAX_ENDSTOP_INVERTING = true ; / / set to true to invert the logic of <nl> # define BACK_PROBE_BED_POSITION 180 <nl> # define FRONT_PROBE_BED_POSITION 20 <nl> <nl> - / / these are the offsets to the prob relative to the extruder tip ( Hotend - Probe ) <nl> + / / these are the offsets to the probe relative to the extruder tip ( Hotend - Probe ) <nl> # define X_PROBE_OFFSET_FROM_EXTRUDER - 25 <nl> # define Y_PROBE_OFFSET_FROM_EXTRUDER - 29 <nl> # define Z_PROBE_OFFSET_FROM_EXTRUDER - 12 . 35 <nl> const bool Z_MAX_ENDSTOP_INVERTING = true ; / / set to true to invert the logic of <nl> / / # define PROBE_SERVO_DEACTIVATION_DELAY 300 <nl> <nl> <nl> - / / If you have enabled the Bed Auto Levelling and are using the same Z Probe for Z Homing , <nl> + / / If you have enabled the Bed Auto Leveling and are using the same Z Probe for Z Homing , <nl> / / it is highly recommended you let this Z_SAFE_HOMING enabled ! ! ! <nl> <nl> # define Z_SAFE_HOMING / / This feature is meant to avoid Z homing with probe outside the bed area . <nl> const bool Z_MAX_ENDSTOP_INVERTING = true ; / / set to true to invert the logic of <nl> / / # define BED_CENTER_AT_0_0 / / If defined , the center of the bed is at ( X = 0 , Y = 0 ) <nl> <nl> / / Manual homing switch locations : <nl> - / / For deltabots this means top and center of the cartesian print volume . <nl> + / / For deltabots this means top and center of the Cartesian print volume . <nl> # define MANUAL_X_HOME_POS 0 <nl> # define MANUAL_Y_HOME_POS 0 <nl> # define MANUAL_Z_HOME_POS 0 <nl> const bool Z_MAX_ENDSTOP_INVERTING = true ; / / set to true to invert the logic of <nl> <nl> # define DEFAULT_AXIS_STEPS_PER_UNIT { 78 . 7402 , 78 . 7402 , 200 . 0 * 8 / 3 , 760 * 1 . 1 } / / default steps per unit for Ultimaker <nl> # define DEFAULT_MAX_FEEDRATE { 500 , 500 , 5 , 25 } / / ( mm / sec ) <nl> - # define DEFAULT_MAX_ACCELERATION { 9000 , 9000 , 100 , 10000 } / / X , Y , Z , E maximum start speed for accelerated moves . E default values are good for skeinforge 40 + , for older versions raise them a lot . <nl> + # define DEFAULT_MAX_ACCELERATION { 9000 , 9000 , 100 , 10000 } / / X , Y , Z , E maximum start speed for accelerated moves . E default values are good for Skeinforge 40 + , for older versions raise them a lot . <nl> <nl> # define DEFAULT_ACCELERATION 3000 / / X , Y , Z and E max acceleration in mm / s ^ 2 for printing moves <nl> # define DEFAULT_RETRACT_ACCELERATION 3000 / / X , Y , Z and E max acceleration in mm / s ^ 2 for retracts <nl> const bool Z_MAX_ENDSTOP_INVERTING = true ; / / set to true to invert the logic of <nl> / / = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = <nl> <nl> / / EEPROM <nl> - / / the microcontroller can store settings in the EEPROM , e . g . max velocity . . . <nl> - / / M500 - stores paramters in EEPROM <nl> + / / The microcontroller can store settings in the EEPROM , e . g . max velocity . . . <nl> + / / M500 - stores parameters in EEPROM <nl> / / M501 - reads parameters from EEPROM ( if you need reset them after you changed them temporarily ) . <nl> / / M502 - reverts to the default " factory settings " . 
You still need to store them in EEPROM afterwards if you want to . <nl> - / / define this to enable eeprom support <nl> + / / define this to enable EEPROM support <nl> / / # define EEPROM_SETTINGS <nl> / / to disable EEPROM Serial responses and decrease program space by ~ 1700 byte : comment this out : <nl> / / please keep turned on if you can . <nl> const bool Z_MAX_ENDSTOP_INVERTING = true ; / / set to true to invert the logic of <nl> # define ABS_PREHEAT_FAN_SPEED 255 / / Insert Value between 0 and 255 <nl> <nl> / / LCD and SD support <nl> - / / # define ULTRA_LCD / / general lcd support , also 16x2 <nl> + / / # define ULTRA_LCD / / general LCD support , also 16x2 <nl> / / # define DOGLCD / / Support for SPI LCD 128x64 ( Controller ST7565R graphic Display Family ) <nl> / / # define SDSUPPORT / / Enable SD Card Support in Hardware Console <nl> / / # define SDSLOW / / Use slower SD transfer mode ( not normally needed - uncomment if you ' re getting volume init error ) <nl> / / # define ENCODER_PULSES_PER_STEP 1 / / Increase if you have a high resolution encoder <nl> / / # define ENCODER_STEPS_PER_MENU_ITEM 5 / / Set according to ENCODER_PULSES_PER_STEP or your liking <nl> - / / # define ULTIMAKERCONTROLLER / / as available from the ultimaker online store . <nl> - / / # define ULTIPANEL / / the ultipanel as on thingiverse <nl> + / / # define ULTIMAKERCONTROLLER / / as available from the Ultimaker online store . <nl> + / / # define ULTIPANEL / / the UltiPanel as on Thingiverse <nl> / / # define LCD_FEEDBACK_FREQUENCY_HZ 1000 / / this is the tone frequency the buzzer plays when on UI feedback . ie Screen Click <nl> / / # define LCD_FEEDBACK_FREQUENCY_DURATION_MS 100 / / the duration the buzzer plays the UI feedback sound . ie Screen Click <nl> <nl> const bool Z_MAX_ENDSTOP_INVERTING = true ; / / set to true to invert the logic of <nl> / / https : / / bitbucket . org / fmalpartida / new - liquidcrystal / wiki / schematics # ! shiftregister - connection <nl> / / # define SR_LCD <nl> # ifdef SR_LCD <nl> - # define SR_LCD_2W_NL / / Non latching 2 wire shiftregister <nl> + # define SR_LCD_2W_NL / / Non latching 2 wire shift register <nl> / / # define NEWPANEL <nl> # endif <nl> <nl> const bool Z_MAX_ENDSTOP_INVERTING = true ; / / set to true to invert the logic of <nl> # define LCD_WIDTH 20 <nl> # define LCD_HEIGHT 4 <nl> # endif <nl> - # else / / no panel but just lcd <nl> + # else / / no panel but just LCD <nl> # ifdef ULTRA_LCD <nl> # ifdef DOGLCD / / Change number of lines to match the 128x64 graphics display <nl> # define LCD_WIDTH 20 <nl> const bool Z_MAX_ENDSTOP_INVERTING = true ; / / set to true to invert the logic of <nl> / / Increase the FAN pwm frequency . Removes the PWM noise but increases heating in the FET / Arduino <nl> / / # define FAST_PWM_FAN <nl> <nl> - / / Temperature status leds that display the hotend and bet temperature . <nl> - / / If alle hotends and bed temperature and temperature setpoint are < 54C then the BLUE led is on . <nl> + / / Temperature status LEDs that display the hotend and bet temperature . <nl> + / / If all hotends and bed temperature and temperature setpoint are < 54C then the BLUE led is on . <nl> / / Otherwise the RED led is on . There is 1C hysteresis . <nl> / / # define TEMP_STAT_LEDS <nl> <nl> mmm a / Marlin / Configuration_adv . h <nl> ppp b / Marlin / Configuration_adv . 
h <nl> <nl> # define BED_CHECK_INTERVAL 5000 / / ms between checks in bang - bang control <nl> <nl> / / / / Heating sanity check : <nl> - / / This waits for the watchperiod in milliseconds whenever an M104 or M109 increases the target temperature <nl> + / / This waits for the watch period in milliseconds whenever an M104 or M109 increases the target temperature <nl> / / If the temperature has not increased at the end of that period , the target temperature is set to zero . <nl> / / It can be reset with another M104 / M109 . This check is also only triggered if the target temperature and the current temperature <nl> / / differ by at least 2x WATCH_TEMP_INCREASE <nl> <nl> / / # define WATCH_TEMP_INCREASE 10 / / Heat up at least 10 degree in 20 seconds <nl> <nl> # ifdef PIDTEMP <nl> - / / this adds an experimental additional term to the heatingpower , proportional to the extrusion speed . <nl> - / / if Kc is choosen well , the additional required power due to increased melting should be compensated . <nl> + / / this adds an experimental additional term to the heating power , proportional to the extrusion speed . <nl> + / / if Kc is chosen well , the additional required power due to increased melting should be compensated . <nl> # define PID_ADD_EXTRUSION_RATE <nl> # ifdef PID_ADD_EXTRUSION_RATE <nl> - # define DEFAULT_Kc ( 1 ) / / heatingpower = Kc * ( e_speed ) <nl> + # define DEFAULT_Kc ( 1 ) / / heating power = Kc * ( e_speed ) <nl> # endif <nl> # endif <nl> <nl> <nl> / / the target temperature is set to mintemp + factor * se [ steps / sec ] and limited by mintemp and maxtemp <nl> / / you exit the value by any M109 without F * <nl> / / Also , if the temperature is set to a value < mintemp , it is not changed by autotemp . <nl> - / / on an ultimaker , some initial testing worked with M109 S215 B260 F1 in the start . gcode <nl> + / / on an Ultimaker , some initial testing worked with M109 S215 B260 F1 in the start . gcode <nl> # define AUTOTEMP <nl> # ifdef AUTOTEMP <nl> # define AUTOTEMP_OLDWEIGHT 0 . 98 <nl> <nl> # define SD_FINISHED_STEPPERRELEASE true / / if sd support and the file is finished : disable steppers ? <nl> # define SD_FINISHED_RELEASECOMMAND " M84 X Y Z E " / / You might want to keep the z enabled so your bed stays in place . <nl> <nl> - # define SDCARD_RATHERRECENTFIRST / / reverse file order of sd card menu display . Its sorted practically after the filesystem block order . <nl> - / / if a file is deleted , it frees a block . hence , the order is not purely cronological . To still have auto0 . g accessible , there is again the option to do that . <nl> + # define SDCARD_RATHERRECENTFIRST / / reverse file order of sd card menu display . Its sorted practically after the file system block order . <nl> + / / if a file is deleted , it frees a block . hence , the order is not purely chronological . To still have auto0 . g accessible , there is again the option to do that . <nl> / / using : <nl> / / # define MENU_ADDAUTOSTART <nl> <nl> - / / The hardware watchdog should reset the Microcontroller disabling all outputs , in case the firmware gets stuck and doesn ' t do temperature regulation . <nl> + / / The hardware watchdog should reset the microcontroller disabling all outputs , in case the firmware gets stuck and doesn ' t do temperature regulation . 
<nl> / / # define USE_WATCHDOG <nl> <nl> # ifdef USE_WATCHDOG <nl> <nl> / / # define ABORT_ON_ENDSTOP_HIT_FEATURE_ENABLED <nl> <nl> / / Babystepping enables the user to control the axis in tiny amounts , independently from the normal printing process <nl> - / / it can e . g . be used to change z - positions in the print startup phase in realtime <nl> + / / it can e . g . be used to change z - positions in the print startup phase in real - time <nl> / / does not respect endstops ! <nl> / / # define BABYSTEPPING <nl> # ifdef BABYSTEPPING <nl> <nl> / / <nl> / / advance ( steps ) = STEPS_PER_CUBIC_MM_E * EXTUDER_ADVANCE_K * cubic mm per second ^ 2 <nl> / / <nl> - / / hooke ' s law says : force = k * distance <nl> - / / bernoulli ' s priniciple says : v ^ 2 / 2 + g . h + pressure / density = constant <nl> + / / Hooke ' s law says : force = k * distance <nl> + / / Bernoulli ' s principle says : v ^ 2 / 2 + g . h + pressure / density = constant <nl> / / so : v ^ 2 is proportional to number of steps we advance the extruder <nl> / / # define ADVANCE <nl> <nl> const unsigned int dropsegments = 5 ; / / everything with less than this number of st <nl> / / = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = <nl> <nl> / / The number of linear motions that can be in the plan at any give time . <nl> - / / THE BLOCK_BUFFER_SIZE NEEDS TO BE A POWER OF 2 , i . g . 8 , 16 , 32 because shifts and ors are used to do the ringbuffering . <nl> + / / THE BLOCK_BUFFER_SIZE NEEDS TO BE A POWER OF 2 , i . g . 8 , 16 , 32 because shifts and ors are used to do the ring - buffering . <nl> # if defined SDSUPPORT <nl> # define BLOCK_BUFFER_SIZE 16 / / SD , LCD , Buttons take more memory , block buffer needs to be smaller <nl> # else <nl> const unsigned int dropsegments = 5 ; / / everything with less than this number of st <nl> # endif <nl> <nl> <nl> - / / The ASCII buffer for recieving from the serial : <nl> + / / The ASCII buffer for receiving from the serial : <nl> # define MAX_CMD_SIZE 96 <nl> # define BUFSIZE 4 <nl> <nl> <nl> - / / Firmware based and LCD controled retract <nl> + / / Firmware based and LCD controlled retract <nl> / / M207 and M208 can be used to define parameters for the retraction . <nl> / / The retraction can be called by the slicer using G10 and G11 <nl> / / until then , intended retractions can be detected by moves that only extrude and the direction . <nl> | Merge pull request from Cylindric3D / typofixes_1 | MarlinFirmware/Marlin | 3f455889d06f74509c108fee9c48c5d5b28d2f8e | 2014-02-17T11:30:01Z |
mmm a / tensorflow / python / kernel_tests / tensor_array_ops_test . py <nl> ppp b / tensorflow / python / kernel_tests / tensor_array_ops_test . py <nl> def testSkipEagerTensorArrayInt64GPU ( self ) : <nl> self . assertAllEqual ( v0 , - 3 ) <nl> self . assertAllEqual ( v1 , 100 ) <nl> <nl> + def testInferShapeFalseValid ( self ) : <nl> + ta = tensor_array_ops . TensorArray ( <nl> + dtypes . float32 , size = 3 , infer_shape = False , element_shape = [ None , 10 , 20 ] ) <nl> + ta = ta . write ( 0 , array_ops . ones ( [ 50 , 10 , 20 ] ) ) <nl> + ta = ta . write ( 1 , array_ops . ones ( [ 50 , 10 , 20 ] ) ) <nl> + ta = ta . write ( 2 , array_ops . ones ( [ 1 , 10 , 20 ] ) ) <nl> + ta = ta . concat ( ) <nl> + <nl> + correct = np . ones ( [ 101 , 10 , 20 ] ) <nl> + <nl> + self . assertAllEqual ( ta , correct ) <nl> + <nl> + def testInferShapeFalseInvalid ( self ) : <nl> + ta = tensor_array_ops . TensorArray ( <nl> + dtypes . float32 , size = 2 , infer_shape = False , element_shape = [ None , 10 , 20 ] ) <nl> + ta = ta . write ( 0 , array_ops . ones ( [ 50 , 10 , 20 ] ) ) <nl> + <nl> + with self . assertRaises ( ValueError ) : <nl> + ta = ta . write ( 1 , array_ops . ones ( [ 1 , 20 , 20 ] ) ) <nl> + <nl> + def testInferShapeTrue ( self ) : <nl> + ta = tensor_array_ops . TensorArray ( <nl> + dtypes . float32 , size = 3 , infer_shape = True , element_shape = [ None , 10 , 20 ] ) <nl> + self . assertAllEqual ( ( None , 10 , 20 ) , ta . element_shape . as_list ( ) ) <nl> + ta = ta . write ( 0 , array_ops . ones ( [ 50 , 10 , 20 ] ) ) <nl> + self . assertAllEqual ( ( 50 , 10 , 20 ) , ta . element_shape . as_list ( ) ) <nl> + ta = ta . write ( 1 , array_ops . ones ( [ 50 , 10 , 20 ] ) ) <nl> + with self . assertRaises ( ValueError ) : <nl> + ta = ta . write ( <nl> + 2 , array_ops . ones ( [ 1 , 10 , 20 ] ) <nl> + ) # Inconsistent shapes : saw ( 1 , 10 , 20 ) but expected ( 50 , 10 , 20 ) <nl> + <nl> <nl> class TensorArrayBenchmark ( test . Benchmark ) : <nl> <nl> mmm a / tensorflow / python / ops / tensor_array_ops . py <nl> ppp b / tensorflow / python / ops / tensor_array_ops . py <nl> def __init__ ( self , <nl> # of the first write . If ` infer_shape ` is true , all writes checks for <nl> # shape equality . <nl> self . _element_shape = [ tensor_shape . as_shape ( element_shape ) ] <nl> - self . _infer_shape = element_shape is not None or infer_shape <nl> + self . _infer_shape = infer_shape <nl> with ops . name_scope ( name , " TensorArray " , [ handle , size , flow ] ) as scope : <nl> if handle is not None : <nl> self . _handle = handle <nl> def handle ( self ) : <nl> def element_shape ( self ) : <nl> return self . _element_shape [ 0 ] <nl> <nl> - def _merge_element_shape ( self , shape ) : <nl> + def _check_element_shape ( self , shape ) : <nl> " " " Changes the element shape of the array given a shape to merge with . <nl> <nl> Args : <nl> def _merge_element_shape ( self , shape ) : <nl> element shape of the ` TensorArray ` . <nl> " " " <nl> if not shape . is_compatible_with ( self . element_shape ) : <nl> - raise ValueError ( <nl> - " Inconsistent shapes : saw % s but expected % s " <nl> - " ( and infer_shape = True ) " % ( shape , self . element_shape ) ) <nl> - self . _element_shape [ 0 ] = self . element_shape . merge_with ( shape ) <nl> + raise ValueError ( " Inconsistent shapes : saw % s but expected % s " % <nl> + ( shape , self . element_shape ) ) <nl> + if self . _infer_shape : <nl> + self . _element_shape [ 0 ] = self . element_shape . merge_with ( shape ) <nl> <nl> @ contextlib . 
contextmanager <nl> def _maybe_colocate_with ( self , value ) : <nl> def write ( self , index , value , name = None ) : <nl> value = ops . convert_to_tensor ( <nl> value , preferred_dtype = self . _dtype , name = " value " ) <nl> _check_dtypes ( value , self . _dtype ) <nl> - if self . _infer_shape : <nl> - self . _merge_element_shape ( value . shape ) <nl> + self . _check_element_shape ( value . shape ) <nl> with self . _maybe_colocate_with ( value ) : <nl> flow_out = gen_data_flow_ops . tensor_array_write_v3 ( <nl> handle = self . _handle , <nl> def scatter ( self , indices , value , name = None ) : <nl> value = ops . convert_to_tensor ( <nl> value , preferred_dtype = self . _dtype , name = " value " ) <nl> _check_dtypes ( value , self . _dtype ) <nl> - if self . _infer_shape and not context . executing_eagerly ( ) : <nl> - self . _merge_element_shape ( value . shape [ 1 : ] ) <nl> + if not context . executing_eagerly ( ) : <nl> + self . _check_element_shape ( value . shape [ 1 : ] ) <nl> with self . _maybe_colocate_with ( value ) : <nl> flow_out = gen_data_flow_ops . tensor_array_scatter_v3 ( <nl> handle = self . _handle , <nl> def split ( self , value , lengths , name = None ) : <nl> value = ops . convert_to_tensor ( value , dtype = self . _dtype , name = " value " ) <nl> with self . _maybe_colocate_with ( value ) : <nl> lengths_64 = math_ops . cast ( lengths , dtypes . int64 ) <nl> - if self . _infer_shape and not context . executing_eagerly ( ) : <nl> + if not context . executing_eagerly ( ) : <nl> clengths = tensor_util . constant_value ( lengths_64 ) <nl> - if value . shape . dims is not None : <nl> - if clengths is not None and clengths . max ( ) = = clengths . min ( ) : <nl> - self . _merge_element_shape ( <nl> + if value . shape . dims is not None and clengths is not None : <nl> + if clengths . shape and clengths . max ( ) = = clengths . min ( ) : <nl> + self . _check_element_shape ( <nl> tensor_shape . TensorShape ( [ clengths [ 0 ] ] ) . concatenate ( <nl> value . shape [ 1 : ] ) ) <nl> flow_out = gen_data_flow_ops . tensor_array_split_v3 ( <nl> def __init__ ( self , <nl> # of the first write . If ` infer_shape ` is true , all writes checks for <nl> # shape equality . <nl> self . _element_shape = [ tensor_shape . as_shape ( element_shape ) ] <nl> - self . _infer_shape = element_shape is not None or infer_shape <nl> + self . _infer_shape = infer_shape <nl> with ops . name_scope ( name , " TensorArrayV2 " , [ size , flow ] ) as scope : <nl> if flow is None : <nl> self . _flow = list_ops . tensor_list_reserve ( <nl> def handle ( self ) : <nl> # complain . <nl> return None <nl> <nl> - def _merge_element_shape ( self , shape ) : <nl> + def _check_element_shape ( self , shape ) : <nl> " " " Changes the element shape of the array given a shape to merge with . <nl> <nl> Args : <nl> def _merge_element_shape ( self , shape ) : <nl> element shape of the ` TensorArray ` . <nl> " " " <nl> if not shape . is_compatible_with ( self . element_shape ) : <nl> - raise ValueError ( <nl> - " Inconsistent shapes : saw % s but expected % s " <nl> - " ( and infer_shape = True ) " % ( shape , self . element_shape ) ) <nl> - self . _element_shape [ 0 ] = self . element_shape . merge_with ( shape ) <nl> + raise ValueError ( " Inconsistent shapes : saw % s but expected % s " % <nl> + ( shape , self . element_shape ) ) <nl> + if self . _infer_shape : <nl> + self . _element_shape [ 0 ] = self . element_shape . merge_with ( shape ) <nl> <nl> def identity ( self ) : <nl> " " " See TensorArray . 
" " " <nl> def write ( self , index , value , name = None ) : <nl> value = ops . convert_to_tensor ( <nl> value , preferred_dtype = self . _dtype , name = " value " ) <nl> _check_dtypes ( value , self . _dtype ) <nl> - if self . _infer_shape : <nl> - self . _merge_element_shape ( value . shape ) <nl> + self . _check_element_shape ( value . shape ) <nl> flow_out = list_ops . tensor_list_set_item ( <nl> input_handle = self . _flow , <nl> index = index , <nl> def unstack ( self , value , name = None ) : <nl> value = ops . convert_to_tensor ( <nl> value , preferred_dtype = self . _dtype , name = " value " ) <nl> _check_dtypes ( value , self . _dtype ) <nl> - if self . _infer_shape and not context . executing_eagerly ( ) : <nl> - self . _merge_element_shape ( value . shape [ 1 : ] ) <nl> + self . _check_element_shape ( value . shape [ 1 : ] ) <nl> flow_out = list_ops . tensor_list_from_tensor ( <nl> tensor = value , element_shape = value . shape [ 1 : ] ) <nl> return build_ta_with_new_flow ( self , flow_out ) <nl> def scatter ( self , indices , value , name = None ) : <nl> value = ops . convert_to_tensor ( <nl> value , preferred_dtype = self . _dtype , name = " value " ) <nl> _check_dtypes ( value , self . _dtype ) <nl> - if self . _infer_shape and not context . executing_eagerly ( ) : <nl> - self . _merge_element_shape ( value . shape [ 1 : ] ) <nl> + self . _check_element_shape ( value . shape [ 1 : ] ) <nl> flow_out = list_ops . tensor_list_scatter ( <nl> tensor = value , indices = indices , element_shape = self . element_shape , <nl> input_handle = self . _flow ) <nl> def split ( self , value , lengths , name = None ) : <nl> value , preferred_dtype = self . _dtype , name = " value " ) <nl> _check_dtypes ( value , self . _dtype ) <nl> lengths_64 = math_ops . cast ( lengths , dtypes . int64 ) <nl> - if self . _infer_shape and not context . executing_eagerly ( ) : <nl> + if not context . executing_eagerly ( ) : <nl> clengths = tensor_util . constant_value ( lengths_64 ) <nl> - if value . shape . dims is not None : <nl> - if clengths is not None and clengths . max ( ) = = clengths . min ( ) : <nl> - self . _merge_element_shape ( <nl> + if value . shape . dims is not None and clengths is not None : <nl> + if clengths . shape and clengths . max ( ) = = clengths . min ( ) : <nl> + self . _check_element_shape ( <nl> tensor_shape . TensorShape ( [ clengths [ 0 ] ] ) . concatenate ( <nl> value . shape [ 1 : ] ) ) <nl> flow_out = list_ops . tensor_list_split ( <nl> def __init__ ( self , <nl> # we assign a dummy value to _flow in case other code assumes it to be <nl> # a Tensor <nl> self . _flow = constant_op . constant ( 0 , dtype = dtypes . int32 ) <nl> - self . _infer_shape = element_shape is not None or infer_shape <nl> + self . _infer_shape = infer_shape <nl> self . _element_shape = tensor_shape . as_shape ( element_shape ) <nl> self . _colocate_with_first_write_call = colocate_with_first_write_call <nl> <nl> def _write ( self , index , value ) : <nl> " TensorArray dtype is % s but Op is trying to write dtype % s " % <nl> ( self . _dtype . name , value . dtype . name ) ) <nl> <nl> + if not self . _element_shape . is_compatible_with ( value . shape ) : <nl> + raise ValueError ( " Incompatible shape for value ( % s ) , expected ( % s ) " % <nl> + ( value . shape , self . _element_shape ) ) <nl> + <nl> if self . _infer_shape : <nl> - if not self . _element_shape . is_compatible_with ( value . 
shape ) : <nl> - raise ValueError ( " Incompatible shape for value ( % s ) , expected ( % s ) " % <nl> - ( value . shape , self . _element_shape ) ) <nl> - else : <nl> - self . _element_shape = self . _element_shape . merge_with ( value . shape ) <nl> + self . _element_shape = self . _element_shape . merge_with ( value . shape ) <nl> <nl> self . _tensor_array [ index ] = value <nl> <nl> | Fix TensorArray shape inference . Shape merging should not happen if infer_shape = False . | tensorflow/tensorflow | 650920e6ac567e4ad17cda78df6681d72e443423 | 2019-07-25T16:14:46Z |
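A note on the semantics the TensorFlow change above settles: a written value is always validated against element_shape, while shape refinement (merging the observed shape back into element_shape) happens only when infer_shape is true. The following is a minimal C++ sketch of that rule, not TensorFlow code; ShapeTracker, Dim, and check_element_shape are illustrative names.

    #include <cstdint>
    #include <optional>
    #include <stdexcept>
    #include <vector>

    // A partially known shape: nullopt marks an unknown dimension.
    using Dim = std::optional<int64_t>;

    struct ShapeTracker {
      std::vector<Dim> element_shape;  // e.g. {nullopt, 10, 20}
      bool infer_shape;

      void check_element_shape(const std::vector<int64_t>& shape) {
        // Compatibility is enforced regardless of infer_shape.
        if (shape.size() != element_shape.size())
          throw std::invalid_argument("inconsistent ranks");
        for (size_t i = 0; i < shape.size(); ++i)
          if (element_shape[i] && *element_shape[i] != shape[i])
            throw std::invalid_argument("inconsistent shapes");
        // Refinement (merging) only happens with shape inference on.
        if (infer_shape)
          for (size_t i = 0; i < shape.size(); ++i)
            element_shape[i] = shape[i];
      }
    };

With infer_shape off, a tracker built as {{std::nullopt, 10, 20}, false} accepts both a (50, 10, 20) and a (1, 10, 20) write; with it on, the first write pins the leading dimension to 50 and the later (1, 10, 20) write is rejected — matching the two tests added above.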
mmm a / modules / planning / on_lane_planning . cc <nl> ppp b / modules / planning / on_lane_planning . cc <nl> void OnLanePlanning : : RunOnce ( const LocalView & local_view , <nl> ADCTrajectory * const ptr_trajectory_pb ) { <nl> local_view_ = local_view ; <nl> const double start_timestamp = Clock : : NowInSeconds ( ) ; <nl> + const double start_system_timestamp = <nl> + std : : chrono : : duration < double > ( <nl> + std : : chrono : : system_clock : : now ( ) . time_since_epoch ( ) ) <nl> + . count ( ) ; <nl> <nl> / / localization <nl> ADEBUG < < " Get localization : " <nl> void OnLanePlanning : : RunOnce ( const LocalView & local_view , <nl> for ( const auto & p : ptr_trajectory_pb - > trajectory_point ( ) ) { <nl> ADEBUG < < p . DebugString ( ) ; <nl> } <nl> - const auto time_diff_ms = ( Clock : : NowInSeconds ( ) - start_timestamp ) * 1000 ; <nl> + const auto end_system_timestamp = <nl> + std : : chrono : : duration < double > ( <nl> + std : : chrono : : system_clock : : now ( ) . time_since_epoch ( ) ) <nl> + . count ( ) ; <nl> + const auto time_diff_ms = <nl> + ( end_system_timestamp - start_system_timestamp ) * 1000 ; <nl> ADEBUG < < " total planning time spend : " < < time_diff_ms < < " ms . " ; <nl> <nl> ptr_trajectory_pb - > mutable_latency_stats ( ) - > set_total_time_ms ( time_diff_ms ) ; <nl> | planning : use system time to calculate planning time spent . | ApolloAuto/apollo | 78fe4b71ff22cd85ebdda8e27aaec06e8c78d2b8 | 2019-04-24T00:54:53Z |
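For reference, the timing pattern introduced in the Apollo change — converting system_clock::now().time_since_epoch() to a double-valued duration in seconds and differencing two samples — shown in isolation; a self-contained sketch with the timed work left as a placeholder:

    #include <chrono>
    #include <iostream>

    int main() {
      const double start = std::chrono::duration<double>(
          std::chrono::system_clock::now().time_since_epoch()).count();

      // ... work being timed ...

      const double end = std::chrono::duration<double>(
          std::chrono::system_clock::now().time_since_epoch()).count();
      std::cout << "elapsed: " << (end - start) * 1000 << " ms\n";
    }

One design note: system_clock tracks wall-clock time and can jump if the host clock is adjusted, so std::chrono::steady_clock is the usual choice when only the interval matters.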
mmm a / libraries / fc <nl> ppp b / libraries / fc <nl> @ @ - 1 + 1 @ @ <nl> - Subproject commit 9150e7294b8dce4d5daded08b2c56bc2b2ae5586 <nl> + Subproject commit c9e0e66bcc314caa91d6a135f0d569390e423864 <nl> | update fc | EOSIO/eos | d2410d4cfa9d9eafe3e482dbf01c0b759fddcee4 | 2019-08-13T22:19:15Z |
new file mode 100644 <nl> index 000000000000 . . 59d40a47f583 <nl> mmm / dev / null <nl> ppp b / validation - test / compiler_crashers / 28369 - swift - decl - walk . swift <nl> <nl> + / / This source file is part of the Swift . org open source project <nl> + / / Copyright ( c ) 2014 - 2016 Apple Inc . and the Swift project authors <nl> + / / Licensed under Apache License v2 . 0 with Runtime Library Exception <nl> + / / <nl> + / / See http : / / swift . org / LICENSE . txt for license information <nl> + / / See http : / / swift . org / CONTRIBUTORS . txt for the list of Swift project authors <nl> + <nl> + / / Credits : https : / / twitter . com / kiliankoe / status / 752090953977036800 <nl> + <nl> + / / RUN : not - - crash % target - swift - frontend % s - parse <nl> + protocol P { <nl> + } <nl> + struct A < T > { <nl> + func a < B where T : P > ( ) - > B { <nl> + } <nl> + } <nl> | Merge pull request from practicalswift / 28369 - swift - decl - walk . swift | apple/swift | d10ea6ef75b3cf61b6112c8b580055df3f4bb208 | 2016-07-20T16:01:34Z |
mmm a / src / qt / bitcoingui . cpp <nl> ppp b / src / qt / bitcoingui . cpp <nl> BitcoinGUI : : ~ BitcoinGUI ( ) <nl> trayIcon - > hide ( ) ; <nl> # ifdef Q_OS_MAC <nl> delete appMenuBar ; <nl> + MacDockIconHandler : : instance ( ) - > setMainWindow ( NULL ) ; <nl> # endif <nl> } <nl> <nl> void BitcoinGUI : : createTrayIconMenu ( ) <nl> # else <nl> / / Note : On Mac , the dock icon is used to provide the tray ' s functionality . <nl> MacDockIconHandler * dockIconHandler = MacDockIconHandler : : instance ( ) ; <nl> + dockIconHandler - > setMainWindow ( ( QMainWindow * ) this ) ; <nl> trayIconMenu = dockIconHandler - > dockMenu ( ) ; <nl> # endif <nl> <nl> mmm a / src / qt / macdockiconhandler . h <nl> ppp b / src / qt / macdockiconhandler . h <nl> <nl> # define MACDOCKICONHANDLER_H <nl> <nl> # include < QObject > <nl> + # include < QMainWindow > <nl> <nl> QT_BEGIN_NAMESPACE <nl> class QMenu ; <nl> class MacDockIconHandler : public QObject <nl> <nl> QMenu * dockMenu ( ) ; <nl> void setIcon ( const QIcon & icon ) ; <nl> - <nl> + void setMainWindow ( QMainWindow * window ) ; <nl> static MacDockIconHandler * instance ( ) ; <nl> <nl> void handleDockIconClickEvent ( ) ; <nl> class MacDockIconHandler : public QObject <nl> DockIconClickEventHandler * m_dockIconClickEventHandler ; <nl> QWidget * m_dummyWidget ; <nl> QMenu * m_dockMenu ; <nl> + QMainWindow * mainWindow ; <nl> } ; <nl> <nl> # endif / / MACDOCKICONCLICKHANDLER_H <nl> mmm a / src / qt / macdockiconhandler . mm <nl> ppp b / src / qt / macdockiconhandler . mm <nl> - ( void ) handleDockClickEvent : ( NSAppleEventDescriptor * ) event withReplyEvent : ( NSAp <nl> Q_UNUSED ( event ) <nl> Q_UNUSED ( replyEvent ) <nl> <nl> - if ( dockIconHandler ) <nl> + if ( dockIconHandler ) { <nl> dockIconHandler - > handleDockIconClickEvent ( ) ; <nl> + } <nl> } <nl> <nl> @ end <nl> - ( void ) handleDockClickEvent : ( NSAppleEventDescriptor * ) event withReplyEvent : ( NSAp <nl> [ pool release ] ; <nl> } <nl> <nl> + void MacDockIconHandler : : setMainWindow ( QMainWindow * window ) { <nl> + this - > mainWindow = window ; <nl> + } <nl> + <nl> MacDockIconHandler : : ~ MacDockIconHandler ( ) <nl> { <nl> [ this - > m_dockIconClickEventHandler release ] ; <nl> delete this - > m_dummyWidget ; <nl> + this - > setMainWindow ( NULL ) ; <nl> } <nl> <nl> QMenu * MacDockIconHandler : : dockMenu ( ) <nl> - ( void ) handleDockClickEvent : ( NSAppleEventDescriptor * ) event withReplyEvent : ( NSAp <nl> <nl> void MacDockIconHandler : : handleDockIconClickEvent ( ) <nl> { <nl> + this - > mainWindow - > activateWindow ( ) ; <nl> + this - > mainWindow - > show ( ) ; <nl> + <nl> emit this - > dockIconClicked ( ) ; <nl> } <nl> | Merge pull request from jonasschnelli / mac_win_reopen_fix | bitcoin/bitcoin | 63888d43bee33ec2f234894dc52fbd4b4f4710b8 | 2013-04-27T09:15:54Z |
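The macOS fix above follows a common pattern: a long-lived singleton keeps a non-owning back-pointer to the main window, registered after construction and cleared (setMainWindow(NULL)) during teardown so a late dock-icon click cannot dereference a destroyed window. A minimal sketch of the pattern — the names mirror the patch, but the null guard in handleClick is an illustrative addition, not part of the actual change:

    #include <iostream>

    class MainWindow {
     public:
      void activate() { std::cout << "window shown and activated\n"; }
    };

    class DockIconHandler {
     public:
      static DockIconHandler& instance() {
        static DockIconHandler handler;  // lazily constructed singleton
        return handler;
      }
      void setMainWindow(MainWindow* window) { main_window_ = window; }
      void handleClick() {
        if (main_window_)  // guard against clicks after unregistration
          main_window_->activate();
      }
     private:
      MainWindow* main_window_ = nullptr;  // non-owning
    };

    int main() {
      MainWindow window;
      DockIconHandler::instance().setMainWindow(&window);
      DockIconHandler::instance().handleClick();
      DockIconHandler::instance().setMainWindow(nullptr);  // teardown
      DockIconHandler::instance().handleClick();           // safe no-op
    }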
mmm a / src / protocol_api . hpp <nl> ppp b / src / protocol_api . hpp <nl> <nl> # include < utility > <nl> # include < vector > <nl> <nl> - # include " btree / keys . hpp " <nl> # include " buffer_cache / types . hpp " <nl> # include " concurrency / fifo_checker . hpp " <nl> # include " concurrency / fifo_enforcer . hpp " <nl> <nl> # include " containers / binary_blob . hpp " <nl> # include " containers / scoped . hpp " <nl> # include " containers / object_buffer . hpp " <nl> - # include " hash_region . hpp " <nl> + # include " region / region . hpp " <nl> # include " rpc / serialize_macros . hpp " <nl> # include " timestamps . hpp " <nl> <nl> class traversal_progress_combiner_t ; <nl> struct write_t ; <nl> struct write_response_t ; <nl> <nl> - typedef hash_region_t < key_range_t > region_t ; <nl> - <nl> class cannot_perform_query_exc_t : public std : : exception { <nl> public : <nl> explicit cannot_perform_query_exc_t ( const std : : string & s ) : message ( s ) { } <nl> new file mode 100644 <nl> index 00000000000 . . f6f70939c40 <nl> mmm / dev / null <nl> ppp b / src / region / region . cc <nl> <nl> + # include " region / region . hpp " <nl> + <nl> + <nl> new file mode 100644 <nl> index 00000000000 . . 85ce9420862 <nl> mmm / dev / null <nl> ppp b / src / region / region . hpp <nl> <nl> + # ifndef REGION_REGION_HPP_ <nl> + # define REGION_REGION_HPP_ <nl> + <nl> + # include " btree / keys . hpp " <nl> + # include " hash_region . hpp " <nl> + <nl> + typedef hash_region_t < key_range_t > region_t ; <nl> + <nl> + <nl> + <nl> + <nl> + <nl> + <nl> + # endif / / REGION_REGION_HPP_ <nl> | Moved region_t to region . hpp . | rethinkdb/rethinkdb | 109eb16a543dcd385e599dadeec81b45abb0ef5c | 2014-04-04T00:04:40Z |
mmm a / aten / src / ATen / native / Normalization . cpp <nl> ppp b / aten / src / ATen / native / Normalization . cpp <nl> std : : tuple < Tensor , Tensor , Tensor > batch_norm_backward_cpu_template ( const Tensor <nl> return std : : make_tuple ( grad_input , grad_weight , grad_bias ) ; <nl> } <nl> <nl> - Tensor batch_norm ( <nl> + / / _batch_norm_impl_index ( _backward ) are used in the JIT be able to keep the run - time selection <nl> + / / of backends , while enabling it to keep the information about the used backend , so that it can <nl> + / / use its corresponding backward implementation . <nl> + / / XXX : The indices of backends need to be kept synchronized between this function and its _backward . <nl> + std : : tuple < Tensor , Tensor , Tensor , int64_t > _batch_norm_impl_index ( <nl> const Tensor & input , const Tensor & weight / * optional * / , const Tensor & bias / * optional * / , <nl> const Tensor & running_mean / * optional * / , const Tensor & running_var / * optional * / , <nl> bool training , double momentum , double eps , bool cudnn_enabled ) { <nl> - <nl> auto num_features = input . sizes ( ) [ 1 ] ; <nl> if ( running_mean . defined ( ) ) { <nl> check_dims_match_num_input_features ( " running_mean " , num_features , running_mean . numel ( ) ) ; <nl> Tensor batch_norm ( <nl> & & cudnn_enabled & & detail : : getCUDAHooks ( ) . versionCuDNN ( ) > = 5110L ) ; <nl> <nl> if ( use_cudnn & & eps > = detail : : getCUDAHooks ( ) . batchnormMinEpsilonCuDNN ( ) ) { <nl> - return std : : get < 0 > ( at : : cudnn_batch_norm ( <nl> - input . contiguous ( ) , weight . contiguous ( ) , <nl> - bias . contiguous ( ) , <nl> - running_mean . defined ( ) ? running_mean . contiguous ( ) : running_mean , <nl> - running_var . defined ( ) ? running_var . contiguous ( ) : running_var , <nl> - training , momentum , eps ) ) ; <nl> + return std : : tuple_cat ( <nl> + at : : cudnn_batch_norm ( <nl> + input . contiguous ( ) , weight . contiguous ( ) , <nl> + bias . contiguous ( ) , <nl> + running_mean . defined ( ) ? running_mean . contiguous ( ) : running_mean , <nl> + running_var . defined ( ) ? running_var . contiguous ( ) : running_var , <nl> + training , momentum , eps ) , <nl> + std : : make_tuple ( 1 ) ) ; <nl> } <nl> <nl> bool use_miopen = ( input . is_cuda ( ) <nl> Tensor batch_norm ( <nl> ) ; <nl> <nl> if ( use_miopen ) { <nl> - return std : : get < 0 > ( at : : miopen_batch_norm ( <nl> - input . contiguous ( ) , weight . contiguous ( ) , bias . contiguous ( ) , <nl> - running_mean . defined ( ) ? running_mean . contiguous ( ) : running_mean , <nl> - running_var . defined ( ) ? running_var . contiguous ( ) : running_var , <nl> - training , momentum , eps ) ) ; <nl> + return std : : tuple_cat ( <nl> + at : : miopen_batch_norm ( <nl> + input . contiguous ( ) , weight . contiguous ( ) , bias . contiguous ( ) , <nl> + running_mean . defined ( ) ? running_mean . contiguous ( ) : running_mean , <nl> + running_var . defined ( ) ? running_var . 
contiguous ( ) : running_var , <nl> + training , momentum , eps ) , <nl> + std : : make_tuple ( 2 ) ) ; <nl> + } <nl> + <nl> + return std : : tuple_cat ( <nl> + at : : native_batch_norm ( <nl> + input , weight , bias , running_mean , running_var , training , momentum , eps ) , <nl> + std : : make_tuple ( 0 ) ) ; <nl> + } <nl> + <nl> + std : : tuple < Tensor , Tensor , Tensor > _batch_norm_impl_index_backward ( <nl> + int64_t impl_index , <nl> + const Tensor & input , const Tensor & grad_output , const Tensor & weight / * optional * / , <nl> + const Tensor & running_mean / * optional * / , const Tensor & running_var / * optional * / , <nl> + const Tensor & save_mean / * optional * / , const Tensor & save_var_transform / * optional * / , <nl> + bool train , double epsilon , std : : array < bool , 3 > output_mask ) { <nl> + if ( impl_index = = 0 ) { <nl> + return at : : native_batch_norm_backward ( grad_output , input , weight , running_mean , running_var , save_mean , save_var_transform , train , epsilon , output_mask ) ; <nl> + } else if ( impl_index = = 1 ) { <nl> + return at : : cudnn_batch_norm_backward ( input , grad_output , weight , running_mean , running_var , save_mean , save_var_transform , epsilon ) ; <nl> + } else if ( impl_index = = 2 ) { <nl> + return at : : miopen_batch_norm_backward ( input , grad_output , weight , running_mean , running_var , save_mean , save_var_transform , epsilon ) ; <nl> } <nl> + AT_ASSERTM ( false , " Unsupported impl_index in _batch_norm_impl_index_backward : " , impl_index ) ; <nl> + } <nl> <nl> - return std : : get < 0 > ( at : : native_batch_norm ( input , weight , bias , <nl> - running_mean , running_var , training , momentum , eps ) ) ; <nl> + Tensor batch_norm ( <nl> + const Tensor & input , const Tensor & weight / * optional * / , const Tensor & bias / * optional * / , <nl> + const Tensor & running_mean / * optional * / , const Tensor & running_var / * optional * / , <nl> + bool training , double momentum , double eps , bool cudnn_enabled ) { <nl> + return std : : get < 0 > ( at : : _batch_norm_impl_index ( input , weight , bias , running_mean , running_var , <nl> + training , momentum , eps , cudnn_enabled ) ) ; <nl> } <nl> <nl> Tensor instance_norm ( <nl> mmm a / aten / src / ATen / native / native_functions . yaml <nl> ppp b / aten / src / ATen / native / native_functions . yaml <nl> <nl> - func : batch_norm ( Tensor input , Tensor ? weight , Tensor ? bias , Tensor ? running_mean , Tensor ? running_var , bool training , float momentum , float eps , bool cudnn_enabled ) - > Tensor <nl> matches_jit_signature : True <nl> <nl> + - func : _batch_norm_impl_index ( Tensor input , Tensor ? weight , Tensor ? bias , Tensor ? running_mean , Tensor ? running_var , bool training , double momentum , double eps , bool cudnn_enabled ) - > ( Tensor , Tensor , Tensor , int64_t ) <nl> + <nl> + - func : _batch_norm_impl_index_backward ( int64_t impl_index , Tensor input , Tensor grad_output , Tensor ? weight , Tensor ? running_mean , Tensor ? running_var , Tensor ? save_mean , Tensor ? save_var_transform , bool train , double eps , std : : array < bool , 3 > output_mask ) - > ( Tensor , Tensor , Tensor ) <nl> + <nl> # Sample bernoulli with values in ` self ` as probability . <nl> - func : bernoulli ( Tensor self , * , Generator ? generator = None ) - > Tensor <nl> matches_jit_signature : True <nl> mmm a / test / test_jit . py <nl> ppp b / test / test_jit . 
py <nl> def forward ( self , x , y ) : <nl> DISABLE_AUTODIFF_SUBGRAPH_INLINING = { <nl> ' test_nn_avg_pool2d ' , <nl> ' test_nn_adaptive_avg_pool2d ' , <nl> + ' test_nn_batch_norm ' , <nl> ' test_nn_embedding ' , <nl> ' test_nn_log_softmax ' , <nl> ' test_nn_threshold ' , <nl> mmm a / tools / autograd / gen_python_functions . py <nl> ppp b / tools / autograd / gen_python_functions . py <nl> <nl> # it ' s enough to just extend the list here . Before you do this , make sure <nl> # to add an appropriate wrap ( ) overload in torch / csrc / autograd / utils / wrap_outputs . h . <nl> SUPPORTED_RETURN_TYPES = { <nl> - ' Tensor ' , ' std : : tuple < Tensor , Tensor > ' , <nl> - ' std : : tuple < Tensor , Tensor , double , int64_t > ' , <nl> + ' Tensor ' , <nl> + ' std : : tuple < Tensor , Tensor > ' , <nl> ' std : : tuple < Tensor , Tensor , Tensor > ' , <nl> ' std : : tuple < Tensor , Tensor , Tensor , Tensor > ' , <nl> ' std : : tuple < Tensor , Tensor , Tensor , Tensor , Tensor > ' , <nl> + ' std : : tuple < Tensor , Tensor , Tensor , int64_t > ' , <nl> + ' std : : tuple < Tensor , Tensor , double , int64_t > ' , <nl> ' std : : vector < Tensor > ' , <nl> ' Scalar ' , ' bool ' , ' int64_t ' , ' void * ' , ' void ' <nl> } <nl> mmm a / torch / csrc / autograd / utils / wrap_outputs . h <nl> ppp b / torch / csrc / autograd / utils / wrap_outputs . h <nl> <nl> <nl> namespace torch { namespace autograd { namespace utils { <nl> <nl> + inline PyObject * wrap ( bool value ) { <nl> + if ( value ) { <nl> + Py_RETURN_TRUE ; <nl> + } else { <nl> + Py_RETURN_FALSE ; <nl> + } <nl> + } <nl> + <nl> + inline PyObject * wrap ( int64_t value ) { <nl> + return THPUtils_packInt64 ( value ) ; <nl> + } <nl> + <nl> + inline PyObject * wrap ( double value ) { <nl> + return PyFloat_FromDouble ( value ) ; <nl> + } <nl> + <nl> + inline PyObject * wrap ( std : : complex < double > value ) { <nl> + / / I could probably also use FromComplex with a reinterpret cast , <nl> + / / but . . . eh . <nl> + return PyComplex_FromDoubles ( value . real ( ) , value . imag ( ) ) ; <nl> + } <nl> + <nl> + inline PyObject * wrap ( void * value ) { <nl> + return THPUtils_packInt64 ( reinterpret_cast < intptr_t > ( value ) ) ; <nl> + } <nl> + <nl> + inline PyObject * wrap ( THPDtype * dtype ) { <nl> + Py_INCREF ( dtype ) ; <nl> + return ( PyObject * ) dtype ; <nl> + } <nl> + <nl> + inline PyObject * wrap ( THPLayout * layout ) { <nl> + Py_INCREF ( layout ) ; <nl> + return ( PyObject * ) layout ; <nl> + } <nl> + <nl> inline PyObject * wrap ( at : : Tensor tensor ) { <nl> return THPVariable_Wrap ( Variable ( std : : move ( tensor ) ) ) ; <nl> } <nl> <nl> + inline PyObject * wrap ( at : : Scalar scalar ) { <nl> + return wrap ( scalar_to_tensor ( scalar ) ) ; <nl> + } <nl> + <nl> inline PyObject * wrap ( std : : tuple < at : : Tensor , at : : Tensor > tensors ) { <nl> auto r = THPObjectPtr { PyTuple_New ( 2 ) } ; <nl> if ( ! r ) throw python_error ( ) ; <nl> inline PyObject * wrap ( PyTypeObject * type , std : : tuple < at : : Tensor , at : : Tensor , at : <nl> return r . release ( ) ; <nl> } <nl> <nl> - inline PyObject * wrap ( std : : tuple < at : : Tensor , at : : Tensor , at : : Tensor , at : : Tensor > tensors ) { <nl> + inline PyObject * wrap ( std : : tuple < at : : Tensor , at : : Tensor , at : : Tensor , int64_t > tensors ) { <nl> auto r = THPObjectPtr { PyTuple_New ( 4 ) } ; <nl> if ( ! r ) throw python_error ( ) ; <nl> PyTuple_SET_ITEM ( r . 
get ( ) , 0 , wrap ( std : : move ( std : : get < 0 > ( tensors ) ) ) ) ; <nl> PyTuple_SET_ITEM ( r . get ( ) , 1 , wrap ( std : : move ( std : : get < 1 > ( tensors ) ) ) ) ; <nl> PyTuple_SET_ITEM ( r . get ( ) , 2 , wrap ( std : : move ( std : : get < 2 > ( tensors ) ) ) ) ; <nl> - PyTuple_SET_ITEM ( r . get ( ) , 3 , wrap ( std : : move ( std : : get < 3 > ( tensors ) ) ) ) ; <nl> + PyTuple_SET_ITEM ( r . get ( ) , 3 , wrap ( std : : get < 3 > ( tensors ) ) ) ; <nl> return r . release ( ) ; <nl> } <nl> <nl> - inline PyObject * wrap ( std : : tuple < at : : Tensor , at : : Tensor , at : : Tensor , at : : Tensor , at : : Tensor > tensors ) { <nl> - auto r = THPObjectPtr { PyTuple_New ( 5 ) } ; <nl> + inline PyObject * wrap ( std : : tuple < at : : Tensor , at : : Tensor , float , int64_t > tensors ) { <nl> + auto r = THPObjectPtr { PyTuple_New ( 4 ) } ; <nl> if ( ! r ) throw python_error ( ) ; <nl> PyTuple_SET_ITEM ( r . get ( ) , 0 , wrap ( std : : move ( std : : get < 0 > ( tensors ) ) ) ) ; <nl> PyTuple_SET_ITEM ( r . get ( ) , 1 , wrap ( std : : move ( std : : get < 1 > ( tensors ) ) ) ) ; <nl> PyTuple_SET_ITEM ( r . get ( ) , 2 , wrap ( std : : move ( std : : get < 2 > ( tensors ) ) ) ) ; <nl> PyTuple_SET_ITEM ( r . get ( ) , 3 , wrap ( std : : move ( std : : get < 3 > ( tensors ) ) ) ) ; <nl> - PyTuple_SET_ITEM ( r . get ( ) , 4 , wrap ( std : : move ( std : : get < 4 > ( tensors ) ) ) ) ; <nl> return r . release ( ) ; <nl> } <nl> <nl> - inline PyObject * wrap ( at : : TensorList tl ) { <nl> - auto r = THPObjectPtr { PyTuple_New ( tl . size ( ) ) } ; <nl> + inline PyObject * wrap ( std : : tuple < at : : Tensor , at : : Tensor , at : : Tensor , at : : Tensor > tensors ) { <nl> + auto r = THPObjectPtr { PyTuple_New ( 4 ) } ; <nl> if ( ! r ) throw python_error ( ) ; <nl> - for ( size_t i = 0 ; i < tl . size ( ) ; + + i ) { <nl> - PyTuple_SET_ITEM ( r . get ( ) , i , wrap ( tl [ i ] ) ) ; <nl> - } <nl> + PyTuple_SET_ITEM ( r . get ( ) , 0 , wrap ( std : : move ( std : : get < 0 > ( tensors ) ) ) ) ; <nl> + PyTuple_SET_ITEM ( r . get ( ) , 1 , wrap ( std : : move ( std : : get < 1 > ( tensors ) ) ) ) ; <nl> + PyTuple_SET_ITEM ( r . get ( ) , 2 , wrap ( std : : move ( std : : get < 2 > ( tensors ) ) ) ) ; <nl> + PyTuple_SET_ITEM ( r . get ( ) , 3 , wrap ( std : : move ( std : : get < 3 > ( tensors ) ) ) ) ; <nl> return r . release ( ) ; <nl> } <nl> <nl> - inline PyObject * wrap ( bool value ) { <nl> - if ( value ) { <nl> - Py_RETURN_TRUE ; <nl> - } else { <nl> - Py_RETURN_FALSE ; <nl> - } <nl> - } <nl> - <nl> - inline PyObject * wrap ( int64_t value ) { <nl> - return THPUtils_packInt64 ( value ) ; <nl> - } <nl> - <nl> - inline PyObject * wrap ( double value ) { <nl> - return PyFloat_FromDouble ( value ) ; <nl> - } <nl> - <nl> - inline PyObject * wrap ( std : : complex < double > value ) { <nl> - / / I could probably also use FromComplex with a reinterpret cast , <nl> - / / but . . . eh . <nl> - return PyComplex_FromDoubles ( value . real ( ) , value . 
imag ( ) ) ; <nl> - } <nl> - <nl> - inline PyObject * wrap ( void * value ) { <nl> - return THPUtils_packInt64 ( reinterpret_cast < intptr_t > ( value ) ) ; <nl> - } <nl> - <nl> - inline PyObject * wrap ( at : : Scalar scalar ) { <nl> - return wrap ( scalar_to_tensor ( scalar ) ) ; <nl> - } <nl> - <nl> - inline PyObject * wrap ( std : : tuple < at : : Tensor , at : : Tensor , float , int64_t > tensors ) { <nl> - auto r = THPObjectPtr { PyTuple_New ( 4 ) } ; <nl> + inline PyObject * wrap ( std : : tuple < at : : Tensor , at : : Tensor , at : : Tensor , at : : Tensor , at : : Tensor > tensors ) { <nl> + auto r = THPObjectPtr { PyTuple_New ( 5 ) } ; <nl> if ( ! r ) throw python_error ( ) ; <nl> PyTuple_SET_ITEM ( r . get ( ) , 0 , wrap ( std : : move ( std : : get < 0 > ( tensors ) ) ) ) ; <nl> PyTuple_SET_ITEM ( r . get ( ) , 1 , wrap ( std : : move ( std : : get < 1 > ( tensors ) ) ) ) ; <nl> PyTuple_SET_ITEM ( r . get ( ) , 2 , wrap ( std : : move ( std : : get < 2 > ( tensors ) ) ) ) ; <nl> PyTuple_SET_ITEM ( r . get ( ) , 3 , wrap ( std : : move ( std : : get < 3 > ( tensors ) ) ) ) ; <nl> + PyTuple_SET_ITEM ( r . get ( ) , 4 , wrap ( std : : move ( std : : get < 4 > ( tensors ) ) ) ) ; <nl> return r . release ( ) ; <nl> } <nl> <nl> - inline PyObject * wrap ( THPDtype * dtype ) { <nl> - Py_INCREF ( dtype ) ; <nl> - return ( PyObject * ) dtype ; <nl> - } <nl> - <nl> - inline PyObject * wrap ( THPLayout * layout ) { <nl> - Py_INCREF ( layout ) ; <nl> - return ( PyObject * ) layout ; <nl> + inline PyObject * wrap ( at : : TensorList tl ) { <nl> + auto r = THPObjectPtr { PyTuple_New ( tl . size ( ) ) } ; <nl> + if ( ! r ) throw python_error ( ) ; <nl> + for ( size_t i = 0 ; i < tl . size ( ) ; + + i ) { <nl> + PyTuple_SET_ITEM ( r . get ( ) , i , wrap ( tl [ i ] ) ) ; <nl> + } <nl> + return r . release ( ) ; <nl> } <nl> <nl> } } } / / namespace torch : : autograd : : utils <nl> mmm a / torch / csrc / jit / constants . cpp <nl> ppp b / torch / csrc / jit / constants . cpp <nl> RegisterOperators reg ( { <nl> return 0 ; <nl> } ; <nl> } else if ( type - > isSubtypeOf ( ListType : : ofBools ( ) ) ) { <nl> - const auto & int_list = node - > is ( attr : : value ) ; <nl> - const std : : vector < bool > bs ( int_list . begin ( ) , int_list . end ( ) ) ; <nl> + const auto bs = fmap < bool > ( node - > is ( attr : : value ) ) ; <nl> return [ bs ] ( Stack & stack ) { <nl> push ( stack , bs ) ; <nl> return 0 ; <nl> mmm a / torch / csrc / jit / register_prim_ops . cpp <nl> ppp b / torch / csrc / jit / register_prim_ops . cpp <nl> RegisterOperators reg ( { <nl> return 0 ; <nl> } ; <nl> } ) , <nl> - <nl> Operator ( <nl> " prim : : Bool ( Tensor a ) - > bool " , <nl> [ ] ( const Node * node ) - > Operation { <nl> mmm a / torch / csrc / jit / symbolic_script . cpp <nl> ppp b / torch / csrc / jit / symbolic_script . cpp <nl> const std : : vector < std : : string > functions = { <nl> <nl> return torch . _adaptive_avg_pool2d ( self , output_size ) , backward <nl> <nl> + def batch_norm ( input : Tensor , <nl> + weight : Optional [ Tensor ] , <nl> + bias : Optional [ Tensor ] , <nl> + running_mean : Optional [ Tensor ] , <nl> + running_var : Optional [ Tensor ] , <nl> + training : bool , <nl> + momentum : float , <nl> + eps : float , <nl> + cudnn_enabled : bool ) : <nl> + <nl> + output , save1 , save2 , impl_idx = torch . 
_batch_norm_impl_index ( <nl> + input , weight , bias , running_mean , running_var , training , <nl> + momentum , eps , cudnn_enabled ) <nl> + has_weight = weight is not None <nl> + has_bias = bias is not None <nl> + <nl> + def backward ( grad_output ) : <nl> + dinput , dweight , dbias = torch . _batch_norm_impl_index_backward ( <nl> + impl_idx , input , grad_output , weight , running_mean , running_var , <nl> + save1 , save2 , training , eps , [ True , has_weight , has_bias ] ) <nl> + return dinput , dweight , dbias , None , None , None , None , None , None <nl> + <nl> + return output , backward <nl> + <nl> def embedding ( weight , <nl> indices , <nl> padding_idx : int , <nl> | Add special ops for BatchNorm symbolic differentiation ( ) | pytorch/pytorch | 7157be86226b79e3a698cbb0c524fc3ae59e28de | 2019-02-15T23:40:28Z |
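The comment added in Normalization.cpp above describes a mechanism worth spelling out: the forward pass appends an integer identifying which backend actually ran (0 native, 1 cuDNN, 2 MIOpen), and the backward entry point dispatches on that index so the matching backward kernel is used. A stripped-down C++ sketch of the same pattern — Result and both functions here are stand-ins, not ATen APIs:

    #include <cstdint>
    #include <stdexcept>
    #include <tuple>

    struct Result { double value; };

    // Forward: pick a backend at run time and record which one ran,
    // mirroring the std::tuple_cat(..., std::make_tuple(idx)) shape above.
    std::tuple<Result, int64_t> forward_impl_index(double x, bool fast_path_ok) {
      if (fast_path_ok)
        return std::tuple_cat(std::make_tuple(Result{2 * x}),
                              std::make_tuple(int64_t{1}));
      return std::tuple_cat(std::make_tuple(Result{x + x}),
                            std::make_tuple(int64_t{0}));
    }

    // Backward: the recorded index selects the matching implementation.
    Result backward_impl_index(int64_t impl_index, double grad) {
      if (impl_index == 0) return Result{grad};  // reference backward
      if (impl_index == 1) return Result{grad};  // fast-path backward
      throw std::invalid_argument("unsupported impl_index");
    }

This is what lets the JIT keep run-time backend selection while its symbolic gradient (the batch_norm entry added to symbolic_script.cpp) still calls the backward corresponding to the forward that executed.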
mmm a / src / mongo / shell / dbshell . cpp <nl> ppp b / src / mongo / shell / dbshell . cpp <nl> int _main ( int argc , char * argv [ ] , char * * envp ) { <nl> # ifndef _WIN32 <nl> rcGlobalLocation = " / etc / mongorc . js " ; <nl> # else <nl> - wchar_t programDataPath [ MAX_PATH ] ; <nl> - if ( S_OK = = SHGetFolderPathW ( NULL , <nl> + wchar_t programDataPath [ MAX_PATH ] ; <nl> + if ( S_OK = = SHGetFolderPathW ( NULL , <nl> CSIDL_COMMON_APPDATA , <nl> NULL , <nl> 0 , <nl> | Whitespace - - tabs to spaces | mongodb/mongo | 8717795894c20ecd0fd4eb92f23c4ce0f295683d | 2013-06-24T18:06:40Z |
mmm a / xbmc / interfaces / json - rpc / FileOperations . cpp <nl> ppp b / xbmc / interfaces / json - rpc / FileOperations . cpp <nl> JSONRPC_STATUS CFileOperations : : SetFileDetails ( const std : : string & method , ITrans <nl> CVideoLibrary : : UpdateResumePoint ( parameterObject , infos , videodatabase ) ; <nl> <nl> videodatabase . GetFileInfo ( " " , infos , fileId ) ; <nl> - CJSONRPCUtils : : NotifyItemUpdated ( infos ) ; <nl> + CJSONRPCUtils : : NotifyItemUpdated ( infos , std : : map < std : : string , std : : string > { } ) ; <nl> return ACK ; <nl> } <nl> <nl> mmm a / xbmc / interfaces / json - rpc / JSONRPCUtils . h <nl> ppp b / xbmc / interfaces / json - rpc / JSONRPCUtils . h <nl> namespace JSONRPC <nl> CGUIMessage message ( GUI_MSG_NOTIFY_ALL , 0 , 0 , GUI_MSG_UPDATE , CServiceBroker : : GetGUI ( ) - > GetWindowManager ( ) . GetActiveWindow ( ) ) ; <nl> CServiceBroker : : GetGUI ( ) - > GetWindowManager ( ) . SendThreadMessage ( message ) ; <nl> } <nl> - static inline void NotifyItemUpdated ( const CVideoInfoTag & info ) <nl> + static inline void NotifyItemUpdated ( const CVideoInfoTag & info , <nl> + const std : : map < std : : string , std : : string > & artwork ) <nl> { <nl> CFileItemPtr msgItem ( new CFileItem ( info ) ) ; <nl> + if ( ! artwork . empty ( ) ) <nl> + msgItem - > SetArt ( artwork ) ; <nl> CGUIMessage message ( GUI_MSG_NOTIFY_ALL , CServiceBroker : : GetGUI ( ) - > GetWindowManager ( ) . GetActiveWindow ( ) , 0 , GUI_MSG_UPDATE_ITEM , 0 , msgItem ) ; <nl> CServiceBroker : : GetGUI ( ) - > GetWindowManager ( ) . SendThreadMessage ( message ) ; <nl> } <nl> mmm a / xbmc / interfaces / json - rpc / VideoLibrary . cpp <nl> ppp b / xbmc / interfaces / json - rpc / VideoLibrary . cpp <nl> JSONRPC_STATUS CVideoLibrary : : SetMovieDetails ( const std : : string & method , ITransp <nl> <nl> UpdateResumePoint ( parameterObject , infos , videodatabase ) ; <nl> <nl> - CJSONRPCUtils : : NotifyItemUpdated ( infos ) ; <nl> + CJSONRPCUtils : : NotifyItemUpdated ( infos , artwork ) ; <nl> return ACK ; <nl> } <nl> <nl> mmm a / xbmc / interfaces / json - rpc / schema / version . txt <nl> ppp b / xbmc / interfaces / json - rpc / schema / version . txt <nl> @ @ - 1 + 1 @ @ <nl> - JSONRPC_VERSION 11 . 2 . 0 <nl> + JSONRPC_VERSION 11 . 2 . 1 <nl> | Merge pull request from enen92 / jsonfixart | xbmc/xbmc | 8fa28e5f1161ae83353371dda1ba645de2eccfb8 | 2020-02-02T19:11:04Z |
mmm a / hphp / runtime / base / execution - context . h <nl> ppp b / hphp / runtime / base / execution - context . h <nl> struct VMState { <nl> MInstrState mInstrState ; <nl> ActRec * jitCalledFrame ; <nl> jit : : TCA jitReturnAddr ; <nl> - ObjectData * exn ; <nl> + Either < ObjectData * , Exception * > exn ; <nl> bool unwinderSideEnter ; <nl> } ; <nl> <nl> mmm a / hphp / runtime / vm / jit / unique - stubs . cpp <nl> ppp b / hphp / runtime / vm / jit / unique - stubs . cpp <nl> namespace { <nl> <nl> [ [ noreturn ] ] static void throw_exception_while_unwinding ( ) { <nl> assert_native_stack_aligned ( ) ; <nl> - assertx ( g_unwind_rds - > exn ! = nullptr ) ; <nl> - throw req : : root < Object > ( Object : : attach ( g_unwind_rds - > exn ) ) ; <nl> + assertx ( g_unwind_rds - > exn . left ( ) ) ; <nl> + throw req : : root < Object > ( Object : : attach ( g_unwind_rds - > exn . left ( ) ) ) ; <nl> } <nl> <nl> } / / namespace <nl> mmm a / hphp / runtime / vm / jit / unwind - itanium . cpp <nl> ppp b / hphp / runtime / vm / jit / unwind - itanium . cpp <nl> tc_unwind_personality ( int version , <nl> static_cast < InvalidSetMException * > ( exn ) ; <nl> __cxxabiv1 : : __cxa_begin_catch ( ue ) ; <nl> if ( tl_regState = = VMRegState : : DIRTY ) sync_regstate ( ip , context ) ; <nl> - g_unwind_rds - > exn = [ & ] ( ) - > ObjectData * { <nl> + g_unwind_rds - > exn = [ & ] ( ) - > Either < ObjectData * , Exception * > { <nl> if ( ti = = typeid ( Object ) ) return static_cast < Object * > ( exn ) - > get ( ) ; <nl> if ( ti = = typeid ( req : : root < Object > ) ) { <nl> return static_cast < req : : root < Object > * > ( exn ) - > get ( ) ; <nl> } <nl> + if ( ! ism ) return static_cast < Exception * > ( exn ) ; <nl> return nullptr ; <nl> } ( ) ; <nl> - assertx ( ! g_unwind_rds - > exn | | g_unwind_rds - > exn - > kindIsValid ( ) ) ; <nl> + assertx ( ( g_unwind_rds - > exn . left ( ) & & <nl> + g_unwind_rds - > exn . left ( ) - > kindIsValid ( ) ) | | <nl> + g_unwind_rds - > exn . right ( ) | | <nl> + ( g_unwind_rds - > exn . isNull ( ) & & ism ) ) ; <nl> <nl> auto const tv = ism ? ism - > tv ( ) : TypedValue { } ; <nl> <nl> TCUnwindInfo tc_unwind_resume ( ActRec * fp ) { <nl> auto savedRip = reinterpret_cast < TCA > ( fp - > m_savedRip ) ; <nl> <nl> tl_regState = VMRegState : : CLEAN ; <nl> - if ( g_unwind_rds - > exn ) lockObjectWhileUnwinding ( vmpc ( ) , vmStack ( ) ) ; <nl> + if ( g_unwind_rds - > exn . left ( ) ) lockObjectWhileUnwinding ( vmpc ( ) , vmStack ( ) ) ; <nl> <nl> if ( savedRip = = tc : : ustubs ( ) . callToExit ) { <nl> / / If we ' re the top VM frame , there ' s nothing we need to do ; we can just <nl> TCUnwindInfo tc_unwind_resume ( ActRec * fp ) { <nl> " by throwing \ n " ) ; <nl> / / Looks like we got here having skipped itanium unwinder , lets enter <nl> g_unwind_rds - > sideEnter = false ; <nl> - assertx ( g_unwind_rds - > exn ) ; <nl> + / / We can only side enter with a PHP exception <nl> + assertx ( g_unwind_rds - > exn . left ( ) ) ; <nl> return { tc : : ustubs ( ) . throwExceptionWhileUnwinding , sfp } ; <nl> } <nl> ITRACE ( 1 , " top VM frame , passing back to _Unwind_Resume \ n " ) ; <nl> TCUnwindInfo tc_unwind_resume ( ActRec * fp ) { <nl> fp , fp - > func ( ) - > fullName ( ) , sfp , sfp - > func ( ) - > fullName ( ) ) ; <nl> <nl> / / Unwind vm stack to sfp <nl> - if ( g_unwind_rds - > exn ) { <nl> + if ( g_unwind_rds - > exn . left ( ) ) { <nl> auto const result = unwindVM ( g_unwind_rds - > exn , sfp ) ; <nl> if ( ! ( result & UnwindReachedGoal ) ) { <nl> if ( ! 
g_unwind_rds - > sideEnter ) __cxxabiv1 : : __cxa_end_catch ( ) ; <nl> mmm a / hphp / runtime / vm / jit / unwind - itanium . h <nl> ppp b / hphp / runtime / vm / jit / unwind - itanium . h <nl> <nl> # include " hphp / runtime / vm / jit / types . h " <nl> <nl> # include " hphp / util / asm - x64 . h " <nl> + # include " hphp / util / either . h " <nl> <nl> # include < cstddef > <nl> <nl> namespace jit { <nl> * Used to pass values between unwinder code and catch traces . <nl> * / <nl> struct UnwindRDS { <nl> - / * PHP exception or nullptr if C + + / SetM exception * / <nl> - ObjectData * exn ; <nl> + / * PHP or C + + exception , nullptr if SetM exception * / <nl> + Either < ObjectData * , Exception * > exn ; <nl> <nl> / * Some helpers need to signal an error along with a TypedValue to be pushed <nl> * on the eval stack . When present , that value lives here . * / <nl> | Convert g_unwind_rds - > exn from ObjectData * to Either < ObjectData * , Exception * > | facebook/hhvm | b1582c01ab6dbe4f8f46f09aaa55e07328e0c577 | 2020-02-04T18:11:04Z |
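HPHP's Either<L, R> used above is its own small utility (hphp/util/either.h) packing two pointer alternatives plus a null state, queried with left() and right(). As a rough analogue only — not HHVM code — the same three-way discrimination can be written with std::variant; monostate plays the role of the null state carried in the SetM case:

    #include <iostream>
    #include <string>
    #include <variant>

    struct PhpException { std::string msg; };
    struct CppException { std::string msg; };

    // monostate ~ "no payload"; the two pointers ~ left()/right().
    using Exn = std::variant<std::monostate, PhpException*, CppException*>;

    void handle(const Exn& exn) {
      if (auto* php = std::get_if<PhpException*>(&exn)) {
        std::cout << "PHP exception: " << (*php)->msg << "\n";
      } else if (auto* cpp = std::get_if<CppException*>(&exn)) {
        std::cout << "C++ exception: " << (*cpp)->msg << "\n";
      } else {
        std::cout << "no exception payload\n";
      }
    }

    int main() {
      PhpException e{"boom"};
      handle(Exn{&e});  // the side-enter path requires this alternative
      handle(Exn{});    // null payload
    }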
mmm a / drivers / javascript / rethinkdb / query / expression . js <nl> ppp b / drivers / javascript / rethinkdb / query / expression . js <nl> goog . exportProperty ( rethinkdb . query . Expression . prototype , ' filter ' , <nl> / * * <nl> * Map a function over a list or a stream <nl> * / <nl> - rethinkdb . query . Expression . prototype . map = function ( mapping ) { <nl> - var mappingFunction ; <nl> - if ( mapping instanceof rethinkdb . query . FunctionExpression ) { <nl> - mappingFunction = mapping ; <nl> - } else if ( mapping instanceof rethinkdb . query . Expression ) { <nl> - mappingFunction = rethinkdb . query . fn ( ' ' , mapping ) ; <nl> - } else if ( typeof mapping = = = ' function ' ) { <nl> - mappingFunction = rethinkdb . query . fn ( mapping ) ; <nl> - } else { <nl> - / / invalid mapping <nl> - } <nl> - <nl> + rethinkdb . query . Expression . prototype . map = function ( mapFun ) { <nl> + mapFun = functionWrap_ ( mapFun ) ; <nl> return new rethinkdb . query . BuiltinExpression ( Builtin . BuiltinType . MAP , [ this ] , <nl> function ( builtin ) { <nl> var mapping = new Mapping ( ) ; <nl> - mapping . setArg ( mappingFunction . args [ 0 ] ) ; <nl> - mapping . setBody ( mappingFunction . body . compile ( ) ) ; <nl> + mapping . setArg ( mapFun . args [ 0 ] ) ; <nl> + mapping . setBody ( mapFun . body . compile ( ) ) ; <nl> <nl> var map = new Builtin . Map ( ) ; <nl> map . setMapping ( mapping ) ; <nl> goog . exportProperty ( rethinkdb . query . Expression . prototype , ' distinct ' , <nl> * @ param { rethinkdb . query . FunctionExpression | function ( . . . ) } reduce <nl> * / <nl> rethinkdb . query . Expression . prototype . reduce = function ( base , reduce ) { <nl> - if ( ! ( base instanceof rethinkdb . query . Expression ) ) { <nl> - base = rethinkdb . query . expr ( base ) ; <nl> - } <nl> - <nl> - if ( ! ( reduce instanceof rethinkdb . query . FunctionExpression ) ) { <nl> - if ( typeof reduce = = = ' function ' ) { <nl> - reduce = new rethinkdb . query . JSFunctionExpression ( reduce ) ; <nl> - } else { <nl> - throw TypeError ( ' reduce argument expected to be a function ' ) ; <nl> - } <nl> - } <nl> + base = wrapIf_ ( base ) ; <nl> + reduce = functionWrap_ ( reduce ) ; <nl> <nl> return new rethinkdb . query . BuiltinExpression ( Builtin . BuiltinType . REDUCE , [ this ] , <nl> function ( builtin ) { <nl> goog . exportProperty ( rethinkdb . query . Expression . prototype , ' extend ' , <nl> / * * <nl> * Apply mapping and then concat to an array . <nl> * / <nl> - rethinkdb . query . Expression . prototype . concatMap = function ( mapping ) { <nl> - var mappingFunction ; <nl> - if ( mapping instanceof rethinkdb . query . FunctionExpression ) { <nl> - mappingFunction = mapping ; <nl> - } else if ( mapping instanceof rethinkdb . query . Expression ) { <nl> - mappingFunction = rethinkdb . query . fn ( ' ' , mapping ) ; <nl> - } else { <nl> - / / invalid mapping <nl> - } <nl> - <nl> + rethinkdb . query . Expression . prototype . concatMap = function ( mapFun ) { <nl> + mapFun = functionWrap_ ( mapFun ) ; <nl> return new rethinkdb . query . BuiltinExpression ( Builtin . BuiltinType . CONCATMAP , [ this ] , <nl> function ( builtin ) { <nl> var mapping = new Mapping ( ) ; <nl> - mapping . setArg ( mappingFunction . args [ 0 ] ) ; <nl> - mapping . setBody ( mappingFunction . body . compile ( ) ) ; <nl> + mapping . setArg ( mapFun . args [ 0 ] ) ; <nl> + mapping . setBody ( mapFun . body . compile ( ) ) ; <nl> <nl> var concatmap = new Builtin . ConcatMap ( ) ; <nl> concatmap . 
setMapping ( mapping ) ; <nl> mmm a / drivers / javascript / rethinkdb / query / querybase . js <nl> ppp b / drivers / javascript / rethinkdb / query / querybase . js <nl> function wrapIf_ ( val ) { <nl> } else { <nl> return rethinkdb . query ( val ) ; <nl> } <nl> - } ; <nl> + } <nl> + <nl> + / * * <nl> + * Internal utility for wrapping API function arguments that <nl> + * are expected to be function expressions . <nl> + * / <nl> + function functionWrap_ ( fun ) { <nl> + if ( fun instanceof rethinkdb . query . FunctionExpression ) { <nl> + / / No wrap needed <nl> + } else if ( fun instanceof rethinkdb . query . Expression ) { <nl> + fun = rethinkdb . query . fn ( ' ' , fun ) ; <nl> + } else if ( typeof fun = = = ' function ' ) { <nl> + fun = rethinkdb . query . fn ( fun ) ; <nl> + } else { <nl> + throw TypeError ( " Argument expected to be a function expression " ) ; <nl> + } <nl> + <nl> + return fun ; <nl> + } <nl> mmm a / drivers / javascript / rethinkdb / query / table . js <nl> ppp b / drivers / javascript / rethinkdb / query / table . js <nl> rethinkdb . query . UpdateQuery . prototype . buildQuery = function ( ) { <nl> * Updates all rows according to the given mapping function <nl> * / <nl> rethinkdb . query . Expression . prototype . update = function ( mapping ) { <nl> - var mappingFunction = null ; <nl> - if ( mapping instanceof rethinkdb . query . FunctionExpression ) { <nl> - mappingFunction = mapping ; <nl> - } else if ( mapping instanceof rethinkdb . query . Expression ) { <nl> - mappingFunction = rethinkdb . query . fn ( ' ' , mapping ) ; <nl> - } else if ( typeof mapping = = = ' function ' ) { <nl> - mappingFunction = rethinkdb . query . fn ( mapping ) ; <nl> - } else { <nl> - / / invalid mapping <nl> - } <nl> - <nl> - return new rethinkdb . query . UpdateQuery ( this , mappingFunction ) ; <nl> + mapping = functionWrap_ ( mapping ) ; <nl> + return new rethinkdb . query . UpdateQuery ( this , mapping ) ; <nl> } ; <nl> goog . exportProperty ( rethinkdb . query . Expression . prototype , ' update ' , <nl> rethinkdb . query . Expression . prototype . update ) ; <nl> rethinkdb . query . MutateQuery . prototype . buildQuery = function ( ) { <nl> * Replcaces each row the the result of the mapping expression <nl> * / <nl> rethinkdb . query . Expression . prototype . mutate = function ( mapping ) { <nl> - var mappingFunction = null ; <nl> - if ( mapping instanceof rethinkdb . query . FunctionExpression ) { <nl> - mappingFunction = mapping ; <nl> - } else if ( mapping instanceof rethinkdb . query . Expression ) { <nl> - mappingFunction = rethinkdb . query . fn ( ' ' , mapping ) ; <nl> - } else if ( typeof mapping = = = ' function ' ) { <nl> - mappingFunction = rethinkdb . query . fn ( mapping ) ; <nl> - } else { <nl> - / / invalid mapping <nl> - } <nl> - <nl> - return new rethinkdb . query . MutateQuery ( this , mappingFunction ) ; <nl> + mapping = functionWrap_ ( mapping ) ; <nl> + return new rethinkdb . query . MutateQuery ( this , mapping ) ; <nl> } ; <nl> goog . exportProperty ( rethinkdb . query . Expression . prototype , ' mutate ' , <nl> rethinkdb . query . Expression . prototype . mutate ) ; <nl> mmm a / drivers / javascript / rethinkdb / test . js <nl> ppp b / drivers / javascript / rethinkdb / test . js <nl> function testJS ( ) { <nl> * / <nl> } <nl> <nl> + function testBetween ( ) { <nl> + tab . between ( 2 , 3 ) . length ( ) . run ( aeq ( 2 ) ) ; <nl> + } <nl> + <nl> function testUpdate1 ( ) { <nl> tab . update ( function ( a ) { <nl> a . 
updated = true ; <nl> runTests ( [ <nl> testTabMap , <nl> testTabReduce , <nl> testJS , <nl> + testBetween , <nl> testUpdate1 , <nl> testUpdate2 , <nl> testMutate1 , <nl> | js client adds function argument wrapper | rethinkdb/rethinkdb | 4305e6c472775a6b74a24b37454c0efe86394240 | 2012-09-04T18:05:00Z |
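The functionWrap_ helper above is an argument-normalization pattern: the API accepts several caller-friendly forms (a function expression, a plain expression, a raw JavaScript function) and coerces each to one canonical form before building the query term. Sketched here in C++ for contrast — the names are illustrative — the same normalization can be pushed into an overload set, so invalid forms fail at compile time rather than with a runtime TypeError:

    #include <functional>
    #include <iostream>

    // Canonical form every entry point works with internally.
    using Mapping = std::function<int(int)>;

    Mapping function_wrap(Mapping fun) { return fun; }  // pass through
    Mapping function_wrap(int constant) {               // constant -> function
      return [constant](int) { return constant; };
    }

    int main() {
      Mapping square = function_wrap([](int x) { return x * x; });
      Mapping five = function_wrap(5);
      std::cout << square(3) << " " << five(3) << "\n";  // prints: 9 5
    }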
mmm a / configure . ac <nl> ppp b / configure . ac <nl> case $ host in <nl> AC_CHECK_LIB ( [ iphlpapi ] , [ main ] , , AC_MSG_ERROR ( lib missing ) ) <nl> AC_CHECK_LIB ( [ crypt32 ] , [ main ] , , AC_MSG_ERROR ( lib missing ) ) <nl> <nl> + AX_CHECK_LINK_FLAG ( [ [ - static ] ] , [ LDFLAGS = " $ LDFLAGS - static " ] ) <nl> AX_CHECK_LINK_FLAG ( [ [ - static - libgcc ] ] , [ LDFLAGS = " $ LDFLAGS - static - libgcc " ] ) <nl> AX_CHECK_LINK_FLAG ( [ [ - static - libstdc + + ] ] , [ LDFLAGS = " $ LDFLAGS - static - libstdc + + " ] ) <nl> <nl> | build : Add - static for mingw builds | bitcoin/bitcoin | 6ac0b3be2d5c5277805c16b56ee5b2e59ba9e84c | 2014-01-22T09:21:34Z |
mmm a / CHANGELOG . md <nl> ppp b / CHANGELOG . md <nl> <nl> <nl> # # ClickHouse release 20 . 6 <nl> <nl> - # # # ClickHouse release v20 . 6 . 2 . 15 - prestable FIXME as compared to v20 . 5 . 4 . 40 - stable <nl> + # # ClickHouse release v20 . 6 . 2 . 15 - prestable FIXME as compared to v20 . 5 . 4 . 40 - stable <nl> <nl> # # # # New Feature <nl> <nl> <nl> * Install ` ca - certificates ` before the first ` apt - get update ` in Dockerfile . [ # 12095 ] ( https : / / github . com / ClickHouse / ClickHouse / pull / 12095 ) ( [ Ivan Blinkov ] ( https : / / github . com / blinkov ) ) . <nl> * Add new type of tests based on Testflows framework . [ # 12090 ] ( https : / / github . com / ClickHouse / ClickHouse / pull / 12090 ) ( [ vzakaznikov ] ( https : / / github . com / vzakaznikov ) ) . <nl> <nl> - # # # # NO CL ENTRY <nl> - <nl> - * NO CL ENTRY : ' Backport [ # 12700 ] ( https : / / github . com / ClickHouse / ClickHouse / issues / 12700 ) to 20 . 6 : Fix performance with large tuples ' . [ # 13187 ] ( https : / / github . com / ClickHouse / ClickHouse / pull / 13187 ) ( [ robot - clickhouse ] ( https : / / github . com / robot - clickhouse ) ) . <nl> - * NO CL ENTRY : ' Backport [ # 13075 ] ( https : / / github . com / ClickHouse / ClickHouse / issues / 13075 ) to 20 . 6 : Fix 11327 ' . [ # 13184 ] ( https : / / github . com / ClickHouse / ClickHouse / pull / 13184 ) ( [ robot - clickhouse ] ( https : / / github . com / robot - clickhouse ) ) . <nl> - * NO CL ENTRY : ' Backport [ # 13081 ] ( https : / / github . com / ClickHouse / ClickHouse / issues / 13081 ) to 20 . 6 : Fix wrong index analysis with functions ' . [ # 13146 ] ( https : / / github . com / ClickHouse / ClickHouse / pull / 13146 ) ( [ robot - clickhouse ] ( https : / / github . com / robot - clickhouse ) ) . <nl> - * NO CL ENTRY : ' Backport [ # 13009 ] ( https : / / github . com / ClickHouse / ClickHouse / issues / 13009 ) to 20 . 6 : Fix 12623 ' . [ # 13051 ] ( https : / / github . com / ClickHouse / ClickHouse / pull / 13051 ) ( [ robot - clickhouse ] ( https : / / github . com / robot - clickhouse ) ) . <nl> - * NO CL ENTRY : ' Backport [ # 12989 ] ( https : / / github . com / ClickHouse / ClickHouse / issues / 12989 ) to 20 . 6 : Fix ` Block structure mismatch ` error for queries with UNION and JOIN ' . [ # 13049 ] ( https : / / github . com / ClickHouse / ClickHouse / pull / 13049 ) ( [ robot - clickhouse ] ( https : / / github . com / robot - clickhouse ) ) . <nl> - * NO CL ENTRY : ' Backport [ # 12982 ] ( https : / / github . com / ClickHouse / ClickHouse / issues / 12982 ) to 20 . 6 : Merging [ # 12548 ] ( https : / / github . com / ClickHouse / ClickHouse / issues / 12548 ) - Correction to ` merge_with_ttl_timeout ` logic by @ excitoon ' . [ # 13031 ] ( https : / / github . com / ClickHouse / ClickHouse / pull / 13031 ) ( [ robot - clickhouse ] ( https : / / github . com / robot - clickhouse ) ) . <nl> - * NO CL ENTRY : ' Backport [ # 12760 ] ( https : / / github . com / ClickHouse / ClickHouse / issues / 12760 ) to 20 . 6 : Sticking mutations bug ' . [ # 13011 ] ( https : / / github . com / ClickHouse / ClickHouse / pull / 13011 ) ( [ robot - clickhouse ] ( https : / / github . com / robot - clickhouse ) ) . <nl> - * NO CL ENTRY : ' Backport [ # 12664 ] ( https : / / github . com / ClickHouse / ClickHouse / issues / 12664 ) to 20 . 6 : Fix move_functions_out_of_any optimisation with lambda ' . [ # 12994 ] ( https : / / github . 
com / ClickHouse / ClickHouse / pull / 12994 ) ( [ robot - clickhouse ] ( https : / / github . com / robot - clickhouse ) ) . <nl> - * NO CL ENTRY : ' Backport [ # 12658 ] ( https : / / github . com / ClickHouse / ClickHouse / issues / 12658 ) to 20 . 6 : Fix SIGSEGV in StorageKafka when broker is unavailable ' . [ # 12971 ] ( https : / / github . com / ClickHouse / ClickHouse / pull / 12971 ) ( [ robot - clickhouse ] ( https : / / github . com / robot - clickhouse ) ) . <nl> - * NO CL ENTRY : ' Backport [ # 12663 ] ( https : / / github . com / ClickHouse / ClickHouse / issues / 12663 ) to 20 . 6 : ISSUES - 12293 allow push predicate when subquery contains with clause ' . [ # 12868 ] ( https : / / github . com / ClickHouse / ClickHouse / pull / 12868 ) ( [ robot - clickhouse ] ( https : / / github . com / robot - clickhouse ) ) . <nl> - * NO CL ENTRY : ' Backport [ # 12672 ] ( https : / / github . com / ClickHouse / ClickHouse / issues / 12672 ) to 20 . 6 : Fix memory tracking for input_format_parallel_parsing ' . [ # 12864 ] ( https : / / github . com / ClickHouse / ClickHouse / pull / 12864 ) ( [ robot - clickhouse ] ( https : / / github . com / robot - clickhouse ) ) . <nl> - * NO CL ENTRY : ' Backport [ # 12492 ] ( https : / / github . com / ClickHouse / ClickHouse / issues / 12492 ) to 20 . 6 : Make read buffer size lower , while reading from compact parts ' . [ # 12862 ] ( https : / / github . com / ClickHouse / ClickHouse / pull / 12862 ) ( [ robot - clickhouse ] ( https : / / github . com / robot - clickhouse ) ) . <nl> - * NO CL ENTRY : ' Backport [ # 12659 ] ( https : / / github . com / ClickHouse / ClickHouse / issues / 12659 ) to 20 . 6 : ISSUES - 10572 fix bloom filter with const column ' . [ # 12858 ] ( https : / / github . com / ClickHouse / ClickHouse / pull / 12858 ) ( [ robot - clickhouse ] ( https : / / github . com / robot - clickhouse ) ) . <nl> - * NO CL ENTRY : ' Backport [ # 12480 ] ( https : / / github . com / ClickHouse / ClickHouse / issues / 12480 ) to 20 . 6 : [ RFC ] Fix SETTINGS parse after FORMAT ' . [ # 12803 ] ( https : / / github . com / ClickHouse / ClickHouse / pull / 12803 ) ( [ robot - clickhouse ] ( https : / / github . com / robot - clickhouse ) ) . <nl> - * NO CL ENTRY : ' Backport [ # 12458 ] ( https : / / github . com / ClickHouse / ClickHouse / issues / 12458 ) to 20 . 6 : Fix crash in ' JOIN dict ON expr ( dict_key ) = k ' ' . [ # 12725 ] ( https : / / github . com / ClickHouse / ClickHouse / pull / 12725 ) ( [ robot - clickhouse ] ( https : / / github . com / robot - clickhouse ) ) . <nl> - * NO CL ENTRY : ' Backport [ # 12302 ] ( https : / / github . com / ClickHouse / ClickHouse / issues / 12302 ) to 20 . 6 : kafka : fix SIGSEGV if there is a message with error in the middle of the batch ' . [ # 12724 ] ( https : / / github . com / ClickHouse / ClickHouse / pull / 12724 ) ( [ robot - clickhouse ] ( https : / / github . com / robot - clickhouse ) ) . <nl> - * NO CL ENTRY : ' Backport [ # 12382 ] ( https : / / github . com / ClickHouse / ClickHouse / issues / 12382 ) to 20 . 6 : Better errors for CLEAR / DROP columns ( possibly in partitions ) ' . [ # 12723 ] ( https : / / github . com / ClickHouse / ClickHouse / pull / 12723 ) ( [ robot - clickhouse ] ( https : / / github . com / robot - clickhouse ) ) . <nl> - * NO CL ENTRY : ' Backport [ # 12519 ] ( https : / / github . com / ClickHouse / ClickHouse / issues / 12519 ) to 20 . 6 : Fixing race condition in live view tables which could cause data duplication and live view tests ' . 
[ # 12722 ] ( https : / / github . com / ClickHouse / ClickHouse / pull / 12722 ) ( [ robot - clickhouse ] ( https : / / github . com / robot - clickhouse ) ) . <nl> - * NO CL ENTRY : ' Backport [ # 12545 ] ( https : / / github . com / ClickHouse / ClickHouse / issues / 12545 ) to 20 . 6 : Fix loading parts without checksums ' . [ # 12721 ] ( https : / / github . com / ClickHouse / ClickHouse / pull / 12721 ) ( [ robot - clickhouse ] ( https : / / github . com / robot - clickhouse ) ) . <nl> - * NO CL ENTRY : ' Backport [ # 12624 ] ( https : / / github . com / ClickHouse / ClickHouse / issues / 12624 ) to 20 . 6 : Fix error message about granularity ' . [ # 12720 ] ( https : / / github . com / ClickHouse / ClickHouse / pull / 12720 ) ( [ robot - clickhouse ] ( https : / / github . com / robot - clickhouse ) ) . <nl> - * NO CL ENTRY : ' Backport [ # 12618 ] ( https : / / github . com / ClickHouse / ClickHouse / issues / 12618 ) to 20 . 6 : Fix external sort pipeline stuck ' . [ # 12699 ] ( https : / / github . com / ClickHouse / ClickHouse / pull / 12699 ) ( [ robot - clickhouse ] ( https : / / github . com / robot - clickhouse ) ) . <nl> - * NO CL ENTRY : ' Backport [ # 12633 ] ( https : / / github . com / ClickHouse / ClickHouse / issues / 12633 ) to 20 . 6 : Fix " There is no supertype " error on ALTER UPDATE [ # 7306 ] ( https : / / github . com / ClickHouse / ClickHouse / issues / 7306 ) ' . [ # 12698 ] ( https : / / github . com / ClickHouse / ClickHouse / pull / 12698 ) ( [ robot - clickhouse ] ( https : / / github . com / robot - clickhouse ) ) . <nl> - * NO CL ENTRY : ' Backport [ # 12648 ] ( https : / / github . com / ClickHouse / ClickHouse / issues / 12648 ) to 20 . 6 : Add support for function if with Array ( UUID ) arguments ' . [ # 12697 ] ( https : / / github . com / ClickHouse / ClickHouse / pull / 12697 ) ( [ robot - clickhouse ] ( https : / / github . com / robot - clickhouse ) ) . <nl> - * NO CL ENTRY : ' Backport [ # 12625 ] ( https : / / github . com / ClickHouse / ClickHouse / issues / 12625 ) to 20 . 6 : Better exception during directories creation ' . [ # 12696 ] ( https : / / github . com / ClickHouse / ClickHouse / pull / 12696 ) ( [ robot - clickhouse ] ( https : / / github . com / robot - clickhouse ) ) . <nl> - * NO CL ENTRY : ' Backport [ # 12613 ] ( https : / / github . com / ClickHouse / ClickHouse / issues / 12613 ) to 20 . 6 : Remove sort description from streams ' . [ # 12695 ] ( https : / / github . com / ClickHouse / ClickHouse / pull / 12695 ) ( [ robot - clickhouse ] ( https : / / github . com / robot - clickhouse ) ) . <nl> - <nl> # # # # NO CL CATEGORY <nl> <nl> * . . . [ # 12431 ] ( https : / / github . com / ClickHouse / ClickHouse / pull / 12431 ) ( [ Tom Bombadil ] ( https : / / github . com / ithangzhou ) ) . <nl> * * Not for changelog . [ # 12265 ] ( https : / / github . com / ClickHouse / ClickHouse / pull / 12265 ) ( [ Alexander Kuzmenkov ] ( https : / / github . com / akuzm ) ) . <nl> <nl> - <nl> - <nl> # # ClickHouse release 20 . 5 <nl> <nl> # # # ClickHouse release v20 . 5 . 2 . 7 - stable 2020 - 07 - 02 <nl> | Update CHANGELOG . md | ClickHouse/ClickHouse | 7315eac887c622f6df2e2f90c3e2f1bd076d78cd | 2020-08-07T13:19:51Z |
mmm a / src / core / lib / debug / stats . c <nl> ppp b / src / core / lib / debug / stats . c <nl> void grpc_stats_diff ( const grpc_stats_data * b , const grpc_stats_data * a , <nl> } <nl> <nl> int grpc_stats_histo_find_bucket_slow ( grpc_exec_ctx * exec_ctx , double value , <nl> - const double * table , int table_size ) { <nl> + const int * table , int table_size ) { <nl> GRPC_STATS_INC_HISTOGRAM_SLOW_LOOKUPS ( exec_ctx ) ; <nl> if ( value < 0 . 0 ) return 0 ; <nl> if ( value > = table [ table_size - 1 ] ) return table_size - 1 ; <nl> size_t grpc_stats_histo_count ( const grpc_stats_data * stats , <nl> } <nl> <nl> static double threshold_for_count_below ( const gpr_atm * bucket_counts , <nl> - const double * bucket_boundaries , <nl> + const int * bucket_boundaries , <nl> int num_buckets , double count_below ) { <nl> double count_so_far ; <nl> double lower_bound ; <nl> char * grpc_stats_data_as_json ( const grpc_stats_data * data ) { <nl> gpr_asprintf ( & tmp , " ] , \ " % s_bkt \ " : [ " , grpc_stats_histogram_name [ i ] ) ; <nl> gpr_strvec_add ( & v , tmp ) ; <nl> for ( int j = 0 ; j < grpc_stats_histo_buckets [ i ] ; j + + ) { <nl> - gpr_asprintf ( & tmp , " % s % lf " , j = = 0 ? " " : " , " , <nl> + gpr_asprintf ( & tmp , " % s % d " , j = = 0 ? " " : " , " , <nl> grpc_stats_histo_bucket_boundaries [ i ] [ j ] ) ; <nl> gpr_strvec_add ( & v , tmp ) ; <nl> } <nl> mmm a / src / core / lib / debug / stats . h <nl> ppp b / src / core / lib / debug / stats . h <nl> void grpc_stats_diff ( const grpc_stats_data * b , const grpc_stats_data * a , <nl> grpc_stats_data * c ) ; <nl> char * grpc_stats_data_as_json ( const grpc_stats_data * data ) ; <nl> int grpc_stats_histo_find_bucket_slow ( grpc_exec_ctx * exec_ctx , double value , <nl> - const double * table , int table_size ) ; <nl> + const int * table , int table_size ) ; <nl> double grpc_stats_histo_percentile ( const grpc_stats_data * data , <nl> grpc_stats_histograms histogram , <nl> double percentile ) ; <nl> mmm a / src / core / lib / debug / stats_data . c <nl> ppp b / src / core / lib / debug / stats_data . c <nl> const char * grpc_stats_histogram_name [ GRPC_STATS_HISTOGRAM_COUNT ] = { <nl> " tcp_write_size " , " tcp_write_iov_size " , " tcp_read_size " , <nl> " tcp_read_offer " , " tcp_read_iov_size " , " http2_send_message_size " , <nl> } ; <nl> - const double grpc_stats_table_0 [ 64 ] = { 0 , <nl> - 1 , <nl> - 2 , <nl> - 3 , <nl> - 4 , <nl> - 5 . 17974600698 , <nl> - 6 . 70744217421 , <nl> - 8 . 68571170472 , <nl> - 11 . 2474451301 , <nl> - 14 . 5647272503 , <nl> - 18 . 8603969544 , <nl> - 24 . 4230164536 , <nl> - 31 . 6262554885 , <nl> - 40 . 9539926456 , <nl> - 53 . 032819969 , <nl> - 68 . 6741343683 , <nl> - 88 . 9286433193 , <nl> - 115 . 156946285 , <nl> - 149 . 120933174 , <nl> - 193 . 102139541 , <nl> - 250 . 055009057 , <nl> - 323 . 805358672 , <nl> - 419 . 307378404 , <nl> - 542 . 976429747 , <nl> - 703 . 119998467 , <nl> - 910 . 495751121 , <nl> - 1179 . 03418281 , <nl> - 1526 . 77440013 , <nl> - 1977 . 07590065 , <nl> - 2560 . 18775048 , <nl> - 3315 . 28056941 , <nl> - 4293 . 07782286 , <nl> - 5559 . 26317765 , <nl> - 7198 . 89281155 , <nl> - 9322 . 10907382 , <nl> - 12071 . 5393129 , <nl> - 15631 . 8768886 , <nl> - 20242 . 2879738 , <nl> - 26212 . 4775761 , <nl> - 33943 . 4940145 , <nl> - 43954 . 6693961 , <nl> - 56918 . 5058232 , <nl> - 73705 . 8508152 , <nl> - 95444 . 3966128 , <nl> - 123594 . 433061 , <nl> - 160046 . 942783 , <nl> - 207250 . 628202 , <nl> - 268376 . 403469 , <nl> - 347530 . 401059 , <nl> - 450029 . 
801797 , <nl> - 582760 . 01722 , <nl> - 754637 . 218056 , <nl> - 977207 . 279236 , <nl> - 1265421 . 37565 , <nl> - 1638640 . 32942 , <nl> - 2121935 . 1758 , <nl> - 2747771 . 31348 , <nl> - 3558189 . 37227 , <nl> - 4607629 . 29828 , <nl> - 5966587 . 36485 , <nl> - 7726351 . 7696 , <nl> - 10005134 . 9318 , <nl> - 12956014 . 428 , <nl> - 16777216 . 0 } ; <nl> + const int grpc_stats_table_0 [ 64 ] = { <nl> + 0 , 1 , 2 , 3 , 4 , 6 , 8 , 11 , <nl> + 15 , 20 , 26 , 34 , 44 , 57 , 74 , 96 , <nl> + 124 , 160 , 206 , 265 , 341 , 439 , 565 , 727 , <nl> + 935 , 1202 , 1546 , 1988 , 2556 , 3286 , 4225 , 5432 , <nl> + 6983 , 8977 , 11540 , 14834 , 19069 , 24513 , 31510 , 40505 , <nl> + 52067 , 66929 , 86033 , 110590 , 142157 , 182734 , 234893 , 301940 , <nl> + 388125 , 498910 , 641316 , 824370 , 1059674 , 1362141 , 1750943 , 2250722 , <nl> + 2893155 , 3718960 , 4780478 , 6144988 , 7898976 , 10153611 , 13051794 , 16777216 } ; <nl> const uint8_t grpc_stats_table_1 [ 87 ] = { <nl> - 0 , 1 , 3 , 3 , 4 , 6 , 6 , 7 , 9 , 9 , 10 , 12 , 12 , 13 , 15 , 15 , 16 , 18 , <nl> - 18 , 19 , 21 , 21 , 22 , 24 , 24 , 25 , 27 , 27 , 28 , 30 , 30 , 31 , 32 , 34 , 34 , 36 , <nl> - 36 , 37 , 39 , 39 , 40 , 42 , 42 , 43 , 44 , 46 , 46 , 47 , 49 , 49 , 51 , 51 , 52 , 53 , <nl> - 55 , 55 , 56 , 58 , 58 , 59 , 61 , 61 , 63 , 63 , 64 , 65 , 67 , 67 , 68 , 70 , 70 , 71 , <nl> - 73 , 73 , 75 , 75 , 76 , 77 , 79 , 79 , 80 , 82 , 82 , 83 , 85 , 85 , 87 } ; <nl> - const double grpc_stats_table_2 [ 64 ] = { 0 , <nl> - 1 , <nl> - 2 , <nl> - 3 , <nl> - 4 , <nl> - 5 , <nl> - 6 , <nl> - 7 , <nl> - 8 , <nl> - 9 , <nl> - 10 , <nl> - 11 , <nl> - 12 . 0020736244 , <nl> - 13 . 0954337532 , <nl> - 14 . 2883963681 , <nl> - 15 . 5900350167 , <nl> - 17 . 0102498252 , <nl> - 18 . 5598427974 , <nl> - 20 . 2505999737 , <nl> - 22 . 0953810747 , <nl> - 24 . 1082173107 , <nl> - 26 . 3044181014 , <nl> - 28 . 7006875181 , <nl> - 31 . 315251333 , <nl> - 34 . 1679956422 , <nl> - 37 . 2806181177 , <nl> - 40 . 6767930374 , <nl> - 44 . 3823513489 , <nl> - 48 . 4254771375 , <nl> - 52 . 8369219909 , <nl> - 57 . 6502388927 , <nl> - 62 . 902037423 , <nl> - 68 . 6322622068 , <nl> - 74 . 8844967285 , <nl> - 81 . 7062948236 , <nl> - 89 . 1495423679 , <nl> - 97 . 2708519163 , <nl> - 106 . 131993291 , <nl> - 115 . 800363399 , <nl> - 126 . 34949884 , <nl> - 137 . 859635225 , <nl> - 150 . 418317437 , <nl> - 164 . 121065485 , <nl> - 179 . 072101023 , <nl> - 195 . 38514005 , <nl> - 213 . 184257818 , <nl> - 232 . 604832535 , <nl> - 253 . 794575043 , <nl> - 276 . 914652285 , <nl> - 302 . 140913126 , <nl> - 329 . 665225843 , <nl> - 359 . 696937452 , <nl> - 392 . 464465978 , <nl> - 428 . 217037783 , <nl> - 467 . 226583154 , <nl> - 509 . 78980457 , <nl> - 556 . 230433401 , <nl> - 606 . 901692163 , <nl> - 662 . 1889811 , <nl> - 722 . 512809492 , <nl> - 788 . 331994007 , <nl> - 860 . 147148411 , <nl> - 938 . 504491184 , <nl> - 1024 . 
0 } ; <nl> - const uint8_t grpc_stats_table_3 [ 52 ] = { <nl> - 0 , 1 , 2 , 3 , 4 , 5 , 6 , 7 , 8 , 9 , 10 , 11 , 12 , 13 , 14 , 15 , 16 , 17 , <nl> - 18 , 19 , 20 , 21 , 22 , 23 , 24 , 25 , 26 , 27 , 28 , 29 , 30 , 31 , 32 , 33 , 34 , 35 , <nl> - 36 , 37 , 38 , 39 , 40 , 41 , 42 , 43 , 44 , 45 , 46 , 47 , 48 , 49 , 50 , 52 } ; <nl> + 1 , 1 , 3 , 3 , 4 , 6 , 6 , 8 , 8 , 9 , 11 , 11 , 12 , 14 , 14 , 15 , 17 , 17 , <nl> + 18 , 20 , 20 , 21 , 23 , 23 , 24 , 25 , 27 , 27 , 28 , 30 , 30 , 31 , 33 , 33 , 34 , 35 , <nl> + 37 , 37 , 39 , 39 , 40 , 41 , 43 , 43 , 44 , 46 , 46 , 47 , 48 , 50 , 50 , 51 , 53 , 53 , <nl> + 55 , 55 , 56 , 57 , 59 , 59 , 60 , 62 , 62 , 63 , 64 , 66 , 66 , 67 , 69 , 69 , 71 , 71 , <nl> + 72 , 73 , 75 , 75 , 76 , 78 , 78 , 79 , 80 , 82 , 82 , 83 , 85 , 85 , 87 } ; <nl> + const int grpc_stats_table_2 [ 64 ] = { <nl> + 0 , 1 , 2 , 3 , 4 , 5 , 6 , 7 , 8 , 9 , 10 , 11 , 13 , <nl> + 15 , 17 , 19 , 21 , 23 , 25 , 28 , 31 , 34 , 37 , 41 , 45 , 49 , <nl> + 54 , 59 , 64 , 70 , 76 , 83 , 90 , 98 , 106 , 115 , 125 , 136 , 147 , <nl> + 159 , 172 , 186 , 201 , 218 , 236 , 255 , 276 , 299 , 323 , 349 , 377 , 408 , <nl> + 441 , 477 , 515 , 556 , 601 , 649 , 701 , 757 , 817 , 881 , 950 , 1024 } ; <nl> + const uint8_t grpc_stats_table_3 [ 104 ] = { <nl> + 2 , 2 , 2 , 6 , 6 , 6 , 6 , 9 , 9 , 9 , 11 , 11 , 13 , 13 , 15 , 15 , 17 , 17 , <nl> + 20 , 20 , 20 , 23 , 23 , 23 , 25 , 25 , 26 , 28 , 28 , 30 , 30 , 32 , 32 , 35 , 35 , 35 , <nl> + 37 , 37 , 40 , 40 , 40 , 41 , 43 , 43 , 44 , 46 , 46 , 48 , 48 , 50 , 50 , 52 , 52 , 55 , <nl> + 55 , 55 , 57 , 57 , 58 , 59 , 61 , 61 , 63 , 63 , 65 , 65 , 67 , 67 , 69 , 69 , 71 , 71 , <nl> + 73 , 73 , 74 , 76 , 76 , 77 , 79 , 79 , 81 , 81 , 83 , 83 , 85 , 85 , 88 , 88 , 88 , 89 , <nl> + 90 , 92 , 92 , 93 , 95 , 95 , 97 , 97 , 99 , 99 , 101 , 101 , 104 , 104 } ; <nl> const int grpc_stats_histo_buckets [ 6 ] = { 64 , 64 , 64 , 64 , 64 , 64 } ; <nl> const int grpc_stats_histo_start [ 6 ] = { 0 , 64 , 128 , 192 , 256 , 320 } ; <nl> - const double * const grpc_stats_histo_bucket_boundaries [ 6 ] = { <nl> + const int * const grpc_stats_histo_bucket_boundaries [ 6 ] = { <nl> grpc_stats_table_0 , grpc_stats_table_2 , grpc_stats_table_0 , <nl> grpc_stats_table_0 , grpc_stats_table_2 , grpc_stats_table_0 } ; <nl> mmm a / src / core / lib / debug / stats_data . h <nl> ppp b / src / core / lib / debug / stats_data . h <nl> typedef enum { <nl> if ( _val . uint < 4652218415073722368ull ) { \ <nl> GRPC_STATS_INC_HISTOGRAM ( \ <nl> ( exec_ctx ) , GRPC_STATS_HISTOGRAM_TCP_WRITE_IOV_SIZE , \ <nl> - grpc_stats_table_3 [ ( ( _val . uint - 4622945017495814144ull ) > > 49 ) ] ) ; \ <nl> + grpc_stats_table_3 [ ( ( _val . uint - 4622945017495814144ull ) > > 48 ) ] ) ; \ <nl> } else { \ <nl> GRPC_STATS_INC_HISTOGRAM ( \ <nl> ( exec_ctx ) , GRPC_STATS_HISTOGRAM_TCP_WRITE_IOV_SIZE , \ <nl> typedef enum { <nl> if ( _val . uint < 4652218415073722368ull ) { \ <nl> GRPC_STATS_INC_HISTOGRAM ( \ <nl> ( exec_ctx ) , GRPC_STATS_HISTOGRAM_TCP_READ_IOV_SIZE , \ <nl> - grpc_stats_table_3 [ ( ( _val . uint - 4622945017495814144ull ) > > 49 ) ] ) ; \ <nl> + grpc_stats_table_3 [ ( ( _val . 
uint - 4622945017495814144ull ) > > 48 ) ] ) ; \ <nl> } else { \ <nl> GRPC_STATS_INC_HISTOGRAM ( \ <nl> ( exec_ctx ) , GRPC_STATS_HISTOGRAM_TCP_READ_IOV_SIZE , \ <nl> typedef enum { <nl> } \ <nl> } \ <nl> } while ( false ) <nl> - extern const double grpc_stats_table_0 [ 64 ] ; <nl> + extern const int grpc_stats_table_0 [ 64 ] ; <nl> extern const uint8_t grpc_stats_table_1 [ 87 ] ; <nl> - extern const double grpc_stats_table_2 [ 64 ] ; <nl> - extern const uint8_t grpc_stats_table_3 [ 52 ] ; <nl> + extern const int grpc_stats_table_2 [ 64 ] ; <nl> + extern const uint8_t grpc_stats_table_3 [ 104 ] ; <nl> extern const int grpc_stats_histo_buckets [ 6 ] ; <nl> extern const int grpc_stats_histo_start [ 6 ] ; <nl> - extern const double * const grpc_stats_histo_bucket_boundaries [ 6 ] ; <nl> + extern const int * const grpc_stats_histo_bucket_boundaries [ 6 ] ; <nl> <nl> # endif / * GRPC_CORE_LIB_DEBUG_STATS_DATA_H * / <nl> mmm a / tools / codegen / core / gen_stats_data . py <nl> ppp b / tools / codegen / core / gen_stats_data . py <nl> def gen_bucket_code ( histogram ) : <nl> first_nontrivial = None <nl> first_unmapped = None <nl> while len ( bounds ) < histogram . buckets : <nl> - mul = math . pow ( float ( histogram . max ) / bounds [ - 1 ] , <nl> - 1 . 0 / ( histogram . buckets - len ( bounds ) ) ) <nl> - nextb = bounds [ - 1 ] * mul <nl> - if nextb < bounds [ - 1 ] + 1 : <nl> + if len ( bounds ) = = histogram . buckets - 1 : <nl> + nextb = int ( histogram . max ) <nl> + else : <nl> + mul = math . pow ( float ( histogram . max ) / bounds [ - 1 ] , <nl> + 1 . 0 / ( histogram . buckets - len ( bounds ) ) ) <nl> + nextb = int ( math . ceil ( bounds [ - 1 ] * mul ) ) <nl> + if nextb < = bounds [ - 1 ] + 1 : <nl> nextb = bounds [ - 1 ] + 1 <nl> elif not done_trivial : <nl> done_trivial = True <nl> first_nontrivial = len ( bounds ) <nl> bounds . append ( nextb ) <nl> - bounds_idx = decl_static_table ( bounds , ' double ' ) <nl> + bounds_idx = decl_static_table ( bounds , ' int ' ) <nl> if done_trivial : <nl> first_nontrivial_code = dbl2u64 ( first_nontrivial ) <nl> code_bounds = [ dbl2u64 ( x ) - first_nontrivial_code for x in bounds ] <nl> def put_banner ( files , banner ) : <nl> <nl> print > > H , " extern const int grpc_stats_histo_buckets [ % d ] ; " % len ( inst_map [ ' Histogram ' ] ) <nl> print > > H , " extern const int grpc_stats_histo_start [ % d ] ; " % len ( inst_map [ ' Histogram ' ] ) <nl> - print > > H , " extern const double * const grpc_stats_histo_bucket_boundaries [ % d ] ; " % len ( inst_map [ ' Histogram ' ] ) <nl> + print > > H , " extern const int * const grpc_stats_histo_bucket_boundaries [ % d ] ; " % len ( inst_map [ ' Histogram ' ] ) <nl> <nl> print > > H <nl> print > > H , " # endif / * GRPC_CORE_LIB_DEBUG_STATS_DATA_H * / " <nl> def put_banner ( files , banner ) : <nl> len ( inst_map [ ' Histogram ' ] ) , ' , ' . join ( ' % s ' % x for x in histo_buckets ) ) <nl> print > > C , " const int grpc_stats_histo_start [ % d ] = { % s } ; " % ( <nl> len ( inst_map [ ' Histogram ' ] ) , ' , ' . join ( ' % s ' % x for x in histo_start ) ) <nl> - print > > C , " const double * const grpc_stats_histo_bucket_boundaries [ % d ] = { % s } ; " % ( <nl> + print > > C , " const int * const grpc_stats_histo_bucket_boundaries [ % d ] = { % s } ; " % ( <nl> len ( inst_map [ ' Histogram ' ] ) , ' , ' . 
join ( ' grpc_stats_table_ % d ' % x for x in histo_bucket_boundaries ) ) <nl> | Restrict histograms to integral boundaries | grpc/grpc | da5cd59ed3f383318fb01b1bf461489cc828d453 | 2017-09-01T17:03:40Z |
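The grpc row above narrows the histogram bucket boundaries from `double` to `int` and reworks `gen_bucket_code` in `gen_stats_data.py` so the emitted tables (for example `grpc_stats_table_0`, which now ends exactly at 16777216) contain strictly increasing integers. Below is a minimal, hedged sketch of the revised boundary computation: geometric growth rounded up to the next integer, the final boundary pinned to the histogram max, and a floor of +1 between neighbours. The `[0, 1]` seed is an assumption consistent with the generated tables, and the `done_trivial` bookkeeping from the real script is omitted to keep the sketch self-contained.

```python
# Sketch of the revised integral bucket-boundary generation.
import math

def gen_bounds(histo_max, buckets):
    bounds = [0, 1]  # assumed seed, matching the generated tables
    while len(bounds) < buckets:
        if len(bounds) == buckets - 1:
            nextb = int(histo_max)  # pin the final boundary to the max
        else:
            # grow geometrically toward histo_max over the remaining buckets
            mul = math.pow(float(histo_max) / bounds[-1],
                           1.0 / (buckets - len(bounds)))
            nextb = int(math.ceil(bounds[-1] * mul))
        if nextb <= bounds[-1] + 1:
            nextb = bounds[-1] + 1  # keep boundaries strictly increasing
        bounds.append(nextb)
    return bounds

# The 64-bucket table for a 16 MiB max now ends exactly at the max:
print(gen_bounds(16777216, 64)[-1])  # -> 16777216
```

With integral boundaries the tables can also be printed exactly with `%d`, which is what the first hunk of the diff changes in `grpc_stats_data_as_json`.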
mmm a / tensorflow / contrib / autograph / converters / builtin_functions_test . py <nl> ppp b / tensorflow / contrib / autograph / converters / builtin_functions_test . py <nl> def test_fn ( a ) : <nl> <nl> with self . converted ( test_fn , builtin_functions , { ' len ' : len } , <nl> array_ops . shape ) as result : <nl> - with self . test_session ( ) as sess : <nl> + with self . cached_session ( ) as sess : <nl> ops = result . test_fn ( constant_op . constant ( [ 0 , 0 , 0 ] ) ) <nl> self . assertEqual ( sess . run ( ops ) , 3 ) <nl> <nl> def test_fn ( a ) : <nl> return print ( a ) <nl> <nl> with self . converted ( test_fn , builtin_functions , { ' print ' : print } ) as result : <nl> - with self . test_session ( ) as sess : <nl> + with self . cached_session ( ) as sess : <nl> with self . assertPrints ( ' a \ n ' ) : <nl> sess . run ( result . test_fn ( ' a ' ) ) <nl> <nl> def test_fn ( a , b , c ) : <nl> return print ( a , b , c ) <nl> <nl> with self . converted ( test_fn , builtin_functions , { ' print ' : print } ) as result : <nl> - with self . test_session ( ) as sess : <nl> + with self . cached_session ( ) as sess : <nl> with self . assertPrints ( ' a 1 [ 2 , 3 ] \ n ' ) : <nl> sess . run ( <nl> result . test_fn ( <nl> mmm a / tensorflow / contrib / autograph / converters / call_trees_test . py <nl> ppp b / tensorflow / contrib / autograph / converters / call_trees_test . py <nl> def test_fn ( a ) : <nl> setattr ( a , ' foo ' , ' bar ' ) <nl> <nl> with self . converted ( test_fn , call_trees , { ' setattr ' : setattr } ) as result : <nl> - with self . test_session ( ) as sess : <nl> + with self . cached_session ( ) as sess : <nl> <nl> class Dummy ( object ) : <nl> pass <nl> def test_fn ( ) : <nl> <nl> with self . converted ( test_fn , call_trees , { ' np ' : np } , <nl> dtypes . int64 ) as result : <nl> - with self . test_session ( ) as sess : <nl> + with self . cached_session ( ) as sess : <nl> self . assertTrue ( isinstance ( result . test_fn ( ) , ops . Tensor ) ) <nl> self . assertIn ( sess . run ( result . test_fn ( ) ) , ( 0 , 1 , 2 ) ) <nl> <nl> def test_fn ( a ) : <nl> node = call_trees . transform ( node , ctx ) <nl> <nl> with self . compiled ( node , ns ) as result : <nl> - with self . test_session ( ) as sess : <nl> + with self . cached_session ( ) as sess : <nl> result_tensor = result . test_fn ( constant_op . constant ( 1 ) ) <nl> self . assertEquals ( sess . run ( result_tensor ) , 3 ) <nl> <nl> mmm a / tensorflow / contrib / autograph / converters / control_flow_test . py <nl> ppp b / tensorflow / contrib / autograph / converters / control_flow_test . py <nl> def assertTransformedResult ( self , test_fn , inputs , expected ) : <nl> inputs = ( inputs , ) <nl> with self . converted ( test_fn , control_flow , { } , <nl> constant_op . constant ) as result : <nl> - with self . test_session ( ) as sess : <nl> + with self . cached_session ( ) as sess : <nl> self . assertEqual ( sess . run ( result . test_fn ( * inputs ) ) , expected ) <nl> <nl> def test_while_basic ( self ) : <nl> def test_fn ( n , obj ) : <nl> return obj <nl> <nl> with self . converted ( test_fn , control_flow , { } ) as result : <nl> - with self . test_session ( ) as sess : <nl> + with self . cached_session ( ) as sess : <nl> res_obj = result . test_fn ( constant_op . constant ( 1 ) , TestClass ( 0 , 0 ) ) <nl> self . assertEqual ( sess . run ( ( res_obj . a , res_obj . b ) ) , ( - 1 , 0 ) ) <nl> res_obj = result . test_fn ( constant_op . 
constant ( - 1 ) , TestClass ( 0 , 0 ) ) <nl> mmm a / tensorflow / contrib / autograph / converters / lists_test . py <nl> ppp b / tensorflow / contrib / autograph / converters / lists_test . py <nl> def test_fn ( ) : <nl> <nl> ns = { ' special_functions ' : special_functions } <nl> with self . converted ( test_fn , lists , ns ) as result : <nl> - with self . test_session ( ) as sess : <nl> + with self . cached_session ( ) as sess : <nl> tl = result . test_fn ( ) <nl> r = list_ops . tensor_list_stack ( tl , dtypes . int32 ) <nl> self . assertAllEqual ( sess . run ( r ) , [ 1 , 2 , 3 ] ) <nl> def test_fn ( ) : <nl> node = lists . transform ( node , ctx ) <nl> <nl> with self . compiled ( node , ns , dtypes . int32 ) as result : <nl> - with self . test_session ( ) as sess : <nl> + with self . cached_session ( ) as sess : <nl> ts , tl = result . test_fn ( ) <nl> r = list_ops . tensor_list_stack ( tl , dtypes . int32 ) <nl> self . assertAllEqual ( sess . run ( r ) , [ 1 , 2 ] ) <nl> def test_fn ( ) : <nl> node = lists . transform ( node , ctx ) <nl> <nl> with self . compiled ( node , { } , array_ops . stack , dtypes . int32 ) as result : <nl> - with self . test_session ( ) as sess : <nl> + with self . cached_session ( ) as sess : <nl> self . assertAllEqual ( sess . run ( result . test_fn ( ) ) , [ 1 , 2 , 3 ] ) <nl> <nl> # TODO ( mdan ) : Add a test with tf . stack with axis kwarg . <nl> mmm a / tensorflow / contrib / autograph / converters / logical_expressions_test . py <nl> ppp b / tensorflow / contrib / autograph / converters / logical_expressions_test . py <nl> def test_fn ( a , b ) : <nl> <nl> with self . converted ( test_fn , logical_expressions , { } , <nl> math_ops . equal ) as result : <nl> - with self . test_session ( ) as sess : <nl> + with self . cached_session ( ) as sess : <nl> self . assertTrue ( sess . run ( result . test_fn ( 1 , 1 ) ) ) <nl> self . assertFalse ( sess . run ( result . test_fn ( 1 , 2 ) ) ) <nl> <nl> def test_fn ( a , b , c ) : <nl> <nl> with self . converted ( test_fn , logical_expressions , { } , math_ops . logical_or , <nl> math_ops . logical_and ) as result : <nl> - with self . test_session ( ) as sess : <nl> + with self . cached_session ( ) as sess : <nl> self . assertTrue ( sess . run ( result . test_fn ( True , False , True ) ) ) <nl> <nl> <nl> mmm a / tensorflow / contrib / autograph / converters / side_effect_guards_test . py <nl> ppp b / tensorflow / contrib / autograph / converters / side_effect_guards_test . py <nl> def test_fn ( a ) : <nl> self . assertEqual ( len ( node . body ) , 1 ) <nl> <nl> with self . compiled ( node , { } , state_ops . assign ) as result : <nl> - with self . test_session ( ) as sess : <nl> + with self . cached_session ( ) as sess : <nl> v = variable_scope . get_variable ( ' test ' , initializer = 2 ) <nl> sess . run ( v . initializer ) <nl> sess . run ( result . test_fn ( v ) ) <nl> def test_fn ( a ) : <nl> self . assertEqual ( len ( node . body ) , 1 ) <nl> <nl> with self . compiled ( node , { } , state_ops . assign ) as result : <nl> - with self . test_session ( ) as sess : <nl> + with self . cached_session ( ) as sess : <nl> v = variable_scope . get_variable ( ' test ' , initializer = 2 ) <nl> sess . run ( v . initializer ) <nl> sess . run ( result . test_fn ( v ) ) <nl> def test_fn ( a ) : <nl> self . assertEqual ( len ( node . body ) , 1 ) <nl> <nl> with self . compiled ( node , { } , control_flow_ops . Assert ) as result : <nl> - with self . test_session ( ) as sess : <nl> + with self . 
cached_session ( ) as sess : <nl> with self . assertRaisesRegexp ( errors_impl . InvalidArgumentError , <nl> ' expected in throw ' ) : <nl> sess . run ( result . test_fn ( constant_op . constant ( - 1 ) ) ) <nl> def test_fn ( a ) : <nl> self . assertEqual ( len ( node . body ) , 1 ) <nl> <nl> with self . compiled ( node , { } , state_ops . assign_add ) as result : <nl> - with self . test_session ( ) as sess : <nl> + with self . cached_session ( ) as sess : <nl> v = variable_scope . get_variable ( ' test ' , initializer = 2 ) <nl> sess . run ( v . initializer ) <nl> sess . run ( result . test_fn ( v ) ) <nl> def test_fn ( a ) : <nl> self . assertEqual ( len ( node . body [ 0 ] . body ) , 1 ) <nl> <nl> with self . compiled ( node , { } , state_ops . assign , ops . name_scope ) as result : <nl> - with self . test_session ( ) as sess : <nl> + with self . cached_session ( ) as sess : <nl> v = variable_scope . get_variable ( ' test ' , initializer = 2 ) <nl> sess . run ( v . initializer ) <nl> sess . run ( result . test_fn ( v ) ) <nl> def test_fn ( a ) : <nl> <nl> with self . compiled ( node , { } , state_ops . assign , <nl> state_ops . assign_add ) as result : <nl> - with self . test_session ( ) as sess : <nl> + with self . cached_session ( ) as sess : <nl> v = variable_scope . get_variable ( ' test ' , initializer = 2 ) <nl> sess . run ( v . initializer ) <nl> sess . run ( result . test_fn ( v ) ) <nl> mmm a / tensorflow / contrib / autograph / converters / slices_test . py <nl> ppp b / tensorflow / contrib / autograph / converters / slices_test . py <nl> def test_fn ( l ) : <nl> node = slices . transform ( node , ctx ) <nl> <nl> with self . compiled ( node , { } , dtypes . int32 ) as result : <nl> - with self . test_session ( ) as sess : <nl> + with self . cached_session ( ) as sess : <nl> tl = list_ops . tensor_list_from_tensor ( <nl> [ 1 , 2 ] , element_shape = constant_op . constant ( [ ] , dtype = dtypes . int32 ) ) <nl> y = result . test_fn ( tl ) <nl> mmm a / tensorflow / contrib / image / python / kernel_tests / dense_image_warp_test . py <nl> ppp b / tensorflow / contrib / image / python / kernel_tests / dense_image_warp_test . py <nl> def test_interpolate_small_grid_ij ( self ) : <nl> <nl> interp = dense_image_warp . _interpolate_bilinear ( grid , query_points ) <nl> <nl> - with self . test_session ( ) as sess : <nl> + with self . cached_session ( ) as sess : <nl> predicted = sess . run ( interp ) <nl> self . assertAllClose ( expected_results , predicted ) <nl> <nl> def test_interpolate_small_grid_xy ( self ) : <nl> interp = dense_image_warp . _interpolate_bilinear ( <nl> grid , query_points , indexing = ' xy ' ) <nl> <nl> - with self . test_session ( ) as sess : <nl> + with self . cached_session ( ) as sess : <nl> predicted = sess . run ( interp ) <nl> self . assertAllClose ( expected_results , predicted ) <nl> <nl> def test_interpolate_small_grid_batched ( self ) : <nl> <nl> interp = dense_image_warp . _interpolate_bilinear ( grid , query_points ) <nl> <nl> - with self . test_session ( ) as sess : <nl> + with self . cached_session ( ) as sess : <nl> predicted = sess . run ( interp ) <nl> self . assertAllClose ( expected_results , predicted ) <nl> <nl> def check_zero_flow_correctness ( self , shape , image_type , flow_type ) : <nl> flow_type ) <nl> interp = dense_image_warp . dense_image_warp ( image , flows ) <nl> <nl> - with self . test_session ( ) as sess : <nl> + with self . cached_session ( ) as sess : <nl> rand_image , rand_flows = self . 
get_random_image_and_flows ( <nl> shape , image_type , flow_type ) <nl> rand_flows * = 0 <nl> def check_interpolation_correctness ( self , <nl> flow_type ) <nl> interp = dense_image_warp . dense_image_warp ( image , flows ) <nl> low_precision = image_type = = ' float16 ' or flow_type = = ' float16 ' <nl> - with self . test_session ( ) as sess : <nl> + with self . cached_session ( ) as sess : <nl> rand_image , rand_flows = self . get_random_image_and_flows ( <nl> shape , image_type , flow_type ) <nl> <nl> def test_gradients_exist ( self ) : <nl> opt_func = optimizer . apply_gradients ( zip ( grad , [ flows ] ) ) <nl> init_op = variables . global_variables_initializer ( ) <nl> <nl> - with self . test_session ( ) as sess : <nl> + with self . cached_session ( ) as sess : <nl> sess . run ( init_op ) <nl> for _ in range ( 10 ) : <nl> sess . run ( opt_func ) <nl> mmm a / tensorflow / contrib / image / python / kernel_tests / distort_image_ops_test . py <nl> ppp b / tensorflow / contrib / image / python / kernel_tests / distort_image_ops_test . py <nl> def test_adjust_random_saturation_in_yiq ( self ) : <nl> ' gb_same ' , <nl> ' rgb_same ' , <nl> ] <nl> - with self . test_session ( ) : <nl> + with self . cached_session ( ) : <nl> for x_shape in x_shapes : <nl> for test_style in test_styles : <nl> x_np = np . random . rand ( * x_shape ) * 255 . <nl> mmm a / tensorflow / contrib / image / python / kernel_tests / image_ops_test . py <nl> ppp b / tensorflow / contrib / image / python / kernel_tests / image_ops_test . py <nl> class ImageOpsTest ( test_util . TensorFlowTestCase ) : <nl> <nl> def test_zeros ( self ) : <nl> for dtype in _DTYPES : <nl> - with self . test_session ( ) : <nl> + with self . cached_session ( ) : <nl> for shape in [ ( 5 , 5 ) , ( 24 , 24 ) , ( 2 , 24 , 24 , 3 ) ] : <nl> for angle in [ 0 , 1 , np . pi / 2 . 0 ] : <nl> image = array_ops . zeros ( shape , dtype ) <nl> def test_zeros ( self ) : <nl> <nl> def test_rotate_even ( self ) : <nl> for dtype in _DTYPES : <nl> - with self . test_session ( ) : <nl> + with self . cached_session ( ) : <nl> image = array_ops . reshape ( <nl> math_ops . cast ( math_ops . range ( 36 ) , dtype ) , ( 6 , 6 ) ) <nl> image_rep = array_ops . tile ( image [ None , : , : , None ] , [ 3 , 1 , 1 , 1 ] ) <nl> def test_rotate_even ( self ) : <nl> <nl> def test_rotate_odd ( self ) : <nl> for dtype in _DTYPES : <nl> - with self . test_session ( ) : <nl> + with self . cached_session ( ) : <nl> image = array_ops . reshape ( <nl> math_ops . cast ( math_ops . range ( 25 ) , dtype ) , ( 5 , 5 ) ) <nl> image_rep = array_ops . tile ( image [ None , : , : , None ] , [ 3 , 1 , 1 , 1 ] ) <nl> def test_rotate_odd ( self ) : <nl> <nl> def test_translate ( self ) : <nl> for dtype in _DTYPES : <nl> - with self . test_session ( ) : <nl> + with self . cached_session ( ) : <nl> image = constant_op . constant ( <nl> [ [ 1 , 0 , 1 , 0 ] , <nl> [ 0 , 1 , 0 , 1 ] , <nl> def test_translate ( self ) : <nl> <nl> def test_compose ( self ) : <nl> for dtype in _DTYPES : <nl> - with self . test_session ( ) : <nl> + with self . cached_session ( ) : <nl> image = constant_op . constant ( <nl> [ [ 1 , 1 , 1 , 0 ] , <nl> [ 1 , 0 , 0 , 0 ] , <nl> def test_compose ( self ) : <nl> <nl> def test_extreme_projective_transform ( self ) : <nl> for dtype in _DTYPES : <nl> - with self . test_session ( ) : <nl> + with self . cached_session ( ) : <nl> image = constant_op . 
constant ( <nl> [ [ 1 , 0 , 1 , 0 ] , <nl> [ 0 , 1 , 0 , 1 ] , <nl> def test_extreme_projective_transform ( self ) : <nl> [ 0 , 0 , 0 , 0 ] ] ) <nl> <nl> def test_bilinear ( self ) : <nl> - with self . test_session ( ) : <nl> + with self . cached_session ( ) : <nl> image = constant_op . constant ( <nl> [ [ 0 , 0 , 0 , 0 , 0 ] , <nl> [ 0 , 1 , 1 , 1 , 0 ] , <nl> def test_bilinear ( self ) : <nl> [ 0 , 0 , 1 , 0 , 0 ] ] ) <nl> <nl> def test_bilinear_uint8 ( self ) : <nl> - with self . test_session ( ) : <nl> + with self . cached_session ( ) : <nl> image = constant_op . constant ( <nl> np . asarray ( <nl> [ [ 0 . 0 , 0 . 0 , 0 . 0 , 0 . 0 , 0 . 0 ] , <nl> def test_transform_static_output_shape ( self ) : <nl> self . assertAllEqual ( [ 3 , 5 ] , result . get_shape ( ) ) <nl> <nl> def _test_grad ( self , shape_to_test ) : <nl> - with self . test_session ( ) : <nl> + with self . cached_session ( ) : <nl> test_image_shape = shape_to_test <nl> test_image = np . random . randn ( * test_image_shape ) <nl> test_image_tensor = constant_op . constant ( <nl> def _test_grad ( self , shape_to_test ) : <nl> self . assertLess ( left_err , 1e - 10 ) <nl> <nl> def _test_grad_different_shape ( self , input_shape , output_shape ) : <nl> - with self . test_session ( ) : <nl> + with self . cached_session ( ) : <nl> test_image_shape = input_shape <nl> test_image = np . random . randn ( * test_image_shape ) <nl> test_image_tensor = constant_op . constant ( <nl> def _BipartiteMatchTest ( self , distance_mat , distance_mat_shape , <nl> expected_col_to_row_match_np = np . array ( expected_col_to_row_match , <nl> dtype = np . int32 ) <nl> <nl> - with self . test_session ( ) : <nl> + with self . cached_session ( ) : <nl> distance_mat_tf = constant_op . constant ( distance_mat_np , <nl> shape = distance_mat_shape ) <nl> location_to_prior , prior_to_location = image_ops . bipartite_match ( <nl> mmm a / tensorflow / contrib / image / python / kernel_tests / interpolate_spline_test . py <nl> ppp b / tensorflow / contrib / image / python / kernel_tests / interpolate_spline_test . py <nl> def test_1d_linear_interpolation ( self ) : <nl> with ops . name_scope ( ' interpolator ' ) : <nl> interpolator = interpolate_spline . interpolate_spline ( <nl> train_points , train_values , query_points , interpolation_order ) <nl> - with self . test_session ( ) as sess : <nl> + with self . cached_session ( ) as sess : <nl> fetches = [ query_points , train_points , train_values , interpolator ] <nl> query_points_ , train_points_ , train_values_ , interp_ = sess . run ( fetches ) <nl> <nl> def test_1d_interpolation ( self ) : <nl> <nl> target_interpolation = tp . HARDCODED_QUERY_VALUES [ ( order , reg_weight ) ] <nl> target_interpolation = np . array ( target_interpolation ) <nl> - with self . test_session ( ) as sess : <nl> + with self . cached_session ( ) as sess : <nl> interp_val = sess . run ( interpolator ) <nl> self . assertAllClose ( interp_val [ 0 , : , 0 ] , target_interpolation ) <nl> <nl> def test_nd_linear_interpolation ( self ) : <nl> <nl> target_interpolation = tp . HARDCODED_QUERY_VALUES [ ( order , reg_weight ) ] <nl> target_interpolation = np . array ( target_interpolation ) <nl> - with self . test_session ( ) as sess : <nl> + with self . cached_session ( ) as sess : <nl> interp_val = sess . run ( interpolator ) <nl> self . assertAllClose ( interp_val [ 0 , : , 0 ] , target_interpolation ) <nl> <nl> def test_nd_linear_interpolation_unspecified_shape ( self ) : <nl> <nl> target_interpolation = tp . 
HARDCODED_QUERY_VALUES [ ( order , reg_weight ) ] <nl> target_interpolation = np . array ( target_interpolation ) <nl> - with self . test_session ( ) as sess : <nl> + with self . cached_session ( ) as sess : <nl> <nl> ( train_points_value , train_values_value , query_points_value ) = sess . run ( <nl> [ train_points , train_values , query_points ] ) <nl> def test_interpolation_gradient ( self ) : <nl> opt_func = optimizer . apply_gradients ( zip ( grad , [ train_points ] ) ) <nl> init_op = variables . global_variables_initializer ( ) <nl> <nl> - with self . test_session ( ) as sess : <nl> + with self . cached_session ( ) as sess : <nl> sess . run ( init_op ) <nl> for _ in range ( 100 ) : <nl> sess . run ( [ loss , opt_func ] ) <nl> mmm a / tensorflow / contrib / image / python / kernel_tests / segmentation_test . py <nl> ppp b / tensorflow / contrib / image / python / kernel_tests / segmentation_test . py <nl> def testDisconnected ( self ) : <nl> [ 7 , 0 , 8 , 0 , 0 , 0 , 9 , 0 , 0 ] , <nl> [ 0 , 0 , 0 , 0 , 10 , 0 , 0 , 0 , 0 ] , <nl> [ 0 , 0 , 11 , 0 , 0 , 0 , 0 , 0 , 0 ] ] ) # pyformat : disable <nl> - with self . test_session ( ) : <nl> + with self . cached_session ( ) : <nl> self . assertAllEqual ( image_ops . connected_components ( arr ) . eval ( ) , expected ) <nl> <nl> def testSimple ( self ) : <nl> arr = [ [ 0 , 1 , 0 ] , [ 1 , 1 , 1 ] , [ 0 , 1 , 0 ] ] <nl> - with self . test_session ( ) : <nl> + with self . cached_session ( ) : <nl> # Single component with id 1 . <nl> self . assertAllEqual ( <nl> image_ops . connected_components ( math_ops . cast ( <nl> arr , dtypes . bool ) ) . eval ( ) , arr ) <nl> <nl> def testSnake ( self ) : <nl> - with self . test_session ( ) : <nl> + with self . cached_session ( ) : <nl> # Single component with id 1 . <nl> self . assertAllEqual ( <nl> image_ops . connected_components ( math_ops . cast ( <nl> def testSnake ( self ) : <nl> def testSnake_disconnected ( self ) : <nl> for i in range ( SNAKE . shape [ 0 ] ) : <nl> for j in range ( SNAKE . shape [ 1 ] ) : <nl> - with self . test_session ( ) : <nl> + with self . cached_session ( ) : <nl> # If we disconnect any part of the snake except for the endpoints , <nl> # there will be 2 components . <nl> if SNAKE [ i , j ] and ( i , j ) not in [ ( 1 , 1 ) , ( 6 , 3 ) ] : <nl> def testMultipleImages ( self ) : <nl> [ 0 , 6 , 6 , 0 ] , <nl> [ 8 , 0 , 6 , 0 ] , <nl> [ 0 , 0 , 6 , 6 ] ] ] # pyformat : disable <nl> - with self . test_session ( ) : <nl> + with self . cached_session ( ) : <nl> self . assertAllEqual ( <nl> image_ops . connected_components ( math_ops . cast ( <nl> images , dtypes . bool ) ) . eval ( ) , expected ) <nl> <nl> def testZeros ( self ) : <nl> - with self . test_session ( ) : <nl> + with self . cached_session ( ) : <nl> self . assertAllEqual ( <nl> image_ops . connected_components ( <nl> array_ops . zeros ( ( 100 , 20 , 50 ) , dtypes . bool ) ) . eval ( ) , <nl> np . zeros ( ( 100 , 20 , 50 ) ) ) <nl> <nl> def testOnes ( self ) : <nl> - with self . test_session ( ) : <nl> + with self . cached_session ( ) : <nl> self . assertAllEqual ( <nl> image_ops . connected_components ( <nl> array_ops . ones ( ( 100 , 20 , 50 ) , dtypes . bool ) ) . eval ( ) , <nl> np . tile ( np . arange ( 100 ) [ : , None , None ] + 1 , [ 1 , 20 , 50 ] ) ) <nl> <nl> def testOnes_small ( self ) : <nl> - with self . test_session ( ) : <nl> + with self . cached_session ( ) : <nl> self . assertAllEqual ( <nl> image_ops . connected_components ( array_ops . ones ( ( 3 , 5 ) , <nl> dtypes . bool ) ) . 
eval ( ) , <nl> def testRandom_scipy ( self ) : <nl> expected = connected_components_reference_implementation ( images ) <nl> if expected is None : <nl> return <nl> - with self . test_session ( ) : <nl> + with self . cached_session ( ) : <nl> self . assertAllEqual ( <nl> image_ops . connected_components ( images ) . eval ( ) , expected ) <nl> <nl> mmm a / tensorflow / contrib / image / python / kernel_tests / single_image_random_dot_stereograms_ops_test . py <nl> ppp b / tensorflow / contrib / image / python / kernel_tests / single_image_random_dot_stereograms_ops_test . py <nl> def test_shape_function_default ( self ) : <nl> normalize = True ) <nl> shape_1 = sirds_1 . get_shape ( ) . as_list ( ) <nl> self . assertEqual ( shape_1 , [ 768 , 1024 , 1 ] ) <nl> - with self . test_session ( ) : <nl> + with self . cached_session ( ) : <nl> r_tf_1 = sirds_1 . eval ( ) <nl> self . assertAllEqual ( shape_1 , r_tf_1 . shape ) <nl> <nl> def test_shape_function_default ( self ) : <nl> normalize = True ) <nl> shape_2 = sirds_2 . get_shape ( ) . as_list ( ) <nl> self . assertEqual ( shape_2 , [ 768 , 1024 , 3 ] ) <nl> - with self . test_session ( ) : <nl> + with self . cached_session ( ) : <nl> r_tf_2 = sirds_2 . eval ( ) <nl> self . assertAllEqual ( shape_2 , r_tf_2 . shape ) <nl> <nl> def test_shape_function_default ( self ) : <nl> output_image_shape = [ 1200 , 800 , 1 ] ) <nl> shape_3 = sirds_3 . get_shape ( ) . as_list ( ) <nl> self . assertEqual ( shape_3 , [ 800 , 1200 , 1 ] ) <nl> - with self . test_session ( ) : <nl> + with self . cached_session ( ) : <nl> r_tf_3 = sirds_3 . eval ( ) <nl> self . assertAllEqual ( shape_3 , r_tf_3 . shape ) <nl> <nl> mmm a / tensorflow / contrib / image / python / kernel_tests / sparse_image_warp_test . py <nl> ppp b / tensorflow / contrib / image / python / kernel_tests / sparse_image_warp_test . py <nl> def assertZeroShift ( self , order , regularization , num_boundary_points ) : <nl> regularization_weight = regularization , <nl> num_boundary_points = num_boundary_points ) <nl> <nl> - with self . test_session ( ) as sess : <nl> + with self . cached_session ( ) as sess : <nl> warped_image , input_image , _ = sess . run ( <nl> [ warped_image_op , input_image_op , flow_field ] ) <nl> <nl> def assertMoveSinglePixel ( self , order , num_boundary_points , type_to_use ) : <nl> interpolation_order = order , <nl> num_boundary_points = num_boundary_points ) <nl> <nl> - with self . test_session ( ) as sess : <nl> + with self . cached_session ( ) as sess : <nl> warped_image , input_image , flow = sess . run ( <nl> [ warped_image_op , input_image_op , flow_field ] ) <nl> # Check that it moved the pixel correctly . <nl> def testSmileyFace ( self ) : <nl> test_data_dir = test . test_src_dir_path ( ' contrib / image / python / ' <nl> ' kernel_tests / test_data / ' ) <nl> input_file = test_data_dir + ' Yellow_Smiley_Face . png ' <nl> - with self . test_session ( ) as sess : <nl> + with self . cached_session ( ) as sess : <nl> input_image = self . load_image ( input_file , sess ) <nl> control_points = np . asarray ( [ [ 64 , 59 ] , [ 180 - 64 , 59 ] , [ 39 , 111 ] , <nl> [ 180 - 39 , 111 ] , [ 90 , 143 ] , [ 58 , 134 ] , <nl> def testSmileyFace ( self ) : <nl> control_points_op + control_point_displacements_op , <nl> interpolation_order = interpolation_order , <nl> num_boundary_points = num_boundary_points ) <nl> - with self . test_session ( ) as sess : <nl> + with self . cached_session ( ) as sess : <nl> warped_image = sess . run ( warp_op ) <nl> out_image = np . 
uint8 ( warped_image [ 0 , : , : , : ] * 255 ) <nl> target_file = ( <nl> def testThatBackpropRuns ( self ) : <nl> opt_func = optimizer . apply_gradients ( zip ( grad , [ image ] ) ) <nl> init_op = variables . global_variables_initializer ( ) <nl> <nl> - with self . test_session ( ) as sess : <nl> + with self . cached_session ( ) as sess : <nl> sess . run ( init_op ) <nl> for _ in range ( 5 ) : <nl> sess . run ( [ loss , opt_func ] ) <nl> mmm a / tensorflow / contrib / optimizer_v2 / adadelta_test . py <nl> ppp b / tensorflow / contrib / optimizer_v2 / adadelta_test . py <nl> def doTestBasic ( self , use_resource = False ) : <nl> for dtype in [ dtypes . half , dtypes . float32 ] : <nl> for grad in [ 0 . 2 , 0 . 1 , 0 . 01 ] : <nl> for lr in [ 1 . 0 , 0 . 5 , 0 . 1 ] : <nl> - with self . test_session ( ) : <nl> + with self . cached_session ( ) : <nl> var0_init = [ 1 . 0 , 2 . 0 ] <nl> var1_init = [ 3 . 0 , 4 . 0 ] <nl> if use_resource : <nl> def testResourceBasic ( self ) : <nl> <nl> def testMinimizeSparseResourceVariable ( self ) : <nl> for dtype in [ dtypes . half , dtypes . float32 , dtypes . float64 ] : <nl> - with self . test_session ( ) : <nl> + with self . cached_session ( ) : <nl> var0 = resource_variable_ops . ResourceVariable ( [ [ 1 . 0 , 2 . 0 ] ] , dtype = dtype ) <nl> x = constant_op . constant ( [ [ 4 . 0 ] , [ 5 . 0 ] ] , dtype = dtype ) <nl> pred = math_ops . matmul ( embedding_ops . embedding_lookup ( [ var0 ] , [ 0 ] ) , x ) <nl> mmm a / tensorflow / contrib / optimizer_v2 / adagrad_test . py <nl> ppp b / tensorflow / contrib / optimizer_v2 / adagrad_test . py <nl> class AdagradOptimizerTest ( test . TestCase ) : <nl> <nl> def doTestBasic ( self , use_locking = False , use_resource = False ) : <nl> for dtype in [ dtypes . half , dtypes . float32 , dtypes . float64 ] : <nl> - with self . test_session ( ) : <nl> + with self . cached_session ( ) : <nl> if use_resource : <nl> var0 = resource_variable_ops . ResourceVariable ( [ 1 . 0 , 2 . 0 ] , dtype = dtype ) <nl> var1 = resource_variable_ops . ResourceVariable ( [ 3 . 0 , 4 . 0 ] , dtype = dtype ) <nl> def testBasicLocked ( self ) : <nl> <nl> def testMinimizeSparseResourceVariable ( self ) : <nl> for dtype in [ dtypes . half , dtypes . float32 , dtypes . float64 ] : <nl> - with self . test_session ( ) : <nl> + with self . cached_session ( ) : <nl> var0 = resource_variable_ops . ResourceVariable ( <nl> [ [ 1 . 0 , 2 . 0 ] , [ 3 . 0 , 4 . 0 ] ] , dtype = dtype ) <nl> x = constant_op . constant ( [ [ 4 . 0 ] , [ 5 . 0 ] ] , dtype = dtype ) <nl> def testMinimizeSparseResourceVariable ( self ) : <nl> <nl> def testTensorLearningRate ( self ) : <nl> for dtype in [ dtypes . half , dtypes . float32 , dtypes . float64 ] : <nl> - with self . test_session ( ) : <nl> + with self . cached_session ( ) : <nl> var0 = variables . Variable ( [ 1 . 0 , 2 . 0 ] , dtype = dtype ) <nl> var1 = variables . Variable ( [ 3 . 0 , 4 . 0 ] , dtype = dtype ) <nl> grads0 = constant_op . constant ( [ 0 . 1 , 0 . 1 ] , dtype = dtype ) <nl> def testTensorLearningRate ( self ) : <nl> <nl> def testSparseBasic ( self ) : <nl> for dtype in [ dtypes . half , dtypes . float32 , dtypes . float64 ] : <nl> - with self . test_session ( ) : <nl> + with self . cached_session ( ) : <nl> var0 = variables . Variable ( [ [ 1 . 0 ] , [ 2 . 0 ] ] , dtype = dtype ) <nl> var1 = variables . Variable ( [ [ 3 . 0 ] , [ 4 . 0 ] ] , dtype = dtype ) <nl> grads0 = ops . 
IndexedSlices ( <nl> def testSparseBasic ( self ) : <nl> <nl> def testSparseRepeatedIndices ( self ) : <nl> for dtype in [ dtypes . half , dtypes . float32 , dtypes . float64 ] : <nl> - with self . test_session ( ) : <nl> + with self . cached_session ( ) : <nl> repeated_index_update_var = variables . Variable ( <nl> [ [ 1 . 0 ] , [ 2 . 0 ] ] , dtype = dtype ) <nl> aggregated_update_var = variables . Variable ( <nl> def testSparseRepeatedIndices ( self ) : <nl> <nl> def testSparseRepeatedIndicesResourceVariable ( self ) : <nl> for dtype in [ dtypes . half , dtypes . float32 , dtypes . float64 ] : <nl> - with self . test_session ( ) : <nl> + with self . cached_session ( ) : <nl> var_repeated = resource_variable_ops . ResourceVariable ( <nl> [ 1 . 0 , 2 . 0 ] , dtype = dtype ) <nl> loss_repeated = math_ops . reduce_sum ( <nl> def testSparseRepeatedIndicesResourceVariable ( self ) : <nl> <nl> def testSparseStability ( self ) : <nl> for dtype in [ dtypes . half , dtypes . float32 , dtypes . float64 ] : <nl> - with self . test_session ( ) : <nl> + with self . cached_session ( ) : <nl> shape = [ 1 , 6 ] <nl> var0 = variables . Variable ( <nl> [ [ <nl> def testSparseStability ( self ) : <nl> <nl> def testSharing ( self ) : <nl> for dtype in [ dtypes . half , dtypes . float32 , dtypes . float64 ] : <nl> - with self . test_session ( ) : <nl> + with self . cached_session ( ) : <nl> var0 = variables . Variable ( [ 1 . 0 , 2 . 0 ] , dtype = dtype ) <nl> var1 = variables . Variable ( [ 3 . 0 , 4 . 0 ] , dtype = dtype ) <nl> grads0 = constant_op . constant ( [ 0 . 1 , 0 . 1 ] , dtype = dtype ) <nl> def testSharing ( self ) : <nl> np . array ( [ 2 . 715679168701172 , 3 . 715679168701172 ] ) , var1 . eval ( ) ) <nl> <nl> def testDynamicShapeVariable_Ok ( self ) : <nl> - with self . test_session ( ) : <nl> + with self . cached_session ( ) : <nl> v = variable_scope . get_variable ( " v " , initializer = constant_op . constant ( 1 . ) , <nl> validate_shape = False ) <nl> self . assertFalse ( v . shape . is_fully_defined ( ) ) <nl> mmm a / tensorflow / contrib / optimizer_v2 / adam_test . py <nl> ppp b / tensorflow / contrib / optimizer_v2 / adam_test . py <nl> class AdamOptimizerTest ( test . TestCase ) : <nl> <nl> def doTestSparse ( self , use_resource = False ) : <nl> for dtype in [ dtypes . half , dtypes . float32 , dtypes . float64 ] : <nl> - with self . test_session ( ) : <nl> + with self . cached_session ( ) : <nl> # Initialize variables for numpy implementation . <nl> m0 , v0 , m1 , v1 = 0 . 0 , 0 . 0 , 0 . 0 , 0 . 0 <nl> var0_np = np . array ( [ 1 . 0 , 2 . 0 ] , dtype = dtype . as_numpy_dtype ) <nl> def testSparseDevicePlacement ( self ) : <nl> <nl> def testSparseRepeatedIndices ( self ) : <nl> for dtype in [ dtypes . half , dtypes . float32 , dtypes . float64 ] : <nl> - with self . test_session ( ) : <nl> + with self . cached_session ( ) : <nl> repeated_index_update_var = variables . Variable ( <nl> [ [ 1 . 0 ] , [ 2 . 0 ] ] , dtype = dtype ) <nl> aggregated_update_var = variables . Variable ( <nl> def doTestBasic ( self , use_resource = False ) : <nl> opt . get_slot ( var = var0 , name = " m " ) . name ) <nl> <nl> def testBasic ( self ) : <nl> - with self . test_session ( ) : <nl> + with self . cached_session ( ) : <nl> self . doTestBasic ( use_resource = False ) <nl> <nl> @ test_util . run_in_graph_and_eager_modes ( reset_test = True ) <nl> def testResourceBasic ( self ) : <nl> <nl> def testTensorLearningRate ( self ) : <nl> for dtype in [ dtypes . half , dtypes . float32 , dtypes . 
float64 ] : <nl> - with self . test_session ( ) : <nl> + with self . cached_session ( ) : <nl> # Initialize variables for numpy implementation . <nl> m0 , v0 , m1 , v1 = 0 . 0 , 0 . 0 , 0 . 0 , 0 . 0 <nl> var0_np = np . array ( [ 1 . 0 , 2 . 0 ] , dtype = dtype . as_numpy_dtype ) <nl> def testTensorLearningRate ( self ) : <nl> <nl> def testSharing ( self ) : <nl> for dtype in [ dtypes . half , dtypes . float32 , dtypes . float64 ] : <nl> - with self . test_session ( ) : <nl> + with self . cached_session ( ) : <nl> # Initialize variables for numpy implementation . <nl> m0 , v0 , m1 , v1 = 0 . 0 , 0 . 0 , 0 . 0 , 0 . 0 <nl> var0_np = np . array ( [ 1 . 0 , 2 . 0 ] , dtype = dtype . as_numpy_dtype ) <nl> mmm a / tensorflow / contrib / optimizer_v2 / gradient_descent_test . py <nl> ppp b / tensorflow / contrib / optimizer_v2 / gradient_descent_test . py <nl> class GradientDescentOptimizerTest ( test . TestCase ) : <nl> <nl> def testBasic ( self ) : <nl> for dtype in [ dtypes . half , dtypes . float32 , dtypes . float64 ] : <nl> - with self . test_session ( ) : <nl> + with self . cached_session ( ) : <nl> var0 = variables . Variable ( [ 1 . 0 , 2 . 0 ] , dtype = dtype ) <nl> var1 = variables . Variable ( [ 3 . 0 , 4 . 0 ] , dtype = dtype ) <nl> grads0 = constant_op . constant ( [ 0 . 1 , 0 . 1 ] , dtype = dtype ) <nl> def testBasic ( self ) : <nl> <nl> def testBasicResourceVariable ( self ) : <nl> for dtype in [ dtypes . half , dtypes . float32 , dtypes . float64 ] : <nl> - with self . test_session ( ) : <nl> + with self . cached_session ( ) : <nl> var0 = resource_variable_ops . ResourceVariable ( [ 1 . 0 , 2 . 0 ] , dtype = dtype ) <nl> var1 = resource_variable_ops . ResourceVariable ( [ 3 . 0 , 4 . 0 ] , dtype = dtype ) <nl> grads0 = constant_op . constant ( [ 0 . 1 , 0 . 1 ] , dtype = dtype ) <nl> def testBasicResourceVariable ( self ) : <nl> <nl> def testMinimizeResourceVariable ( self ) : <nl> for dtype in [ dtypes . half , dtypes . float32 , dtypes . float64 ] : <nl> - with self . test_session ( ) : <nl> + with self . cached_session ( ) : <nl> var0 = resource_variable_ops . ResourceVariable ( [ [ 1 . 0 , 2 . 0 ] ] , dtype = dtype ) <nl> var1 = resource_variable_ops . ResourceVariable ( [ 3 . 0 ] , dtype = dtype ) <nl> x = constant_op . constant ( [ [ 4 . 0 ] , [ 5 . 0 ] ] , dtype = dtype ) <nl> def testMinimizeResourceVariable ( self ) : <nl> <nl> def testMinimizeSparseResourceVariable ( self ) : <nl> for dtype in [ dtypes . half , dtypes . float32 , dtypes . float64 ] : <nl> - with self . test_session ( ) : <nl> + with self . cached_session ( ) : <nl> var0 = resource_variable_ops . ResourceVariable ( [ [ 1 . 0 , 2 . 0 ] ] , dtype = dtype ) <nl> var1 = resource_variable_ops . ResourceVariable ( [ 3 . 0 ] , dtype = dtype ) <nl> x = constant_op . constant ( [ [ 4 . 0 ] , [ 5 . 0 ] ] , dtype = dtype ) <nl> def testMinimizeSparseResourceVariable ( self ) : <nl> <nl> def testTensorLearningRate ( self ) : <nl> for dtype in [ dtypes . half , dtypes . float32 , dtypes . float64 ] : <nl> - with self . test_session ( ) : <nl> + with self . cached_session ( ) : <nl> var0 = variables . Variable ( [ 1 . 0 , 2 . 0 ] , dtype = dtype ) <nl> var1 = variables . Variable ( [ 3 . 0 , 4 . 0 ] , dtype = dtype ) <nl> grads0 = constant_op . constant ( [ 0 . 1 , 0 . 1 ] , dtype = dtype ) <nl> def testTensorLearningRate ( self ) : <nl> <nl> def testGradWrtRef ( self ) : <nl> for dtype in [ dtypes . half , dtypes . float32 , dtypes . float64 ] : <nl> - with self . test_session ( ) : <nl> + with self . 
cached_session ( ) : <nl> opt = gradient_descent . GradientDescentOptimizer ( 3 . 0 ) <nl> values = [ 1 . 0 , 3 . 0 ] <nl> vars_ = [ variables . Variable ( [ v ] , dtype = dtype ) for v in values ] <nl> def testGradWrtRef ( self ) : <nl> <nl> def testWithGlobalStep ( self ) : <nl> for dtype in [ dtypes . half , dtypes . float32 , dtypes . float64 ] : <nl> - with self . test_session ( ) : <nl> + with self . cached_session ( ) : <nl> global_step = variables . Variable ( 0 , trainable = False ) <nl> var0 = variables . Variable ( [ 1 . 0 , 2 . 0 ] , dtype = dtype ) <nl> var1 = variables . Variable ( [ 3 . 0 , 4 . 0 ] , dtype = dtype ) <nl> def testWithGlobalStep ( self ) : <nl> <nl> def testSparseBasic ( self ) : <nl> for dtype in [ dtypes . half , dtypes . float32 , dtypes . float64 ] : <nl> - with self . test_session ( ) : <nl> + with self . cached_session ( ) : <nl> var0 = variables . Variable ( [ [ 1 . 0 ] , [ 2 . 0 ] ] , dtype = dtype ) <nl> var1 = variables . Variable ( [ [ 3 . 0 ] , [ 4 . 0 ] ] , dtype = dtype ) <nl> grads0 = ops . IndexedSlices ( <nl> mmm a / tensorflow / contrib / optimizer_v2 / momentum_test . py <nl> ppp b / tensorflow / contrib / optimizer_v2 / momentum_test . py <nl> def doTestBasic ( self , use_resource = False , use_callable_params = False ) : <nl> ] ) , self . evaluate ( var1 ) ) <nl> <nl> def testBasic ( self ) : <nl> - with self . test_session ( ) : <nl> + with self . cached_session ( ) : <nl> self . doTestBasic ( use_resource = False ) <nl> <nl> @ test_util . run_in_graph_and_eager_modes ( reset_test = True ) <nl> def testVariablesAcrossGraphs ( self ) : <nl> <nl> def testNesterovMomentum ( self ) : <nl> for dtype in [ dtypes . float32 , dtypes . float64 ] : <nl> - with self . test_session ( ) : <nl> + with self . cached_session ( ) : <nl> var0 = variables . Variable ( [ 1 . 0 , 2 . 0 ] , dtype = dtype ) <nl> var1 = variables . Variable ( [ 3 . 0 , 4 . 0 ] , dtype = dtype ) <nl> var0_np = np . array ( [ 1 . 0 , 2 . 0 ] , dtype = dtype . as_numpy_dtype ) <nl> def testNesterovMomentum ( self ) : <nl> <nl> def testSparseNesterovMomentum ( self ) : <nl> for dtype in [ dtypes . float32 , dtypes . float64 ] : <nl> - with self . test_session ( ) : <nl> + with self . cached_session ( ) : <nl> var0_np = np . array ( [ 1 . 0 , 2 . 0 ] , dtype = dtype . as_numpy_dtype ) <nl> var1_np = np . array ( [ 3 . 0 , 4 . 0 ] , dtype = dtype . as_numpy_dtype ) <nl> accum0_np = np . array ( [ 0 . 0 , 0 . 0 ] , dtype = dtype . as_numpy_dtype ) <nl> def loss ( ) : <nl> <nl> def testTensorLearningRateAndMomentum ( self ) : <nl> for dtype in [ dtypes . half , dtypes . float32 , dtypes . float64 ] : <nl> - with self . test_session ( ) : <nl> + with self . cached_session ( ) : <nl> var0 = variables . Variable ( [ 1 . 0 , 2 . 0 ] , dtype = dtype ) <nl> var1 = variables . Variable ( [ 3 . 0 , 4 . 0 ] , dtype = dtype ) <nl> grads0 = constant_op . constant ( [ 0 . 1 , 0 . 1 ] , dtype = dtype ) <nl> def _dbParamsMom01 ( self ) : <nl> return db_grad , db_out <nl> <nl> def testLikeDistBeliefMom01 ( self ) : <nl> - with self . test_session ( ) : <nl> + with self . cached_session ( ) : <nl> db_grad , db_out = self . _dbParamsMom01 ( ) <nl> num_samples = len ( db_grad ) <nl> var0 = variables . Variable ( [ 0 . 0 ] * num_samples ) <nl> def testLikeDistBeliefMom01 ( self ) : <nl> <nl> def testSparse ( self ) : <nl> for dtype in [ dtypes . half , dtypes . float32 , dtypes . float64 ] : <nl> - with self . test_session ( ) : <nl> + with self . cached_session ( ) : <nl> var0 = variables . 
Variable ( array_ops . zeros ( [ 4 , 2 ] , dtype = dtype ) ) <nl> var1 = variables . Variable ( constant_op . constant ( 1 . 0 , dtype , [ 4 , 2 ] ) ) <nl> grads0 = ops . IndexedSlices ( <nl> def testSparse ( self ) : <nl> <nl> def testSharing ( self ) : <nl> for dtype in [ dtypes . half , dtypes . float32 , dtypes . float64 ] : <nl> - with self . test_session ( ) : <nl> + with self . cached_session ( ) : <nl> var0 = variables . Variable ( [ 1 . 0 , 2 . 0 ] , dtype = dtype ) <nl> var1 = variables . Variable ( [ 3 . 0 , 4 . 0 ] , dtype = dtype ) <nl> grads0 = constant_op . constant ( [ 0 . 1 , 0 . 1 ] , dtype = dtype ) <nl> mmm a / tensorflow / contrib / optimizer_v2 / optimizer_v2_test . py <nl> ppp b / tensorflow / contrib / optimizer_v2 / optimizer_v2_test . py <nl> def loss ( ) : <nl> <nl> def testAggregationMethod ( self ) : <nl> for dtype in [ dtypes . half , dtypes . float32 , dtypes . float64 ] : <nl> - with self . test_session ( ) : <nl> + with self . cached_session ( ) : <nl> var0 = variables . Variable ( [ 1 . 0 , 2 . 0 ] , dtype = dtype ) <nl> var1 = variables . Variable ( [ 3 . 0 , 4 . 0 ] , dtype = dtype ) <nl> cost = 5 * var0 + 3 * var1 <nl> def testAggregationMethod ( self ) : <nl> <nl> def testPrecomputedGradient ( self ) : <nl> for dtype in [ dtypes . half , dtypes . float32 , dtypes . float64 ] : <nl> - with self . test_session ( ) : <nl> + with self . cached_session ( ) : <nl> var0 = variables . Variable ( [ 1 . 0 , 2 . 0 ] , dtype = dtype ) <nl> var1 = variables . Variable ( [ 3 . 0 , 4 . 0 ] , dtype = dtype ) <nl> cost = 5 * var0 + 3 * var1 <nl> def f ( ) : <nl> sgd_op . apply_gradients ( grads_and_vars ) <nl> <nl> def testTrainOp ( self ) : <nl> - with self . test_session ( ) : <nl> + with self . cached_session ( ) : <nl> var0 = variables . Variable ( [ 1 . 0 , 2 . 0 ] ) <nl> var1 = variables . Variable ( [ 3 . 0 , 4 . 0 ] ) <nl> cost = 5 * var0 + 3 * var1 <nl> def testTrainOp ( self ) : <nl> def testConstraint ( self ) : <nl> constraint_01 = lambda x : clip_ops . clip_by_value ( x , - 0 . 1 , 0 . ) <nl> constraint_0 = lambda x : clip_ops . clip_by_value ( x , 0 . , 1 . ) <nl> - with self . test_session ( ) : <nl> + with self . cached_session ( ) : <nl> var0 = variables . Variable ( [ 1 . 0 , 2 . 0 ] , <nl> constraint = constraint_01 ) <nl> var1 = variables . Variable ( [ 3 . 0 , 4 . 0 ] , <nl> def testConstraint ( self ) : <nl> self . assertAllClose ( [ 0 . , 0 . ] , var1 . eval ( ) ) <nl> <nl> def testStopGradients ( self ) : <nl> - with self . test_session ( ) : <nl> + with self . cached_session ( ) : <nl> var0 = variables . Variable ( [ 1 . 0 , 2 . 0 ] , name = ' var0 ' ) <nl> var1 = variables . Variable ( [ 3 . 0 , 4 . 0 ] , name = ' var1 ' ) <nl> var0_id = array_ops . identity ( var0 ) <nl> mmm a / tensorflow / contrib / optimizer_v2 / rmsprop_test . py <nl> ppp b / tensorflow / contrib / optimizer_v2 / rmsprop_test . py <nl> def testDense ( self , dtype , param_value ) : <nl> <nl> @ parameterized . parameters ( [ dtypes . float32 , dtypes . float64 ] ) <nl> def testMinimizeSparseResourceVariable ( self , dtype ) : <nl> - with self . test_session ( ) : <nl> + with self . cached_session ( ) : <nl> var0 = resource_variable_ops . ResourceVariable ( [ [ 1 . 0 , 2 . 0 ] ] , dtype = dtype ) <nl> x = constant_op . constant ( [ [ 4 . 0 ] , [ 5 . 0 ] ] , dtype = dtype ) <nl> pred = math_ops . matmul ( embedding_ops . embedding_lookup ( [ var0 ] , [ 0 ] ) , x ) <nl> def testMinimizeSparseResourceVariable ( self , dtype ) : <nl> <nl> @ parameterized . 
parameters ( [ dtypes . float32 , dtypes . float64 ] ) <nl> def testMinimizeSparseResourceVariableCentered ( self , dtype ) : <nl> - with self . test_session ( ) : <nl> + with self . cached_session ( ) : <nl> var0 = resource_variable_ops . ResourceVariable ( [ [ 1 . 0 , 2 . 0 ] ] , dtype = dtype ) <nl> x = constant_op . constant ( [ [ 4 . 0 ] , [ 5 . 0 ] ] , dtype = dtype ) <nl> pred = math_ops . matmul ( embedding_ops . embedding_lookup ( [ var0 ] , [ 0 ] ) , x ) <nl> | Move from deprecated self . test_session ( ) to self . cached_session ( ) . | tensorflow/tensorflow | ba9501e0a6c457a0bb051760bf9312d31c6211bf | 2018-08-22T03:03:09Z |
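The tensorflow/tensorflow commit above is a mechanical rename: every `with self.test_session():` in the optimizer_v2 tests becomes `with self.cached_session():`. A minimal sketch of the pattern, assuming a TF 1.x-era test environment like the one these tests targeted (the class name, values, and assertion are illustrative, not taken from the diff):

```python
import tensorflow as tf

class ScaleTest(tf.test.TestCase):
    def testScale(self):
        # cached_session() reuses a single session across calls within the
        # same test method instead of constructing a fresh one each time,
        # which is why it replaced the deprecated test_session().
        with self.cached_session():
            x = tf.constant([1.0, 2.0])
            self.assertAllClose([2.0, 4.0], self.evaluate(2.0 * x))

if __name__ == "__main__":
    tf.test.main()
```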
mmm a / src / share / event_queue . hpp <nl> ppp b / src / share / event_queue . hpp <nl> class event_queue final { <nl> original_event_ ( original_event ) { <nl> } <nl> <nl> + nlohmann : : json to_json ( void ) const { <nl> + return nlohmann : : json ( { <nl> + { " device_id " , static_cast < uint32_t > ( device_id_ ) } , <nl> + { " time_stamp " , time_stamp_ } , <nl> + { " valid " , valid_ } , <nl> + { " lazy " , lazy_ } , <nl> + { " event " , event_ } , <nl> + { " event_type " , event_type_ } , <nl> + { " original_event " , original_event_ } , <nl> + } ) ; <nl> + } <nl> + <nl> device_id get_device_id ( void ) const { <nl> return device_id_ ; <nl> } <nl> inline std : : ostream & operator < < ( std : : ostream & stream , const event_queue : : queued_ <nl> return stream ; <nl> } <nl> <nl> + inline void to_json ( nlohmann : : json & json , const event_queue : : queued_event : : event & value ) { <nl> + json = value . to_json ( ) ; <nl> + } <nl> + <nl> + inline void to_json ( nlohmann : : json & json , const event_queue : : queued_event & value ) { <nl> + json = value . to_json ( ) ; <nl> + } <nl> } / / namespace krbn <nl> mmm a / src / share / types . hpp <nl> ppp b / src / share / types . hpp <nl> inline std : : ostream & operator < < ( std : : ostream & stream , const container < input_sour <nl> return stream ; <nl> } <nl> <nl> + inline void to_json ( nlohmann : : json & json , const event_type & value ) { <nl> + switch ( value ) { <nl> + case event_type : : key_down : <nl> + json = " key_down " ; <nl> + break ; <nl> + case event_type : : key_up : <nl> + json = " key_up " ; <nl> + break ; <nl> + case event_type : : single : <nl> + json = " single " ; <nl> + break ; <nl> + } <nl> + } <nl> + <nl> inline void to_json ( nlohmann : : json & json , const device_identifiers & identifiers ) { <nl> json = identifiers . to_json ( ) ; <nl> } <nl> | add event_queue : : queued_event : : to_json | pqrs-org/Karabiner-Elements | 2f26f367f34a4602bcdf40003adf42babee08002 | 2017-10-29T06:20:13Z |
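The Karabiner-Elements commit pairs a member `to_json(void) const` on `queued_event` with free `to_json(nlohmann::json&, const T&)` overloads, which is how nlohmann::json lets `json = value` work for user-defined types. A rough, runnable analogue of that pattern in Python (clearly not the project's C++; `QueuedEvent` and its fields are hypothetical stand-ins):

```python
import json

class QueuedEvent:
    # Hypothetical stand-in for krbn's queued_event: the type itself
    # knows how to render its fields, mirroring the member to_json().
    def __init__(self, device_id, time_stamp, lazy):
        self.device_id = device_id
        self.time_stamp = time_stamp
        self.lazy = lazy

    def to_json(self):
        return {"device_id": self.device_id,
                "time_stamp": self.time_stamp,
                "lazy": self.lazy}

# json.dumps consults `default` for objects it cannot serialize natively,
# playing the role of the free to_json overloads in the C++ diff.
print(json.dumps(QueuedEvent(1, 1234, False), default=lambda o: o.to_json()))
```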
new file mode 100644 <nl> index 0000000000 . . 97777fa36e <nl> mmm / dev / null <nl> ppp b / change / react - native - windows - 2020 - 01 - 21 - 11 - 30 - 55 - stubs . json <nl> <nl> + { <nl> + " type " : " prerelease " , <nl> + " comment " : " Remove OSS_RN Specialization in unistd . h Stub " , <nl> + " packageName " : " react - native - windows " , <nl> + " email " : " nick @ nickgerleman . com " , <nl> + " commit " : " e77ad6640e5c76b31acc508fa8ed2a6c75640085 " , <nl> + " dependentChangeType " : " patch " , <nl> + " date " : " 2020 - 01 - 21T19 : 30 : 55 . 735Z " <nl> + } <nl> \ No newline at end of file <nl> mmm a / vnext / stubs / unistd . h <nl> ppp b / vnext / stubs / unistd . h <nl> @ @ - 1 , 6 + 1 @ @ <nl> / / react - native includes unistd . h , which isn ' t available in windows . <nl> - / / This shouldn ' t be needed after <nl> - / / https : / / github . com / facebook / react - native / pull / 25107 is merged <nl> - # if ! defined ( OSS_RN ) <nl> - # error This stub should not be used unless building against the non - microsoft version of react - native <nl> - # endif <nl> \ No newline at end of file <nl> | Remove OSS_RN Specialization in unistd . h Stub ( ) | microsoft/react-native-windows | 5c692f84abf4662cc7daa8394e618cd5afcc645d | 2020-01-22T03:11:51Z |
mmm a / utils / gyb . py <nl> ppp b / utils / gyb . py <nl> def expand ( filename , line_directive = _default_line_directive , * * local_bindings ) : <nl> > > > # manually handle closing and deleting this file to allow us to open <nl> > > > # the file by its name across all platforms . <nl> > > > f = NamedTemporaryFile ( delete = False ) <nl> - > > > f . write ( <nl> - . . . r ' ' ' mmm <nl> + > > > _ = f . write ( <nl> + . . . br ' ' ' mmm <nl> . . . % for i in range ( int ( x ) ) : <nl> . . . a pox on $ { i } for epoxy <nl> . . . % end <nl> | validation : fix nosetest in gyb for Python 3 | apple/swift | fedf81f5445da85374438fe453259d5be7a34066 | 2020-07-13T20:43:23Z |
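Both edits in the apple/swift fix are Python 3 requirements for a doctest inside gyb.py: `NamedTemporaryFile` opens its file in binary mode, so the raw string must become a bytes literal (`br'''`), and `write()` returns the byte count rather than Python 2's None; a doctest echoes any non-None expression value as output, so the return value has to be discarded with `_ =`. A small sketch of both behaviors (the payload string is illustrative):

```python
from tempfile import NamedTemporaryFile

f = NamedTemporaryFile(delete=False)  # binary mode: expects bytes, not str
n = f.write(b"a pox on epoxy\n")      # Python 3: returns the byte count
print(n)                              # prints 15; Python 2's write()
f.close()                             # returned None, which a doctest
                                      # silently ignores
```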
mmm a / scene / 2d / light_2d . cpp <nl> ppp b / scene / 2d / light_2d . cpp <nl> void Light2D : : _bind_methods ( ) { <nl> ADD_PROPERTY ( PropertyInfo ( Variant : : BOOL , " editor_only " ) , " set_editor_only " , " is_editor_only " ) ; <nl> ADD_PROPERTY ( PropertyInfo ( Variant : : OBJECT , " texture " , PROPERTY_HINT_RESOURCE_TYPE , " Texture " ) , " set_texture " , " get_texture " ) ; <nl> ADD_PROPERTY ( PropertyInfo ( Variant : : VECTOR2 , " offset " ) , " set_texture_offset " , " get_texture_offset " ) ; <nl> - ADD_PROPERTY ( PropertyInfo ( Variant : : REAL , " scale " , PROPERTY_HINT_RANGE , " 0 . 01 , 50 , 0 . 01 " ) , " set_texture_scale " , " get_texture_scale " ) ; <nl> + ADD_PROPERTY ( PropertyInfo ( Variant : : REAL , " texture_scale " , PROPERTY_HINT_RANGE , " 0 . 01 , 50 , 0 . 01 " ) , " set_texture_scale " , " get_texture_scale " ) ; <nl> ADD_PROPERTY ( PropertyInfo ( Variant : : COLOR , " color " ) , " set_color " , " get_color " ) ; <nl> ADD_PROPERTY ( PropertyInfo ( Variant : : REAL , " energy " , PROPERTY_HINT_RANGE , " 0 . 01 , 100 , 0 . 01 " ) , " set_energy " , " get_energy " ) ; <nl> ADD_PROPERTY ( PropertyInfo ( Variant : : INT , " mode " , PROPERTY_HINT_ENUM , " Add , Sub , Mix , Mask " ) , " set_mode " , " get_mode " ) ; <nl> | Merge pull request from bojidar - bg / x - fix - scale - conflict | godotengine/godot | 7fc3c51169957f0907a13dd07d4004496e6ebd56 | 2017-08-14T20:16:39Z |
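The godotengine/godot one-liner renames Light2D's exposed texture-zoom property from "scale" to "texture_scale", because the old name collided with the `scale` every Node2D already inherits. A toy Python illustration of that shadowing (Godot's real binding is the C++ `ADD_PROPERTY` call in the diff; these classes are stand-ins):

```python
class Node2D:
    def __init__(self):
        self.scale = 1.0  # the transform scale every Node2D carries

class Light2D(Node2D):
    def __init__(self):
        super().__init__()
        # Exposing the texture zoom as "scale" would clobber the inherited
        # transform property above; "texture_scale" keeps them independent.
        self.texture_scale = 1.0

light = Light2D()
light.scale = 2.0          # transform scale
light.texture_scale = 0.5  # texture zoom, no longer ambiguous
print(light.scale, light.texture_scale)  # 2.0 0.5
```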
deleted file mode 100644 <nl> index 51769745b2c3 . . 000000000000 <nl> mmm a / cocos / platform / android / ControllerAutoAdapter / . classpath <nl> ppp / dev / null <nl> <nl> - < ? xml version = " 1 . 0 " encoding = " UTF - 8 " ? > <nl> - < classpath > <nl> - < classpathentry kind = " con " path = " com . android . ide . eclipse . adt . ANDROID_FRAMEWORK " / > <nl> - < classpathentry exported = " true " kind = " con " path = " com . android . ide . eclipse . adt . LIBRARIES " / > <nl> - < classpathentry exported = " true " kind = " con " path = " com . android . ide . eclipse . adt . DEPENDENCIES " / > <nl> - < classpathentry kind = " src " path = " src " / > <nl> - < classpathentry kind = " src " path = " gen " / > <nl> - < classpathentry kind = " output " path = " bin / classes " / > <nl> - < / classpath > <nl> deleted file mode 100644 <nl> index c49a03793999 . . 000000000000 <nl> mmm a / cocos / platform / android / ControllerAutoAdapter / . project <nl> ppp / dev / null <nl> <nl> - < ? xml version = " 1 . 0 " encoding = " UTF - 8 " ? > <nl> - < projectDescription > <nl> - < name > libControllerAutoAdapter < / name > <nl> - < comment > < / comment > <nl> - < projects > <nl> - < / projects > <nl> - < buildSpec > <nl> - < buildCommand > <nl> - < name > com . android . ide . eclipse . adt . ResourceManagerBuilder < / name > <nl> - < arguments > <nl> - < / arguments > <nl> - < / buildCommand > <nl> - < buildCommand > <nl> - < name > com . android . ide . eclipse . adt . PreCompilerBuilder < / name > <nl> - < arguments > <nl> - < / arguments > <nl> - < / buildCommand > <nl> - < buildCommand > <nl> - < name > org . eclipse . jdt . core . javabuilder < / name > <nl> - < arguments > <nl> - < / arguments > <nl> - < / buildCommand > <nl> - < buildCommand > <nl> - < name > com . android . ide . eclipse . adt . ApkBuilder < / name > <nl> - < arguments > <nl> - < / arguments > <nl> - < / buildCommand > <nl> - < / buildSpec > <nl> - < natures > <nl> - < nature > com . android . ide . eclipse . adt . AndroidNature < / nature > <nl> - < nature > org . eclipse . jdt . core . javanature < / nature > <nl> - < / natures > <nl> - < / projectDescription > <nl> deleted file mode 100644 <nl> index b080d2ddc88f . . 000000000000 <nl> mmm a / cocos / platform / android / ControllerAutoAdapter / . settings / org . eclipse . jdt . core . prefs <nl> ppp / dev / null <nl> <nl> - eclipse . preferences . version = 1 <nl> - org . eclipse . jdt . core . compiler . codegen . targetPlatform = 1 . 6 <nl> - org . eclipse . jdt . core . compiler . compliance = 1 . 6 <nl> - org . eclipse . jdt . core . compiler . source = 1 . 6 <nl> deleted file mode 100644 <nl> index d1f4a837d81d . . 000000000000 <nl> mmm a / cocos / platform / android / ControllerAutoAdapter / AndroidManifest . xml <nl> ppp / dev / null <nl> <nl> - < ? xml version = " 1 . 0 " encoding = " utf - 8 " ? > <nl> - < manifest xmlns : android = " http : / / schemas . android . com / apk / res / android " <nl> - package = " org . cocos2dx . lib " <nl> - android : versionCode = " 1 " <nl> - android : versionName = " 1 . 0 " > <nl> - <nl> - < uses - sdk android : minSdkVersion = " 10 " / > <nl> - <nl> - < / manifest > <nl> deleted file mode 100644 <nl> index b0971e891efd . . 000000000000 <nl> mmm a / cocos / platform / android / ControllerAutoAdapter / ant . properties <nl> ppp / dev / null <nl> <nl> - # This file is used to override default values used by the Ant build system . 
<nl> - # <nl> - # This file must be checked into Version Control Systems , as it is <nl> - # integral to the build system of your project . <nl> - <nl> - # This file is only used by the Ant script . <nl> - <nl> - # You can use this to override default values such as <nl> - # ' source . dir ' for the location of your java source folder and <nl> - # ' out . dir ' for the location of your output folder . <nl> - <nl> - # You can also use it define how the release builds are signed by declaring <nl> - # the following properties : <nl> - # ' key . store ' for the location of your keystore and <nl> - # ' key . alias ' for the name of the key to use . <nl> - # The password will be asked during the build when you use the ' release ' target . <nl> - <nl> deleted file mode 100644 <nl> index 413effd90c74 . . 000000000000 <nl> mmm a / cocos / platform / android / ControllerAutoAdapter / build . xml <nl> ppp / dev / null <nl> <nl> - < ? xml version = " 1 . 0 " encoding = " UTF - 8 " ? > <nl> - < project name = " androidControllerAutoAdapter " default = " help " > <nl> - <nl> - < ! - - The local . properties file is created and updated by the ' android ' tool . <nl> - It contains the path to the SDK . It should * NOT * be checked into <nl> - Version Control Systems . - - > <nl> - < property file = " local . properties " / > <nl> - <nl> - < ! - - The ant . properties file can be created by you . It is only edited by the <nl> - ' android ' tool to add properties to it . <nl> - This is the place to change some Ant specific build properties . <nl> - Here are some properties you may want to change / update : <nl> - <nl> - source . dir <nl> - The name of the source directory . Default is ' src ' . <nl> - out . dir <nl> - The name of the output directory . Default is ' bin ' . <nl> - <nl> - For other overridable properties , look at the beginning of the rules <nl> - files in the SDK , at tools / ant / build . xml <nl> - <nl> - Properties related to the SDK location or the project target should <nl> - be updated using the ' android ' tool with the ' update ' action . <nl> - <nl> - This file is an integral part of the build system for your <nl> - application and should be checked into Version Control Systems . <nl> - <nl> - - - > <nl> - < property file = " ant . properties " / > <nl> - <nl> - < ! - - The project . properties file is created and updated by the ' android ' <nl> - tool , as well as ADT . <nl> - <nl> - This contains project specific properties such as project target , and library <nl> - dependencies . Lower level build properties are stored in ant . properties <nl> - ( or in . classpath for Eclipse projects ) . <nl> - <nl> - This file is an integral part of the build system for your <nl> - application and should be checked into Version Control Systems . - - > <nl> - < loadproperties srcFile = " project . properties " / > <nl> - <nl> - < ! - - quick check on sdk . dir - - > <nl> - < fail <nl> - message = " sdk . dir is missing . Make sure to generate local . properties using ' android update project ' or to inject it through an env var " <nl> - unless = " sdk . dir " <nl> - / > <nl> - <nl> - < ! - - <nl> - Import per project custom build rules if present at the root of the project . <nl> - This is the place to put custom intermediary targets such as : <nl> - - pre - build <nl> - - pre - compile <nl> - - post - compile ( This is typically used for code obfuscation . <nl> - Compiled code location : $ { out . classes . absolute . dir } <nl> - If this is not done in place , override $ { out . dex . 
input . absolute . dir } ) <nl> - - post - package <nl> - - post - build <nl> - - pre - clean <nl> - - - > <nl> - < import file = " custom_rules . xml " optional = " true " / > <nl> - <nl> - < ! - - Import the actual build file . <nl> - <nl> - To customize existing targets , there are two options : <nl> - - Customize only one target : <nl> - - copy / paste the target into this file , * before * the <nl> - < import > task . <nl> - - customize it to your needs . <nl> - - Customize the whole content of build . xml <nl> - - copy / paste the content of the rules files ( minus the top node ) <nl> - into this file , replacing the < import > task . <nl> - - customize to your needs . <nl> - <nl> - * * * * * * * * * * * * * * * * * * * * * * * <nl> - * * * * * * IMPORTANT * * * * * * <nl> - * * * * * * * * * * * * * * * * * * * * * * * <nl> - In all cases you must update the value of version - tag below to read ' custom ' instead of an integer , <nl> - in order to avoid having your file be overridden by tools such as " android update project " <nl> - - - > <nl> - < ! - - version - tag : 1 - - > <nl> - < import file = " $ { sdk . dir } / tools / ant / build . xml " / > <nl> - <nl> - < / project > <nl> deleted file mode 100644 <nl> index 75af7015db5f . . 000000000000 <nl> Binary files a / cocos / platform / android / ControllerAutoAdapter / libs / android - async - http - 1 . 4 . 4 . jar and / dev / null differ <nl> deleted file mode 100644 <nl> index 55e8455701a4 . . 000000000000 <nl> mmm a / cocos / platform / android / ControllerAutoAdapter / lint . xml <nl> ppp / dev / null <nl> <nl> - < ? xml version = " 1 . 0 " encoding = " UTF - 8 " ? > <nl> - < lint > <nl> - < issue id = " NewApi " > <nl> - < ignore path = " src / org / cocos2dx / lib / GameControllerHelper . java " / > <nl> - < / issue > <nl> - < / lint > <nl> \ No newline at end of file <nl> deleted file mode 100644 <nl> index f2fe1559a217 . . 000000000000 <nl> mmm a / cocos / platform / android / ControllerAutoAdapter / proguard - project . txt <nl> ppp / dev / null <nl> <nl> - # To enable ProGuard in your project , edit project . properties <nl> - # to define the proguard . config property as described in that file . <nl> - # <nl> - # Add project specific ProGuard rules here . <nl> - # By default , the flags in this file are appended to flags specified <nl> - # in $ { sdk . dir } / tools / proguard / proguard - android . txt <nl> - # You can edit the include path and order by changing the ProGuard <nl> - # include property in project . properties . <nl> - # <nl> - # For more details , see <nl> - # http : / / developer . android . com / guide / developing / tools / proguard . html <nl> - <nl> - # Add any project specific keep options here : <nl> - <nl> - # If your project uses WebView with JS , uncomment the following <nl> - # and specify the fully qualified class name to the JavaScript interface <nl> - # class : <nl> - # - keepclassmembers class fqcn . of . javascript . interface . for . webview { <nl> - # public * ; <nl> - # } <nl> deleted file mode 100644 <nl> index 9990d6c11445 . . 000000000000 <nl> mmm a / cocos / platform / android / ControllerAutoAdapter / project . properties <nl> ppp / dev / null <nl> <nl> - # This file is automatically generated by Android Tools . <nl> - # Do not modify this file - - YOUR CHANGES WILL BE ERASED ! <nl> - # <nl> - # This file must be checked in Version Control Systems . <nl> - # <nl> - # To customize properties used by the Ant build system edit <nl> - # " ant . 
properties " , and override values to adapt the script to your <nl> - # project structure . <nl> - # <nl> - # To enable ProGuard to shrink and obfuscate your code , uncomment this ( available properties : sdk . dir , user . home ) : <nl> - # proguard . config = $ { sdk . dir } / tools / proguard / proguard - android . txt : proguard - project . txt <nl> - <nl> - android . library = true <nl> - # Project target . <nl> - target = android - 16 <nl> - android . library . reference . 1 = . . / java <nl> deleted file mode 100644 <nl> index e69de29bb2d1 . . 000000000000 <nl> deleted file mode 100644 <nl> index c38c34665c62 . . 000000000000 <nl> mmm a / cocos / platform / android / ControllerAutoAdapter / src / org / cocos2dx / lib / GameControllerActivity . java <nl> ppp / dev / null <nl> <nl> - / * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * <nl> - Copyright ( c ) 2010 - 2013 cocos2d - x . org <nl> - <nl> - http : / / www . cocos2d - x . org <nl> - <nl> - Permission is hereby granted , free of charge , to any person obtaining a copy <nl> - of this software and associated documentation files ( the " Software " ) , to deal <nl> - in the Software without restriction , including without limitation the rights <nl> - to use , copy , modify , merge , publish , distribute , sublicense , and / or sell <nl> - copies of the Software , and to permit persons to whom the Software is <nl> - furnished to do so , subject to the following conditions : <nl> - <nl> - The above copyright notice and this permission notice shall be included in <nl> - all copies or substantial portions of the Software . <nl> - <nl> - THE SOFTWARE IS PROVIDED " AS IS " , WITHOUT WARRANTY OF ANY KIND , EXPRESS OR <nl> - IMPLIED , INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY , <nl> - FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT . IN NO EVENT SHALL THE <nl> - AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM , DAMAGES OR OTHER <nl> - LIABILITY , WHETHER IN AN ACTION OF CONTRACT , TORT OR OTHERWISE , ARISING FROM , <nl> - OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN <nl> - THE SOFTWARE . <nl> - * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * / <nl> - package org . cocos2dx . lib ; <nl> - <nl> - import org . cocos2dx . lib . GameControllerDelegate . ControllerEventListener ; <nl> - import org . cocos2dx . lib . inputmanagercompat . InputManagerCompat ; <nl> - import org . cocos2dx . lib . inputmanagercompat . InputManagerCompat . InputDeviceListener ; <nl> - import org . cocos2dx . lib . Cocos2dxActivity ; <nl> - <nl> - import android . os . Bundle ; <nl> - import android . view . InputDevice ; <nl> - import android . view . KeyEvent ; <nl> - import android . view . MotionEvent ; <nl> - import android . util . Log ; <nl> - <nl> - public abstract class GameControllerActivity extends Cocos2dxActivity implements InputDeviceListener { <nl> - / / = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = <nl> - / / Constants <nl> - / / = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = <nl> - <nl> - private final static String TAG = GameControllerActivity . class . 
getSimpleName ( ) ; <nl> - <nl> - public static final int DRIVERTYPE_NIBIRU = 0 ; <nl> - public static final int DRIVERTYPE_MOGA = 1 ; <nl> - public static final int DRIVERTYPE_OUYA = 2 ; <nl> - public static final int DRIVERTYPE_STANDARD = 3 ; <nl> - public static final int DRIVERTYPE_UNKNOWN = 4 ; <nl> - <nl> - / / = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = <nl> - / / Fields <nl> - / / = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = <nl> - private static GameControllerActivity sGameControllerActivity ; <nl> - private InputManagerCompat mInputManager = null ; <nl> - <nl> - protected GameControllerHelper mControllerHelper = null ; <nl> - <nl> - protected GameControllerDelegate mControllerNibiru = null ; <nl> - protected GameControllerDelegate mControllerMoga = null ; <nl> - protected GameControllerDelegate mControllerOuya = null ; <nl> - <nl> - public void connectController ( ) { <nl> - mControllerHelper . connectController ( ) ; <nl> - } <nl> - <nl> - public void setGameControllerInstance ( GameControllerDelegate controllerDelegate , int driveType ) { <nl> - if ( driveType = = DRIVERTYPE_NIBIRU ) { <nl> - mControllerNibiru = controllerDelegate ; <nl> - } else if ( driveType = = DRIVERTYPE_MOGA ) { <nl> - mControllerMoga = controllerDelegate ; <nl> - } <nl> - else if ( driveType = = DRIVERTYPE_OUYA ) { <nl> - mControllerOuya = controllerDelegate ; <nl> - } <nl> - controllerDelegate . setControllerEventListener ( mControllerEventListener ) ; <nl> - controllerDelegate . onCreate ( sGameControllerActivity ) ; <nl> - } <nl> - <nl> - public GameControllerDelegate getGameControllerDelegate ( int driveType ) { <nl> - if ( driveType = = DRIVERTYPE_NIBIRU ) { <nl> - return mControllerNibiru ; <nl> - } else if ( driveType = = DRIVERTYPE_MOGA ) { <nl> - return mControllerMoga ; <nl> - } <nl> - else if ( driveType = = DRIVERTYPE_OUYA ) { <nl> - return mControllerOuya ; <nl> - } <nl> - <nl> - return null ; <nl> - } <nl> - <nl> - ControllerEventListener mControllerEventListener = new ControllerEventListener ( ) { <nl> - <nl> - @ Override <nl> - public void onButtonEvent ( String vendorName , int controller , int button , <nl> - boolean isPressed , float value , boolean isAnalog ) { <nl> - GameControllerAdapter . onButtonEvent ( vendorName , controller , button , isPressed , value , isAnalog ) ; <nl> - } <nl> - <nl> - @ Override <nl> - public void onAxisEvent ( String vendorName , int controller , int axisID , <nl> - float value , boolean isAnalog ) { <nl> - GameControllerAdapter . onAxisEvent ( vendorName , controller , axisID , value , isAnalog ) ; <nl> - } <nl> - <nl> - @ Override <nl> - public void onConnected ( String vendorName , int controller ) { <nl> - GameControllerAdapter . onConnected ( vendorName , controller ) ; <nl> - } <nl> - <nl> - @ Override <nl> - public void onDisconnected ( String vendorName , int controller ) { <nl> - GameControllerAdapter . onDisconnected ( vendorName , controller ) ; <nl> - } <nl> - } ; <nl> - <nl> - / / = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = <nl> - / / Constructors <nl> - / / = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = <nl> - <nl> - @ Override <nl> - protected void onCreate ( final Bundle savedInstanceState ) { <nl> - super . 
onCreate ( savedInstanceState ) ; <nl> - <nl> - sGameControllerActivity = this ; <nl> - mInputManager = InputManagerCompat . Factory . getInputManager ( this ) ; <nl> - mInputManager . registerInputDeviceListener ( this , null ) ; <nl> - <nl> - if ( mControllerNibiru ! = null ) { <nl> - mControllerNibiru . onCreate ( this ) ; <nl> - } <nl> - if ( mControllerMoga ! = null ) { <nl> - mControllerMoga . onCreate ( this ) ; <nl> - } <nl> - if ( mControllerOuya ! = null ) { <nl> - mControllerOuya . onCreate ( this ) ; <nl> - } <nl> - if ( mControllerHelper = = null ) { <nl> - mControllerHelper = new GameControllerHelper ( this ) ; <nl> - } <nl> - } <nl> - <nl> - / / = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = <nl> - / / Getter & Setter <nl> - / / = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = <nl> - <nl> - / / = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = <nl> - / / Methods for / from SuperClass / Interfaces <nl> - / / = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = <nl> - <nl> - @ Override <nl> - public boolean dispatchKeyEvent ( KeyEvent event ) { <nl> - boolean handled = false ; <nl> - if ( mControllerNibiru ! = null ) { <nl> - handled | = mControllerNibiru . dispatchKeyEvent ( event ) ; <nl> - } <nl> - if ( mControllerMoga ! = null ) { <nl> - handled | = mControllerMoga . dispatchKeyEvent ( event ) ; <nl> - } <nl> - if ( mControllerOuya ! = null ) { <nl> - handled | = mControllerOuya . dispatchKeyEvent ( event ) ; <nl> - } <nl> - <nl> - handled | = mControllerHelper . dispatchKeyEvent ( event ) ; <nl> - Log . d ( TAG , " dispatchKeyEvent : " + handled ) ; <nl> - <nl> - return handled | | super . dispatchKeyEvent ( event ) ; <nl> - } <nl> - <nl> - @ Override <nl> - public boolean dispatchGenericMotionEvent ( MotionEvent event ) { <nl> - boolean handled = false ; <nl> - if ( mControllerNibiru ! = null ) { <nl> - handled | = mControllerNibiru . dispatchGenericMotionEvent ( event ) ; <nl> - } <nl> - if ( mControllerMoga ! = null ) { <nl> - handled | = mControllerMoga . dispatchGenericMotionEvent ( event ) ; <nl> - } <nl> - if ( mControllerOuya ! = null ) { <nl> - handled | = mControllerOuya . dispatchGenericMotionEvent ( event ) ; <nl> - } <nl> - <nl> - handled | = mControllerHelper . dispatchGenericMotionEvent ( event ) ; <nl> - <nl> - return handled | | super . dispatchGenericMotionEvent ( event ) ; <nl> - } <nl> - <nl> - @ Override <nl> - public void onInputDeviceAdded ( int deviceId ) { <nl> - <nl> - Log . d ( TAG , " onInputDeviceAdded : " + deviceId ) ; <nl> - <nl> - InputDevice device = InputDevice . getDevice ( deviceId ) ; <nl> - int deviceSource = device . getSources ( ) ; <nl> - <nl> - if ( ( ( deviceSource & InputDevice . SOURCE_GAMEPAD ) = = InputDevice . SOURCE_GAMEPAD ) <nl> - | | ( ( deviceSource & InputDevice . SOURCE_JOYSTICK ) = = InputDevice . SOURCE_JOYSTICK ) ) <nl> - { <nl> - GameControllerAdapter . onConnected ( " Standard " , deviceId ) ; <nl> - } <nl> - } <nl> - / * <nl> - * This is an unusual case . Input devices don ' t typically change , but they <nl> - * certainly can mmm for example a device may have different modes . We use <nl> - * this to make sure that the ship has an up - to - date InputDevice . <nl> - * <nl> - * @ see <nl> - * com . example . inputmanagercompat . 
InputManagerCompat . InputDeviceListener <nl> - * # onInputDeviceChanged ( int ) <nl> - * / <nl> - @ Override <nl> - public void onInputDeviceChanged ( int deviceId ) { <nl> - Log . d ( TAG , " onInputDeviceChanged : " + deviceId ) ; <nl> - } <nl> - <nl> - / * <nl> - * Remove any ship associated with the ID . <nl> - * <nl> - * @ see <nl> - * com . example . inputmanagercompat . InputManagerCompat . InputDeviceListener <nl> - * # onInputDeviceRemoved ( int ) <nl> - * / <nl> - @ Override <nl> - public void onInputDeviceRemoved ( int deviceId ) { <nl> - Log . d ( TAG , " onInputDeviceRemoved : " + deviceId ) ; <nl> - <nl> - InputDevice device = InputDevice . getDevice ( deviceId ) ; <nl> - int deviceSource = device . getSources ( ) ; <nl> - <nl> - if ( ( ( deviceSource & InputDevice . SOURCE_GAMEPAD ) = = InputDevice . SOURCE_GAMEPAD ) <nl> - | | ( ( deviceSource & InputDevice . SOURCE_JOYSTICK ) = = InputDevice . SOURCE_JOYSTICK ) ) <nl> - { <nl> - GameControllerAdapter . onDisconnected ( " Standard " , deviceId ) ; <nl> - } <nl> - } <nl> - <nl> - @ Override <nl> - protected void onResume ( ) { <nl> - super . onResume ( ) ; <nl> - <nl> - if ( mControllerNibiru ! = null ) { <nl> - mControllerNibiru . onResume ( ) ; <nl> - } <nl> - if ( mControllerMoga ! = null ) { <nl> - mControllerMoga . onResume ( ) ; <nl> - } <nl> - if ( mControllerOuya ! = null ) { <nl> - mControllerOuya . onResume ( ) ; <nl> - } <nl> - } <nl> - <nl> - @ Override <nl> - protected void onPause ( ) { <nl> - if ( mControllerNibiru ! = null ) { <nl> - mControllerNibiru . onPause ( ) ; <nl> - } <nl> - if ( mControllerMoga ! = null ) { <nl> - mControllerMoga . onPause ( ) ; <nl> - } <nl> - if ( mControllerOuya ! = null ) { <nl> - mControllerOuya . onPause ( ) ; <nl> - } <nl> - <nl> - super . onPause ( ) ; <nl> - } <nl> - <nl> - @ Override <nl> - protected void onDestroy ( ) { <nl> - if ( mControllerNibiru ! = null ) { <nl> - mControllerNibiru . onDestroy ( ) ; <nl> - } <nl> - if ( mControllerMoga ! = null ) { <nl> - mControllerMoga . onDestroy ( ) ; <nl> - } <nl> - if ( mControllerOuya ! = null ) { <nl> - mControllerOuya . onDestroy ( ) ; <nl> - } <nl> - <nl> - mControllerHelper . destrory ( ) ; <nl> - <nl> - super . onDestroy ( ) ; <nl> - } <nl> - <nl> - } <nl> \ No newline at end of file <nl> deleted file mode 100644 <nl> index 52480b752487 . . 000000000000 <nl> mmm a / cocos / platform / android / ControllerAutoAdapter / src / org / cocos2dx / lib / GameControllerHelper . java <nl> ppp / dev / null <nl> <nl> - package org . cocos2dx . lib ; <nl> - <nl> - import java . io . File ; <nl> - import java . io . FileOutputStream ; <nl> - import java . lang . reflect . Method ; <nl> - import java . util . ArrayList ; <nl> - import java . util . Iterator ; <nl> - import java . util . List ; <nl> - import java . util . Set ; <nl> - <nl> - import org . json . JSONArray ; <nl> - import org . json . JSONObject ; <nl> - import org . apache . http . Header ; <nl> - <nl> - import com . loopj . android . http . AsyncHttpClient ; <nl> - import com . loopj . android . http . FileAsyncHttpResponseHandler ; <nl> - import com . loopj . android . http . JsonHttpResponseHandler ; <nl> - <nl> - import dalvik . system . DexClassLoader ; <nl> - import android . bluetooth . BluetoothAdapter ; <nl> - import android . bluetooth . BluetoothDevice ; <nl> - import android . content . BroadcastReceiver ; <nl> - import android . content . Context ; <nl> - import android . content . Intent ; <nl> - import android . content . 
IntentFilter ; <nl> - import android . content . pm . ApplicationInfo ; <nl> - import android . content . pm . PackageManager ; <nl> - import android . content . pm . PackageManager . NameNotFoundException ; <nl> - import android . net . Uri ; <nl> - import android . os . Environment ; <nl> - import android . util . Log ; <nl> - import android . util . SparseIntArray ; <nl> - import android . view . InputDevice ; <nl> - import android . view . KeyEvent ; <nl> - import android . view . MotionEvent ; <nl> - import android . widget . Toast ; <nl> - <nl> - public class GameControllerHelper { <nl> - <nl> - private final static String TAG = GameControllerHelper . class . getSimpleName ( ) ; <nl> - <nl> - public static final String StandardControllerName = " Standard " ; <nl> - public static final String [ ] DRIVERS_NAME = { " nibiru " , " moga " , " ouya " , StandardControllerName } ; <nl> - <nl> - public static final String SPP_UUID = " 00001101 - 0000 - 1000 - 8000 - 00805F9B34FB " ; <nl> - <nl> - SparseIntArray ControllerKeyMap ; <nl> - <nl> - private static final String COCOS_CONTROLLER_URL = " http : / / 115 . 28 . 134 . 83 : 9000 / download / cocoscontroller / " ; <nl> - private static final String COCOS_CONTROLLER_CONFIG = " controller . json " ; <nl> - <nl> - private static final String NIBIRU_DEP_PACKAGE = " com . nibiru " ; <nl> - private static final String MOGA__DEP_PACKAGE = " com . bda . pivot . mogapgp " ; <nl> - <nl> - private static GameControllerActivity sGameControllerActivity ; <nl> - private static GameControllerHelper sControllerHelper ; <nl> - <nl> - private String mLibAdapterFileName ; <nl> - private int mLibAdapterFileSize ; <nl> - <nl> - private List < String > mNibiruSupportedDrives ; <nl> - private String mNibiruDepFileName ; <nl> - private int mNibiruDepFileSize ; <nl> - <nl> - private List < String > mMogaSupportedDrives ; <nl> - private String mMogaDepFileName ; <nl> - private int mMogaDepFileSize ; <nl> - <nl> - private List < String > mOuyaSupportedDrives ; <nl> - <nl> - private AsyncHttpClient mDownDepsHttpClient = null ; <nl> - <nl> - private BluetoothAdapter mBluetoothAdapter = null ; <nl> - private ArrayList < BluetoothDevice > mBluetoothDevices = null ; <nl> - private SparseIntArray mDevicesDriver ; <nl> - private int mClearDevices = 0 ; <nl> - <nl> - private String mConfigFilePath ; <nl> - private String mLocalSavePath = null ; <nl> - <nl> - private boolean mLazyInit = true ; <nl> - private boolean mLazyConfigInit = true ; <nl> - private static ControllerListener mControllerListener = null ; <nl> - <nl> - public static interface ControllerListener { <nl> - void onDownloadConfigStarted ( ) ; <nl> - void onDownloadConfigFinished ( boolean isSuccess ) ; <nl> - <nl> - void onControllerDiscoveryStarted ( ) ; <nl> - / / <nl> - void onControllerDiscoveryFinish ( ArrayList < BluetoothDevice > devices ) ; <nl> - <nl> - void onDownloadDepsStarted ( ) ; <nl> - void onDownloadDepsProgress ( int bytesWritten , int totalSize ) ; <nl> - void onDownloadDepsFinished ( boolean isSuccess ) ; <nl> - <nl> - void onInstallDriver ( String filePath ) ; <nl> - void onConnectController ( ) ; <nl> - } <nl> - <nl> - public void setControllerListener ( ControllerListener listener ) { <nl> - mControllerListener = listener ; <nl> - } <nl> - <nl> - private static final int AXIS_X = 0 ; <nl> - private static final int AXIS_Y = 1 ; <nl> - private static final int AXIS_Z = 11 ; <nl> - private static final int AXIS_RZ = 14 ; <nl> - public static final int AXIS_LTRIGGER = 17 ; <nl> - public 
static final int AXIS_RTRIGGER = 18 ; <nl> - public static final int AXIS_BRAKE = 23 ; <nl> - public static final int AXIS_THROTTLE = 19 ; <nl> - <nl> - public GameControllerHelper ( GameControllerActivity activity ) { <nl> - sGameControllerActivity = activity ; <nl> - sControllerHelper = this ; <nl> - <nl> - ControllerKeyMap = new SparseIntArray ( 25 ) ; <nl> - ControllerKeyMap . put ( KeyEvent . KEYCODE_BUTTON_A , GameControllerDelegate . BUTTON_A ) ; <nl> - ControllerKeyMap . put ( KeyEvent . KEYCODE_BUTTON_B , GameControllerDelegate . BUTTON_B ) ; <nl> - ControllerKeyMap . put ( KeyEvent . KEYCODE_BUTTON_C , GameControllerDelegate . BUTTON_C ) ; <nl> - ControllerKeyMap . put ( KeyEvent . KEYCODE_BUTTON_X , GameControllerDelegate . BUTTON_X ) ; <nl> - ControllerKeyMap . put ( KeyEvent . KEYCODE_BUTTON_Y , GameControllerDelegate . BUTTON_Y ) ; <nl> - ControllerKeyMap . put ( KeyEvent . KEYCODE_BUTTON_Z , GameControllerDelegate . BUTTON_Z ) ; <nl> - <nl> - ControllerKeyMap . put ( KeyEvent . KEYCODE_DPAD_UP , GameControllerDelegate . BUTTON_DPAD_UP ) ; <nl> - ControllerKeyMap . put ( KeyEvent . KEYCODE_DPAD_DOWN , GameControllerDelegate . BUTTON_DPAD_DOWN ) ; <nl> - ControllerKeyMap . put ( KeyEvent . KEYCODE_DPAD_LEFT , GameControllerDelegate . BUTTON_DPAD_LEFT ) ; <nl> - ControllerKeyMap . put ( KeyEvent . KEYCODE_DPAD_RIGHT , GameControllerDelegate . BUTTON_DPAD_RIGHT ) ; <nl> - ControllerKeyMap . put ( KeyEvent . KEYCODE_DPAD_CENTER , GameControllerDelegate . BUTTON_DPAD_CENTER ) ; <nl> - <nl> - ControllerKeyMap . put ( KeyEvent . KEYCODE_BUTTON_THUMBL , GameControllerDelegate . BUTTON_LEFT_THUMBSTICK ) ; <nl> - ControllerKeyMap . put ( KeyEvent . KEYCODE_BUTTON_THUMBR , GameControllerDelegate . BUTTON_RIGHT_THUMBSTICK ) ; <nl> - <nl> - ControllerKeyMap . put ( KeyEvent . KEYCODE_BUTTON_L1 , GameControllerDelegate . BUTTON_LEFT_SHOULDER ) ; <nl> - ControllerKeyMap . put ( KeyEvent . KEYCODE_BUTTON_R1 , GameControllerDelegate . BUTTON_RIGHT_SHOULDER ) ; <nl> - ControllerKeyMap . put ( KeyEvent . KEYCODE_BUTTON_L2 , GameControllerDelegate . BUTTON_LEFT_TRIGGER ) ; <nl> - ControllerKeyMap . put ( KeyEvent . KEYCODE_BUTTON_R2 , GameControllerDelegate . BUTTON_RIGHT_TRIGGER ) ; <nl> - <nl> - ControllerKeyMap . put ( AXIS_X , GameControllerDelegate . THUMBSTICK_LEFT_X ) ; <nl> - ControllerKeyMap . put ( AXIS_Y , GameControllerDelegate . THUMBSTICK_LEFT_Y ) ; <nl> - ControllerKeyMap . put ( AXIS_Z , GameControllerDelegate . THUMBSTICK_RIGHT_X ) ; <nl> - ControllerKeyMap . put ( AXIS_RZ , GameControllerDelegate . THUMBSTICK_RIGHT_Y ) ; <nl> - <nl> - ControllerKeyMap . put ( KeyEvent . KEYCODE_BUTTON_START , GameControllerDelegate . BUTTON_START ) ; <nl> - ControllerKeyMap . put ( KeyEvent . KEYCODE_BUTTON_SELECT , GameControllerDelegate . BUTTON_SELECT ) ; <nl> - / / KEYCODE_BUTTON_MODE <nl> - mDownDepsHttpClient = new AsyncHttpClient ( ) ; <nl> - mDownDepsHttpClient . setTimeout ( 360 * 1000 ) ; <nl> - } <nl> - <nl> - public void connectController ( ) { <nl> - if ( mLazyInit ) { <nl> - mLazyInit = false ; <nl> - mNibiruSupportedDrives = new ArrayList < String > ( 30 ) ; <nl> - mMogaSupportedDrives = new ArrayList < String > ( 5 ) ; <nl> - mOuyaSupportedDrives = new ArrayList < String > ( 5 ) ; <nl> - <nl> - mLocalSavePath = Environment . getExternalStorageDirectory ( ) + File . separator + " CocosGameController " + File . separator ; <nl> - Log . i ( TAG , " mLocalSavePath : " + mLocalSavePath ) ; <nl> - mConfigFilePath = sGameControllerActivity . getFilesDir ( ) . 
getAbsolutePath ( ) + File . separator + COCOS_CONTROLLER_CONFIG ; <nl> - Log . i ( TAG , " mConfigFilePath : " + mConfigFilePath ) ; <nl> - <nl> - } <nl> - <nl> - if ( mControllerListener ! = null ) { <nl> - mControllerListener . onDownloadConfigStarted ( ) ; <nl> - } <nl> - if ( mLazyConfigInit ) { <nl> - / / if ( mDownDepsHttpClient ! = null ) { <nl> - / / mDownDepsHttpClient . cancelRequests ( sGameControllerActivity , true ) ; <nl> - / / } <nl> - requestControllerConfig ( ) ; <nl> - } <nl> - else { <nl> - scanBluetoothDrive ( ) ; <nl> - } <nl> - } <nl> - <nl> - public Set < BluetoothDevice > getBondedDevices ( ) { <nl> - if ( mBluetoothAdapter = = null ) { <nl> - mBluetoothAdapter = BluetoothAdapter . getDefaultAdapter ( ) ; <nl> - if ( mBluetoothAdapter = = null ) { <nl> - return null ; <nl> - } <nl> - } <nl> - <nl> - return mBluetoothAdapter . getBondedDevices ( ) ; <nl> - } <nl> - <nl> - public void destrory ( ) { <nl> - if ( mDownDepsHttpClient ! = null ) { <nl> - mDownDepsHttpClient . cancelRequests ( sGameControllerActivity , true ) ; <nl> - } <nl> - } <nl> - <nl> - private boolean scanBluetoothDrive ( ) { <nl> - if ( mControllerListener ! = null ) { <nl> - mControllerListener . onDownloadConfigFinished ( true ) ; <nl> - } <nl> - <nl> - if ( mBluetoothAdapter = = null ) { <nl> - mBluetoothAdapter = BluetoothAdapter . getDefaultAdapter ( ) ; <nl> - if ( mBluetoothAdapter = = null ) { <nl> - return false ; <nl> - } <nl> - <nl> - if ( mBluetoothDevices = = null ) { <nl> - mBluetoothDevices = new ArrayList < BluetoothDevice > ( 5 ) ; <nl> - mDevicesDriver = new SparseIntArray ( ) ; <nl> - } <nl> - <nl> - IntentFilter filter = new IntentFilter ( BluetoothDevice . ACTION_FOUND ) ; <nl> - / / filter . addAction ( BluetoothDevice . ACTION_BOND_STATE_CHANGED ) ; <nl> - / / filter . addAction ( BluetoothAdapter . ACTION_SCAN_MODE_CHANGED ) ; <nl> - / / filter . addAction ( BluetoothAdapter . ACTION_STATE_CHANGED ) ; <nl> - filter . addAction ( BluetoothAdapter . ACTION_DISCOVERY_FINISHED ) ; <nl> - filter . addAction ( BluetoothAdapter . ACTION_DISCOVERY_STARTED ) ; <nl> - sGameControllerActivity . registerReceiver ( mBluetoothReceiver , filter ) ; <nl> - <nl> - IntentFilter appFilter = new IntentFilter ( ) ; <nl> - appFilter . addAction ( " android . intent . action . PACKAGE_ADDED " ) ; <nl> - appFilter . addDataScheme ( " package " ) ; <nl> - sGameControllerActivity . registerReceiver ( mAppReceiver , appFilter ) ; <nl> - } <nl> - <nl> - if ( ! mBluetoothAdapter . isEnabled ( ) ) { <nl> - mBluetoothAdapter . enable ( ) ; <nl> - } <nl> - <nl> - if ( mBluetoothAdapter . isDiscovering ( ) ) { <nl> - mBluetoothAdapter . cancelDiscovery ( ) ; <nl> - } <nl> - <nl> - mBluetoothAdapter . startDiscovery ( ) ; <nl> - <nl> - return true ; <nl> - } <nl> - <nl> - public int checkDriverType ( BluetoothDevice device ) { <nl> - String deviceName = device . getName ( ) ; <nl> - <nl> - if ( mNibiruSupportedDrives . contains ( deviceName ) ) { <nl> - return GameControllerActivity . DRIVERTYPE_NIBIRU ; <nl> - } <nl> - else if ( mMogaSupportedDrives . contains ( deviceName ) ) { <nl> - return GameControllerActivity . DRIVERTYPE_MOGA ; <nl> - } <nl> - else if ( mOuyaSupportedDrives . contains ( deviceName ) ) { <nl> - return GameControllerActivity . DRIVERTYPE_OUYA ; <nl> - } <nl> - else { <nl> - <nl> - } <nl> - <nl> - return GameControllerActivity . DRIVERTYPE_UNKNOWN ; <nl> - } <nl> - <nl> - public static void installApplication ( String filePath ) { <nl> - if ( sGameControllerActivity ! 
= null ) { <nl> - Intent intent = new Intent ( ) ; <nl> - intent . setAction ( Intent . ACTION_VIEW ) ; <nl> - intent . setDataAndType ( Uri . parse ( " file : / / " + filePath ) , " application / vnd . android . package - archive " ) ; <nl> - intent . setFlags ( Intent . FLAG_ACTIVITY_NEW_TASK ) ; <nl> - sGameControllerActivity . startActivity ( intent ) ; <nl> - } <nl> - } <nl> - <nl> - public static boolean checkApplication ( String packName ) { <nl> - try { <nl> - ApplicationInfo applicationInfo = sGameControllerActivity . getPackageManager ( ) . getApplicationInfo ( packName , PackageManager . GET_UNINSTALLED_PACKAGES ) ; <nl> - Log . d ( TAG , applicationInfo . toString ( ) ) ; <nl> - return true ; <nl> - } catch ( NameNotFoundException e ) { <nl> - return false ; <nl> - } <nl> - } <nl> - <nl> - private BroadcastReceiver mBluetoothReceiver = new BroadcastReceiver ( ) { <nl> - <nl> - @ Override <nl> - public void onReceive ( Context context , Intent intent ) { <nl> - String action = intent . getAction ( ) ; <nl> - <nl> - if ( BluetoothDevice . ACTION_FOUND . equals ( action ) ) { <nl> - BluetoothDevice device = intent . getParcelableExtra ( BluetoothDevice . EXTRA_DEVICE ) ; <nl> - if ( mBluetoothDevices . contains ( device ) ) { <nl> - return ; <nl> - } <nl> - Log . d ( TAG , " Remote device discovered : " + device . getName ( ) ) ; <nl> - / / We can ' t ensure non - controller can be filtered out . Some game controller marked with computer class . <nl> - / * String deviceName = device . getName ( ) ; <nl> - <nl> - if ( device . getBluetoothClass ( ) . getMajorDeviceClass ( ) = = BluetoothClass . Device . Major . COMPUTER <nl> - | | device . getBluetoothClass ( ) . getMajorDeviceClass ( ) = = BluetoothClass . Device . Major . PHONE ) <nl> - { <nl> - Log . w ( TAG , " Remote device discovered : " + deviceName + " is computer or phone . " + device . getBluetoothClass ( ) . getMajorDeviceClass ( ) ) ; <nl> - return ; <nl> - } * / <nl> - <nl> - mBluetoothDevices . add ( device ) ; <nl> - int type = checkDriverType ( device ) ; <nl> - if ( type ! = GameControllerActivity . DRIVERTYPE_UNKNOWN ) { <nl> - mTargetDriverType = type ; <nl> - mClearDevices + = 1 ; <nl> - } <nl> - mDevicesDriver . append ( mBluetoothDevices . size ( ) - 1 , type ) ; <nl> - } <nl> - else if ( BluetoothAdapter . ACTION_DISCOVERY_FINISHED . equals ( action ) ) { <nl> - Log . d ( TAG , " The local Bluetooth adapter has finished the device discovery process . " ) ; <nl> - <nl> - if ( mControllerListener ! = null ) { <nl> - mControllerListener . onControllerDiscoveryFinish ( mBluetoothDevices ) ; <nl> - } <nl> - else { <nl> - if ( mBluetoothDevices . size ( ) = = 0 ) { <nl> - if ( checkApplication ( NIBIRU_DEP_PACKAGE ) ) { <nl> - downControllerDeps ( GameControllerActivity . DRIVERTYPE_NIBIRU ) ; <nl> - } <nl> - Log . w ( TAG , " Not found any supported bluetooth game controller ! " ) ; <nl> - } else { <nl> - if ( mClearDevices = = 1 ) { <nl> - downControllerDeps ( mTargetDriverType ) ; <nl> - } <nl> - else { <nl> - Log . i ( TAG , " Not clear target ! " ) ; <nl> - if ( checkApplication ( NIBIRU_DEP_PACKAGE ) ) { <nl> - downControllerDeps ( GameControllerActivity . DRIVERTYPE_NIBIRU ) ; <nl> - } <nl> - / / todo : show sel <nl> - } <nl> - } <nl> - } <nl> - } <nl> - else if ( BluetoothAdapter . ACTION_DISCOVERY_STARTED . equals ( action ) ) { <nl> - Log . d ( TAG , " The local Bluetooth adapter has started the remote device discovery process . " ) ; <nl> - if ( mControllerListener ! 
= null ) { <nl> - mControllerListener . onControllerDiscoveryStarted ( ) ; <nl> - } <nl> - <nl> - mBluetoothDevices . clear ( ) ; <nl> - mDevicesDriver . clear ( ) ; <nl> - mClearDevices = 0 ; <nl> - mTargetDriverType = GameControllerActivity . DRIVERTYPE_UNKNOWN ; <nl> - <nl> - / / check moga controller <nl> - Iterator < BluetoothDevice > it = mBluetoothAdapter . getBondedDevices ( ) . iterator ( ) ; <nl> - while ( it . hasNext ( ) ) { <nl> - BluetoothDevice device = it . next ( ) ; <nl> - / / if ( device . getBluetoothClass ( ) . getMajorDeviceClass ( ) ! = BluetoothClass . Device . Major . COMPUTER <nl> - / / & & device . getBluetoothClass ( ) . getMajorDeviceClass ( ) ! = BluetoothClass . Device . Major . PHONE ) <nl> - { <nl> - if ( ! mBluetoothDevices . contains ( device ) ) { <nl> - mBluetoothDevices . add ( device ) ; <nl> - int type = checkDriverType ( device ) ; <nl> - Log . d ( TAG , " BluetoothDevice objects that are bonded ( paired ) to the local adapter : " + device . getName ( ) ) ; <nl> - if ( type ! = GameControllerActivity . DRIVERTYPE_UNKNOWN ) { <nl> - mClearDevices + = 1 ; <nl> - mTargetDriverType = type ; <nl> - } <nl> - mDevicesDriver . append ( mBluetoothDevices . size ( ) - 1 , type ) ; <nl> - } <nl> - } <nl> - } <nl> - } <nl> - } <nl> - } ; <nl> - <nl> - private static int depsCount = 0 ; <nl> - private static int mTargetDriverType = GameControllerActivity . DRIVERTYPE_UNKNOWN ; <nl> - <nl> - private static int mDownloadTotalSize ; <nl> - private static int mDownloadLibSize ; <nl> - private static int mLibDownloadCompletedSize ; <nl> - private static int mDepDownloadCompletedSize ; <nl> - <nl> - public void downControllerDeps ( int driverType ) { <nl> - if ( driverType ! = GameControllerActivity . DRIVERTYPE_NIBIRU <nl> - & & driverType ! = GameControllerActivity . DRIVERTYPE_MOGA <nl> - & & driverType ! = GameControllerActivity . DRIVERTYPE_OUYA ) { <nl> - return ; <nl> - } <nl> - <nl> - if ( mControllerListener ! = null ) { <nl> - mControllerListener . onDownloadDepsStarted ( ) ; <nl> - } <nl> - <nl> - mDownloadTotalSize = 0 ; <nl> - mLibDownloadCompletedSize = 0 ; <nl> - mDepDownloadCompletedSize = 0 ; <nl> - <nl> - mTargetDriverType = driverType ; <nl> - depsCount = 1 ; <nl> - <nl> - GameControllerUtils . ensureDirectoryExist ( mLocalSavePath ) ; <nl> - <nl> - String remoteDir = COCOS_CONTROLLER_URL + DRIVERS_NAME [ driverType ] + File . separator ; <nl> - <nl> - if ( driverType = = GameControllerActivity . DRIVERTYPE_NIBIRU ) { <nl> - if ( ! checkApplication ( NIBIRU_DEP_PACKAGE ) ) { <nl> - depsCount + = 1 ; <nl> - mDownloadTotalSize + = mNibiruDepFileSize ; <nl> - mDownDepsHttpClient . get ( remoteDir + mNibiruDepFileName , new MyDepsAsyncHandler ( <nl> - new File ( mLocalSavePath + mNibiruDepFileName ) , MyDepsAsyncHandler . FILETYPE_DEP_APK ) ) ; <nl> - } <nl> - } <nl> - else if ( driverType = = GameControllerActivity . DRIVERTYPE_MOGA ) { <nl> - if ( ! checkApplication ( MOGA__DEP_PACKAGE ) ) { <nl> - mDownloadTotalSize + = mMogaDepFileSize ; <nl> - depsCount + = 1 ; <nl> - mDownDepsHttpClient . get ( remoteDir + mMogaDepFileName , new MyDepsAsyncHandler ( <nl> - new File ( mLocalSavePath + mMogaDepFileName ) , MyDepsAsyncHandler . FILETYPE_DEP_APK ) ) ; <nl> - } <nl> - } <nl> - else if ( driverType = = GameControllerActivity . DRIVERTYPE_OUYA ) { <nl> - <nl> - } <nl> - <nl> - File libFile = new File ( mLocalSavePath + mLibAdapterFileName ) ; <nl> - if ( libFile . exists ( ) & & libFile . 
length ( ) = = mLibAdapterFileSize ) { <nl> - depsCount - = 1 ; <nl> - if ( depsCount = = 0 ) { <nl> - onDepsReady ( ) ; <nl> - } <nl> - } else { <nl> - mDownloadLibSize = mLibAdapterFileSize ; <nl> - mDownloadTotalSize + = mDownloadLibSize ; <nl> - mDownDepsHttpClient . get ( COCOS_CONTROLLER_URL + mLibAdapterFileName , new MyDepsAsyncHandler ( <nl> - new File ( mLocalSavePath + mLibAdapterFileName ) , MyDepsAsyncHandler . FILETYPE_JAR ) ) ; <nl> - } <nl> - } <nl> - <nl> - static class MyDepsAsyncHandler extends FileAsyncHttpResponseHandler { <nl> - <nl> - public static final int FILETYPE_DEP_APK = 0 ; <nl> - public static final int FILETYPE_JAR = 1 ; <nl> - <nl> - private int mFileType = FILETYPE_DEP_APK ; <nl> - <nl> - public MyDepsAsyncHandler ( File file , int fileType ) { <nl> - super ( file ) ; <nl> - mFileType = fileType ; <nl> - } <nl> - <nl> - @ Override <nl> - public void onFailure ( int statusCode , Header [ ] headers , <nl> - Throwable e , File file ) { <nl> - if ( mFileType = = FILETYPE_JAR ) { <nl> - if ( file . exists ( ) & & file . length ( ) = = mDownloadLibSize ) { <nl> - depsCount - = 1 ; <nl> - if ( depsCount = = 0 ) { <nl> - if ( mControllerListener ! = null ) { <nl> - mControllerListener . onDownloadDepsFinished ( true ) ; <nl> - } <nl> - sControllerHelper . onDepsReady ( ) ; <nl> - } <nl> - } <nl> - } <nl> - else if ( mFileType = = FILETYPE_DEP_APK ) { <nl> - if ( mControllerListener ! = null ) { <nl> - mControllerListener . onDownloadDepsFinished ( false ) ; <nl> - } <nl> - } <nl> - <nl> - Log . e ( TAG , " Failed to download : " + file . getName ( ) ) ; <nl> - } <nl> - <nl> - @ Override <nl> - public void onProgress ( int bytesWritten , int totalSize ) { <nl> - if ( mFileType = = FILETYPE_JAR ) { <nl> - mLibDownloadCompletedSize = bytesWritten ; <nl> - } else { <nl> - mDepDownloadCompletedSize = bytesWritten ; <nl> - } <nl> - if ( mControllerListener ! = null ) { <nl> - <nl> - mControllerListener . onDownloadDepsProgress ( mLibDownloadCompletedSize + mDepDownloadCompletedSize , mDownloadTotalSize ) ; <nl> - } <nl> - Log . d ( TAG , " totalSize : " + totalSize + " , bytesWritten : " + bytesWritten ) ; <nl> - } <nl> - <nl> - <nl> - public void onSuccess ( File file ) { <nl> - Log . d ( TAG , " 11Down file success : " + file . getName ( ) ) ; <nl> - <nl> - depsCount - = 1 ; <nl> - if ( depsCount = = 0 ) { <nl> - if ( mControllerListener ! = null ) { <nl> - mControllerListener . onDownloadDepsFinished ( true ) ; <nl> - } <nl> - sControllerHelper . onDepsReady ( ) ; <nl> - } <nl> - super . onSuccess ( file ) ; <nl> - } <nl> - <nl> - / * @ Override <nl> - public void onSuccess ( int arg0 , Header [ ] arg1 , File file ) { <nl> - Log . d ( TAG , " 22Down file success : " + file . getName ( ) ) ; <nl> - <nl> - depsCount - = 1 ; <nl> - if ( depsCount = = 0 ) { <nl> - if ( mControllerListener ! = null ) { <nl> - mControllerListener . onDownloadDepsFinished ( true ) ; <nl> - } <nl> - sControllerHelper . onDepsReady ( ) ; <nl> - } <nl> - } * / <nl> - } <nl> - <nl> - private void onDepsReady ( ) { <nl> - Log . d ( TAG , " onDepsReady : " + mTargetDriverType ) ; <nl> - <nl> - if ( mTargetDriverType = = GameControllerActivity . DRIVERTYPE_NIBIRU ) { <nl> - if ( checkApplication ( NIBIRU_DEP_PACKAGE ) ) { <nl> - createControllerInstance ( mLocalSavePath + mLibAdapterFileName , mTargetDriverType ) ; <nl> - } <nl> - else { <nl> - if ( mControllerListener ! = null ) { <nl> - mControllerListener . 
onInstallDriver ( mLocalSavePath + mMogaDepFileName ) ; <nl> - } <nl> - installApplication ( mLocalSavePath + mNibiruDepFileName ) ; <nl> - } <nl> - } <nl> - else if ( mTargetDriverType = = GameControllerActivity . DRIVERTYPE_MOGA ) { <nl> - if ( checkApplication ( MOGA__DEP_PACKAGE ) ) { <nl> - createControllerInstance ( mLocalSavePath + mLibAdapterFileName , mTargetDriverType ) ; <nl> - } <nl> - else { <nl> - if ( mControllerListener ! = null ) { <nl> - mControllerListener . onInstallDriver ( mLocalSavePath + mMogaDepFileName ) ; <nl> - } <nl> - installApplication ( mLocalSavePath + mMogaDepFileName ) ; <nl> - } <nl> - } <nl> - else if ( mTargetDriverType = = GameControllerActivity . DRIVERTYPE_OUYA ) { <nl> - createControllerInstance ( mLocalSavePath + mLibAdapterFileName , mTargetDriverType ) ; <nl> - } <nl> - } <nl> - <nl> - private static final String CONFIGKEY_DRIVES = " drives " ; <nl> - private static final String CONFIGKEY_LIBADAPTER_FILENAME = " adapter - file " ; <nl> - private static final String CONFIGKEY_LIBADAPTER_FILESIZE = " adapter - filesize " ; <nl> - private static final String CONFIGKEY_DEP_FILENAME = " dep - apk " ; <nl> - private static final String CONFIGKEY_DEP_FILESIZE = " dep - size " ; <nl> - <nl> - private boolean parseConfig ( String jsonString ) { <nl> - mMogaSupportedDrives . clear ( ) ; <nl> - mNibiruSupportedDrives . clear ( ) ; <nl> - mOuyaSupportedDrives . clear ( ) ; <nl> - <nl> - try { <nl> - int tint = 9879 ; <nl> - JSONObject jsonObject = new JSONObject ( ) ; <nl> - jsonObject . put ( " intvalue " , tint ) ; <nl> - Log . w ( TAG , " intJson : " + jsonObject ) ; <nl> - <nl> - JSONObject configObject = new JSONObject ( jsonString ) ; <nl> - mLibAdapterFileName = configObject . getString ( CONFIGKEY_LIBADAPTER_FILENAME ) ; <nl> - mLibAdapterFileSize = configObject . getInt ( CONFIGKEY_LIBADAPTER_FILESIZE ) ; <nl> - <nl> - JSONObject nibiruObject = configObject . getJSONObject ( " nibiru " ) ; <nl> - JSONArray drives = nibiruObject . getJSONArray ( CONFIGKEY_DRIVES ) ; <nl> - int count = drives . length ( ) ; <nl> - for ( int i = 0 ; i < count ; i + + ) { <nl> - mNibiruSupportedDrives . add ( drives . getString ( i ) ) ; <nl> - } <nl> - mNibiruDepFileName = nibiruObject . getString ( CONFIGKEY_DEP_FILENAME ) ; <nl> - mNibiruDepFileSize = nibiruObject . getInt ( CONFIGKEY_DEP_FILESIZE ) ; <nl> - <nl> - JSONObject mogaObject = configObject . getJSONObject ( " moga " ) ; <nl> - drives = mogaObject . getJSONArray ( CONFIGKEY_DRIVES ) ; <nl> - count = drives . length ( ) ; <nl> - for ( int i = 0 ; i < count ; i + + ) { <nl> - mMogaSupportedDrives . add ( drives . getString ( i ) ) ; <nl> - } <nl> - mMogaDepFileName = mogaObject . getString ( CONFIGKEY_DEP_FILENAME ) ; <nl> - mMogaDepFileSize = mogaObject . getInt ( CONFIGKEY_DEP_FILESIZE ) ; <nl> - <nl> - JSONObject ouyaObject = configObject . getJSONObject ( " ouya " ) ; <nl> - drives = ouyaObject . getJSONArray ( CONFIGKEY_DRIVES ) ; <nl> - count = drives . length ( ) ; <nl> - for ( int i = 0 ; i < count ; i + + ) { <nl> - mOuyaSupportedDrives . add ( drives . getString ( i ) ) ; <nl> - } <nl> - <nl> - mLazyConfigInit = false ; <nl> - return true ; <nl> - } catch ( Exception e1 ) { <nl> - e1 . 
printStackTrace ( ) ; <nl> - return false ; <nl> - } <nl> - } <nl> - <nl> - private void requestControllerConfig ( ) { <nl> - <nl> - final JsonHttpResponseHandler configResponseHandler = new JsonHttpResponseHandler ( ) { <nl> - <nl> - @ Override <nl> - public void onFailure ( int statusCode , Header [ ] headers , <nl> - String responseBody , Throwable e ) { <nl> - <nl> - Log . e ( TAG , " Failed to download game controller config ! " ) ; <nl> - <nl> - String configJSON = GameControllerUtils . readJsonFile ( mConfigFilePath ) ; <nl> - if ( configJSON ! = null ) { <nl> - if ( parseConfig ( configJSON ) ) { <nl> - scanBluetoothDrive ( ) ; <nl> - return ; <nl> - } <nl> - } <nl> - if ( mControllerListener ! = null ) { <nl> - mControllerListener . onDownloadConfigFinished ( false ) ; <nl> - } <nl> - / * new AlertDialog . Builder ( sCocos2dxActivity ) <nl> - . setTitle ( " Loading controller config failed ! " ) <nl> - . setMessage ( <nl> - " Please make sure internet connection works ok ! " ) <nl> - . setPositiveButton ( " Retry " , <nl> - new DialogInterface . OnClickListener ( ) { <nl> - public void onClick ( DialogInterface dialog , <nl> - int which ) { <nl> - dialog . dismiss ( ) ; <nl> - requestControllerConfig ( ) ; <nl> - } <nl> - } ) . setCancelable ( false ) . show ( ) ; * / <nl> - } <nl> - <nl> - @ Override <nl> - public void onSuccess ( int statusCode , Header [ ] headers , <nl> - String responseBody ) { <nl> - <nl> - String jsonString = responseBody . trim ( ) ; <nl> - <nl> - try { <nl> - if ( parseConfig ( jsonString ) ) { <nl> - scanBluetoothDrive ( ) ; <nl> - showToast ( " Get controller config succeed ! " ) ; <nl> - <nl> - File configFile = new File ( mConfigFilePath ) ; <nl> - FileOutputStream outputStream = new FileOutputStream ( configFile ) ; <nl> - byte [ ] contentString = jsonString . getBytes ( ) ; <nl> - outputStream . write ( contentString , 0 , contentString . length ) ; <nl> - outputStream . flush ( ) ; <nl> - outputStream . close ( ) ; <nl> - return ; <nl> - } <nl> - else { <nl> - String jsonStr = GameControllerUtils . readJsonFile ( mConfigFilePath ) ; <nl> - if ( jsonStr ! = null ) { <nl> - if ( parseConfig ( jsonStr ) ) { <nl> - scanBluetoothDrive ( ) ; <nl> - showToast ( " Get controller config succeed ! " ) ; <nl> - return ; <nl> - } <nl> - } <nl> - } <nl> - } catch ( Exception e1 ) { <nl> - e1 . printStackTrace ( ) ; <nl> - } <nl> - <nl> - if ( mControllerListener ! = null ) { <nl> - mControllerListener . onDownloadConfigFinished ( false ) ; <nl> - } <nl> - } <nl> - } ; <nl> - <nl> - mDownDepsHttpClient . get ( COCOS_CONTROLLER_URL + COCOS_CONTROLLER_CONFIG , configResponseHandler ) ; <nl> - } <nl> - <nl> - private static void showToast ( String message ) { <nl> - Toast . makeText ( sGameControllerActivity , message , Toast . LENGTH_SHORT ) . show ( ) ; <nl> - } <nl> - <nl> - private static void createControllerInstance ( String libFilePath , int driveType ) { <nl> - / / if ( sGameControllerActivity . getGameControllerDelegate ( driveType ) ! = null ) { <nl> - / / return ; <nl> - / / } <nl> - <nl> - File libFile = new File ( libFilePath ) ; <nl> - if ( ! libFile . exists ( ) ) { <nl> - Log . w ( TAG , libFile . toString ( ) + " not exist ! " ) ; <nl> - return ; <nl> - } <nl> - <nl> - DexClassLoader classLoader = null ; <nl> - <nl> - try { <nl> - File dexOutputDir = sGameControllerActivity . getDir ( " dex " , Context . MODE_PRIVATE ) ; <nl> - classLoader = new DexClassLoader ( libFile . getCanonicalPath ( ) , dexOutputDir . 
getCanonicalPath ( ) , <nl> - null , sGameControllerActivity . getClassLoader ( ) ) ; <nl> - } catch ( Exception e1 ) { <nl> - e1 . printStackTrace ( ) ; <nl> - } <nl> - <nl> - try { <nl> - Class < ? > controllerDelegate = null ; <nl> - if ( driveType = = GameControllerActivity . DRIVERTYPE_MOGA ) { <nl> - controllerDelegate = classLoader . loadClass ( " org . cocos2dx . lib . GameControllerMoga " ) ; <nl> - } else if ( driveType = = GameControllerActivity . DRIVERTYPE_NIBIRU ) { <nl> - controllerDelegate = classLoader . loadClass ( " org . cocos2dx . lib . GameControllerNibiru " ) ; <nl> - } else if ( driveType = = GameControllerActivity . DRIVERTYPE_OUYA ) { <nl> - controllerDelegate = classLoader . loadClass ( " org . cocos2dx . lib . GameControllerOuya " ) ; <nl> - } <nl> - <nl> - GameControllerDelegate instance = ( GameControllerDelegate ) controllerDelegate . newInstance ( ) ; <nl> - <nl> - if ( mControllerListener ! = null ) { <nl> - mControllerListener . onConnectController ( ) ; <nl> - } <nl> - sGameControllerActivity . setGameControllerInstance ( instance , driveType ) ; <nl> - if ( driveType = = GameControllerActivity . DRIVERTYPE_NIBIRU ) { <nl> - Method method = controllerDelegate . getDeclaredMethod ( " onResume " ) ; <nl> - method . invoke ( instance ) ; <nl> - } <nl> - } <nl> - catch ( Exception e ) { <nl> - e . printStackTrace ( ) ; <nl> - } <nl> - } <nl> - <nl> - private BroadcastReceiver mAppReceiver = new BroadcastReceiver ( ) { <nl> - <nl> - @ Override <nl> - public void onReceive ( Context context , Intent intent ) { <nl> - String packageName = intent . getDataString ( ) ; <nl> - Log . d ( TAG , " mAppReceiver : " + intent ) ; <nl> - if ( packageName . contains ( NIBIRU_DEP_PACKAGE ) ) { <nl> - createControllerInstance ( mLocalSavePath + mLibAdapterFileName , GameControllerActivity . DRIVERTYPE_NIBIRU ) ; <nl> - } <nl> - else if ( packageName . contains ( MOGA__DEP_PACKAGE ) ) { <nl> - createControllerInstance ( mLocalSavePath + mLibAdapterFileName , GameControllerActivity . DRIVERTYPE_MOGA ) ; <nl> - } <nl> - } <nl> - } ; <nl> - <nl> - private float mOldLeftThumbstickX = 0 . 0f ; <nl> - private float mOldLeftThumbstickY = 0 . 0f ; <nl> - private float mOldRightThumbstickX = 0 . 0f ; <nl> - private float mOldRightThumbstickY = 0 . 0f ; <nl> - <nl> - private float mOldLeftTrigger = 0 . 0f ; <nl> - private float mOldRightTrigger = 0 . 0f ; <nl> - private float mOldThrottle = 0 . 0f ; <nl> - private float mOldBrake = 0 . 0f ; <nl> - <nl> - public boolean dispatchGenericMotionEvent ( MotionEvent event ) { <nl> - boolean handled = false ; <nl> - <nl> - int eventSource = event . getSource ( ) ; <nl> - <nl> - if ( ( ( eventSource & InputDevice . SOURCE_GAMEPAD ) = = InputDevice . SOURCE_GAMEPAD ) <nl> - | | ( ( eventSource & InputDevice . SOURCE_JOYSTICK ) = = InputDevice . SOURCE_JOYSTICK ) ) <nl> - { <nl> - if ( event . getAction ( ) = = MotionEvent . ACTION_MOVE ) { <nl> - int devicedId = event . getDeviceId ( ) ; <nl> - <nl> - float newAXIS_LX = event . getAxisValue ( AXIS_X ) ; <nl> - if ( Float . compare ( newAXIS_LX , mOldLeftThumbstickX ) ! = 0 ) { <nl> - GameControllerAdapter . onAxisEvent ( StandardControllerName , devicedId , GameControllerDelegate . THUMBSTICK_LEFT_X , newAXIS_LX , true ) ; <nl> - mOldLeftThumbstickX = newAXIS_LX ; <nl> - handled = true ; <nl> - } <nl> - <nl> - float newAXIS_LY = event . getAxisValue ( AXIS_Y ) ; <nl> - if ( Float . compare ( newAXIS_LY , mOldLeftThumbstickY ) ! = 0 ) { <nl> - GameControllerAdapter . 
onAxisEvent ( StandardControllerName , devicedId , GameControllerDelegate . THUMBSTICK_LEFT_Y , newAXIS_LY , true ) ; <nl> - mOldLeftThumbstickY = newAXIS_LY ; <nl> - handled = true ; <nl> - } <nl> - <nl> - float newAXIS_RX = event . getAxisValue ( AXIS_Z ) ; <nl> - if ( Float . compare ( newAXIS_RX , mOldRightThumbstickX ) ! = 0 ) { <nl> - GameControllerAdapter . onAxisEvent ( StandardControllerName , devicedId , GameControllerDelegate . THUMBSTICK_RIGHT_X , newAXIS_RX , true ) ; <nl> - mOldRightThumbstickX = newAXIS_RX ; <nl> - handled = true ; <nl> - } <nl> - <nl> - float newAXIS_RY = event . getAxisValue ( AXIS_RZ ) ; <nl> - if ( Float . compare ( newAXIS_RY , mOldRightThumbstickY ) ! = 0 ) { <nl> - GameControllerAdapter . onAxisEvent ( StandardControllerName , devicedId , GameControllerDelegate . THUMBSTICK_RIGHT_Y , newAXIS_RY , true ) ; <nl> - mOldRightThumbstickY = newAXIS_RY ; <nl> - handled = true ; <nl> - } <nl> - <nl> - float newAXIS_LTRIGGER = event . getAxisValue ( AXIS_LTRIGGER ) ; <nl> - if ( Float . compare ( newAXIS_LTRIGGER , mOldLeftTrigger ) ! = 0 ) { <nl> - if ( Float . compare ( newAXIS_LTRIGGER , 0 . 0f ) = = 0 ) { <nl> - GameControllerAdapter . onButtonEvent ( StandardControllerName , devicedId , GameControllerDelegate . BUTTON_LEFT_TRIGGER , false , 0 . 0f , true ) ; <nl> - } else { <nl> - GameControllerAdapter . onButtonEvent ( StandardControllerName , devicedId , GameControllerDelegate . BUTTON_LEFT_TRIGGER , true , newAXIS_LTRIGGER , true ) ; <nl> - } <nl> - mOldLeftTrigger = newAXIS_LTRIGGER ; <nl> - handled = true ; <nl> - } <nl> - <nl> - float newAXIS_RTRIGGER = event . getAxisValue ( AXIS_RTRIGGER ) ; <nl> - if ( Float . compare ( newAXIS_RTRIGGER , mOldRightTrigger ) ! = 0 ) { <nl> - if ( Float . compare ( newAXIS_RTRIGGER , 0 . 0f ) = = 0 ) { <nl> - GameControllerAdapter . onButtonEvent ( StandardControllerName , devicedId , GameControllerDelegate . BUTTON_RIGHT_TRIGGER , false , 0 . 0f , true ) ; <nl> - } else { <nl> - GameControllerAdapter . onButtonEvent ( StandardControllerName , devicedId , GameControllerDelegate . BUTTON_RIGHT_TRIGGER , true , newAXIS_RTRIGGER , true ) ; <nl> - } <nl> - mOldRightTrigger = newAXIS_RTRIGGER ; <nl> - handled = true ; <nl> - } <nl> - <nl> - float newAXIS_BRAKE = event . getAxisValue ( AXIS_BRAKE ) ; <nl> - if ( Float . compare ( newAXIS_BRAKE , mOldBrake ) ! = 0 ) { <nl> - if ( Float . compare ( newAXIS_BRAKE , 0 . 0f ) = = 0 ) { <nl> - GameControllerAdapter . onButtonEvent ( StandardControllerName , devicedId , GameControllerDelegate . BUTTON_LEFT_TRIGGER , false , 0 . 0f , true ) ; <nl> - } else { <nl> - GameControllerAdapter . onButtonEvent ( StandardControllerName , devicedId , GameControllerDelegate . BUTTON_LEFT_TRIGGER , true , newAXIS_BRAKE , true ) ; <nl> - } <nl> - mOldBrake = newAXIS_BRAKE ; <nl> - handled = true ; <nl> - } <nl> - <nl> - float newAXIS_THROTTLE = event . getAxisValue ( AXIS_THROTTLE ) ; <nl> - if ( Float . compare ( newAXIS_THROTTLE , mOldThrottle ) ! = 0 ) { <nl> - if ( Float . compare ( newAXIS_THROTTLE , 0 . 0f ) = = 0 ) { <nl> - GameControllerAdapter . onButtonEvent ( StandardControllerName , devicedId , GameControllerDelegate . BUTTON_RIGHT_TRIGGER , false , 0 . 0f , true ) ; <nl> - } else { <nl> - GameControllerAdapter . onButtonEvent ( StandardControllerName , devicedId , GameControllerDelegate . 
BUTTON_RIGHT_TRIGGER , true , newAXIS_THROTTLE , true ) ; <nl> - } <nl> - mOldThrottle = newAXIS_THROTTLE ; <nl> - handled = true ; <nl> - } <nl> - } <nl> - } <nl> - <nl> - return handled ; <nl> - } <nl> - <nl> - public boolean dispatchKeyEvent ( KeyEvent event ) { <nl> - boolean handled = false ; <nl> - <nl> - int eventSource = event . getSource ( ) ; <nl> - int controllerKey = ControllerKeyMap . get ( event . getKeyCode ( ) ) ; <nl> - <nl> - if ( controllerKey ! = 0 & & ( ( ( eventSource & InputDevice . SOURCE_GAMEPAD ) = = InputDevice . SOURCE_GAMEPAD ) <nl> - | | ( ( eventSource & InputDevice . SOURCE_JOYSTICK ) = = InputDevice . SOURCE_JOYSTICK ) <nl> - | | ( ( eventSource & InputDevice . SOURCE_DPAD ) = = InputDevice . SOURCE_DPAD ) ) ) <nl> - { <nl> - int action = event . getAction ( ) ; <nl> - if ( action = = KeyEvent . ACTION_DOWN ) { <nl> - handled = true ; <nl> - GameControllerAdapter . onButtonEvent ( StandardControllerName , event . getDeviceId ( ) , controllerKey , true , 1 . 0f , false ) ; <nl> - } else if ( action = = KeyEvent . ACTION_UP ) { <nl> - handled = true ; <nl> - GameControllerAdapter . onButtonEvent ( StandardControllerName , event . getDeviceId ( ) , controllerKey , false , 0 . 0f , false ) ; <nl> - } <nl> - } <nl> - <nl> - return handled ; <nl> - } <nl> - } <nl> deleted file mode 100644 <nl> index 65b919bdae46 . . 000000000000 <nl> mmm a / cocos / platform / android / ControllerAutoAdapter / src / org / cocos2dx / lib / inputmanagercompat / InputManagerCompat . java <nl> ppp / dev / null <nl> <nl> - / * <nl> - * Copyright ( C ) 2013 The Android Open Source Project <nl> - * <nl> - * Licensed under the Apache License , Version 2 . 0 ( the " License " ) ; <nl> - * you may not use this file except in compliance with the License . <nl> - * You may obtain a copy of the License at <nl> - * <nl> - * http : / / www . apache . org / licenses / LICENSE - 2 . 0 <nl> - * <nl> - * Unless required by applicable law or agreed to in writing , software <nl> - * distributed under the License is distributed on an " AS IS " BASIS , <nl> - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND , either express or implied . <nl> - * See the License for the specific language governing permissions and <nl> - * limitations under the License . <nl> - * / <nl> - <nl> - package org . cocos2dx . lib . inputmanagercompat ; <nl> - <nl> - import android . content . Context ; <nl> - import android . os . Build ; <nl> - import android . os . Handler ; <nl> - import android . view . InputDevice ; <nl> - import android . view . MotionEvent ; <nl> - <nl> - public interface InputManagerCompat { <nl> - / * * <nl> - * Gets information about the input device with the specified id . <nl> - * <nl> - * @ param id The device id <nl> - * @ return The input device or null if not found <nl> - * / <nl> - public InputDevice getInputDevice ( int id ) ; <nl> - <nl> - / * * <nl> - * Gets the ids of all input devices in the system . <nl> - * <nl> - * @ return The input device ids . <nl> - * / <nl> - public int [ ] getInputDeviceIds ( ) ; <nl> - <nl> - / * * <nl> - * Registers an input device listener to receive notifications about when <nl> - * input devices are added , removed or changed . <nl> - * <nl> - * @ param listener The listener to register . <nl> - * @ param handler The handler on which the listener should be invoked , or <nl> - * null if the listener should be invoked on the calling thread ' s <nl> - * looper . <nl> - * / <nl> - public void registerInputDeviceListener ( InputManagerCompat . 
InputDeviceListener listener , <nl> - Handler handler ) ; <nl> - <nl> - / * * <nl> - * Unregisters an input device listener . <nl> - * <nl> - * @ param listener The listener to unregister . <nl> - * / <nl> - public void unregisterInputDeviceListener ( InputManagerCompat . InputDeviceListener listener ) ; <nl> - <nl> - / * <nl> - * The following three calls are to simulate V16 behavior on pre - Jellybean <nl> - * devices . If you don ' t call them , your callback will never be called <nl> - * pre - API 16 . <nl> - * / <nl> - <nl> - / * * <nl> - * Pass the motion events to the InputManagerCompat . This is used to <nl> - * optimize for polling for controllers . If you do not pass these events in , <nl> - * polling will cause regular object creation . <nl> - * <nl> - * @ param event the motion event from the app <nl> - * / <nl> - public void onGenericMotionEvent ( MotionEvent event ) ; <nl> - <nl> - / * * <nl> - * Tell the V9 input manager that it should stop polling for disconnected <nl> - * devices . You can call this during onPause in your activity , although you <nl> - * might want to call it whenever your game is not active ( or whenever you <nl> - * don ' t care about being notified of new input devices ) <nl> - * / <nl> - public void onPause ( ) ; <nl> - <nl> - / * * <nl> - * Tell the V9 input manager that it should start polling for disconnected <nl> - * devices . You can call this during onResume in your activity , although you <nl> - * might want to call it less often ( only when the gameplay is actually <nl> - * active ) <nl> - * / <nl> - public void onResume ( ) ; <nl> - <nl> - public interface InputDeviceListener { <nl> - / * * <nl> - * Called whenever the input manager detects that a device has been <nl> - * added . This will only be called in the V9 version when a motion event <nl> - * is detected . <nl> - * <nl> - * @ param deviceId The id of the input device that was added . <nl> - * / <nl> - void onInputDeviceAdded ( int deviceId ) ; <nl> - <nl> - / * * <nl> - * Called whenever the properties of an input device have changed since <nl> - * they were last queried . This will not be called for the V9 version of <nl> - * the API . <nl> - * <nl> - * @ param deviceId The id of the input device that changed . <nl> - * / <nl> - void onInputDeviceChanged ( int deviceId ) ; <nl> - <nl> - / * * <nl> - * Called whenever the input manager detects that a device has been <nl> - * removed . For the V9 version , this can take some time depending on the <nl> - * poll rate . <nl> - * <nl> - * @ param deviceId The id of the input device that was removed . <nl> - * / <nl> - void onInputDeviceRemoved ( int deviceId ) ; <nl> - } <nl> - <nl> - / * * <nl> - * Use this to construct a compatible InputManager . <nl> - * / <nl> - public static class Factory { <nl> - <nl> - / * * <nl> - * Constructs and returns a compatible InputManger <nl> - * <nl> - * @ param context the Context that will be used to get the system <nl> - * service from <nl> - * @ return a compatible implementation of InputManager <nl> - * / <nl> - public static InputManagerCompat getInputManager ( Context context ) { <nl> - if ( Build . VERSION . SDK_INT > = Build . VERSION_CODES . JELLY_BEAN ) { <nl> - return new InputManagerV16 ( context ) ; <nl> - } else { <nl> - return new InputManagerV9 ( ) ; <nl> - } <nl> - } <nl> - } <nl> - } <nl> deleted file mode 100644 <nl> index f0d265adb050 . . 000000000000 <nl> mmm a / cocos / platform / android / ControllerAutoAdapter / src / org / cocos2dx / lib / inputmanagercompat / InputManagerV16 . 
java <nl> ppp / dev / null <nl> <nl> - / * <nl> - * Copyright ( C ) 2013 The Android Open Source Project <nl> - * <nl> - * Licensed under the Apache License , Version 2 . 0 ( the " License " ) ; <nl> - * you may not use this file except in compliance with the License . <nl> - * You may obtain a copy of the License at <nl> - * <nl> - * http : / / www . apache . org / licenses / LICENSE - 2 . 0 <nl> - * <nl> - * Unless required by applicable law or agreed to in writing , software <nl> - * distributed under the License is distributed on an " AS IS " BASIS , <nl> - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND , either express or implied . <nl> - * See the License for the specific language governing permissions and <nl> - * limitations under the License . <nl> - * / <nl> - <nl> - package org . cocos2dx . lib . inputmanagercompat ; <nl> - <nl> - import android . annotation . TargetApi ; <nl> - import android . content . Context ; <nl> - import android . hardware . input . InputManager ; <nl> - import android . os . Build ; <nl> - import android . os . Handler ; <nl> - import android . view . InputDevice ; <nl> - import android . view . MotionEvent ; <nl> - <nl> - import java . util . HashMap ; <nl> - import java . util . Map ; <nl> - <nl> - @ TargetApi ( Build . VERSION_CODES . JELLY_BEAN ) <nl> - public class InputManagerV16 implements InputManagerCompat { <nl> - <nl> - private final InputManager mInputManager ; <nl> - private final Map < InputManagerCompat . InputDeviceListener , V16InputDeviceListener > mListeners ; <nl> - <nl> - public InputManagerV16 ( Context context ) { <nl> - mInputManager = ( InputManager ) context . getSystemService ( Context . INPUT_SERVICE ) ; <nl> - mListeners = new HashMap < InputManagerCompat . InputDeviceListener , V16InputDeviceListener > ( ) ; <nl> - } <nl> - <nl> - @ Override <nl> - public InputDevice getInputDevice ( int id ) { <nl> - return mInputManager . getInputDevice ( id ) ; <nl> - } <nl> - <nl> - @ Override <nl> - public int [ ] getInputDeviceIds ( ) { <nl> - return mInputManager . getInputDeviceIds ( ) ; <nl> - } <nl> - <nl> - static class V16InputDeviceListener implements InputManager . InputDeviceListener { <nl> - final InputManagerCompat . InputDeviceListener mIDL ; <nl> - <nl> - public V16InputDeviceListener ( InputDeviceListener idl ) { <nl> - mIDL = idl ; <nl> - } <nl> - <nl> - @ Override <nl> - public void onInputDeviceAdded ( int deviceId ) { <nl> - mIDL . onInputDeviceAdded ( deviceId ) ; <nl> - } <nl> - <nl> - @ Override <nl> - public void onInputDeviceChanged ( int deviceId ) { <nl> - mIDL . onInputDeviceChanged ( deviceId ) ; <nl> - } <nl> - <nl> - @ Override <nl> - public void onInputDeviceRemoved ( int deviceId ) { <nl> - mIDL . onInputDeviceRemoved ( deviceId ) ; <nl> - } <nl> - <nl> - } <nl> - <nl> - @ Override <nl> - public void registerInputDeviceListener ( InputDeviceListener listener , Handler handler ) { <nl> - V16InputDeviceListener v16Listener = new V16InputDeviceListener ( listener ) ; <nl> - mInputManager . registerInputDeviceListener ( v16Listener , handler ) ; <nl> - mListeners . put ( listener , v16Listener ) ; <nl> - } <nl> - <nl> - @ Override <nl> - public void unregisterInputDeviceListener ( InputDeviceListener listener ) { <nl> - V16InputDeviceListener curListener = mListeners . remove ( listener ) ; <nl> - if ( null ! = curListener ) <nl> - { <nl> - mInputManager . 
unregisterInputDeviceListener ( curListener ) ; <nl> - } <nl> - <nl> - } <nl> - <nl> - @ Override <nl> - public void onGenericMotionEvent ( MotionEvent event ) { <nl> - / / unused in V16 <nl> - } <nl> - <nl> - @ Override <nl> - public void onPause ( ) { <nl> - / / unused in V16 <nl> - } <nl> - <nl> - @ Override <nl> - public void onResume ( ) { <nl> - / / unused in V16 <nl> - } <nl> - <nl> - } <nl> deleted file mode 100644 <nl> index abb47a8719c8 . . 000000000000 <nl> mmm a / cocos / platform / android / ControllerAutoAdapter / src / org / cocos2dx / lib / inputmanagercompat / InputManagerV9 . java <nl> ppp / dev / null <nl> <nl> - / * <nl> - * Copyright ( C ) 2013 The Android Open Source Project <nl> - * <nl> - * Licensed under the Apache License , Version 2 . 0 ( the " License " ) ; <nl> - * you may not use this file except in compliance with the License . <nl> - * You may obtain a copy of the License at <nl> - * <nl> - * http : / / www . apache . org / licenses / LICENSE - 2 . 0 <nl> - * <nl> - * Unless required by applicable law or agreed to in writing , software <nl> - * distributed under the License is distributed on an " AS IS " BASIS , <nl> - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND , either express or implied . <nl> - * See the License for the specific language governing permissions and <nl> - * limitations under the License . <nl> - * / <nl> - <nl> - package org . cocos2dx . lib . inputmanagercompat ; <nl> - <nl> - import android . os . Handler ; <nl> - import android . os . Message ; <nl> - import android . os . SystemClock ; <nl> - import android . util . Log ; <nl> - import android . util . SparseArray ; <nl> - import android . view . InputDevice ; <nl> - import android . view . MotionEvent ; <nl> - <nl> - import java . lang . ref . WeakReference ; <nl> - import java . util . ArrayDeque ; <nl> - import java . util . HashMap ; <nl> - import java . util . Map ; <nl> - import java . util . Queue ; <nl> - <nl> - public class InputManagerV9 implements InputManagerCompat { <nl> - private static final String LOG_TAG = " InputManagerV9 " ; <nl> - private static final int MESSAGE_TEST_FOR_DISCONNECT = 101 ; <nl> - private static final long CHECK_ELAPSED_TIME = 3000L ; <nl> - <nl> - private static final int ON_DEVICE_ADDED = 0 ; <nl> - private static final int ON_DEVICE_CHANGED = 1 ; <nl> - private static final int ON_DEVICE_REMOVED = 2 ; <nl> - <nl> - private final SparseArray < long [ ] > mDevices ; <nl> - private final Map < InputDeviceListener , Handler > mListeners ; <nl> - private final Handler mDefaultHandler ; <nl> - <nl> - private static class PollingMessageHandler extends Handler { <nl> - private final WeakReference < InputManagerV9 > mInputManager ; <nl> - <nl> - PollingMessageHandler ( InputManagerV9 im ) { <nl> - mInputManager = new WeakReference < InputManagerV9 > ( im ) ; <nl> - } <nl> - <nl> - @ Override <nl> - public void handleMessage ( Message msg ) { <nl> - super . handleMessage ( msg ) ; <nl> - switch ( msg . what ) { <nl> - case MESSAGE_TEST_FOR_DISCONNECT : <nl> - InputManagerV9 imv = mInputManager . get ( ) ; <nl> - if ( null ! = imv ) { <nl> - long time = SystemClock . elapsedRealtime ( ) ; <nl> - int size = imv . mDevices . size ( ) ; <nl> - for ( int i = 0 ; i < size ; i + + ) { <nl> - long [ ] lastContact = imv . mDevices . valueAt ( i ) ; <nl> - if ( null ! = lastContact ) { <nl> - if ( time - lastContact [ 0 ] > CHECK_ELAPSED_TIME ) { <nl> - / / check to see if the device has been <nl> - / / disconnected <nl> - int id = imv . mDevices . 
keyAt ( i ) ; <nl> - if ( null = = InputDevice . getDevice ( id ) ) { <nl> - / / disconnected ! <nl> - imv . notifyListeners ( ON_DEVICE_REMOVED , id ) ; <nl> - imv . mDevices . remove ( id ) ; <nl> - } else { <nl> - lastContact [ 0 ] = time ; <nl> - } <nl> - } <nl> - } <nl> - } <nl> - sendEmptyMessageDelayed ( MESSAGE_TEST_FOR_DISCONNECT , <nl> - CHECK_ELAPSED_TIME ) ; <nl> - } <nl> - break ; <nl> - } <nl> - } <nl> - <nl> - } <nl> - <nl> - public InputManagerV9 ( ) { <nl> - mDevices = new SparseArray < long [ ] > ( ) ; <nl> - mListeners = new HashMap < InputDeviceListener , Handler > ( ) ; <nl> - mDefaultHandler = new PollingMessageHandler ( this ) ; <nl> - / / as a side - effect , populates our collection of watched <nl> - / / input devices <nl> - getInputDeviceIds ( ) ; <nl> - } <nl> - <nl> - @ Override <nl> - public InputDevice getInputDevice ( int id ) { <nl> - return InputDevice . getDevice ( id ) ; <nl> - } <nl> - <nl> - @ Override <nl> - public int [ ] getInputDeviceIds ( ) { <nl> - / / add any hitherto unknown devices to our <nl> - / / collection of watched input devices <nl> - int [ ] activeDevices = InputDevice . getDeviceIds ( ) ; <nl> - long time = SystemClock . elapsedRealtime ( ) ; <nl> - for ( int id : activeDevices ) { <nl> - long [ ] lastContact = mDevices . get ( id ) ; <nl> - if ( null = = lastContact ) { <nl> - / / we have a new device <nl> - mDevices . put ( id , new long [ ] { time } ) ; <nl> - } <nl> - } <nl> - return activeDevices ; <nl> - } <nl> - <nl> - @ Override <nl> - public void registerInputDeviceListener ( InputDeviceListener listener , Handler handler ) { <nl> - mListeners . remove ( listener ) ; <nl> - if ( handler = = null ) { <nl> - handler = mDefaultHandler ; <nl> - } <nl> - mListeners . put ( listener , handler ) ; <nl> - } <nl> - <nl> - @ Override <nl> - public void unregisterInputDeviceListener ( InputDeviceListener listener ) { <nl> - mListeners . remove ( listener ) ; <nl> - } <nl> - <nl> - private void notifyListeners ( int why , int deviceId ) { <nl> - / / the state of some device has changed <nl> - if ( ! mListeners . isEmpty ( ) ) { <nl> - / / yes . . . this will cause an object to get created . . . hopefully <nl> - / / it won ' t happen very often <nl> - for ( InputDeviceListener listener : mListeners . keySet ( ) ) { <nl> - Handler handler = mListeners . get ( listener ) ; <nl> - DeviceEvent odc = DeviceEvent . getDeviceEvent ( why , deviceId , listener ) ; <nl> - handler . post ( odc ) ; <nl> - } <nl> - } <nl> - } <nl> - <nl> - private static class DeviceEvent implements Runnable { <nl> - private int mMessageType ; <nl> - private int mId ; <nl> - private InputDeviceListener mListener ; <nl> - private static Queue < DeviceEvent > sEventQueue = new ArrayDeque < DeviceEvent > ( ) ; <nl> - <nl> - private DeviceEvent ( ) { <nl> - } <nl> - <nl> - static DeviceEvent getDeviceEvent ( int messageType , int id , <nl> - InputDeviceListener listener ) { <nl> - DeviceEvent curChanged = sEventQueue . poll ( ) ; <nl> - if ( null = = curChanged ) { <nl> - curChanged = new DeviceEvent ( ) ; <nl> - } <nl> - curChanged . mMessageType = messageType ; <nl> - curChanged . mId = id ; <nl> - curChanged . mListener = listener ; <nl> - return curChanged ; <nl> - } <nl> - <nl> - @ Override <nl> - public void run ( ) { <nl> - switch ( mMessageType ) { <nl> - case ON_DEVICE_ADDED : <nl> - mListener . onInputDeviceAdded ( mId ) ; <nl> - break ; <nl> - case ON_DEVICE_CHANGED : <nl> - mListener . 
onInputDeviceChanged ( mId ) ; <nl> - break ; <nl> - case ON_DEVICE_REMOVED : <nl> - mListener . onInputDeviceRemoved ( mId ) ; <nl> - break ; <nl> - default : <nl> - Log . e ( LOG_TAG , " Unknown Message Type " ) ; <nl> - break ; <nl> - } <nl> - / / dump this runnable back in the queue <nl> - sEventQueue . offer ( this ) ; <nl> - } <nl> - } <nl> - <nl> - @ Override <nl> - public void onGenericMotionEvent ( MotionEvent event ) { <nl> - / / detect new devices <nl> - int id = event . getDeviceId ( ) ; <nl> - long [ ] timeArray = mDevices . get ( id ) ; <nl> - if ( null = = timeArray ) { <nl> - notifyListeners ( ON_DEVICE_ADDED , id ) ; <nl> - timeArray = new long [ 1 ] ; <nl> - mDevices . put ( id , timeArray ) ; <nl> - } <nl> - long time = SystemClock . elapsedRealtime ( ) ; <nl> - timeArray [ 0 ] = time ; <nl> - } <nl> - <nl> - @ Override <nl> - public void onPause ( ) { <nl> - mDefaultHandler . removeMessages ( MESSAGE_TEST_FOR_DISCONNECT ) ; <nl> - } <nl> - <nl> - @ Override <nl> - public void onResume ( ) { <nl> - mDefaultHandler . sendEmptyMessage ( MESSAGE_TEST_FOR_DISCONNECT ) ; <nl> - } <nl> - <nl> - } <nl> | remove ControllerAutoAdapter project . | cocos2d/cocos2d-x | 097f782c3826e49dfaf64b6e76b95908490253e5 | 2014-07-02T07:17:29Z |
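A note on the input pattern in the removed cocos2d-x helper above: dispatchGenericMotionEvent forwards a thumbstick axis only when its value actually changed since the previous MotionEvent, using exact float comparison. A minimal C++ sketch of that gating, with hypothetical names (onAxisEvent stands in for the GameControllerAdapter.onAxisEvent bridge call; AxisState is not a cocos2d-x type):

#include <cstdio>

// Stand-in for the JNI bridge call in the removed Java code.
static void onAxisEvent(int deviceId, int axisId, float value) {
    std::printf("device %d, axis %d -> %.3f\n", deviceId, axisId, value);
}

struct AxisState {
    float last = 0.0f;  // value reported for this axis on the previous event

    // Dispatch only on change, mirroring Float.compare(newValue, old) != 0.
    bool update(int deviceId, int axisId, float value) {
        if (value == last)
            return false;   // unchanged: the event is not marked handled
        onAxisEvent(deviceId, axisId, value);
        last = value;
        return true;        // handled, as dispatchGenericMotionEvent reports
    }
};

int main() {
    AxisState leftX;
    leftX.update(0, 0, 0.5f);  // dispatched
    leftX.update(0, 0, 0.5f);  // suppressed: no change
    leftX.update(0, 0, 0.0f);  // dispatched again (stick returned to center)
}

This per-axis caching is what keeps a stream of identical ACTION_MOVE samples from flooding the engine with duplicate events.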
mmm a / lib / Sema / MiscDiagnostics . cpp <nl> ppp b / lib / Sema / MiscDiagnostics . cpp <nl> static void diagSyntacticUseRestrictions ( TypeChecker & TC , const Expr * E , <nl> / / / Keep track of InOutExprs <nl> SmallPtrSet < InOutExpr * , 2 > AcceptableInOutExprs ; <nl> <nl> + / / / Keep track of the arguments to CallExprs . <nl> + SmallPtrSet < Expr * , 2 > CallArgs ; <nl> + <nl> bool IsExprStmt ; <nl> <nl> public : <nl> static void diagSyntacticUseRestrictions ( TypeChecker & TC , const Expr * E , <nl> while ( auto Conv = dyn_cast < ImplicitConversionExpr > ( Base ) ) <nl> Base = Conv - > getSubExpr ( ) ; <nl> <nl> + / / Record call arguments . <nl> + if ( auto Call = dyn_cast < CallExpr > ( Base ) ) { <nl> + CallArgs . insert ( Call - > getArg ( ) ) ; <nl> + } <nl> + <nl> if ( auto * DRE = dyn_cast < DeclRefExpr > ( Base ) ) { <nl> / / Verify metatype uses . <nl> if ( isa < TypeDecl > ( DRE - > getDecl ( ) ) ) { <nl> static void diagSyntacticUseRestrictions ( TypeChecker & TC , const Expr * E , <nl> isa < OpenExistentialExpr > ( ParentExpr ) ) { <nl> return ; <nl> } <nl> + <nl> + / / FIXME : As a specific hack , we white - list parenthesized <nl> + / / expressions that are call arguments . This allows some <nl> + / / ill - formed code to omit " . self " due to a historical bug . We <nl> + / / keep that code working until we have a decision on SE - 0090 , <nl> + / / rather than potentially breaking code twice . <nl> + if ( isa < ParenExpr > ( ParentExpr ) & & CallArgs . count ( ParentExpr ) > 0 ) <nl> + return ; <nl> } <nl> <nl> / / Is this a protocol metatype ? <nl> mmm a / lib / Sema / TypeCheckConstraints . cpp <nl> ppp b / lib / Sema / TypeCheckConstraints . cpp <nl> namespace { <nl> / / / node when visited . <nl> Expr * UnresolvedCtorRebindTarget = nullptr ; <nl> <nl> + / / / The expressions that are direct arguments of call expressions . <nl> + llvm : : SmallPtrSet < Expr * , 4 > CallArgs ; <nl> + <nl> public : <nl> PreCheckExpression ( TypeChecker & tc , DeclContext * dc ) : TC ( tc ) , DC ( dc ) { } <nl> <nl> bool walkToClosureExprPre ( ClosureExpr * expr ) ; <nl> <nl> std : : pair < bool , Expr * > walkToExprPre ( Expr * expr ) override { <nl> + / / If this is a call , record the argument expression . <nl> + if ( auto call = dyn_cast < CallExpr > ( expr ) ) { <nl> + CallArgs . insert ( call - > getArg ( ) ) ; <nl> + } <nl> + <nl> + / / If this is an unresolved member with a call argument ( e . g . , <nl> + / / . some ( x ) ) , record the argument expression . <nl> + if ( auto unresolvedMember = dyn_cast < UnresolvedMemberExpr > ( expr ) ) { <nl> + if ( auto arg = unresolvedMember - > getArgument ( ) ) <nl> + CallArgs . insert ( arg ) ; <nl> + } <nl> + <nl> / / Local function used to finish up processing before returning . Every <nl> / / return site should call through here . <nl> auto finish = [ & ] ( bool recursive , Expr * expr ) { <nl> bool PreCheckExpression : : walkToClosureExprPre ( ClosureExpr * closure ) { <nl> / / / as expressions due to the parser not knowing which identifiers are <nl> / / / type names . <nl> TypeExpr * PreCheckExpression : : simplifyTypeExpr ( Expr * E ) { <nl> + / / Don ' t try simplifying a call argument , because we don ' t want to <nl> + / / simplify away the required ParenExpr / TupleExpr . <nl> + if ( CallArgs . count ( E ) > 0 ) return nullptr ; <nl> <nl> / / Fold ' T . Type ' or ' T . Protocol ' into a metatype when T is a TypeExpr . 
<nl> if ( auto * MRE = dyn_cast < UnresolvedDotExpr > ( E ) ) { <nl> | [ Type checker ] Don ' t fold the parentheses around call arguments into a TypeExpr . | apple/swift | 4a60b6cbf4fa2a410aebf7f725865395c99ef6ca | 2016-06-03T20:44:09Z |
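Both hunks above rely on the same bookkeeping: during a pre-order AST walk, every direct argument of a CallExpr is recorded in a set, and later checks consult that set before diagnosing or folding a ParenExpr. A reduced C++ sketch of the idea, with a toy Expr hierarchy standing in for the real Swift AST (the compiler itself uses llvm::SmallPtrSet rather than the std container used here):

#include <unordered_set>

struct Expr { virtual ~Expr() = default; };
struct ParenExpr : Expr { Expr *sub = nullptr; };
struct CallExpr  : Expr { Expr *arg = nullptr; };

class Walker {
    // Direct call arguments seen so far during the walk.
    std::unordered_set<const Expr *> CallArgs;

public:
    // Pre-order hook: remember each call's argument expression.
    void walkToExprPre(Expr *E) {
        if (auto *call = dynamic_cast<CallExpr *>(E))
            CallArgs.insert(call->arg);
    }

    // Later query: a paren that is a recorded call argument must not be
    // simplified into a TypeExpr, or the required ParenExpr around the
    // argument list would be folded away.
    bool maySimplify(const ParenExpr *paren) const {
        return CallArgs.count(paren) == 0;
    }
};

The same set doubles as the white-list in the diagnostics pass, which is why both files in the diff grow a parallel CallArgs member.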
mmm a / src / brpc / server . cpp <nl> ppp b / src / brpc / server . cpp <nl> int Server : : AddServiceInternal ( google : : protobuf : : Service * service , <nl> } <nl> if ( sp = = NULL ) { <nl> ServiceProperty ss = <nl> - { false , SERVER_DOESNT_OWN_SERVICE , NULL , m } ; <nl> + { is_builtin_service , SERVER_DOESNT_OWN_SERVICE , NULL , m } ; <nl> _fullname_service_map [ svc_name ] = ss ; <nl> _service_map [ svc_name ] = ss ; <nl> + + _virtual_service_count ; <nl> | Fix bug in inserting RestfulMap service | apache/incubator-brpc | 981310aa5dbf368f9a09bf2222a050532ecbbee3 | 2019-06-17T06:24:17Z |
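The one-line fix above is a positional-aggregate-initialization slip: the first field of the property struct had been hard-coded to false instead of taking the caller's flag. A sketch of the hazard, with a simplified stand-in for brpc's ServiceProperty (field types here are illustrative, not the real ones):

struct ServiceProperty {
    bool  is_builtin_service;  // first positional member: easy to mis-fill
    int   ownership;
    void *service;
    void *restful_map;
};

void AddServiceInternal(bool is_builtin_service, void *method_map) {
    // Before the fix: every RestfulMap service was silently non-builtin.
    // ServiceProperty ss = { false, /*ownership=*/0, nullptr, method_map };

    // After the fix: the caller's flag is propagated.
    ServiceProperty ss = { is_builtin_service, /*ownership=*/0, nullptr, method_map };
    (void)ss;  // would be inserted into the fullname/service maps here
}

With C++20 designated initializers ( { .is_builtin_service = ... } ), the intent is explicit at the call site and this kind of positional mix-up is much harder to make.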
mmm a / tensorflow / lite / g3doc / guide / build_ios . md <nl> ppp b / tensorflow / lite / g3doc / guide / build_ios . md <nl> <nl> - <nl> # Build TensorFlow Lite for iOS <nl> <nl> - This document describes how to build TensorFlow Lite iOS library . If you just <nl> - want to use it , the easiest way is using the TensorFlow Lite CocoaPod releases . <nl> - See [ TensorFlow Lite iOS Demo ] ( ios . md ) for examples . <nl> + This document describes how to build TensorFlow Lite iOS library on your own . <nl> + Normally , you do not need to locally build TensorFlow Lite iOS library . If you <nl> + just want to use it , the easiest way is using the prebuilt stable or nightly <nl> + releases of the TensorFlow Lite CocoaPods . See [ iOS quickstart ] ( ios . md ) for more <nl> + details on how to use them in your iOS projects . <nl> + <nl> + # # Building locally <nl> <nl> + In some cases , you might wish to use a local build of TensorFlow Lite , for <nl> + example when you want to make local changes to TensorFlow Lite and test those <nl> + changes in your iOS app . To create a universal iOS framework for TensorFlow Lite <nl> + locally , you need to build it using Bazel on a macOS machine . <nl> <nl> - # # Building <nl> + # # # Install Xcode <nl> <nl> - To create a universal iOS library for TensorFlow Lite , you need to build it <nl> - using Xcode ' s command line tools on a MacOS machine . If you have not already , <nl> - you will need to install Xcode 8 or later and the tools using ` xcode - select ` : <nl> + If you have not already , you will need to install Xcode 8 or later and the tools <nl> + using ` xcode - select ` : <nl> <nl> - ` ` ` bash <nl> + ` ` ` sh <nl> xcode - select - - install <nl> ` ` ` <nl> <nl> - If this is a new install , you will need to run XCode once to agree to the <nl> - license before continuing . <nl> + If this is a new install , you will need to accept the license agreement for all <nl> + users with the following command : <nl> <nl> - ( You will also need to have [ Homebrew ] ( http : / / brew . sh / ) installed . ) <nl> + ` ` ` sh <nl> + sudo xcodebuild - license accept <nl> + ` ` ` <nl> <nl> - Then install <nl> - [ automake ] ( https : / / en . wikipedia . org / wiki / Automake ) / [ libtool ] ( https : / / en . wikipedia . org / wiki / GNU_Libtool ) : <nl> + # # # Install Bazel <nl> <nl> - ` ` ` bash <nl> - brew install automake <nl> - brew install libtool <nl> - ` ` ` <nl> - If you get an error where either automake or libtool install but do not link correctly , you ' ll first need to : <nl> - ` ` ` bash <nl> - sudo chown - R $ ( whoami ) / usr / local / * <nl> - ` ` ` <nl> - Then follow the instructions to perform the linking : <nl> - ` ` ` bash <nl> - brew link automake <nl> - brew link libtool <nl> - ` ` ` <nl> + Bazel is the primary build system for TensorFlow . Install Bazel as per the <nl> + [ instructions on the Bazel website ] [ bazel - install ] . Make sure to choose a <nl> + version between ` _TF_MIN_BAZEL_VERSION ` and ` _TF_MAX_BAZEL_VERSION ` in <nl> + [ ` configure . py ` file ] [ configure - py ] at the root of ` tensorflow ` repository . <nl> <nl> - Then you need to run a shell script to download the dependencies you need : <nl> + # # # Configure WORKSPACE and . bazelrc <nl> <nl> - ` ` ` bash <nl> - tensorflow / lite / tools / make / download_dependencies . sh <nl> - ` ` ` <nl> + Run the ` . 
/ configure ` script in the root TensorFlow checkout directory , and <nl> + answer " Yes " when the script asks if you wish to build TensorFlow with iOS <nl> + support . <nl> + <nl> + # # # Build TensorFlowLiteC framework <nl> + <nl> + Note : This step is not necessary if ( 1 ) you are using Bazel for your app , or ( 2 ) <nl> + you only want to test local changes to the Swift or Objective - C APIs . In these <nl> + cases , skip to the [ Use in your own application ] ( # use_in_your_own_application ) <nl> + section below . <nl> <nl> - This will fetch copies of libraries and data from the web and install them in <nl> - ` tensorflow / lite / downloads ` . <nl> + Once Bazel is properly configured with iOS support , you can build the <nl> + ` TensorFlowLiteC ` framework with the following command . <nl> <nl> - With all of the dependencies set up , you can now build the library for all five <nl> - supported architectures on iOS : <nl> + ` ` ` sh <nl> + bazel build - - config = ios_fat - c opt \ <nl> + / / tensorflow / lite / experimental / ios : TensorFlowLiteC_framework <nl> + ` ` ` <nl> <nl> - ` ` ` bash <nl> - tensorflow / lite / tools / make / build_ios_universal_lib . sh <nl> + This command will generate the ` TensorFlowLiteC_framework . zip ` file under <nl> + ` bazel - bin / tensorflow / lite / experimental / ios / ` directory under your TensorFlow <nl> + root directory . By default , the generated framework contains a " fat " binary , <nl> + containing armv7 , arm64 , and x86_64 ( but no i386 ) . To see the full list of build <nl> + flags used when you specify ` - - config = ios_fat ` , please refer to the iOS configs <nl> + section in the [ ` . bazelrc ` file ] [ bazelrc ] . <nl> + <nl> + # # Use in your own application <nl> + <nl> + # # # CocoaPods developers <nl> + <nl> + There are three CocoaPods for TensorFlow Lite : <nl> + <nl> + * ` TensorFlowLiteSwift ` : Provides the Swift APIs for TensorFlow Lite . <nl> + * ` TensorFlowLiteObjC ` : Provides the Objective - C APIs for TensorFlow Lite . <nl> + * ` TensorFlowLiteC ` : Common base pod , which embeds the TensorFlow Lite core <nl> + runtime and exposes the base C APIs used by the above two pods . Not meant to <nl> + be directly used by users . <nl> + <nl> + As a developer , you should choose either ` TensorFlowLiteSwift ` or <nl> + ` TensorFlowLiteObjC ` pod based on the language in which your app is written , but <nl> + not both . The exact steps for using local builds of TensorFlow Lite differ , <nl> + depending on which exact part you would like to build . <nl> + <nl> + # # # # Using local Swift or Objective - C APIs <nl> + <nl> + If you are using CocoaPods , and only wish to test some local changes to the <nl> + TensorFlow Lite ' s [ Swift APIs ] [ swift - api ] or [ Objective - C APIs ] [ objc - api ] , <nl> + follow the steps here . <nl> + <nl> + 1 . Make changes to the Swift or Objective - C APIs in your ` tensorflow ` checkout . <nl> + <nl> + 1 . Open the ` TensorFlowLite ( Swift | ObjC ) . podspec ` file , and update this line : \ <nl> + ` s . dependency ' TensorFlowLiteC ' , " # { s . version } " ` \ <nl> + to be : \ <nl> + ` s . dependency ' TensorFlowLiteC ' , " ~ > 0 . 0 . 1 - nightly " ` \ <nl> + This is to ensure that you are building your Swift or Objective - C APIs <nl> + against the latest available nightly version of ` TensorFlowLiteC ` APIs <nl> + ( built every night between 1 - 4AM Pacific Time ) rather than the stable <nl> + version , which may be outdated compared to your local ` tensorflow ` checkout . 
<nl> + Alternatively , you could choose to publish your own version of <nl> + ` TensorFlowLiteC ` and use that version ( see <nl> + [ Using local TensorFlow Lite core ] ( # using_local_tensorflow_lite_core ) <nl> + section below ) . <nl> + <nl> + 1 . In the ` Podfile ` of your iOS project , change the dependency as follows to <nl> + point to the local path to your ` tensorflow ` root directory . \ <nl> + For Swift : \ <nl> + ` pod ' TensorFlowLiteSwift ' , : path = > ' < your_tensorflow_root_dir > ' ` \ <nl> + For Objective - C : \ <nl> + ` pod ' TensorFlowLiteObjC ' , : path = > ' < your_tensorflow_root_dir > ' ` <nl> + <nl> + 1 . Update your pod installation from your iOS project root directory . \ <nl> + ` $ pod update ` <nl> + <nl> + 1 . Reopen the generated workspace ( ` < project > . xcworkspace ` ) and rebuild your <nl> + app within Xcode . <nl> + <nl> + # # # # Using local TensorFlow Lite core <nl> + <nl> + You can set up a private CocoaPods specs repository , and publish your custom <nl> + ` TensorFlowLiteC ` framework to your private repo . You can copy this [ podspec <nl> + file ] [ tflite - podspec ] and modify a few values : <nl> + <nl> + ` ` ` ruby <nl> + . . . <nl> + s . version = < your_desired_version_tag > <nl> + . . . <nl> + s . source = { : http = > " file : / / < path_to_TensorFlowLiteC_framework . zip > " } <nl> + . . . <nl> + s . vendored_frameworks = ' TensorFlowLiteC . framework ' <nl> + . . . <nl> ` ` ` <nl> <nl> - Under the hood this uses a makefile in ` tensorflow / lite ` to build the <nl> - different versions of the library , followed by a call to ` lipo ` to bundle them <nl> - into a universal file containing armv7 , armv7s , arm64 , i386 , and x86_64 <nl> - architectures . The resulting library is in <nl> - ` tensorflow / lite / tools / make / gen / lib / libtensorflow - lite . a ` . <nl> + After creating your own ` TensorFlowLiteC . podspec ` file , you can follow the <nl> + [ instructions on using private CocoaPods ] [ private - cocoapods ] to use it in your <nl> + own project . You can also modify the ` TensorFlowLite ( Swift | ObjC ) . podspec ` to <nl> + point to your custom ` TensorFlowLiteC ` pod and use either Swift or Objective - C <nl> + pod in your app project . <nl> <nl> - If you get an error such as ` no such file or directory : ' x86_64 ' ` when running <nl> - ` build_ios_universal_lib . sh ` : open Xcode > Preferences > Locations , and ensure <nl> - a value is selected in the " Command Line Tools " dropdown . <nl> + # # # Bazel developers <nl> <nl> - # # Using in your own application <nl> + If you are using Bazel as the main build tool , you can simply add <nl> + ` TensorFlowLite ` dependency to your target in your ` BUILD ` file . <nl> <nl> - You ' ll need to update various settings in your app to link against TensorFlow <nl> - Lite . You can view them in the example project at <nl> - ` tensorflow / lite / examples / ios / simple / simple . xcodeproj ` but here ' s a full <nl> - rundown : <nl> + For Swift : <nl> <nl> - - You ' ll need to add the library at <nl> - ` tensorflow / lite / gen / lib / libtensorflow - lite . a ` to your linking build <nl> - stage , and in Search Paths add ` tensorflow / lite / gen / lib ` to the <nl> - Library Search Paths setting . 
<nl> + ` ` ` python <nl> + swift_library ( <nl> + deps = [ <nl> + " / / tensorflow / lite / experimental / swift : TensorFlowLite " , <nl> + ] , <nl> + ) <nl> + ` ` ` <nl> <nl> - - The _Header Search_ paths needs to contain : <nl> + For Objective - C : <nl> <nl> - - the root folder of tensorflow , <nl> - - ` tensorflow / lite / downloads ` <nl> - - ` tensorflow / lite / downloads / flatbuffers / include ` <nl> + ` ` ` python <nl> + objc_library ( <nl> + deps = [ <nl> + " / / tensorflow / lite / experimental / objc : TensorFlowLite " , <nl> + ] , <nl> + ) <nl> + ` ` ` <nl> <nl> - - C + + 11 support ( or later ) should be enabled by setting ` C + + Language Dialect ` <nl> - to ` GNU + + 11 ` ( or ` GNU + + 14 ` ) , and ` C + + Standard Library ` to ` libc + + ` . <nl> + When you build your app project , any changes to the TensorFlow Lite library will <nl> + be picked up and built into your app . <nl> + <nl> + # # # Modify Xcode project settings directly <nl> + <nl> + It is highly recommended to use CocoaPods or Bazel for adding TensorFlow Lite <nl> + dependency into your project . If you still wish to add ` TensorFlowLiteC ` <nl> + framework manually , you ' ll need to add the ` TensorFlowLiteC ` framework as an <nl> + embedded framework to your application project . Unzip the <nl> + ` TensorFlowLiteC_framework . zip ` generated from the above build to get the <nl> + ` TensorFlowLiteC . framework ` directory . This directory is the actual framework <nl> + which Xcode can understand . <nl> + <nl> + Once you ' ve prepared the ` TensorFlowLiteC . framework ` , first you need to add it <nl> + as an embedded binary to your app target . The exact project settings section for <nl> + this may differ depending on your Xcode version . <nl> + <nl> + * Xcode 11 : Go to the ' General ' tab of the project editor for your app target , <nl> + and add the ` TensorFlowLiteC . framework ` under ' Frameworks , Libraries , and <nl> + Embedded Content ' section . <nl> + * Xcode 10 and below : Go to the ' General ' tab of the project editor for your <nl> + app target , and add the ` TensorFlowLiteC . framework ` under ' Embedded <nl> + Binaries ' . The framework should also be added automatically under ' Linked <nl> + Frameworks and Libraries ' section . <nl> + <nl> + When you add the framework as an embedded binary , Xcode would also update the <nl> + ' Framework Search Paths ' entry under ' Build Settings ' tab to include the parent <nl> + directory of your framework . In case this does not happen automatically , you <nl> + should manually add the parent directory of the ` TensorFlowLiteC . framework ` <nl> + directory . <nl> + <nl> + Once these two settings are done , you should be able to import and call the <nl> + TensorFlow Lite ' s C API , defined by the header files under <nl> + ` TensorFlowLiteC . framework / Headers ` directory . <nl> + <nl> + [ bazel - install ] : https : / / docs . bazel . build / versions / master / install - os - x . html <nl> + [ bazelrc ] : https : / / github . com / tensorflow / tensorflow / blob / master / . bazelrc <nl> + [ configure - py ] : https : / / github . com / tensorflow / tensorflow / blob / master / configure . py <nl> + [ objc - api ] : https : / / github . com / tensorflow / tensorflow / tree / master / tensorflow / lite / experimental / objc <nl> + [ private - cocoapods ] : https : / / guides . cocoapods . org / making / private - cocoapods . html <nl> + [ swift - api ] : https : / / github . 
com / tensorflow / tensorflow / tree / master / tensorflow / lite / experimental / swift <nl>
+ [ tflitec - podspec ] : https : / / github . com / tensorflow / tensorflow / blob / master / tensorflow / lite / experimental / ios / TensorFlowLiteC . podspec <nl>
mmm a / tensorflow / lite / g3doc / guide / ios . md <nl>
ppp b / tensorflow / lite / g3doc / guide / ios . md <nl>
pod ' TensorFlowLiteSwift ' <nl>
pod ' TensorFlowLiteObjC ' <nl>
` ` ` <nl>
 <nl>
+ # # # # Specifying versions <nl>
+ <nl>
+ There are stable releases and nightly releases available for both <nl>
+ ` TensorFlowLiteSwift ` and ` TensorFlowLiteObjC ` pods . If you do not specify a <nl>
+ version constraint as in the above examples , CocoaPods will pull the latest <nl>
+ stable release by default . <nl>
+ <nl>
+ You can also specify a version constraint . For example , if you wish to depend on <nl>
+ version 2 . 0 . 0 , you can write the dependency as : <nl>
+ <nl>
+ ` ` ` ruby <nl>
+ pod ' TensorFlowLiteSwift ' , ' ~ > 2 . 0 . 0 ' <nl>
+ ` ` ` <nl>
+ <nl>
+ This will ensure the latest available 2 . x . y version of ` TensorFlowLiteSwift ` pod <nl>
+ is used in your app . Alternatively , if you want to depend on the nightly builds , <nl>
+ you can write : <nl>
+ <nl>
+ ` ` ` ruby <nl>
+ pod ' TensorFlowLiteSwift ' , ' 0 . 0 . 1 - nightly ' <nl>
+ ` ` ` <nl>
+ <nl>
+ This will allow you to use the latest features added to TensorFlow Lite . Note <nl>
+ that once the ` Podfile . lock ` file is created when you run the ` pod install ` command <nl>
+ for the first time , the nightly library version will be locked at the current <nl>
+ date ' s version . If you wish to update the nightly library to a newer one , you <nl>
+ should run the ` pod update ` command . <nl>
+ <nl>
+ For more information on different ways of specifying version constraints , see <nl>
+ [ Specifying pod versions ] ( https : / / guides . cocoapods . org / using / the - podfile . html # specifying - pod - versions ) . <nl>
+ <nl>
# # # Bazel developers <nl>
 <nl>
In your ` BUILD ` file , add the ` TensorFlowLite ` dependency to your target . <nl>
| Update TFLite iOS build instructions to use Bazel | tensorflow/tensorflow | 169493e31608a9146c12da04b391443f4fec1cb5 | 2019-10-17T14:26:49Z |
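As a concrete companion to the framework-embedding steps above, here is a minimal inference sketch against the C API bundled in TensorFlowLiteC.framework. The umbrella include path, the model filename, and the tensor buffer sizes are placeholders to adapt; error-status checks are omitted for brevity:

#include "TensorFlowLiteC/TensorFlowLiteC.h"  // headers shipped inside the framework

void RunOnce(void) {
    TfLiteModel *model = TfLiteModelCreateFromFile("model.tflite");  // placeholder path
    TfLiteInterpreterOptions *options = TfLiteInterpreterOptionsCreate();
    TfLiteInterpreter *interpreter = TfLiteInterpreterCreate(model, options);

    TfLiteInterpreterAllocateTensors(interpreter);

    float input[4] = {0};  // must match the model's input tensor size
    TfLiteTensorCopyFromBuffer(
        TfLiteInterpreterGetInputTensor(interpreter, 0), input, sizeof(input));

    TfLiteInterpreterInvoke(interpreter);

    float output[2] = {0};  // must match the model's output tensor size
    TfLiteTensorCopyToBuffer(
        TfLiteInterpreterGetOutputTensor(interpreter, 0), output, sizeof(output));

    // Tear down in reverse creation order.
    TfLiteInterpreterDelete(interpreter);
    TfLiteInterpreterOptionsDelete(options);
    TfLiteModelDelete(model);
}

The same calls work identically whether the framework came from the prebuilt CocoaPods or from a locally built TensorFlowLiteC_framework.zip.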
mmm a / xbmc / FileItem . cpp <nl>
ppp b / xbmc / FileItem . cpp <nl>
CStdString CFileItem : : GetFolderThumb ( const CStdString & folderJPG / * = " folder . jp <nl>
 CStdString strFolder = m_strPath ; <nl>
 <nl>
 if ( IsStack ( ) | | <nl>
- CUtil : : IsInRAR ( strFolder ) | | <nl>
+ CUtil : : IsInRAR ( strFolder ) | | <nl>
 CUtil : : IsInZIP ( strFolder ) ) <nl>
 { <nl>
 CUtil : : GetParentPath ( m_strPath , strFolder ) ; <nl>
| fixed : cosmetics ( whitespace alignment in CFileItem : : GetFolderThumb ) | xbmc/xbmc | 543bf17c942643d81c579a37c9f48027bb9509a4 | 2010-03-17T20:46:20Z |
mmm a / src / json . hpp <nl> ppp b / src / json . hpp <nl> class binary_reader <nl> * / <nl> BasicJsonType parse_ubjson_internal ( const bool get_char = true ) <nl> { <nl> - switch ( get_char ? get ( ) : current ) <nl> - { <nl> - case std : : char_traits < char > : : eof ( ) : / / EOF <nl> - JSON_THROW ( parse_error : : create ( 110 , chars_read , " unexpected end of input " ) ) ; <nl> - <nl> - case ' T ' : / / true <nl> - return true ; <nl> - case ' F ' : / / false <nl> - return false ; <nl> - <nl> - case ' Z ' : / / null <nl> - return nullptr ; <nl> - <nl> - case ' N ' : / / no - op <nl> - return parse_ubjson_internal ( ) ; / / read next byte <nl> - <nl> - case ' U ' : <nl> - return get_number < uint8_t > ( ) ; <nl> - case ' i ' : <nl> - return get_number < int8_t > ( ) ; <nl> - case ' I ' : <nl> - return get_number < int16_t > ( ) ; <nl> - case ' l ' : <nl> - return get_number < int32_t > ( ) ; <nl> - case ' L ' : <nl> - return get_number < int64_t > ( ) ; <nl> - case ' d ' : <nl> - return get_number < float > ( ) ; <nl> - case ' D ' : <nl> - return get_number < double > ( ) ; <nl> - <nl> - case ' C ' : / / char <nl> - { <nl> - get ( ) ; <nl> - check_eof ( ) ; <nl> - return std : : string ( 1 , static_cast < char > ( current ) ) ; <nl> - } <nl> - <nl> - case ' S ' : / / string <nl> - return get_ubjson_string ( ) ; <nl> - <nl> - case ' [ ' : / / array <nl> - return get_ubjson_array ( ) ; <nl> - <nl> - case ' { ' : / / object <nl> - return get_ubjson_object ( ) ; <nl> - <nl> - default : / / anything else <nl> - std : : stringstream ss ; <nl> - ss < < std : : setw ( 2 ) < < std : : uppercase < < std : : setfill ( ' 0 ' ) < < std : : hex < < current ; <nl> - JSON_THROW ( parse_error : : create ( 112 , chars_read , <nl> - " error reading UBJSON ; last byte : 0x " + ss . str ( ) ) ) ; <nl> - } <nl> + return get_ubjson_value ( get_char ? get_ignore_noop ( ) : current ) ; <nl> } <nl> <nl> / * ! <nl> class binary_reader <nl> return ( current = ia - > get_character ( ) ) ; <nl> } <nl> <nl> + / * ! <nl> + @ return character read from the input after ignoring all ' N ' entries <nl> + * / <nl> + int get_ignore_noop ( ) <nl> + { <nl> + do <nl> + { <nl> + get ( ) ; <nl> + } <nl> + while ( current = = ' N ' ) ; <nl> + <nl> + return current ; <nl> + } <nl> + <nl> / * <nl> @ brief read a number from the input <nl> <nl> class binary_reader <nl> } <nl> } <nl> <nl> + std : : pair < std : : size_t , int > get_ubjson_size_type ( ) <nl> + { <nl> + std : : size_t sz = std : : string : : npos ; <nl> + int tc = 0 ; <nl> + <nl> + get_ignore_noop ( ) ; <nl> + <nl> + switch ( current ) <nl> + { <nl> + case ' $ ' : <nl> + { <nl> + get_ignore_noop ( ) ; <nl> + check_eof ( ) ; <nl> + tc = current ; <nl> + <nl> + get_ignore_noop ( ) ; <nl> + if ( current ! 
= ' # ' ) <nl> + { <nl> + assert ( false ) ; <nl> + } <nl> + sz = parse_ubjson_internal ( ) ; <nl> + break ; <nl> + } <nl> + <nl> + case ' # ' : <nl> + { <nl> + sz = parse_ubjson_internal ( ) ; <nl> + break ; <nl> + } <nl> + <nl> + default : <nl> + break ; <nl> + } <nl> + <nl> + return std : : make_pair ( sz , tc ) ; <nl> + <nl> + } <nl> + <nl> + BasicJsonType get_ubjson_value ( const int prefix ) <nl> + { <nl> + switch ( prefix ) <nl> + { <nl> + case std : : char_traits < char > : : eof ( ) : / / EOF <nl> + JSON_THROW ( parse_error : : create ( 110 , chars_read , " unexpected end of input " ) ) ; <nl> + <nl> + case ' T ' : / / true <nl> + return true ; <nl> + case ' F ' : / / false <nl> + return false ; <nl> + <nl> + case ' Z ' : / / null <nl> + return nullptr ; <nl> + <nl> + case ' U ' : <nl> + return get_number < uint8_t > ( ) ; <nl> + case ' i ' : <nl> + return get_number < int8_t > ( ) ; <nl> + case ' I ' : <nl> + return get_number < int16_t > ( ) ; <nl> + case ' l ' : <nl> + return get_number < int32_t > ( ) ; <nl> + case ' L ' : <nl> + return get_number < int64_t > ( ) ; <nl> + case ' d ' : <nl> + return get_number < float > ( ) ; <nl> + case ' D ' : <nl> + return get_number < double > ( ) ; <nl> + <nl> + case ' C ' : / / char <nl> + { <nl> + get ( ) ; <nl> + check_eof ( ) ; <nl> + return std : : string ( 1 , static_cast < char > ( current ) ) ; <nl> + } <nl> + <nl> + case ' S ' : / / string <nl> + return get_ubjson_string ( ) ; <nl> + <nl> + case ' [ ' : / / array <nl> + return get_ubjson_array ( ) ; <nl> + <nl> + case ' { ' : / / object <nl> + return get_ubjson_object ( ) ; <nl> + <nl> + default : / / anything else <nl> + std : : stringstream ss ; <nl> + ss < < std : : setw ( 2 ) < < std : : uppercase < < std : : setfill ( ' 0 ' ) < < std : : hex < < current ; <nl> + JSON_THROW ( parse_error : : create ( 112 , chars_read , <nl> + " error reading UBJSON ; last byte : 0x " + ss . str ( ) ) ) ; <nl> + } <nl> + } <nl> + <nl> BasicJsonType get_ubjson_array ( ) <nl> { <nl> BasicJsonType result = value_t : : array ; <nl> + const auto size_and_type = get_ubjson_size_type ( ) ; <nl> <nl> - while ( get ( ) ! = ' ] ' ) <nl> + if ( size_and_type . first ! = std : : string : : npos ) <nl> + { <nl> + for ( std : : size_t i = 0 ; i < size_and_type . first ; + + i ) <nl> + { <nl> + if ( size_and_type . second ! = 0 ) <nl> + { <nl> + result . push_back ( get_ubjson_value ( size_and_type . second ) ) ; <nl> + } <nl> + else <nl> + { <nl> + result . push_back ( parse_ubjson_internal ( ) ) ; <nl> + } <nl> + } <nl> + } <nl> + else <nl> { <nl> - / / skip no - op <nl> - if ( current = = ' N ' ) <nl> + while ( current ! = ' ] ' ) <nl> { <nl> - continue ; <nl> + result . push_back ( parse_ubjson_internal ( false ) ) ; <nl> + get_ignore_noop ( ) ; <nl> } <nl> - result . push_back ( parse_ubjson_internal ( false ) ) ; <nl> } <nl> <nl> return result ; <nl> class binary_reader <nl> BasicJsonType get_ubjson_object ( ) <nl> { <nl> BasicJsonType result = value_t : : object ; <nl> + const auto size_and_type = get_ubjson_size_type ( ) ; <nl> <nl> - while ( get ( ) ! = ' } ' ) <nl> + if ( size_and_type . first ! = std : : string : : npos ) <nl> + { <nl> + for ( std : : size_t i = 0 ; i < size_and_type . first ; + + i ) <nl> + { <nl> + auto key = get_ubjson_string ( ) ; <nl> + if ( size_and_type . second ! = 0 ) <nl> + { <nl> + result [ std : : move ( key ) ] = get_ubjson_value ( size_and_type . 
second ) ; <nl> + } <nl> + else <nl> + { <nl> + result [ std : : move ( key ) ] = parse_ubjson_internal ( ) ; <nl> + } <nl> + } <nl> + } <nl> + else <nl> { <nl> - auto key = get_ubjson_string ( false ) ; <nl> - result [ std : : move ( key ) ] = parse_ubjson_internal ( ) ; <nl> + while ( current ! = ' } ' ) <nl> + { <nl> + auto key = get_ubjson_string ( false ) ; <nl> + result [ std : : move ( key ) ] = parse_ubjson_internal ( ) ; <nl> + get_ignore_noop ( ) ; <nl> + } <nl> } <nl> <nl> return result ; <nl> | : hammer : optimized input format | nlohmann/json | 31bfabc4c0868e7d2bf4a97cdafdeb9b966c4fab | 2018-01-10T15:14:43Z |
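The new get_ubjson_size_type helper above implements UBJSON's optimized-container headers: an optional $<type> marker must be followed by a #<count>, and when both are present the elements carry no per-element type markers and the container has no closing ] or }. A standalone sketch of that header grammar over a raw byte buffer; to stay short it assumes the count is encoded as a one-byte 'U' value (the real parser accepts any integer type), and it reuses the same std::string::npos / 0 conventions for "no count" / "no type" as the diff:

#include <cstdint>
#include <string>
#include <utility>
#include <vector>

// Parse an optional "$<type>" then "#U<count>" prefix starting at pos.
// first  == std::string::npos -> no count given (container is ']'-terminated)
// second == 0                 -> no homogeneous element type given
std::pair<std::size_t, int> read_size_type(const std::vector<std::uint8_t> &buf,
                                           std::size_t &pos) {
    std::size_t count = std::string::npos;
    int type = 0;

    if (pos + 1 < buf.size() && buf[pos] == '$') {
        type = buf[pos + 1];  // e.g. 'i' for int8 elements
        pos += 2;             // per the spec, '$' must be followed by '#'
    }
    if (pos + 2 < buf.size() && buf[pos] == '#' && buf[pos + 1] == 'U') {
        count = buf[pos + 2]; // one-byte element count (sketch-only assumption)
        pos += 3;
    }
    return {count, type};
}

// Example: the bytes '[' '$' 'i' '#' 'U' 3 1 2 3 encode the array [1, 2, 3]
// with a shared int8 type, a fixed count, and no closing ']'.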
mmm a / tensorflow / contrib / data / python / kernel_tests / optimize_dataset_op_test . py <nl> ppp b / tensorflow / contrib / data / python / kernel_tests / optimize_dataset_op_test . py <nl> def testOptimization ( self ) : <nl> with self . assertRaises ( errors . OutOfRangeError ) : <nl> sess . run ( get_next ) <nl> <nl> - # TODO ( b / 112914454 ) : Remove the test or figure out way to copy only new <nl> - # functions in optimize_dataset_op instead of taking union of old and new <nl> - # functions . <nl> - def _testFunctionLibraryDefinitionModification ( self ) : <nl> - dataset = dataset_ops . Dataset . from_tensors ( 0 ) . map ( lambda x : x ) . apply ( <nl> - optimization . optimize ( [ " _test_only_function_rename " ] ) ) <nl> - iterator = dataset . make_one_shot_iterator ( ) <nl> - get_next = iterator . get_next ( ) <nl> - <nl> - with self . test_session ( ) as sess : <nl> - with self . assertRaisesRegexp ( errors . NotFoundError , <nl> - " Function . * is not defined . " ) : <nl> - sess . run ( get_next ) <nl> - <nl> <nl> if __name__ = = " __main__ " : <nl> test . main ( ) <nl> mmm a / tensorflow / core / grappler / optimizers / data / BUILD <nl> ppp b / tensorflow / core / grappler / optimizers / data / BUILD <nl> tf_cc_test ( <nl> ] , <nl> ) <nl> <nl> - cc_library ( <nl> - name = " function_rename " , <nl> - srcs = [ " function_rename . cc " ] , <nl> - hdrs = [ <nl> - " function_rename . h " , <nl> - ] , <nl> - visibility = [ " / / visibility : public " ] , <nl> - deps = [ <nl> - " : graph_utils " , <nl> - " / / tensorflow / core : lib " , <nl> - " / / tensorflow / core / grappler : graph_view " , <nl> - " / / tensorflow / core / grappler : grappler_item " , <nl> - " / / tensorflow / core / grappler : op_types " , <nl> - " / / tensorflow / core / grappler : utils " , <nl> - " / / tensorflow / core / grappler / clusters : cluster " , <nl> - " / / tensorflow / core / grappler / optimizers : custom_graph_optimizer " , <nl> - " / / tensorflow / core / grappler / optimizers : custom_graph_optimizer_registry " , <nl> - ] + tf_protos_all ( ) , <nl> - ) <nl> - <nl> - tf_cc_test ( <nl> - name = " function_rename_test " , <nl> - srcs = [ " function_rename_test . cc " ] , <nl> - visibility = [ " / / visibility : public " ] , <nl> - deps = [ <nl> - " : function_rename " , <nl> - " / / tensorflow / core : framework " , <nl> - " / / tensorflow / core : test " , <nl> - " / / tensorflow / core : test_main " , <nl> - " / / tensorflow / core / grappler : grappler_item " , <nl> - ] + tf_protos_all ( ) , <nl> - ) <nl> - <nl> cc_library ( <nl> name = " fusion_utils " , <nl> srcs = [ " fusion_utils . cc " ] , <nl> cc_library ( <nl> visibility = [ " / / visibility : public " ] , <nl> deps = [ <nl> " : filter_fusion " , <nl> - " : function_rename " , <nl> " : latency_all_edges " , <nl> " : map_and_batch_fusion " , <nl> " : map_and_filter_fusion " , <nl> deleted file mode 100644 <nl> index 8cf044d1bdf02 . . 0000000000000 <nl> mmm a / tensorflow / core / grappler / optimizers / data / function_rename . cc <nl> ppp / dev / null <nl> <nl> - / * Copyright 2018 The TensorFlow Authors . All Rights Reserved . <nl> - <nl> - Licensed under the Apache License , Version 2 . 0 ( the " License " ) ; <nl> - you may not use this file except in compliance with the License . <nl> - You may obtain a copy of the License at <nl> - <nl> - http : / / www . apache . org / licenses / LICENSE - 2 . 
0 <nl> - <nl> - Unless required by applicable law or agreed to in writing , software <nl> - distributed under the License is distributed on an " AS IS " BASIS , <nl> - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND , either express or implied . <nl> - See the License for the specific language governing permissions and <nl> - limitations under the License . <nl> - = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = * / <nl> - <nl> - # include " tensorflow / core / grappler / optimizers / data / function_rename . h " <nl> - <nl> - # include " tensorflow / core / grappler / clusters / cluster . h " <nl> - # include " tensorflow / core / grappler / graph_view . h " <nl> - # include " tensorflow / core / grappler / grappler_item . h " <nl> - # include " tensorflow / core / grappler / op_types . h " <nl> - # include " tensorflow / core / grappler / optimizers / custom_graph_optimizer_registry . h " <nl> - # include " tensorflow / core / grappler / optimizers / data / graph_utils . h " <nl> - # include " tensorflow / core / grappler / utils . h " <nl> - # include " tensorflow / core / platform / protobuf . h " <nl> - <nl> - namespace tensorflow { <nl> - namespace grappler { <nl> - <nl> - Status FunctionRename : : Optimize ( Cluster * cluster , const GrapplerItem & item , <nl> - GraphDef * output ) { <nl> - * output = item . graph ; <nl> - GraphView graph ( output ) ; <nl> - int n = output - > mutable_library ( ) - > function_size ( ) ; <nl> - for ( int i = 0 ; i < n ; + + i ) { <nl> - FunctionDef * fn = output - > mutable_library ( ) - > mutable_function ( i ) ; <nl> - fn - > mutable_signature ( ) - > set_name ( fn - > signature ( ) . name ( ) + " world " ) ; <nl> - } <nl> - <nl> - return Status : : OK ( ) ; <nl> - } <nl> - <nl> - void FunctionRename : : Feedback ( Cluster * cluster , const GrapplerItem & item , <nl> - const GraphDef & optimize_output , double result ) { <nl> - / / no - op <nl> - } <nl> - <nl> - REGISTER_GRAPH_OPTIMIZER_AS ( FunctionRename , " _test_only_function_rename " ) ; <nl> - <nl> - } / / end namespace grappler <nl> - } / / end namespace tensorflow <nl> deleted file mode 100644 <nl> index 23ad9470ff388 . . 0000000000000 <nl> mmm a / tensorflow / core / grappler / optimizers / data / function_rename . h <nl> ppp / dev / null <nl> <nl> - / * Copyright 2018 The TensorFlow Authors . All Rights Reserved . <nl> - <nl> - Licensed under the Apache License , Version 2 . 0 ( the " License " ) ; <nl> - you may not use this file except in compliance with the License . <nl> - You may obtain a copy of the License at <nl> - <nl> - http : / / www . apache . org / licenses / LICENSE - 2 . 0 <nl> - <nl> - Unless required by applicable law or agreed to in writing , software <nl> - distributed under the License is distributed on an " AS IS " BASIS , <nl> - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND , either express or implied . <nl> - See the License for the specific language governing permissions and <nl> - limitations under the License . <nl> - = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = * / <nl> - <nl> - # ifndef TENSORFLOW_CORE_GRAPPLER_OPTIMIZERS_DATA_FUNCTION_RENAME_H_ <nl> - # define TENSORFLOW_CORE_GRAPPLER_OPTIMIZERS_DATA_FUNCTION_RENAME_H_ <nl> - <nl> - # include " tensorflow / core / grappler / optimizers / custom_graph_optimizer . 
h " <nl> - <nl> - namespace tensorflow { <nl> - namespace grappler { <nl> - <nl> - class FunctionRename : public CustomGraphOptimizer { <nl> - public : <nl> - FunctionRename ( ) = default ; <nl> - ~ FunctionRename ( ) override = default ; <nl> - <nl> - string name ( ) const override { return " _test_only_function_rename " ; } ; <nl> - <nl> - Status Init ( <nl> - const tensorflow : : RewriterConfig_CustomGraphOptimizer * config ) override { <nl> - return Status : : OK ( ) ; <nl> - } <nl> - <nl> - Status Optimize ( Cluster * cluster , const GrapplerItem & item , <nl> - GraphDef * output ) override ; <nl> - <nl> - void Feedback ( Cluster * cluster , const GrapplerItem & item , <nl> - const GraphDef & optimize_output , double result ) override ; <nl> - } ; <nl> - <nl> - } / / end namespace grappler <nl> - } / / end namespace tensorflow <nl> - <nl> - # endif / / TENSORFLOW_CORE_GRAPPLER_OPTIMIZERS_DATA_FUNCTION_RENAME_H_ <nl> deleted file mode 100644 <nl> index 56b8a960a77d1 . . 0000000000000 <nl> mmm a / tensorflow / core / grappler / optimizers / data / function_rename_test . cc <nl> ppp / dev / null <nl> <nl> - / * Copyright 2018 The TensorFlow Authors . All Rights Reserved . <nl> - <nl> - Licensed under the Apache License , Version 2 . 0 ( the " License " ) ; <nl> - you may not use this file except in compliance with the License . <nl> - You may obtain a copy of the License at <nl> - <nl> - http : / / www . apache . org / licenses / LICENSE - 2 . 0 <nl> - <nl> - Unless required by applicable law or agreed to in writing , software <nl> - distributed under the License is distributed on an " AS IS " BASIS , <nl> - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND , either express or implied . <nl> - See the License for the specific language governing permissions and <nl> - limitations under the License . <nl> - = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = * / <nl> - <nl> - # include " tensorflow / core / grappler / optimizers / data / function_rename . h " <nl> - <nl> - # include " tensorflow / core / framework / function . pb . h " <nl> - # include " tensorflow / core / framework / op_def . pb . h " <nl> - # include " tensorflow / core / grappler / grappler_item . h " <nl> - # include " tensorflow / core / lib / core / status_test_util . h " <nl> - # include " tensorflow / core / platform / test . h " <nl> - <nl> - namespace tensorflow { <nl> - namespace grappler { <nl> - namespace { <nl> - <nl> - TEST ( FunctionRenameTest , RenameFunction ) { <nl> - GrapplerItem item ; <nl> - GraphDef * graph = & item . graph ; <nl> - FunctionDef * fn = graph - > mutable_library ( ) - > add_function ( ) ; <nl> - fn - > mutable_signature ( ) - > set_name ( " hello " ) ; <nl> - <nl> - FunctionRename optimizer ; <nl> - GraphDef output ; <nl> - TF_ASSERT_OK ( optimizer . Optimize ( nullptr , item , & output ) ) ; <nl> - EXPECT_EQ ( output . library ( ) . function ( 0 ) . signature ( ) . name ( ) , " helloworld " ) ; <nl> - } <nl> - <nl> - } / / namespace <nl> - } / / namespace grappler <nl> - } / / namespace tensorflow <nl> | [ tf . data ] removing test for obsolete functionality | tensorflow/tensorflow | abc8452394aeeecc1f3fef27f7098a5924bdd0e9 | 2018-08-27T18:55:43Z |
mmm a / modules / mono / editor / GodotSharpTools / GodotSharpTools . csproj <nl> ppp b / modules / mono / editor / GodotSharpTools / GodotSharpTools . csproj <nl> <nl> < RootNamespace > GodotSharpTools < / RootNamespace > <nl> < AssemblyName > GodotSharpTools < / AssemblyName > <nl> < TargetFrameworkVersion > v4 . 5 < / TargetFrameworkVersion > <nl> + < BaseIntermediateOutputPath > obj < / BaseIntermediateOutputPath > <nl> < / PropertyGroup > <nl> < PropertyGroup Condition = " ' $ ( Configuration ) | $ ( Platform ) ' = = ' Debug | AnyCPU ' " > <nl> < DebugSymbols > true < / DebugSymbols > <nl> mmm a / modules / mono / editor / mono_bottom_panel . cpp <nl> ppp b / modules / mono / editor / mono_bottom_panel . cpp <nl> void MonoBottomPanel : : _build_project_pressed ( ) { <nl> if ( ! FileAccess : : exists ( GodotSharpDirs : : get_project_sln_path ( ) ) ) <nl> return ; / / No solution to build <nl> <nl> - String scripts_metadata_path = GodotSharpDirs : : get_res_metadata_dir ( ) . plus_file ( " scripts_metadata . editor " ) ; <nl> - Error metadata_err = CSharpProject : : generate_scripts_metadata ( GodotSharpDirs : : get_project_csproj_path ( ) , scripts_metadata_path ) ; <nl> + String scripts_metadata_path_editor = GodotSharpDirs : : get_res_metadata_dir ( ) . plus_file ( " scripts_metadata . editor " ) ; <nl> + String scripts_metadata_path_player = GodotSharpDirs : : get_res_metadata_dir ( ) . plus_file ( " scripts_metadata . editor_player " ) ; <nl> + <nl> + Error metadata_err = CSharpProject : : generate_scripts_metadata ( GodotSharpDirs : : get_project_csproj_path ( ) , scripts_metadata_path_editor ) ; <nl> ERR_FAIL_COND ( metadata_err ! = OK ) ; <nl> <nl> + if ( FileAccess : : exists ( scripts_metadata_path_editor ) ) { <nl> + DirAccessRef da = DirAccess : : create ( DirAccess : : ACCESS_RESOURCES ) ; <nl> + Error copy_err = da - > copy ( scripts_metadata_path_editor , scripts_metadata_path_player ) ; <nl> + <nl> + ERR_EXPLAIN ( " Failed to copy scripts metadata file " ) ; <nl> + ERR_FAIL_COND ( copy_err ! = OK ) ; <nl> + } <nl> + <nl> bool build_success = GodotSharpBuilds : : get_singleton ( ) - > build_project_blocking ( " Tools " ) ; <nl> <nl> if ( build_success ) { <nl> | Mono : Create player script metadata when building manually | godotengine/godot | 5f8c30fbcaad9f45c32aa58f4f995c88e917ac96 | 2019-02-08T21:49:27Z |
mmm a / dlib / cmake_utils / test_for_cudnn / find_cudnn . txt <nl> ppp b / dlib / cmake_utils / test_for_cudnn / find_cudnn . txt <nl> get_filename_component ( cudnn_hint_path " $ { CUDA_CUBLAS_LIBRARIES } " PATH ) <nl> find_library ( cudnn cudnn <nl> HINTS $ { cudnn_hint_path } ENV CUDNN_LIBRARY_DIR ENV CUDNN_HOME <nl> PATHS / usr / local / usr / local / cuda ENV LD_LIBRARY_PATH <nl> - PATH_SUFFIXES lib64 lib <nl> + PATH_SUFFIXES lib64 lib x64 <nl> ) <nl> mark_as_advanced ( cudnn cudnn_include ) <nl>
| find_cudnn modified for cuDNN installed in a user directory . ( ) | davisking/dlib | 766c46b5cf0f9cc5022f9f67e039b64ab7d9af64 | 2016-10-07T10:36:58Z |
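The added x64 suffix matches the lib/x64 layout of cuDNN's Windows packages, while the existing HINTS and ENV entries already cover per-user install prefixes. A hypothetical smoke test (not part of dlib) for whatever library this find_library call resolves; cudnnGetVersion is part of the public cuDNN API:

// Hypothetical linkage check: if find_library located the right cudnn,
// this compiles, links, and prints the runtime library version.
#include <cudnn.h>
#include <cstdio>

int main() {
    std::printf("cuDNN version: %zu\n", cudnnGetVersion());
    return 0;
}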
new file mode 100644 <nl> index 00000000000 . . 6cae4a867e1 <nl> mmm / dev / null <nl> ppp b / hphp / hack / test / typecheck / reified_generics / new_self_new_parent . php <nl> <nl> + < ? hh / / strict <nl> + / / Copyright 2004 - present Facebook . All Rights Reserved . <nl> + <nl> + class A < reify Ta > { } <nl> + class C < reify Tb > extends A < Tb > { <nl> + public function f ( ) : void { <nl> + $ c = new self ( ) ; <nl> + hh_show ( $ c ) ; / / C < [ unresolved ] > TODO : T41884303 <nl> + $ a = new parent ( ) ; <nl> + hh_show ( $ a ) ; / / A < [ unresolved ] > TODO : T41884303 <nl> + } <nl> + } <nl> new file mode 100644 <nl> index 00000000000 . . 1d42e92b2f4 <nl> mmm / dev / null <nl> ppp b / hphp / hack / test / typecheck / reified_generics / new_self_new_parent . php . exp <nl> <nl> + File " new_self_new_parent . php " , line 8 , characters 5 - 15 : <nl> + exact C < [ unresolved ] > <nl> + File " new_self_new_parent . php " , line 10 , characters 5 - 15 : <nl> + exact A < [ unresolved ] > <nl> + No errors <nl> new file mode 100644 <nl> index 00000000000 . . cce82bb112e <nl> mmm / dev / null <nl> ppp b / hphp / hack / test / typecheck / reified_generics / newable_targ_extends . php <nl> <nl> + < ? hh <nl> + <nl> + < < __ConsistentConstruct > > <nl> + abstract class Top { } <nl> + class Concrete extends Top { } <nl> + <nl> + class C < < < __Newable > > reify T as Top > { <nl> + public function f ( ) : void { <nl> + new T ( ) ; <nl> + } <nl> + } <nl> + <nl> + class Bad extends C < Top > { } <nl> + class Good extends C < Concrete > { } <nl> new file mode 100644 <nl> index 00000000000 . . 0bca4a10af3 <nl> mmm / dev / null <nl> ppp b / hphp / hack / test / typecheck / reified_generics / newable_targ_extends . php . exp <nl> <nl> + File " newable_targ_extends . php " , line 13 , characters 21 - 23 : <nl> + A newable type argument must be a concrete class or a newable type parameter . ( Typing [ 4307 ] ) <nl> + File " newable_targ_extends . php " , line 7 , characters 29 - 29 : <nl> + Type parameter T was declared __Newable here <nl> | Add test cases for extends newable and new self / parent | facebook/hhvm | e33e38abd0871ed7c79d4dca94f656e30e06411f | 2019-03-20T06:15:06Z |
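The newable_targ_extends test above pins down why C&lt;Top&gt; is rejected while C&lt;Concrete&gt; is accepted: new T() is only sound when T is guaranteed to be a concrete, constructible class. A rough C++ analogue of the same constraint, offered as an illustration only and not as Hack's actual semantics:

// Rough C++ analogue (not Hack): new T() inside a template is only valid
// for concrete T, mirroring the __Newable "concrete class" requirement.
#include <memory>

struct Top { virtual ~Top() = default; virtual void run() = 0; };  // abstract
struct Concrete final : Top { void run() override {} };

template <typename T>
std::unique_ptr<T> make() { return std::make_unique<T>(); }

int main() {
    auto good = make<Concrete>();  // OK: Concrete is constructible
    // auto bad = make<Top>();     // compile error: Top is abstract
}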
mmm a / README . md <nl> ppp b / README . md <nl> <nl> <nl> * v1 . 0 build 11 ( master branch ) * <nl> <nl> - [ ! [ Build Status ] ( https : / / travis - ci . org / AIM360 / Catch . png ) ] ( https : / / travis - ci . org / AIM360 / Catch ) Travis CI build status for this fork <nl> + Build status ( on Travis CI ) [ ! [ Build Status ] ( https : / / travis - ci . org / philsquared / Catch . png ) ] ( https : / / travis - ci . org / philsquared / Catch ) <nl> <nl> # New release with significant changes <nl> <nl> | Updated travis URL | catchorg/Catch2 | 2b644b5128ce2ba4a1faf36e8a17e05f7a7cb4c7 | 2013-10-21T19:55:01Z |
new file mode 100644 <nl> index 00000000000 . . 74870396514 <nl> mmm / dev / null <nl> ppp b / Ahuacatl / TODO <nl> <nl> + nodes : <nl> + - escape variable names <nl> + <nl> + code generation : <nl> + - symbol tables / scopes <nl> + <nl> + avocado integration : <nl> + - error handling <nl> + - Avocado integration <nl> + <nl> + bind : <nl> + - bind parameter injection <nl> + - bind parameter validation <nl> + <nl> + optimization : <nl> + - constant folding <nl> new file mode 100644 <nl> index 00000000000 . . f260cc2c180 <nl> mmm / dev / null <nl> ppp b / Ahuacatl / grammar . ebnf <nl> <nl> + / * converted on Fri Apr 13 , 2012 , 15 : 01 ( UTC + 02 ) by bison - to - w3c v0 . 24 . 383 which is Copyright ( c ) 2011 - 2012 by Gunther Rademacher < grd @ gmx . net > * / <nl> + <nl> + query : : = statement_block_statement * return_statement <nl> + statement_block_statement <nl> + : : = for_statement <nl> + | let_statement <nl> + | filter_statement <nl> + | collect_statement <nl> + | sort_statement <nl> + | limit_statement <nl> + for_statement <nl> + : : = T_FOR variable_name T_IN expression <nl> + filter_statement <nl> + : : = T_FILTER expression <nl> + let_statement <nl> + : : = T_LET variable_name T_ASSIGN expression <nl> + collect_statement <nl> + : : = T_COLLECT collect_element ( T_COMMA collect_element ) * optional_into <nl> + collect_element <nl> + : : = variable_name T_ASSIGN expression <nl> + | expression <nl> + optional_into <nl> + : : = ( T_INTO variable_name ) ? <nl> + sort_statement <nl> + : : = T_SORT sort_element ( T_COMMA sort_element ) * <nl> + sort_element <nl> + : : = expression sort_direction <nl> + sort_direction <nl> + : : = ( T_ASC | T_DESC ) ? <nl> + limit_statement <nl> + : : = T_LIMIT signed_number <nl> + | T_LIMIT signed_number T_COMMA signed_number <nl> + return_statement <nl> + : : = T_RETURN expression <nl> + expression <nl> + : : = T_OPEN expression T_CLOSE <nl> + | T_OPEN query T_CLOSE <nl> + | operator_unary <nl> + | operator_binary <nl> + | operator_ternary <nl> + | T_STRING T_OPEN optional_function_call_arguments T_CLOSE FUNCCALL <nl> + | compound_type <nl> + | atomic_value <nl> + | ( reference_part | T_DOLLAR ) ( ' . ' reference_part REFERENCE ) * <nl> + operator_unary <nl> + : : = T_PLUS expression UPLUS <nl> + | T_MINUS expression UMINUS <nl> + | T_NOT expression T_NOT <nl> + operator_binary <nl> + : : = expression T_OR expression <nl> + | expression T_AND expression <nl> + | expression T_PLUS expression <nl> + | expression T_MINUS expression <nl> + | expression T_TIMES expression <nl> + | expression T_DIV expression <nl> + | expression T_MOD expression <nl> + | expression T_EQ expression <nl> + | expression T_NE expression <nl> + | expression T_LT expression <nl> + | expression T_GT expression <nl> + | expression T_LE expression <nl> + | expression T_GE expression <nl> + | expression T_IN expression <nl> + operator_ternary <nl> + : : = expression T_QUESTION expression T_COLON expression <nl> + optional_function_call_arguments <nl> + : : = ( expression ( T_COMMA expression ) * ) ? <nl> + compound_type <nl> + : : = list <nl> + | document <nl> + list : : = T_LIST_OPEN optional_list_elements T_LIST_CLOSE <nl> + optional_list_elements <nl> + : : = ( expression ( T_COMMA expression ) * ) ? <nl> + document : : = T_DOC_OPEN optional_document_elements T_DOC_CLOSE <nl> + optional_document_elements <nl> + : : = ( document_element ( T_COMMA document_element ) * ) ? 
<nl> + document_element <nl> + : : = document_element_name T_COLON expression <nl> + reference_part <nl> + : : = T_STRING <nl> + | T_STRING T_LIST_OPEN reference_index T_LIST_CLOSE <nl> + reference_index <nl> + : : = T_QUOTED_STRING <nl> + | T_NUMBER <nl> + | T_TIMES <nl> + atomic_value <nl> + : : = value_literal <nl> + | bind_parameter <nl> + value_literal <nl> + : : = T_QUOTED_STRING <nl> + | T_NUMBER <nl> + | T_NULL <nl> + | T_TRUE <nl> + | T_FALSE <nl> + bind_parameter <nl> + : : = T_PARAMETER <nl> + document_element_name <nl> + : : = T_STRING <nl> + | T_QUOTED_STRING <nl> + variable_name <nl> + : : = T_STRING <nl> + signed_number <nl> + : : = T_NUMBER <nl> + | ' - ' T_NUMBER <nl> new file mode 100644 <nl> index 00000000000 . . 6636f369f4e <nl> mmm / dev / null <nl> ppp b / Ahuacatl / grammar . xhtml <nl> <nl> + < ! DOCTYPE html PUBLIC " - / / W3C / / DTD XHTML 1 . 0 Transitional / / EN " " http : / / www . w3 . org / TR / xhtml1 / DTD / xhtml1 - transitional . dtd " > <nl> + < html xmlns = " http : / / www . w3 . org / 1999 / xhtml " > < head > < xhtml : meta xmlns : xhtml = " http : / / www . w3 . org / 1999 / xhtml " http - equiv = " Content - Type " content = " text / html ; charset = UTF - 8 " / > < xhtml : link xmlns : xhtml = " http : / / www . w3 . org / 1999 / xhtml " rel = " shortcut icon " href = " http : / / railroad . my28msec . com / favicon . ico " / > < xhtml : title xmlns : xhtml = " http : / / www . w3 . org / 1999 / xhtml " > Railroad Diagram Generator < / xhtml : title > < style type = " text / css " > <nl> + : : - moz - selection <nl> + { <nl> + color : # FFFCF0 ; <nl> + background : # 4D3D00 ; <nl> + } <nl> + : : selection <nl> + { <nl> + color : # FFFCF0 ; <nl> + background : # 4D3D00 ; <nl> + } <nl> + body <nl> + { <nl> + font : normal 12px Verdana , sans - serif ; <nl> + color : # 4D3D00 ; <nl> + background : # FFFCF0 ; <nl> + } <nl> + a : link , a : visited <nl> + { <nl> + color : # 4D3D00 ; <nl> + } <nl> + a . button , # tabs li a <nl> + { <nl> + padding : 0 . 25em 0 . 5em ; <nl> + border : 1px solid # 806600 ; <nl> + background : # F1E8C6 ; <nl> + color : # 806600 ; <nl> + text - decoration : none ; <nl> + font - weight : bold ; <nl> + } <nl> + a : hover , # tabs li a : hover <nl> + { <nl> + color : # 1A1400 ; <nl> + background : # FFF4C7 ; <nl> + border - color : # 1A1400 ; <nl> + } <nl> + # tabs <nl> + { <nl> + padding : 3px 10px ; <nl> + margin - left : 0 ; <nl> + margin - top : 58px ; <nl> + border - bottom : 1px solid # 4D3D00 ; <nl> + } <nl> + # tabs li <nl> + { <nl> + list - style : none ; <nl> + margin - left : 5px ; <nl> + display : inline ; <nl> + } <nl> + # tabs li a <nl> + { <nl> + border - bottom : 1px solid # 4D3D00 ; <nl> + } <nl> + # tabs li a . active <nl> + { <nl> + color : # 4D3D00 ; <nl> + background : # FFFCF0 ; <nl> + border - color : # 4D3D00 ; <nl> + border - bottom : 1px solid # FFFCF0 ; <nl> + outline : none ; <nl> + } <nl> + # divs div <nl> + { <nl> + display : none ; <nl> + overflow : auto ; <nl> + } <nl> + # divs div . active <nl> + { <nl> + display : block ; <nl> + } <nl> + # text <nl> + { <nl> + border - color : # 806600 ; <nl> + background : # FFFEFA ; <nl> + color : # 1A1400 ; <nl> + } <nl> + td . time <nl> + { <nl> + vertical - align : top ; <nl> + } <nl> + span . time <nl> + { <nl> + font - size : 9px ; <nl> + visibility : hidden ; <nl> + } <nl> + td . time : hover span . time <nl> + { <nl> + visibility : visible ; <nl> + } <nl> + < / style > < svg xmlns = " http : / / www . w3 . 
org / 2000 / svg " > < defs > < style type = " text / css " > <nl> + @ namespace " http : / / www . w3 . org / 2000 / svg " ; <nl> + . line { fill : none ; stroke : # 332900 ; } <nl> + filled { fill : # 332900 ; stroke : none ; } <nl> + text { font - family : Verdana , Sans - serif ; <nl> + font - size : 12px ; <nl> + fill : # 4D3D00 ; <nl> + } <nl> + text . terminal { font - family : Verdana , Sans - serif ; <nl> + font - size : 12px ; <nl> + fill : # 4D3D00 ; <nl> + font - weight : bold ; <nl> + } <nl> + rect , circle , polygon { fill : # 332900 ; stroke : # 332900 ; } <nl> + rect . terminal { fill : # FFCC00 ; stroke : # 332900 ; } <nl> + rect . nonterminal { fill : # FFEB99 ; stroke : # 332900 ; } <nl> + rect . text { fill : none ; stroke : none ; } <nl> + polygon . regexp { fill : # FFF5CC ; stroke : # 332900 ; } <nl> + < / style > < / defs > < / svg > < / head > < body > < xhtml : p xmlns : xhtml = " http : / / www . w3 . org / 1999 / xhtml " style = " font - size : 14px ; font - weight : bold " > < xhtml : a name = " query " > query : < / xhtml : a > < / xhtml : p > < svg xmlns = " http : / / www . w3 . org / 2000 / svg " xmlns : xlink = " http : / / www . w3 . org / 1999 / xlink " width = " 488 " height = " 68 " > < defs > < style type = " text / css " > <nl> + @ namespace " http : / / www . w3 . org / 2000 / svg " ; <nl> + . line { fill : none ; stroke : # 332900 ; } <nl> + filled { fill : # 332900 ; stroke : none ; } <nl> + text { font - family : Verdana , Sans - serif ; <nl> + font - size : 12px ; <nl> + fill : # 4D3D00 ; <nl> + } <nl> + text . terminal { font - family : Verdana , Sans - serif ; <nl> + font - size : 12px ; <nl> + fill : # 4D3D00 ; <nl> + font - weight : bold ; <nl> + } <nl> + rect , circle , polygon { fill : # 332900 ; stroke : # 332900 ; } <nl> + rect . terminal { fill : # FFCC00 ; stroke : # 332900 ; } <nl> + rect . nonterminal { fill : # FFEB99 ; stroke : # 332900 ; } <nl> + rect . text { fill : none ; stroke : none ; } <nl> + polygon . 
regexp { fill : # FFF5CC ; stroke : # 332900 ; } <nl> + < / style > < / defs > < polygon points = " 9 33 1 29 1 37 " class = " filled " / > < polygon points = " 17 33 9 29 9 37 " class = " filled " / > < line x1 = " 17 " y1 = " 33 " x2 = " 19 " y2 = " 33 " class = " line " / > < a xlink : href = " # statement_block_statement " xlink : title = " statement_block_statement " > < rect x = " 71 " y = " 19 " width = " 198 " height = " 32 " / > < rect x = " 69 " y = " 17 " width = " 198 " height = " 32 " class = " nonterminal " / > < text class = " nonterminal " x = " 79 " y = " 37 " > statement_block_statement < / text > < / a > < line x1 = " 59 " y1 = " 33 " x2 = " 69 " y2 = " 33 " class = " line " / > < line x1 = " 267 " y1 = " 33 " x2 = " 277 " y2 = " 33 " class = " line " / > < path d = " M39 33 L59 33 M58 33 Q49 33 49 23 L49 11 Q49 1 59 1 " class = " line " / > < path d = " M277 33 L297 33 M277 33 Q287 33 287 23 L287 11 Q287 1 277 1 " class = " line " / > < line x1 = " 59 " y1 = " 1 " x2 = " 69 " y2 = " 1 " class = " line " / > < line x1 = " 69 " y1 = " 1 " x2 = " 277 " y2 = " 1 " class = " line " / > < line x1 = " 19 " y1 = " 33 " x2 = " 39 " y2 = " 33 " class = " line " / > < line x1 = " 297 " y1 = " 33 " x2 = " 317 " y2 = " 33 " class = " line " / > < path d = " M19 33 Q29 33 29 43 " class = " line " / > < path d = " M307 43 Q307 33 317 33 " class = " line " / > < line x1 = " 29 " y1 = " 43 " x2 = " 29 " y2 = " 57 " class = " line " / > < line x1 = " 307 " y1 = " 57 " x2 = " 307 " y2 = " 43 " class = " line " / > < path d = " M29 57 Q29 67 39 67 " class = " line " / > < path d = " M297 67 Q307 67 307 57 " class = " line " / > < line x1 = " 39 " y1 = " 67 " x2 = " 49 " y2 = " 67 " class = " line " / > < line x1 = " 49 " y1 = " 67 " x2 = " 297 " y2 = " 67 " class = " line " / > < a xlink : href = " # return_statement " xlink : title = " return_statement " > < rect x = " 329 " y = " 19 " width = " 132 " height = " 32 " / > < rect x = " 327 " y = " 17 " width = " 132 " height = " 32 " class = " nonterminal " / > < text class = " nonterminal " x = " 337 " y = " 37 " > return_statement < / text > < / a > < line x1 = " 317 " y1 = " 33 " x2 = " 327 " y2 = " 33 " class = " line " / > < line x1 = " 459 " y1 = " 33 " x2 = " 469 " y2 = " 33 " class = " line " / > < line x1 = " 472 " y1 = " 33 " x2 = " 469 " y2 = " 33 " class = " line " / > < polygon points = " 479 33 487 29 487 37 " class = " filled " / > < polygon points = " 479 33 471 29 471 37 " class = " filled " / > < / svg > < xhtml : p xmlns : xhtml = " http : / / www . w3 . org / 1999 / xhtml " > referenced by : < xhtml : ul > < xhtml : li > < xhtml : a href = " # expression " title = " expression " > expression < / xhtml : a > < / xhtml : li > < / xhtml : ul > < / xhtml : p > < xhtml : br xmlns : xhtml = " http : / / www . w3 . org / 1999 / xhtml " / > < xhtml : p xmlns : xhtml = " http : / / www . w3 . org / 1999 / xhtml " style = " font - size : 14px ; font - weight : bold " > < xhtml : a name = " statement_block_statement " > statement_block_statement : < / xhtml : a > < / xhtml : p > < svg xmlns = " http : / / www . w3 . org / 2000 / svg " xmlns : xlink = " http : / / www . w3 . org / 1999 / xlink " width = " 230 " height = " 256 " > < defs > < style type = " text / css " > <nl> + @ namespace " http : / / www . w3 . org / 2000 / svg " ; <nl> + . 
line { fill : none ; stroke : # 332900 ; } <nl> + filled { fill : # 332900 ; stroke : none ; } <nl> + text { font - family : Verdana , Sans - serif ; <nl> + font - size : 12px ; <nl> + fill : # 4D3D00 ; <nl> + } <nl> + text . terminal { font - family : Verdana , Sans - serif ; <nl> + font - size : 12px ; <nl> + fill : # 4D3D00 ; <nl> + font - weight : bold ; <nl> + } <nl> + rect , circle , polygon { fill : # 332900 ; stroke : # 332900 ; } <nl> + rect . terminal { fill : # FFCC00 ; stroke : # 332900 ; } <nl> + rect . nonterminal { fill : # FFEB99 ; stroke : # 332900 ; } <nl> + rect . text { fill : none ; stroke : none ; } <nl> + polygon . regexp { fill : # FFF5CC ; stroke : # 332900 ; } <nl> + < / style > < / defs > < polygon points = " 9 17 1 13 1 21 " class = " filled " / > < polygon points = " 17 17 9 13 9 21 " class = " filled " / > < line x1 = " 17 " y1 = " 17 " x2 = " 19 " y2 = " 17 " class = " line " / > < a xlink : href = " # for_statement " xlink : title = " for_statement " > < rect x = " 51 " y = " 3 " width = " 110 " height = " 32 " / > < rect x = " 49 " y = " 1 " width = " 110 " height = " 32 " class = " nonterminal " / > < text class = " nonterminal " x = " 59 " y = " 21 " > for_statement < / text > < / a > < line x1 = " 39 " y1 = " 17 " x2 = " 49 " y2 = " 17 " class = " line " / > < line x1 = " 159 " y1 = " 17 " x2 = " 169 " y2 = " 17 " class = " line " / > < line x1 = " 169 " y1 = " 17 " x2 = " 191 " y2 = " 17 " class = " line " / > < line x1 = " 19 " y1 = " 17 " x2 = " 39 " y2 = " 17 " class = " line " / > < line x1 = " 191 " y1 = " 17 " x2 = " 211 " y2 = " 17 " class = " line " / > < path d = " M19 17 Q29 17 29 27 " class = " line " / > < path d = " M201 27 Q201 17 211 17 " class = " line " / > < line x1 = " 29 " y1 = " 27 " x2 = " 29 " y2 = " 51 " class = " line " / > < line x1 = " 201 " y1 = " 51 " x2 = " 201 " y2 = " 27 " class = " line " / > < path d = " M29 51 Q29 61 39 61 " class = " line " / > < path d = " M191 61 Q201 61 201 51 " class = " line " / > < a xlink : href = " # let_statement " xlink : title = " let_statement " > < rect x = " 51 " y = " 47 " width = " 108 " height = " 32 " / > < rect x = " 49 " y = " 45 " width = " 108 " height = " 32 " class = " nonterminal " / > < text class = " nonterminal " x = " 59 " y = " 65 " > let_statement < / text > < / a > < line x1 = " 39 " y1 = " 61 " x2 = " 49 " y2 = " 61 " class = " line " / > < line x1 = " 157 " y1 = " 61 " x2 = " 167 " y2 = " 61 " class = " line " / > < line x1 = " 167 " y1 = " 61 " x2 = " 191 " y2 = " 61 " class = " line " / > < line x1 = " 29 " y1 = " 51 " x2 = " 29 " y2 = " 71 " class = " line " / > < line x1 = " 201 " y1 = " 71 " x2 = " 201 " y2 = " 51 " class = " line " / > < line x1 = " 29 " y1 = " 71 " x2 = " 29 " y2 = " 95 " class = " line " / > < line x1 = " 201 " y1 = " 95 " x2 = " 201 " y2 = " 71 " class = " line " / > < path d = " M29 95 Q29 105 39 105 " class = " line " / > < path d = " M191 105 Q201 105 201 95 " class = " line " / > < a xlink : href = " # filter_statement " xlink : title = " filter_statement " > < rect x = " 51 " y = " 91 " width = " 120 " height = " 32 " / > < rect x = " 49 " y = " 89 " width = " 120 " height = " 32 " class = " nonterminal " / > < text class = " nonterminal " x = " 59 " y = " 109 " > filter_statement < / text > < / a > < line x1 = " 39 " y1 = " 105 " x2 = " 49 " y2 = " 105 " class = " line " / > < line x1 = " 169 " y1 = " 105 " x2 = " 179 " y2 = " 105 " class = " line " / > < line x1 = " 179 " y1 = " 105 " x2 = " 191 " y2 = " 105 " class = " line " / > < 
line x1 = " 29 " y1 = " 95 " x2 = " 29 " y2 = " 115 " class = " line " / > < line x1 = " 201 " y1 = " 115 " x2 = " 201 " y2 = " 95 " class = " line " / > < line x1 = " 29 " y1 = " 115 " x2 = " 29 " y2 = " 139 " class = " line " / > < line x1 = " 201 " y1 = " 139 " x2 = " 201 " y2 = " 115 " class = " line " / > < path d = " M29 139 Q29 149 39 149 " class = " line " / > < path d = " M191 149 Q201 149 201 139 " class = " line " / > < a xlink : href = " # collect_statement " xlink : title = " collect_statement " > < rect x = " 51 " y = " 135 " width = " 132 " height = " 32 " / > < rect x = " 49 " y = " 133 " width = " 132 " height = " 32 " class = " nonterminal " / > < text class = " nonterminal " x = " 59 " y = " 153 " > collect_statement < / text > < / a > < line x1 = " 39 " y1 = " 149 " x2 = " 49 " y2 = " 149 " class = " line " / > < line x1 = " 181 " y1 = " 149 " x2 = " 191 " y2 = " 149 " class = " line " / > < line x1 = " 29 " y1 = " 139 " x2 = " 29 " y2 = " 159 " class = " line " / > < line x1 = " 201 " y1 = " 159 " x2 = " 201 " y2 = " 139 " class = " line " / > < line x1 = " 29 " y1 = " 159 " x2 = " 29 " y2 = " 183 " class = " line " / > < line x1 = " 201 " y1 = " 183 " x2 = " 201 " y2 = " 159 " class = " line " / > < path d = " M29 183 Q29 193 39 193 " class = " line " / > < path d = " M191 193 Q201 193 201 183 " class = " line " / > < a xlink : href = " # sort_statement " xlink : title = " sort_statement " > < rect x = " 51 " y = " 179 " width = " 118 " height = " 32 " / > < rect x = " 49 " y = " 177 " width = " 118 " height = " 32 " class = " nonterminal " / > < text class = " nonterminal " x = " 59 " y = " 197 " > sort_statement < / text > < / a > < line x1 = " 39 " y1 = " 193 " x2 = " 49 " y2 = " 193 " class = " line " / > < line x1 = " 167 " y1 = " 193 " x2 = " 177 " y2 = " 193 " class = " line " / > < line x1 = " 177 " y1 = " 193 " x2 = " 191 " y2 = " 193 " class = " line " / > < line x1 = " 29 " y1 = " 183 " x2 = " 29 " y2 = " 203 " class = " line " / > < line x1 = " 201 " y1 = " 203 " x2 = " 201 " y2 = " 183 " class = " line " / > < line x1 = " 29 " y1 = " 203 " x2 = " 29 " y2 = " 227 " class = " line " / > < line x1 = " 201 " y1 = " 227 " x2 = " 201 " y2 = " 203 " class = " line " / > < path d = " M29 227 Q29 237 39 237 " class = " line " / > < path d = " M191 237 Q201 237 201 227 " class = " line " / > < a xlink : href = " # limit_statement " xlink : title = " limit_statement " > < rect x = " 51 " y = " 223 " width = " 118 " height = " 32 " / > < rect x = " 49 " y = " 221 " width = " 118 " height = " 32 " class = " nonterminal " / > < text class = " nonterminal " x = " 59 " y = " 241 " > limit_statement < / text > < / a > < line x1 = " 39 " y1 = " 237 " x2 = " 49 " y2 = " 237 " class = " line " / > < line x1 = " 167 " y1 = " 237 " x2 = " 177 " y2 = " 237 " class = " line " / > < line x1 = " 177 " y1 = " 237 " x2 = " 191 " y2 = " 237 " class = " line " / > < line x1 = " 214 " y1 = " 17 " x2 = " 211 " y2 = " 17 " class = " line " / > < polygon points = " 221 17 229 13 229 21 " class = " filled " / > < polygon points = " 221 17 213 13 213 21 " class = " filled " / > < / svg > < xhtml : p xmlns : xhtml = " http : / / www . w3 . org / 1999 / xhtml " > referenced by : < xhtml : ul > < xhtml : li > < xhtml : a href = " # query " title = " query " > query < / xhtml : a > < / xhtml : li > < / xhtml : ul > < / xhtml : p > < xhtml : br xmlns : xhtml = " http : / / www . w3 . org / 1999 / xhtml " / > < xhtml : p xmlns : xhtml = " http : / / www . w3 . 
org / 1999 / xhtml " style = " font - size : 14px ; font - weight : bold " > < xhtml : a name = " for_statement " > for_statement : < / xhtml : a > < / xhtml : p > < svg xmlns = " http : / / www . w3 . org / 2000 / svg " xmlns : xlink = " http : / / www . w3 . org / 1999 / xlink " width = " 426 " height = " 36 " > < defs > < style type = " text / css " > <nl> + @ namespace " http : / / www . w3 . org / 2000 / svg " ; <nl> + . line { fill : none ; stroke : # 332900 ; } <nl> + filled { fill : # 332900 ; stroke : none ; } <nl> + text { font - family : Verdana , Sans - serif ; <nl> + font - size : 12px ; <nl> + fill : # 4D3D00 ; <nl> + } <nl> + text . terminal { font - family : Verdana , Sans - serif ; <nl> + font - size : 12px ; <nl> + fill : # 4D3D00 ; <nl> + font - weight : bold ; <nl> + } <nl> + rect , circle , polygon { fill : # 332900 ; stroke : # 332900 ; } <nl> + rect . terminal { fill : # FFCC00 ; stroke : # 332900 ; } <nl> + rect . nonterminal { fill : # FFEB99 ; stroke : # 332900 ; } <nl> + rect . text { fill : none ; stroke : none ; } <nl> + polygon . regexp { fill : # FFF5CC ; stroke : # 332900 ; } <nl> + < / style > < / defs > < polygon points = " 9 17 1 13 1 21 " class = " filled " / > < polygon points = " 17 17 9 13 9 21 " class = " filled " / > < line x1 = " 17 " y1 = " 17 " x2 = " 19 " y2 = " 17 " class = " line " / > < a xlink : href = " # T_FOR " xlink : title = " T_FOR " > < rect x = " 31 " y = " 3 " width = " 60 " height = " 32 " / > < rect x = " 29 " y = " 1 " width = " 60 " height = " 32 " class = " nonterminal " / > < text class = " nonterminal " x = " 39 " y = " 21 " > T_FOR < / text > < / a > < line x1 = " 19 " y1 = " 17 " x2 = " 29 " y2 = " 17 " class = " line " / > < line x1 = " 89 " y1 = " 17 " x2 = " 99 " y2 = " 17 " class = " line " / > < a xlink : href = " # variable_name " xlink : title = " variable_name " > < rect x = " 111 " y = " 3 " width = " 112 " height = " 32 " / > < rect x = " 109 " y = " 1 " width = " 112 " height = " 32 " class = " nonterminal " / > < text class = " nonterminal " x = " 119 " y = " 21 " > variable_name < / text > < / a > < line x1 = " 99 " y1 = " 17 " x2 = " 109 " y2 = " 17 " class = " line " / > < line x1 = " 221 " y1 = " 17 " x2 = " 231 " y2 = " 17 " class = " line " / > < a xlink : href = " # T_IN " xlink : title = " T_IN " > < rect x = " 243 " y = " 3 " width = " 48 " height = " 32 " / > < rect x = " 241 " y = " 1 " width = " 48 " height = " 32 " class = " nonterminal " / > < text class = " nonterminal " x = " 251 " y = " 21 " > T_IN < / text > < / a > < line x1 = " 231 " y1 = " 17 " x2 = " 241 " y2 = " 17 " class = " line " / > < line x1 = " 289 " y1 = " 17 " x2 = " 299 " y2 = " 17 " class = " line " / > < a xlink : href = " # expression " xlink : title = " expression " > < rect x = " 311 " y = " 3 " width = " 88 " height = " 32 " / > < rect x = " 309 " y = " 1 " width = " 88 " height = " 32 " class = " nonterminal " / > < text class = " nonterminal " x = " 319 " y = " 21 " > expression < / text > < / a > < line x1 = " 299 " y1 = " 17 " x2 = " 309 " y2 = " 17 " class = " line " / > < line x1 = " 397 " y1 = " 17 " x2 = " 407 " y2 = " 17 " class = " line " / > < line x1 = " 410 " y1 = " 17 " x2 = " 407 " y2 = " 17 " class = " line " / > < polygon points = " 417 17 425 13 425 21 " class = " filled " / > < polygon points = " 417 17 409 13 409 21 " class = " filled " / > < / svg > < xhtml : p xmlns : xhtml = " http : / / www . w3 . 
org / 1999 / xhtml " > referenced by : < xhtml : ul > < xhtml : li > < xhtml : a href = " # statement_block_statement " title = " statement_block_statement " > statement_block_statement < / xhtml : a > < / xhtml : li > < / xhtml : ul > < / xhtml : p > < xhtml : br xmlns : xhtml = " http : / / www . w3 . org / 1999 / xhtml " / > < xhtml : p xmlns : xhtml = " http : / / www . w3 . org / 1999 / xhtml " style = " font - size : 14px ; font - weight : bold " > < xhtml : a name = " filter_statement " > filter_statement : < / xhtml : a > < / xhtml : p > < svg xmlns = " http : / / www . w3 . org / 2000 / svg " xmlns : xlink = " http : / / www . w3 . org / 1999 / xlink " width = " 242 " height = " 36 " > < defs > < style type = " text / css " > <nl> + @ namespace " http : / / www . w3 . org / 2000 / svg " ; <nl> + . line { fill : none ; stroke : # 332900 ; } <nl> + filled { fill : # 332900 ; stroke : none ; } <nl> + text { font - family : Verdana , Sans - serif ; <nl> + font - size : 12px ; <nl> + fill : # 4D3D00 ; <nl> + } <nl> + text . terminal { font - family : Verdana , Sans - serif ; <nl> + font - size : 12px ; <nl> + fill : # 4D3D00 ; <nl> + font - weight : bold ; <nl> + } <nl> + rect , circle , polygon { fill : # 332900 ; stroke : # 332900 ; } <nl> + rect . terminal { fill : # FFCC00 ; stroke : # 332900 ; } <nl> + rect . nonterminal { fill : # FFEB99 ; stroke : # 332900 ; } <nl> + rect . text { fill : none ; stroke : none ; } <nl> + polygon . regexp { fill : # FFF5CC ; stroke : # 332900 ; } <nl> + < / style > < / defs > < polygon points = " 9 17 1 13 1 21 " class = " filled " / > < polygon points = " 17 17 9 13 9 21 " class = " filled " / > < line x1 = " 17 " y1 = " 17 " x2 = " 19 " y2 = " 17 " class = " line " / > < a xlink : href = " # T_FILTER " xlink : title = " T_FILTER " > < rect x = " 31 " y = " 3 " width = " 76 " height = " 32 " / > < rect x = " 29 " y = " 1 " width = " 76 " height = " 32 " class = " nonterminal " / > < text class = " nonterminal " x = " 39 " y = " 21 " > T_FILTER < / text > < / a > < line x1 = " 19 " y1 = " 17 " x2 = " 29 " y2 = " 17 " class = " line " / > < line x1 = " 105 " y1 = " 17 " x2 = " 115 " y2 = " 17 " class = " line " / > < a xlink : href = " # expression " xlink : title = " expression " > < rect x = " 127 " y = " 3 " width = " 88 " height = " 32 " / > < rect x = " 125 " y = " 1 " width = " 88 " height = " 32 " class = " nonterminal " / > < text class = " nonterminal " x = " 135 " y = " 21 " > expression < / text > < / a > < line x1 = " 115 " y1 = " 17 " x2 = " 125 " y2 = " 17 " class = " line " / > < line x1 = " 213 " y1 = " 17 " x2 = " 223 " y2 = " 17 " class = " line " / > < line x1 = " 226 " y1 = " 17 " x2 = " 223 " y2 = " 17 " class = " line " / > < polygon points = " 233 17 241 13 241 21 " class = " filled " / > < polygon points = " 233 17 225 13 225 21 " class = " filled " / > < / svg > < xhtml : p xmlns : xhtml = " http : / / www . w3 . org / 1999 / xhtml " > referenced by : < xhtml : ul > < xhtml : li > < xhtml : a href = " # statement_block_statement " title = " statement_block_statement " > statement_block_statement < / xhtml : a > < / xhtml : li > < / xhtml : ul > < / xhtml : p > < xhtml : br xmlns : xhtml = " http : / / www . w3 . org / 1999 / xhtml " / > < xhtml : p xmlns : xhtml = " http : / / www . w3 . org / 1999 / xhtml " style = " font - size : 14px ; font - weight : bold " > < xhtml : a name = " let_statement " > let_statement : < / xhtml : a > < / xhtml : p > < svg xmlns = " http : / / www . w3 . 
org / 2000 / svg " xmlns : xlink = " http : / / www . w3 . org / 1999 / xlink " width = " 456 " height = " 36 " > < defs > < style type = " text / css " > <nl> + @ namespace " http : / / www . w3 . org / 2000 / svg " ; <nl> + . line { fill : none ; stroke : # 332900 ; } <nl> + filled { fill : # 332900 ; stroke : none ; } <nl> + text { font - family : Verdana , Sans - serif ; <nl> + font - size : 12px ; <nl> + fill : # 4D3D00 ; <nl> + } <nl> + text . terminal { font - family : Verdana , Sans - serif ; <nl> + font - size : 12px ; <nl> + fill : # 4D3D00 ; <nl> + font - weight : bold ; <nl> + } <nl> + rect , circle , polygon { fill : # 332900 ; stroke : # 332900 ; } <nl> + rect . terminal { fill : # FFCC00 ; stroke : # 332900 ; } <nl> + rect . nonterminal { fill : # FFEB99 ; stroke : # 332900 ; } <nl> + rect . text { fill : none ; stroke : none ; } <nl> + polygon . regexp { fill : # FFF5CC ; stroke : # 332900 ; } <nl> + < / style > < / defs > < polygon points = " 9 17 1 13 1 21 " class = " filled " / > < polygon points = " 17 17 9 13 9 21 " class = " filled " / > < line x1 = " 17 " y1 = " 17 " x2 = " 19 " y2 = " 17 " class = " line " / > < a xlink : href = " # T_LET " xlink : title = " T_LET " > < rect x = " 31 " y = " 3 " width = " 56 " height = " 32 " / > < rect x = " 29 " y = " 1 " width = " 56 " height = " 32 " class = " nonterminal " / > < text class = " nonterminal " x = " 39 " y = " 21 " > T_LET < / text > < / a > < line x1 = " 19 " y1 = " 17 " x2 = " 29 " y2 = " 17 " class = " line " / > < line x1 = " 85 " y1 = " 17 " x2 = " 95 " y2 = " 17 " class = " line " / > < a xlink : href = " # variable_name " xlink : title = " variable_name " > < rect x = " 107 " y = " 3 " width = " 112 " height = " 32 " / > < rect x = " 105 " y = " 1 " width = " 112 " height = " 32 " class = " nonterminal " / > < text class = " nonterminal " x = " 115 " y = " 21 " > variable_name < / text > < / a > < line x1 = " 95 " y1 = " 17 " x2 = " 105 " y2 = " 17 " class = " line " / > < line x1 = " 217 " y1 = " 17 " x2 = " 227 " y2 = " 17 " class = " line " / > < a xlink : href = " # T_ASSIGN " xlink : title = " T_ASSIGN " > < rect x = " 239 " y = " 3 " width = " 82 " height = " 32 " / > < rect x = " 237 " y = " 1 " width = " 82 " height = " 32 " class = " nonterminal " / > < text class = " nonterminal " x = " 247 " y = " 21 " > T_ASSIGN < / text > < / a > < line x1 = " 227 " y1 = " 17 " x2 = " 237 " y2 = " 17 " class = " line " / > < line x1 = " 319 " y1 = " 17 " x2 = " 329 " y2 = " 17 " class = " line " / > < a xlink : href = " # expression " xlink : title = " expression " > < rect x = " 341 " y = " 3 " width = " 88 " height = " 32 " / > < rect x = " 339 " y = " 1 " width = " 88 " height = " 32 " class = " nonterminal " / > < text class = " nonterminal " x = " 349 " y = " 21 " > expression < / text > < / a > < line x1 = " 329 " y1 = " 17 " x2 = " 339 " y2 = " 17 " class = " line " / > < line x1 = " 427 " y1 = " 17 " x2 = " 437 " y2 = " 17 " class = " line " / > < line x1 = " 440 " y1 = " 17 " x2 = " 437 " y2 = " 17 " class = " line " / > < polygon points = " 447 17 455 13 455 21 " class = " filled " / > < polygon points = " 447 17 439 13 439 21 " class = " filled " / > < / svg > < xhtml : p xmlns : xhtml = " http : / / www . w3 . 
org / 1999 / xhtml " > referenced by : < xhtml : ul > < xhtml : li > < xhtml : a href = " # statement_block_statement " title = " statement_block_statement " > statement_block_statement < / xhtml : a > < / xhtml : li > < / xhtml : ul > < / xhtml : p > < xhtml : br xmlns : xhtml = " http : / / www . w3 . org / 1999 / xhtml " / > < xhtml : p xmlns : xhtml = " http : / / www . w3 . org / 1999 / xhtml " style = " font - size : 14px ; font - weight : bold " > < xhtml : a name = " collect_statement " > collect_statement : < / xhtml : a > < / xhtml : p > < svg xmlns = " http : / / www . w3 . org / 2000 / svg " xmlns : xlink = " http : / / www . w3 . org / 1999 / xlink " width = " 450 " height = " 80 " > < defs > < style type = " text / css " > <nl> + @ namespace " http : / / www . w3 . org / 2000 / svg " ; <nl> + . line { fill : none ; stroke : # 332900 ; } <nl> + filled { fill : # 332900 ; stroke : none ; } <nl> + text { font - family : Verdana , Sans - serif ; <nl> + font - size : 12px ; <nl> + fill : # 4D3D00 ; <nl> + } <nl> + text . terminal { font - family : Verdana , Sans - serif ; <nl> + font - size : 12px ; <nl> + fill : # 4D3D00 ; <nl> + font - weight : bold ; <nl> + } <nl> + rect , circle , polygon { fill : # 332900 ; stroke : # 332900 ; } <nl> + rect . terminal { fill : # FFCC00 ; stroke : # 332900 ; } <nl> + rect . nonterminal { fill : # FFEB99 ; stroke : # 332900 ; } <nl> + rect . text { fill : none ; stroke : none ; } <nl> + polygon . regexp { fill : # FFF5CC ; stroke : # 332900 ; } <nl> + < / style > < / defs > < polygon points = " 9 61 1 57 1 65 " class = " filled " / > < polygon points = " 17 61 9 57 9 65 " class = " filled " / > < line x1 = " 17 " y1 = " 61 " x2 = " 19 " y2 = " 61 " class = " line " / > < a xlink : href = " # T_COLLECT " xlink : title = " T_COLLECT " > < rect x = " 31 " y = " 47 " width = " 92 " height = " 32 " / > < rect x = " 29 " y = " 45 " width = " 92 " height = " 32 " class = " nonterminal " / > < text class = " nonterminal " x = " 39 " y = " 65 " > T_COLLECT < / text > < / a > < line x1 = " 19 " y1 = " 61 " x2 = " 29 " y2 = " 61 " class = " line " / > < line x1 = " 121 " y1 = " 61 " x2 = " 131 " y2 = " 61 " class = " line " / > < a xlink : href = " # collect_element " xlink : title = " collect_element " > < rect x = " 163 " y = " 47 " width = " 118 " height = " 32 " / > < rect x = " 161 " y = " 45 " width = " 118 " height = " 32 " class = " nonterminal " / > < text class = " nonterminal " x = " 171 " y = " 65 " > collect_element < / text > < / a > < line x1 = " 151 " y1 = " 61 " x2 = " 161 " y2 = " 61 " class = " line " / > < line x1 = " 279 " y1 = " 61 " x2 = " 289 " y2 = " 61 " class = " line " / > < path d = " M131 61 L151 61 M150 61 Q141 61 141 51 L141 27 Q141 17 151 17 " class = " line " / > < path d = " M289 61 L309 61 M289 61 Q299 61 299 51 L299 27 Q299 17 289 17 " class = " line " / > < a xlink : href = " # T_COMMA " xlink : title = " T_COMMA " > < rect x = " 163 " y = " 3 " width = " 82 " height = " 32 " / > < rect x = " 161 " y = " 1 " width = " 82 " height = " 32 " class = " nonterminal " / > < text class = " nonterminal " x = " 171 " y = " 21 " > T_COMMA < / text > < / a > < line x1 = " 151 " y1 = " 17 " x2 = " 161 " y2 = " 17 " class = " line " / > < line x1 = " 243 " y1 = " 17 " x2 = " 253 " y2 = " 17 " class = " line " / > < line x1 = " 253 " y1 = " 17 " x2 = " 289 " y2 = " 17 " class = " line " / > < a xlink : href = " # optional_into " xlink : title = " optional_into " > < rect x = " 321 " y = " 47 " width = " 102 " height = " 32 " / > < 
rect x = " 319 " y = " 45 " width = " 102 " height = " 32 " class = " nonterminal " / > < text class = " nonterminal " x = " 329 " y = " 65 " > optional_into < / text > < / a > < line x1 = " 309 " y1 = " 61 " x2 = " 319 " y2 = " 61 " class = " line " / > < line x1 = " 421 " y1 = " 61 " x2 = " 431 " y2 = " 61 " class = " line " / > < line x1 = " 434 " y1 = " 61 " x2 = " 431 " y2 = " 61 " class = " line " / > < polygon points = " 441 61 449 57 449 65 " class = " filled " / > < polygon points = " 441 61 433 57 433 65 " class = " filled " / > < / svg > < xhtml : p xmlns : xhtml = " http : / / www . w3 . org / 1999 / xhtml " > referenced by : < xhtml : ul > < xhtml : li > < xhtml : a href = " # statement_block_statement " title = " statement_block_statement " > statement_block_statement < / xhtml : a > < / xhtml : li > < / xhtml : ul > < / xhtml : p > < xhtml : br xmlns : xhtml = " http : / / www . w3 . org / 1999 / xhtml " / > < xhtml : p xmlns : xhtml = " http : / / www . w3 . org / 1999 / xhtml " style = " font - size : 14px ; font - weight : bold " > < xhtml : a name = " collect_element " > collect_element : < / xhtml : a > < / xhtml : p > < svg xmlns = " http : / / www . w3 . org / 2000 / svg " xmlns : xlink = " http : / / www . w3 . org / 1999 / xlink " width = " 420 " height = " 80 " > < defs > < style type = " text / css " > <nl> + @ namespace " http : / / www . w3 . org / 2000 / svg " ; <nl> + . line { fill : none ; stroke : # 332900 ; } <nl> + filled { fill : # 332900 ; stroke : none ; } <nl> + text { font - family : Verdana , Sans - serif ; <nl> + font - size : 12px ; <nl> + fill : # 4D3D00 ; <nl> + } <nl> + text . terminal { font - family : Verdana , Sans - serif ; <nl> + font - size : 12px ; <nl> + fill : # 4D3D00 ; <nl> + font - weight : bold ; <nl> + } <nl> + rect , circle , polygon { fill : # 332900 ; stroke : # 332900 ; } <nl> + rect . terminal { fill : # FFCC00 ; stroke : # 332900 ; } <nl> + rect . nonterminal { fill : # FFEB99 ; stroke : # 332900 ; } <nl> + rect . text { fill : none ; stroke : none ; } <nl> + polygon . 
regexp { fill : # FFF5CC ; stroke : # 332900 ; } <nl> + < / style > < / defs > < polygon points = " 9 17 1 13 1 21 " class = " filled " / > < polygon points = " 17 17 9 13 9 21 " class = " filled " / > < line x1 = " 17 " y1 = " 17 " x2 = " 19 " y2 = " 17 " class = " line " / > < a xlink : href = " # variable_name " xlink : title = " variable_name " > < rect x = " 51 " y = " 3 " width = " 112 " height = " 32 " / > < rect x = " 49 " y = " 1 " width = " 112 " height = " 32 " class = " nonterminal " / > < text class = " nonterminal " x = " 59 " y = " 21 " > variable_name < / text > < / a > < line x1 = " 39 " y1 = " 17 " x2 = " 49 " y2 = " 17 " class = " line " / > < line x1 = " 161 " y1 = " 17 " x2 = " 171 " y2 = " 17 " class = " line " / > < a xlink : href = " # T_ASSIGN " xlink : title = " T_ASSIGN " > < rect x = " 183 " y = " 3 " width = " 82 " height = " 32 " / > < rect x = " 181 " y = " 1 " width = " 82 " height = " 32 " class = " nonterminal " / > < text class = " nonterminal " x = " 191 " y = " 21 " > T_ASSIGN < / text > < / a > < line x1 = " 171 " y1 = " 17 " x2 = " 181 " y2 = " 17 " class = " line " / > < line x1 = " 263 " y1 = " 17 " x2 = " 273 " y2 = " 17 " class = " line " / > < a xlink : href = " # expression " xlink : title = " expression " > < rect x = " 285 " y = " 3 " width = " 88 " height = " 32 " / > < rect x = " 283 " y = " 1 " width = " 88 " height = " 32 " class = " nonterminal " / > < text class = " nonterminal " x = " 293 " y = " 21 " > expression < / text > < / a > < line x1 = " 273 " y1 = " 17 " x2 = " 283 " y2 = " 17 " class = " line " / > < line x1 = " 371 " y1 = " 17 " x2 = " 381 " y2 = " 17 " class = " line " / > < line x1 = " 19 " y1 = " 17 " x2 = " 39 " y2 = " 17 " class = " line " / > < line x1 = " 381 " y1 = " 17 " x2 = " 401 " y2 = " 17 " class = " line " / > < path d = " M19 17 Q29 17 29 27 " class = " line " / > < path d = " M391 27 Q391 17 401 17 " class = " line " / > < line x1 = " 29 " y1 = " 27 " x2 = " 29 " y2 = " 51 " class = " line " / > < line x1 = " 391 " y1 = " 51 " x2 = " 391 " y2 = " 27 " class = " line " / > < path d = " M29 51 Q29 61 39 61 " class = " line " / > < path d = " M381 61 Q391 61 391 51 " class = " line " / > < a xlink : href = " # expression " xlink : title = " expression " > < rect x = " 51 " y = " 47 " width = " 88 " height = " 32 " / > < rect x = " 49 " y = " 45 " width = " 88 " height = " 32 " class = " nonterminal " / > < text class = " nonterminal " x = " 59 " y = " 65 " > expression < / text > < / a > < line x1 = " 39 " y1 = " 61 " x2 = " 49 " y2 = " 61 " class = " line " / > < line x1 = " 137 " y1 = " 61 " x2 = " 147 " y2 = " 61 " class = " line " / > < line x1 = " 147 " y1 = " 61 " x2 = " 381 " y2 = " 61 " class = " line " / > < line x1 = " 404 " y1 = " 17 " x2 = " 401 " y2 = " 17 " class = " line " / > < polygon points = " 411 17 419 13 419 21 " class = " filled " / > < polygon points = " 411 17 403 13 403 21 " class = " filled " / > < / svg > < xhtml : p xmlns : xhtml = " http : / / www . w3 . org / 1999 / xhtml " > referenced by : < xhtml : ul > < xhtml : li > < xhtml : a href = " # collect_statement " title = " collect_statement " > collect_statement < / xhtml : a > < / xhtml : li > < / xhtml : ul > < / xhtml : p > < xhtml : br xmlns : xhtml = " http : / / www . w3 . org / 1999 / xhtml " / > < xhtml : p xmlns : xhtml = " http : / / www . w3 . 
org / 1999 / xhtml " style = " font - size : 14px ; font - weight : bold " > < xhtml : a name = " optional_into " > optional_into : < / xhtml : a > < / xhtml : p > < svg xmlns = " http : / / www . w3 . org / 2000 / svg " xmlns : xlink = " http : / / www . w3 . org / 1999 / xlink " width = " 296 " height = " 52 " > < defs > < style type = " text / css " > <nl> + @ namespace " http : / / www . w3 . org / 2000 / svg " ; <nl> + . line { fill : none ; stroke : # 332900 ; } <nl> + filled { fill : # 332900 ; stroke : none ; } <nl> + text { font - family : Verdana , Sans - serif ; <nl> + font - size : 12px ; <nl> + fill : # 4D3D00 ; <nl> + } <nl> + text . terminal { font - family : Verdana , Sans - serif ; <nl> + font - size : 12px ; <nl> + fill : # 4D3D00 ; <nl> + font - weight : bold ; <nl> + } <nl> + rect , circle , polygon { fill : # 332900 ; stroke : # 332900 ; } <nl> + rect . terminal { fill : # FFCC00 ; stroke : # 332900 ; } <nl> + rect . nonterminal { fill : # FFEB99 ; stroke : # 332900 ; } <nl> + rect . text { fill : none ; stroke : none ; } <nl> + polygon . regexp { fill : # FFF5CC ; stroke : # 332900 ; } <nl> + < / style > < / defs > < polygon points = " 9 17 1 13 1 21 " class = " filled " / > < polygon points = " 17 17 9 13 9 21 " class = " filled " / > < line x1 = " 17 " y1 = " 17 " x2 = " 19 " y2 = " 17 " class = " line " / > < a xlink : href = " # T_INTO " xlink : title = " T_INTO " > < rect x = " 51 " y = " 3 " width = " 66 " height = " 32 " / > < rect x = " 49 " y = " 1 " width = " 66 " height = " 32 " class = " nonterminal " / > < text class = " nonterminal " x = " 59 " y = " 21 " > T_INTO < / text > < / a > < line x1 = " 39 " y1 = " 17 " x2 = " 49 " y2 = " 17 " class = " line " / > < line x1 = " 115 " y1 = " 17 " x2 = " 125 " y2 = " 17 " class = " line " / > < a xlink : href = " # variable_name " xlink : title = " variable_name " > < rect x = " 137 " y = " 3 " width = " 112 " height = " 32 " / > < rect x = " 135 " y = " 1 " width = " 112 " height = " 32 " class = " nonterminal " / > < text class = " nonterminal " x = " 145 " y = " 21 " > variable_name < / text > < / a > < line x1 = " 125 " y1 = " 17 " x2 = " 135 " y2 = " 17 " class = " line " / > < line x1 = " 247 " y1 = " 17 " x2 = " 257 " y2 = " 17 " class = " line " / > < line x1 = " 19 " y1 = " 17 " x2 = " 39 " y2 = " 17 " class = " line " / > < line x1 = " 257 " y1 = " 17 " x2 = " 277 " y2 = " 17 " class = " line " / > < path d = " M19 17 Q29 17 29 27 " class = " line " / > < path d = " M267 27 Q267 17 277 17 " class = " line " / > < line x1 = " 29 " y1 = " 27 " x2 = " 29 " y2 = " 41 " class = " line " / > < line x1 = " 267 " y1 = " 41 " x2 = " 267 " y2 = " 27 " class = " line " / > < path d = " M29 41 Q29 51 39 51 " class = " line " / > < path d = " M257 51 Q267 51 267 41 " class = " line " / > < line x1 = " 39 " y1 = " 51 " x2 = " 49 " y2 = " 51 " class = " line " / > < line x1 = " 49 " y1 = " 51 " x2 = " 257 " y2 = " 51 " class = " line " / > < line x1 = " 280 " y1 = " 17 " x2 = " 277 " y2 = " 17 " class = " line " / > < polygon points = " 287 17 295 13 295 21 " class = " filled " / > < polygon points = " 287 17 279 13 279 21 " class = " filled " / > < / svg > < xhtml : p xmlns : xhtml = " http : / / www . w3 . org / 1999 / xhtml " > referenced by : < xhtml : ul > < xhtml : li > < xhtml : a href = " # collect_statement " title = " collect_statement " > collect_statement < / xhtml : a > < / xhtml : li > < / xhtml : ul > < / xhtml : p > < xhtml : br xmlns : xhtml = " http : / / www . w3 . 
org / 1999 / xhtml " / > < xhtml : p xmlns : xhtml = " http : / / www . w3 . org / 1999 / xhtml " style = " font - size : 14px ; font - weight : bold " > < xhtml : a name = " sort_statement " > sort_statement : < / xhtml : a > < / xhtml : p > < svg xmlns = " http : / / www . w3 . org / 2000 / svg " xmlns : xlink = " http : / / www . w3 . org / 1999 / xlink " width = " 290 " height = " 80 " > < defs > < style type = " text / css " > <nl> + @ namespace " http : / / www . w3 . org / 2000 / svg " ; <nl> + . line { fill : none ; stroke : # 332900 ; } <nl> + filled { fill : # 332900 ; stroke : none ; } <nl> + text { font - family : Verdana , Sans - serif ; <nl> + font - size : 12px ; <nl> + fill : # 4D3D00 ; <nl> + } <nl> + text . terminal { font - family : Verdana , Sans - serif ; <nl> + font - size : 12px ; <nl> + fill : # 4D3D00 ; <nl> + font - weight : bold ; <nl> + } <nl> + rect , circle , polygon { fill : # 332900 ; stroke : # 332900 ; } <nl> + rect . terminal { fill : # FFCC00 ; stroke : # 332900 ; } <nl> + rect . nonterminal { fill : # FFEB99 ; stroke : # 332900 ; } <nl> + rect . text { fill : none ; stroke : none ; } <nl> + polygon . regexp { fill : # FFF5CC ; stroke : # 332900 ; } <nl> + < / style > < / defs > < polygon points = " 9 61 1 57 1 65 " class = " filled " / > < polygon points = " 17 61 9 57 9 65 " class = " filled " / > < line x1 = " 17 " y1 = " 61 " x2 = " 19 " y2 = " 61 " class = " line " / > < a xlink : href = " # T_SORT " xlink : title = " T_SORT " > < rect x = " 31 " y = " 47 " width = " 68 " height = " 32 " / > < rect x = " 29 " y = " 45 " width = " 68 " height = " 32 " class = " nonterminal " / > < text class = " nonterminal " x = " 39 " y = " 65 " > T_SORT < / text > < / a > < line x1 = " 19 " y1 = " 61 " x2 = " 29 " y2 = " 61 " class = " line " / > < line x1 = " 97 " y1 = " 61 " x2 = " 107 " y2 = " 61 " class = " line " / > < a xlink : href = " # sort_element " xlink : title = " sort_element " > < rect x = " 139 " y = " 47 " width = " 104 " height = " 32 " / > < rect x = " 137 " y = " 45 " width = " 104 " height = " 32 " class = " nonterminal " / > < text class = " nonterminal " x = " 147 " y = " 65 " > sort_element < / text > < / a > < line x1 = " 127 " y1 = " 61 " x2 = " 137 " y2 = " 61 " class = " line " / > < line x1 = " 241 " y1 = " 61 " x2 = " 251 " y2 = " 61 " class = " line " / > < path d = " M107 61 L127 61 M126 61 Q117 61 117 51 L117 27 Q117 17 127 17 " class = " line " / > < path d = " M251 61 L271 61 M251 61 Q261 61 261 51 L261 27 Q261 17 251 17 " class = " line " / > < a xlink : href = " # T_COMMA " xlink : title = " T_COMMA " > < rect x = " 139 " y = " 3 " width = " 82 " height = " 32 " / > < rect x = " 137 " y = " 1 " width = " 82 " height = " 32 " class = " nonterminal " / > < text class = " nonterminal " x = " 147 " y = " 21 " > T_COMMA < / text > < / a > < line x1 = " 127 " y1 = " 17 " x2 = " 137 " y2 = " 17 " class = " line " / > < line x1 = " 219 " y1 = " 17 " x2 = " 229 " y2 = " 17 " class = " line " / > < line x1 = " 229 " y1 = " 17 " x2 = " 251 " y2 = " 17 " class = " line " / > < line x1 = " 274 " y1 = " 61 " x2 = " 271 " y2 = " 61 " class = " line " / > < polygon points = " 281 61 289 57 289 65 " class = " filled " / > < polygon points = " 281 61 273 57 273 65 " class = " filled " / > < / svg > < xhtml : p xmlns : xhtml = " http : / / www . w3 . 
 ref">
+ sort_element ::= expression sort_direction <nl>
+ referenced by: sort_statement <nl>
org / 1999 / xlink " width = " 166 " height = " 96 " > < defs > < style type = " text / css " > <nl> + @ namespace " http : / / www . w3 . org / 2000 / svg " ; <nl> + . line { fill : none ; stroke : # 332900 ; } <nl> + filled { fill : # 332900 ; stroke : none ; } <nl> + text { font - family : Verdana , Sans - serif ; <nl> + font - size : 12px ; <nl> + fill : # 4D3D00 ; <nl> + } <nl> + text . terminal { font - family : Verdana , Sans - serif ; <nl> + font - size : 12px ; <nl> + fill : # 4D3D00 ; <nl> + font - weight : bold ; <nl> + } <nl> + rect , circle , polygon { fill : # 332900 ; stroke : # 332900 ; } <nl> + rect . terminal { fill : # FFCC00 ; stroke : # 332900 ; } <nl> + rect . nonterminal { fill : # FFEB99 ; stroke : # 332900 ; } <nl> + rect . text { fill : none ; stroke : none ; } <nl> + polygon . regexp { fill : # FFF5CC ; stroke : # 332900 ; } <nl> + < / style > < / defs > < polygon points = " 9 17 1 13 1 21 " class = " filled " / > < polygon points = " 17 17 9 13 9 21 " class = " filled " / > < line x1 = " 17 " y1 = " 17 " x2 = " 19 " y2 = " 17 " class = " line " / > < a xlink : href = " # T_ASC " xlink : title = " T_ASC " > < rect x = " 51 " y = " 3 " width = " 60 " height = " 32 " / > < rect x = " 49 " y = " 1 " width = " 60 " height = " 32 " class = " nonterminal " / > < text class = " nonterminal " x = " 59 " y = " 21 " > T_ASC < / text > < / a > < line x1 = " 39 " y1 = " 17 " x2 = " 49 " y2 = " 17 " class = " line " / > < line x1 = " 109 " y1 = " 17 " x2 = " 119 " y2 = " 17 " class = " line " / > < line x1 = " 119 " y1 = " 17 " x2 = " 127 " y2 = " 17 " class = " line " / > < line x1 = " 19 " y1 = " 17 " x2 = " 39 " y2 = " 17 " class = " line " / > < line x1 = " 127 " y1 = " 17 " x2 = " 147 " y2 = " 17 " class = " line " / > < path d = " M19 17 Q29 17 29 27 " class = " line " / > < path d = " M137 27 Q137 17 147 17 " class = " line " / > < line x1 = " 29 " y1 = " 27 " x2 = " 29 " y2 = " 51 " class = " line " / > < line x1 = " 137 " y1 = " 51 " x2 = " 137 " y2 = " 27 " class = " line " / > < path d = " M29 51 Q29 61 39 61 " class = " line " / > < path d = " M127 61 Q137 61 137 51 " class = " line " / > < a xlink : href = " # T_DESC " xlink : title = " T_DESC " > < rect x = " 51 " y = " 47 " width = " 68 " height = " 32 " / > < rect x = " 49 " y = " 45 " width = " 68 " height = " 32 " class = " nonterminal " / > < text class = " nonterminal " x = " 59 " y = " 65 " > T_DESC < / text > < / a > < line x1 = " 39 " y1 = " 61 " x2 = " 49 " y2 = " 61 " class = " line " / > < line x1 = " 117 " y1 = " 61 " x2 = " 127 " y2 = " 61 " class = " line " / > < line x1 = " 29 " y1 = " 51 " x2 = " 29 " y2 = " 71 " class = " line " / > < line x1 = " 137 " y1 = " 71 " x2 = " 137 " y2 = " 51 " class = " line " / > < line x1 = " 29 " y1 = " 71 " x2 = " 29 " y2 = " 85 " class = " line " / > < line x1 = " 137 " y1 = " 85 " x2 = " 137 " y2 = " 71 " class = " line " / > < path d = " M29 85 Q29 95 39 95 " class = " line " / > < path d = " M127 95 Q137 95 137 85 " class = " line " / > < line x1 = " 39 " y1 = " 95 " x2 = " 49 " y2 = " 95 " class = " line " / > < line x1 = " 49 " y1 = " 95 " x2 = " 127 " y2 = " 95 " class = " line " / > < line x1 = " 150 " y1 = " 17 " x2 = " 147 " y2 = " 17 " class = " line " / > < polygon points = " 157 17 165 13 165 21 " class = " filled " / > < polygon points = " 157 17 149 13 149 21 " class = " filled " / > < / svg > < xhtml : p xmlns : xhtml = " http : / / www . w3 . 
 ref">
+ limit_statement ::= T_LIMIT signed_number | T_LIMIT signed_number T_COMMA signed_number <nl>
+ referenced by: statement_block_statement <nl>
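 ref">
+ Example (a sketch, assuming T_LIMIT maps to LIMIT; in the two-argument form the first signed_number is presumably an offset and the second a count): <nl>
+ LIMIT 10 <nl>
+ LIMIT 100, 10 <nl>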
" y1 = " 61 " x2 = " 49 " y2 = " 61 " class = " line " / > < line x1 = " 117 " y1 = " 61 " x2 = " 127 " y2 = " 61 " class = " line " / > < a xlink : href = " # signed_number " xlink : title = " signed_number " > < rect x = " 139 " y = " 47 " width = " 118 " height = " 32 " / > < rect x = " 137 " y = " 45 " width = " 118 " height = " 32 " class = " nonterminal " / > < text class = " nonterminal " x = " 147 " y = " 65 " > signed_number < / text > < / a > < line x1 = " 127 " y1 = " 61 " x2 = " 137 " y2 = " 61 " class = " line " / > < line x1 = " 255 " y1 = " 61 " x2 = " 265 " y2 = " 61 " class = " line " / > < a xlink : href = " # T_COMMA " xlink : title = " T_COMMA " > < rect x = " 277 " y = " 47 " width = " 82 " height = " 32 " / > < rect x = " 275 " y = " 45 " width = " 82 " height = " 32 " class = " nonterminal " / > < text class = " nonterminal " x = " 285 " y = " 65 " > T_COMMA < / text > < / a > < line x1 = " 265 " y1 = " 61 " x2 = " 275 " y2 = " 61 " class = " line " / > < line x1 = " 357 " y1 = " 61 " x2 = " 367 " y2 = " 61 " class = " line " / > < a xlink : href = " # signed_number " xlink : title = " signed_number " > < rect x = " 379 " y = " 47 " width = " 118 " height = " 32 " / > < rect x = " 377 " y = " 45 " width = " 118 " height = " 32 " class = " nonterminal " / > < text class = " nonterminal " x = " 387 " y = " 65 " > signed_number < / text > < / a > < line x1 = " 367 " y1 = " 61 " x2 = " 377 " y2 = " 61 " class = " line " / > < line x1 = " 495 " y1 = " 61 " x2 = " 505 " y2 = " 61 " class = " line " / > < line x1 = " 528 " y1 = " 17 " x2 = " 525 " y2 = " 17 " class = " line " / > < polygon points = " 535 17 543 13 543 21 " class = " filled " / > < polygon points = " 535 17 527 13 527 21 " class = " filled " / > < / svg > < xhtml : p xmlns : xhtml = " http : / / www . w3 . org / 1999 / xhtml " > referenced by : < xhtml : ul > < xhtml : li > < xhtml : a href = " # statement_block_statement " title = " statement_block_statement " > statement_block_statement < / xhtml : a > < / xhtml : li > < / xhtml : ul > < / xhtml : p > < xhtml : br xmlns : xhtml = " http : / / www . w3 . org / 1999 / xhtml " / > < xhtml : p xmlns : xhtml = " http : / / www . w3 . org / 1999 / xhtml " style = " font - size : 14px ; font - weight : bold " > < xhtml : a name = " return_statement " > return_statement : < / xhtml : a > < / xhtml : p > < svg xmlns = " http : / / www . w3 . org / 2000 / svg " xmlns : xlink = " http : / / www . w3 . org / 1999 / xlink " width = " 250 " height = " 36 " > < defs > < style type = " text / css " > <nl> + @ namespace " http : / / www . w3 . org / 2000 / svg " ; <nl> + . line { fill : none ; stroke : # 332900 ; } <nl> + filled { fill : # 332900 ; stroke : none ; } <nl> + text { font - family : Verdana , Sans - serif ; <nl> + font - size : 12px ; <nl> + fill : # 4D3D00 ; <nl> + } <nl> + text . terminal { font - family : Verdana , Sans - serif ; <nl> + font - size : 12px ; <nl> + fill : # 4D3D00 ; <nl> + font - weight : bold ; <nl> + } <nl> + rect , circle , polygon { fill : # 332900 ; stroke : # 332900 ; } <nl> + rect . terminal { fill : # FFCC00 ; stroke : # 332900 ; } <nl> + rect . nonterminal { fill : # FFEB99 ; stroke : # 332900 ; } <nl> + rect . text { fill : none ; stroke : none ; } <nl> + polygon . 
+ expression ::= T_OPEN expression T_CLOSE <nl>
+     | T_OPEN query T_CLOSE <nl>
+     | operator_unary <nl>
+     | operator_binary <nl>
+     | operator_ternary <nl>
+     | T_STRING T_OPEN optional_function_call_arguments T_CLOSE FUNCCALL <nl>
+     | compound_type <nl>
+     | atomic_value <nl>
+     | ( reference_part | T_DOLLAR ) ( "." reference_part )* REFERENCE <nl>
+ referenced by: collect_element, document_element, expression, filter_statement, for_statement, let_statement, operator_binary, operator_ternary, operator_unary, optional_function_call_arguments, optional_list_elements, return_statement, sort_element <nl>
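+ Example expressions (a sketch, assuming T_OPEN and T_CLOSE are parentheses, the T_STRING form is a function call, and dotted reference_part chains are attribute accesses; the identifiers LENGTH, friends and u are hypothetical): <nl>
+ ( u.age + 1 ) <nl>
+ LENGTH(friends) <nl>
+ u.address.city <nl>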
 ref">
+ operator_unary ::= T_PLUS expression UPLUS <nl>
+     | T_MINUS expression UMINUS <nl>
+     | T_NOT expression T_NOT <nl>
+ referenced by: expression <nl>
+ operator_binary ::= expression T_OR expression <nl>
+     | expression T_AND expression <nl>
+     | expression T_PLUS expression <nl>
+     | expression T_MINUS expression <nl>
+     | expression T_TIMES expression <nl>
+     | expression T_DIV expression <nl>
+     | expression T_MOD expression <nl>
+     | expression T_EQ expression <nl>
+     | expression T_NE expression <nl>
+     | ... <nl>
32 " class = " nonterminal " / > < text class = " nonterminal " x = " 59 " y = " 417 " > expression < / text > < / a > < line x1 = " 39 " y1 = " 413 " x2 = " 49 " y2 = " 413 " class = " line " / > < line x1 = " 137 " y1 = " 413 " x2 = " 147 " y2 = " 413 " class = " line " / > < a xlink : href = " # T_LT " xlink : title = " T_LT " > < rect x = " 159 " y = " 399 " width = " 48 " height = " 32 " / > < rect x = " 157 " y = " 397 " width = " 48 " height = " 32 " class = " nonterminal " / > < text class = " nonterminal " x = " 167 " y = " 417 " > T_LT < / text > < / a > < line x1 = " 147 " y1 = " 413 " x2 = " 157 " y2 = " 413 " class = " line " / > < line x1 = " 205 " y1 = " 413 " x2 = " 215 " y2 = " 413 " class = " line " / > < a xlink : href = " # expression " xlink : title = " expression " > < rect x = " 227 " y = " 399 " width = " 88 " height = " 32 " / > < rect x = " 225 " y = " 397 " width = " 88 " height = " 32 " class = " nonterminal " / > < text class = " nonterminal " x = " 235 " y = " 417 " > expression < / text > < / a > < line x1 = " 215 " y1 = " 413 " x2 = " 225 " y2 = " 413 " class = " line " / > < line x1 = " 313 " y1 = " 413 " x2 = " 323 " y2 = " 413 " class = " line " / > < line x1 = " 323 " y1 = " 413 " x2 = " 351 " y2 = " 413 " class = " line " / > < line x1 = " 29 " y1 = " 403 " x2 = " 29 " y2 = " 423 " class = " line " / > < line x1 = " 361 " y1 = " 423 " x2 = " 361 " y2 = " 403 " class = " line " / > < line x1 = " 29 " y1 = " 423 " x2 = " 29 " y2 = " 447 " class = " line " / > < line x1 = " 361 " y1 = " 447 " x2 = " 361 " y2 = " 423 " class = " line " / > < path d = " M29 447 Q29 457 39 457 " class = " line " / > < path d = " M351 457 Q361 457 361 447 " class = " line " / > < a xlink : href = " # expression " xlink : title = " expression " > < rect x = " 51 " y = " 443 " width = " 88 " height = " 32 " / > < rect x = " 49 " y = " 441 " width = " 88 " height = " 32 " class = " nonterminal " / > < text class = " nonterminal " x = " 59 " y = " 461 " > expression < / text > < / a > < line x1 = " 39 " y1 = " 457 " x2 = " 49 " y2 = " 457 " class = " line " / > < line x1 = " 137 " y1 = " 457 " x2 = " 147 " y2 = " 457 " class = " line " / > < a xlink : href = " # T_GT " xlink : title = " T_GT " > < rect x = " 159 " y = " 443 " width = " 50 " height = " 32 " / > < rect x = " 157 " y = " 441 " width = " 50 " height = " 32 " class = " nonterminal " / > < text class = " nonterminal " x = " 167 " y = " 461 " > T_GT < / text > < / a > < line x1 = " 147 " y1 = " 457 " x2 = " 157 " y2 = " 457 " class = " line " / > < line x1 = " 207 " y1 = " 457 " x2 = " 217 " y2 = " 457 " class = " line " / > < a xlink : href = " # expression " xlink : title = " expression " > < rect x = " 229 " y = " 443 " width = " 88 " height = " 32 " / > < rect x = " 227 " y = " 441 " width = " 88 " height = " 32 " class = " nonterminal " / > < text class = " nonterminal " x = " 237 " y = " 461 " > expression < / text > < / a > < line x1 = " 217 " y1 = " 457 " x2 = " 227 " y2 = " 457 " class = " line " / > < line x1 = " 315 " y1 = " 457 " x2 = " 325 " y2 = " 457 " class = " line " / > < line x1 = " 325 " y1 = " 457 " x2 = " 351 " y2 = " 457 " class = " line " / > < line x1 = " 29 " y1 = " 447 " x2 = " 29 " y2 = " 467 " class = " line " / > < line x1 = " 361 " y1 = " 467 " x2 = " 361 " y2 = " 447 " class = " line " / > < line x1 = " 29 " y1 = " 467 " x2 = " 29 " y2 = " 491 " class = " line " / > < line x1 = " 361 " y1 = " 491 " x2 = " 361 " y2 = " 467 " class = " line " / > < path d = " M29 491 Q29 501 39 501 " class = 
" line " / > < path d = " M351 501 Q361 501 361 491 " class = " line " / > < a xlink : href = " # expression " xlink : title = " expression " > < rect x = " 51 " y = " 487 " width = " 88 " height = " 32 " / > < rect x = " 49 " y = " 485 " width = " 88 " height = " 32 " class = " nonterminal " / > < text class = " nonterminal " x = " 59 " y = " 505 " > expression < / text > < / a > < line x1 = " 39 " y1 = " 501 " x2 = " 49 " y2 = " 501 " class = " line " / > < line x1 = " 137 " y1 = " 501 " x2 = " 147 " y2 = " 501 " class = " line " / > < a xlink : href = " # T_LE " xlink : title = " T_LE " > < rect x = " 159 " y = " 487 " width = " 50 " height = " 32 " / > < rect x = " 157 " y = " 485 " width = " 50 " height = " 32 " class = " nonterminal " / > < text class = " nonterminal " x = " 167 " y = " 505 " > T_LE < / text > < / a > < line x1 = " 147 " y1 = " 501 " x2 = " 157 " y2 = " 501 " class = " line " / > < line x1 = " 207 " y1 = " 501 " x2 = " 217 " y2 = " 501 " class = " line " / > < a xlink : href = " # expression " xlink : title = " expression " > < rect x = " 229 " y = " 487 " width = " 88 " height = " 32 " / > < rect x = " 227 " y = " 485 " width = " 88 " height = " 32 " class = " nonterminal " / > < text class = " nonterminal " x = " 237 " y = " 505 " > expression < / text > < / a > < line x1 = " 217 " y1 = " 501 " x2 = " 227 " y2 = " 501 " class = " line " / > < line x1 = " 315 " y1 = " 501 " x2 = " 325 " y2 = " 501 " class = " line " / > < line x1 = " 325 " y1 = " 501 " x2 = " 351 " y2 = " 501 " class = " line " / > < line x1 = " 29 " y1 = " 491 " x2 = " 29 " y2 = " 511 " class = " line " / > < line x1 = " 361 " y1 = " 511 " x2 = " 361 " y2 = " 491 " class = " line " / > < line x1 = " 29 " y1 = " 511 " x2 = " 29 " y2 = " 535 " class = " line " / > < line x1 = " 361 " y1 = " 535 " x2 = " 361 " y2 = " 511 " class = " line " / > < path d = " M29 535 Q29 545 39 545 " class = " line " / > < path d = " M351 545 Q361 545 361 535 " class = " line " / > < a xlink : href = " # expression " xlink : title = " expression " > < rect x = " 51 " y = " 531 " width = " 88 " height = " 32 " / > < rect x = " 49 " y = " 529 " width = " 88 " height = " 32 " class = " nonterminal " / > < text class = " nonterminal " x = " 59 " y = " 549 " > expression < / text > < / a > < line x1 = " 39 " y1 = " 545 " x2 = " 49 " y2 = " 545 " class = " line " / > < line x1 = " 137 " y1 = " 545 " x2 = " 147 " y2 = " 545 " class = " line " / > < a xlink : href = " # T_GE " xlink : title = " T_GE " > < rect x = " 159 " y = " 531 " width = " 52 " height = " 32 " / > < rect x = " 157 " y = " 529 " width = " 52 " height = " 32 " class = " nonterminal " / > < text class = " nonterminal " x = " 167 " y = " 549 " > T_GE < / text > < / a > < line x1 = " 147 " y1 = " 545 " x2 = " 157 " y2 = " 545 " class = " line " / > < line x1 = " 209 " y1 = " 545 " x2 = " 219 " y2 = " 545 " class = " line " / > < a xlink : href = " # expression " xlink : title = " expression " > < rect x = " 231 " y = " 531 " width = " 88 " height = " 32 " / > < rect x = " 229 " y = " 529 " width = " 88 " height = " 32 " class = " nonterminal " / > < text class = " nonterminal " x = " 239 " y = " 549 " > expression < / text > < / a > < line x1 = " 219 " y1 = " 545 " x2 = " 229 " y2 = " 545 " class = " line " / > < line x1 = " 317 " y1 = " 545 " x2 = " 327 " y2 = " 545 " class = " line " / > < line x1 = " 327 " y1 = " 545 " x2 = " 351 " y2 = " 545 " class = " line " / > < line x1 = " 29 " y1 = " 535 " x2 = " 29 " y2 = " 555 " class = " line " / > < line x1 = " 361 
" y1 = " 555 " x2 = " 361 " y2 = " 535 " class = " line " / > < line x1 = " 29 " y1 = " 555 " x2 = " 29 " y2 = " 579 " class = " line " / > < line x1 = " 361 " y1 = " 579 " x2 = " 361 " y2 = " 555 " class = " line " / > < path d = " M29 579 Q29 589 39 589 " class = " line " / > < path d = " M351 589 Q361 589 361 579 " class = " line " / > < a xlink : href = " # expression " xlink : title = " expression " > < rect x = " 51 " y = " 575 " width = " 88 " height = " 32 " / > < rect x = " 49 " y = " 573 " width = " 88 " height = " 32 " class = " nonterminal " / > < text class = " nonterminal " x = " 59 " y = " 593 " > expression < / text > < / a > < line x1 = " 39 " y1 = " 589 " x2 = " 49 " y2 = " 589 " class = " line " / > < line x1 = " 137 " y1 = " 589 " x2 = " 147 " y2 = " 589 " class = " line " / > < a xlink : href = " # T_IN " xlink : title = " T_IN " > < rect x = " 159 " y = " 575 " width = " 48 " height = " 32 " / > < rect x = " 157 " y = " 573 " width = " 48 " height = " 32 " class = " nonterminal " / > < text class = " nonterminal " x = " 167 " y = " 593 " > T_IN < / text > < / a > < line x1 = " 147 " y1 = " 589 " x2 = " 157 " y2 = " 589 " class = " line " / > < line x1 = " 205 " y1 = " 589 " x2 = " 215 " y2 = " 589 " class = " line " / > < a xlink : href = " # expression " xlink : title = " expression " > < rect x = " 227 " y = " 575 " width = " 88 " height = " 32 " / > < rect x = " 225 " y = " 573 " width = " 88 " height = " 32 " class = " nonterminal " / > < text class = " nonterminal " x = " 235 " y = " 593 " > expression < / text > < / a > < line x1 = " 215 " y1 = " 589 " x2 = " 225 " y2 = " 589 " class = " line " / > < line x1 = " 313 " y1 = " 589 " x2 = " 323 " y2 = " 589 " class = " line " / > < line x1 = " 323 " y1 = " 589 " x2 = " 351 " y2 = " 589 " class = " line " / > < line x1 = " 374 " y1 = " 17 " x2 = " 371 " y2 = " 17 " class = " line " / > < polygon points = " 381 17 389 13 389 21 " class = " filled " / > < polygon points = " 381 17 373 13 373 21 " class = " filled " / > < / svg > < xhtml : p xmlns : xhtml = " http : / / www . w3 . org / 1999 / xhtml " > referenced by : < xhtml : ul > < xhtml : li > < xhtml : a href = " # expression " title = " expression " > expression < / xhtml : a > < / xhtml : li > < / xhtml : ul > < / xhtml : p > < xhtml : br xmlns : xhtml = " http : / / www . w3 . org / 1999 / xhtml " / > < xhtml : p xmlns : xhtml = " http : / / www . w3 . org / 1999 / xhtml " style = " font - size : 14px ; font - weight : bold " > < xhtml : a name = " operator_ternary " > operator_ternary : < / xhtml : a > < / xhtml : p > < svg xmlns = " http : / / www . w3 . org / 2000 / svg " xmlns : xlink = " http : / / www . w3 . org / 1999 / xlink " width = " 582 " height = " 36 " > < defs > < style type = " text / css " > <nl> + @ namespace " http : / / www . w3 . org / 2000 / svg " ; <nl> + . line { fill : none ; stroke : # 332900 ; } <nl> + filled { fill : # 332900 ; stroke : none ; } <nl> + text { font - family : Verdana , Sans - serif ; <nl> + font - size : 12px ; <nl> + fill : # 4D3D00 ; <nl> + } <nl> + text . terminal { font - family : Verdana , Sans - serif ; <nl> + font - size : 12px ; <nl> + fill : # 4D3D00 ; <nl> + font - weight : bold ; <nl> + } <nl> + rect , circle , polygon { fill : # 332900 ; stroke : # 332900 ; } <nl> + rect . terminal { fill : # FFCC00 ; stroke : # 332900 ; } <nl> + rect . nonterminal { fill : # FFEB99 ; stroke : # 332900 ; } <nl> + rect . text { fill : none ; stroke : none ; } <nl> + polygon . 
regexp { fill : # FFF5CC ; stroke : # 332900 ; } <nl> + < / style > < / defs > < polygon points = " 9 17 1 13 1 21 " class = " filled " / > < polygon points = " 17 17 9 13 9 21 " class = " filled " / > < line x1 = " 17 " y1 = " 17 " x2 = " 19 " y2 = " 17 " class = " line " / > < a xlink : href = " # expression " xlink : title = " expression " > < rect x = " 31 " y = " 3 " width = " 88 " height = " 32 " / > < rect x = " 29 " y = " 1 " width = " 88 " height = " 32 " class = " nonterminal " / > < text class = " nonterminal " x = " 39 " y = " 21 " > expression < / text > < / a > < line x1 = " 19 " y1 = " 17 " x2 = " 29 " y2 = " 17 " class = " line " / > < line x1 = " 117 " y1 = " 17 " x2 = " 127 " y2 = " 17 " class = " line " / > < a xlink : href = " # T_QUESTION " xlink : title = " T_QUESTION " > < rect x = " 139 " y = " 3 " width = " 100 " height = " 32 " / > < rect x = " 137 " y = " 1 " width = " 100 " height = " 32 " class = " nonterminal " / > < text class = " nonterminal " x = " 147 " y = " 21 " > T_QUESTION < / text > < / a > < line x1 = " 127 " y1 = " 17 " x2 = " 137 " y2 = " 17 " class = " line " / > < line x1 = " 237 " y1 = " 17 " x2 = " 247 " y2 = " 17 " class = " line " / > < a xlink : href = " # expression " xlink : title = " expression " > < rect x = " 259 " y = " 3 " width = " 88 " height = " 32 " / > < rect x = " 257 " y = " 1 " width = " 88 " height = " 32 " class = " nonterminal " / > < text class = " nonterminal " x = " 267 " y = " 21 " > expression < / text > < / a > < line x1 = " 247 " y1 = " 17 " x2 = " 257 " y2 = " 17 " class = " line " / > < line x1 = " 345 " y1 = " 17 " x2 = " 355 " y2 = " 17 " class = " line " / > < a xlink : href = " # T_COLON " xlink : title = " T_COLON " > < rect x = " 367 " y = " 3 " width = " 80 " height = " 32 " / > < rect x = " 365 " y = " 1 " width = " 80 " height = " 32 " class = " nonterminal " / > < text class = " nonterminal " x = " 375 " y = " 21 " > T_COLON < / text > < / a > < line x1 = " 355 " y1 = " 17 " x2 = " 365 " y2 = " 17 " class = " line " / > < line x1 = " 445 " y1 = " 17 " x2 = " 455 " y2 = " 17 " class = " line " / > < a xlink : href = " # expression " xlink : title = " expression " > < rect x = " 467 " y = " 3 " width = " 88 " height = " 32 " / > < rect x = " 465 " y = " 1 " width = " 88 " height = " 32 " class = " nonterminal " / > < text class = " nonterminal " x = " 475 " y = " 21 " > expression < / text > < / a > < line x1 = " 455 " y1 = " 17 " x2 = " 465 " y2 = " 17 " class = " line " / > < line x1 = " 553 " y1 = " 17 " x2 = " 563 " y2 = " 17 " class = " line " / > < line x1 = " 566 " y1 = " 17 " x2 = " 563 " y2 = " 17 " class = " line " / > < polygon points = " 573 17 581 13 581 21 " class = " filled " / > < polygon points = " 573 17 565 13 565 21 " class = " filled " / > < / svg > < xhtml : p xmlns : xhtml = " http : / / www . w3 . org / 1999 / xhtml " > referenced by : < xhtml : ul > < xhtml : li > < xhtml : a href = " # expression " title = " expression " > expression < / xhtml : a > < / xhtml : li > < / xhtml : ul > < / xhtml : p > < xhtml : br xmlns : xhtml = " http : / / www . w3 . org / 1999 / xhtml " / > < xhtml : p xmlns : xhtml = " http : / / www . w3 . org / 1999 / xhtml " style = " font - size : 14px ; font - weight : bold " > < xhtml : a name = " optional_function_call_arguments " > optional_function_call_arguments : < / xhtml : a > < / xhtml : p > < svg xmlns = " http : / / www . w3 . org / 2000 / svg " xmlns : xlink = " http : / / www . w3 . 
org / 1999 / xlink " width = " 226 " height = " 96 " > < defs > < style type = " text / css " > <nl> + @ namespace " http : / / www . w3 . org / 2000 / svg " ; <nl> + . line { fill : none ; stroke : # 332900 ; } <nl> + filled { fill : # 332900 ; stroke : none ; } <nl> + text { font - family : Verdana , Sans - serif ; <nl> + font - size : 12px ; <nl> + fill : # 4D3D00 ; <nl> + } <nl> + text . terminal { font - family : Verdana , Sans - serif ; <nl> + font - size : 12px ; <nl> + fill : # 4D3D00 ; <nl> + font - weight : bold ; <nl> + } <nl> + rect , circle , polygon { fill : # 332900 ; stroke : # 332900 ; } <nl> + rect . terminal { fill : # FFCC00 ; stroke : # 332900 ; } <nl> + rect . nonterminal { fill : # FFEB99 ; stroke : # 332900 ; } <nl> + rect . text { fill : none ; stroke : none ; } <nl> + polygon . regexp { fill : # FFF5CC ; stroke : # 332900 ; } <nl> + < / style > < / defs > < polygon points = " 9 61 1 57 1 65 " class = " filled " / > < polygon points = " 17 61 9 57 9 65 " class = " filled " / > < line x1 = " 17 " y1 = " 61 " x2 = " 19 " y2 = " 61 " class = " line " / > < a xlink : href = " # expression " xlink : title = " expression " > < rect x = " 71 " y = " 47 " width = " 88 " height = " 32 " / > < rect x = " 69 " y = " 45 " width = " 88 " height = " 32 " class = " nonterminal " / > < text class = " nonterminal " x = " 79 " y = " 65 " > expression < / text > < / a > < line x1 = " 59 " y1 = " 61 " x2 = " 69 " y2 = " 61 " class = " line " / > < line x1 = " 157 " y1 = " 61 " x2 = " 167 " y2 = " 61 " class = " line " / > < path d = " M39 61 L59 61 M58 61 Q49 61 49 51 L49 27 Q49 17 59 17 " class = " line " / > < path d = " M167 61 L187 61 M167 61 Q177 61 177 51 L177 27 Q177 17 167 17 " class = " line " / > < a xlink : href = " # T_COMMA " xlink : title = " T_COMMA " > < rect x = " 71 " y = " 3 " width = " 82 " height = " 32 " / > < rect x = " 69 " y = " 1 " width = " 82 " height = " 32 " class = " nonterminal " / > < text class = " nonterminal " x = " 79 " y = " 21 " > T_COMMA < / text > < / a > < line x1 = " 59 " y1 = " 17 " x2 = " 69 " y2 = " 17 " class = " line " / > < line x1 = " 151 " y1 = " 17 " x2 = " 161 " y2 = " 17 " class = " line " / > < line x1 = " 161 " y1 = " 17 " x2 = " 167 " y2 = " 17 " class = " line " / > < line x1 = " 19 " y1 = " 61 " x2 = " 39 " y2 = " 61 " class = " line " / > < line x1 = " 187 " y1 = " 61 " x2 = " 207 " y2 = " 61 " class = " line " / > < path d = " M19 61 Q29 61 29 71 " class = " line " / > < path d = " M197 71 Q197 61 207 61 " class = " line " / > < line x1 = " 29 " y1 = " 71 " x2 = " 29 " y2 = " 85 " class = " line " / > < line x1 = " 197 " y1 = " 85 " x2 = " 197 " y2 = " 71 " class = " line " / > < path d = " M29 85 Q29 95 39 95 " class = " line " / > < path d = " M187 95 Q197 95 197 85 " class = " line " / > < line x1 = " 39 " y1 = " 95 " x2 = " 49 " y2 = " 95 " class = " line " / > < line x1 = " 49 " y1 = " 95 " x2 = " 187 " y2 = " 95 " class = " line " / > < line x1 = " 210 " y1 = " 61 " x2 = " 207 " y2 = " 61 " class = " line " / > < polygon points = " 217 61 225 57 225 65 " class = " filled " / > < polygon points = " 217 61 209 57 209 65 " class = " filled " / > < / svg > < xhtml : p xmlns : xhtml = " http : / / www . w3 . org / 1999 / xhtml " > referenced by : < xhtml : ul > < xhtml : li > < xhtml : a href = " # expression " title = " expression " > expression < / xhtml : a > < / xhtml : li > < / xhtml : ul > < / xhtml : p > < xhtml : br xmlns : xhtml = " http : / / www . w3 . 
org / 1999 / xhtml " / > < xhtml : p xmlns : xhtml = " http : / / www . w3 . org / 1999 / xhtml " style = " font - size : 14px ; font - weight : bold " > < xhtml : a name = " compound_type " > compound_type : < / xhtml : a > < / xhtml : p > < svg xmlns = " http : / / www . w3 . org / 2000 / svg " xmlns : xlink = " http : / / www . w3 . org / 1999 / xlink " width = " 180 " height = " 80 " > < defs > < style type = " text / css " > <nl> + @ namespace " http : / / www . w3 . org / 2000 / svg " ; <nl> + . line { fill : none ; stroke : # 332900 ; } <nl> + filled { fill : # 332900 ; stroke : none ; } <nl> + text { font - family : Verdana , Sans - serif ; <nl> + font - size : 12px ; <nl> + fill : # 4D3D00 ; <nl> + } <nl> + text . terminal { font - family : Verdana , Sans - serif ; <nl> + font - size : 12px ; <nl> + fill : # 4D3D00 ; <nl> + font - weight : bold ; <nl> + } <nl> + rect , circle , polygon { fill : # 332900 ; stroke : # 332900 ; } <nl> + rect . terminal { fill : # FFCC00 ; stroke : # 332900 ; } <nl> + rect . nonterminal { fill : # FFEB99 ; stroke : # 332900 ; } <nl> + rect . text { fill : none ; stroke : none ; } <nl> + polygon . regexp { fill : # FFF5CC ; stroke : # 332900 ; } <nl> + < / style > < / defs > < polygon points = " 9 17 1 13 1 21 " class = " filled " / > < polygon points = " 17 17 9 13 9 21 " class = " filled " / > < line x1 = " 17 " y1 = " 17 " x2 = " 19 " y2 = " 17 " class = " line " / > < a xlink : href = " # list " xlink : title = " list " > < rect x = " 51 " y = " 3 " width = " 38 " height = " 32 " / > < rect x = " 49 " y = " 1 " width = " 38 " height = " 32 " class = " nonterminal " / > < text class = " nonterminal " x = " 59 " y = " 21 " > list < / text > < / a > < line x1 = " 39 " y1 = " 17 " x2 = " 49 " y2 = " 17 " class = " line " / > < line x1 = " 87 " y1 = " 17 " x2 = " 97 " y2 = " 17 " class = " line " / > < line x1 = " 97 " y1 = " 17 " x2 = " 141 " y2 = " 17 " class = " line " / > < line x1 = " 19 " y1 = " 17 " x2 = " 39 " y2 = " 17 " class = " line " / > < line x1 = " 141 " y1 = " 17 " x2 = " 161 " y2 = " 17 " class = " line " / > < path d = " M19 17 Q29 17 29 27 " class = " line " / > < path d = " M151 27 Q151 17 161 17 " class = " line " / > < line x1 = " 29 " y1 = " 27 " x2 = " 29 " y2 = " 51 " class = " line " / > < line x1 = " 151 " y1 = " 51 " x2 = " 151 " y2 = " 27 " class = " line " / > < path d = " M29 51 Q29 61 39 61 " class = " line " / > < path d = " M141 61 Q151 61 151 51 " class = " line " / > < a xlink : href = " # document " xlink : title = " document " > < rect x = " 51 " y = " 47 " width = " 82 " height = " 32 " / > < rect x = " 49 " y = " 45 " width = " 82 " height = " 32 " class = " nonterminal " / > < text class = " nonterminal " x = " 59 " y = " 65 " > document < / text > < / a > < line x1 = " 39 " y1 = " 61 " x2 = " 49 " y2 = " 61 " class = " line " / > < line x1 = " 131 " y1 = " 61 " x2 = " 141 " y2 = " 61 " class = " line " / > < line x1 = " 164 " y1 = " 17 " x2 = " 161 " y2 = " 17 " class = " line " / > < polygon points = " 171 17 179 13 179 21 " class = " filled " / > < polygon points = " 171 17 163 13 163 21 " class = " filled " / > < / svg > < xhtml : p xmlns : xhtml = " http : / / www . w3 . org / 1999 / xhtml " > referenced by : < xhtml : ul > < xhtml : li > < xhtml : a href = " # expression " title = " expression " > expression < / xhtml : a > < / xhtml : li > < / xhtml : ul > < / xhtml : p > < xhtml : br xmlns : xhtml = " http : / / www . w3 . org / 1999 / xhtml " / > < xhtml : p xmlns : xhtml = " http : / / www . w3 . 
org / 1999 / xhtml " style = " font - size : 14px ; font - weight : bold " > < xhtml : a name = " list " > list : < / xhtml : a > < / xhtml : p > < svg xmlns = " http : / / www . w3 . org / 2000 / svg " xmlns : xlink = " http : / / www . w3 . org / 1999 / xlink " width = " 476 " height = " 36 " > < defs > < style type = " text / css " > <nl> + @ namespace " http : / / www . w3 . org / 2000 / svg " ; <nl> + . line { fill : none ; stroke : # 332900 ; } <nl> + filled { fill : # 332900 ; stroke : none ; } <nl> + text { font - family : Verdana , Sans - serif ; <nl> + font - size : 12px ; <nl> + fill : # 4D3D00 ; <nl> + } <nl> + text . terminal { font - family : Verdana , Sans - serif ; <nl> + font - size : 12px ; <nl> + fill : # 4D3D00 ; <nl> + font - weight : bold ; <nl> + } <nl> + rect , circle , polygon { fill : # 332900 ; stroke : # 332900 ; } <nl> + rect . terminal { fill : # FFCC00 ; stroke : # 332900 ; } <nl> + rect . nonterminal { fill : # FFEB99 ; stroke : # 332900 ; } <nl> + rect . text { fill : none ; stroke : none ; } <nl> + polygon . regexp { fill : # FFF5CC ; stroke : # 332900 ; } <nl> + < / style > < / defs > < polygon points = " 9 17 1 13 1 21 " class = " filled " / > < polygon points = " 17 17 9 13 9 21 " class = " filled " / > < line x1 = " 17 " y1 = " 17 " x2 = " 19 " y2 = " 17 " class = " line " / > < a xlink : href = " # T_LIST_OPEN " xlink : title = " T_LIST_OPEN " > < rect x = " 31 " y = " 3 " width = " 104 " height = " 32 " / > < rect x = " 29 " y = " 1 " width = " 104 " height = " 32 " class = " nonterminal " / > < text class = " nonterminal " x = " 39 " y = " 21 " > T_LIST_OPEN < / text > < / a > < line x1 = " 19 " y1 = " 17 " x2 = " 29 " y2 = " 17 " class = " line " / > < line x1 = " 133 " y1 = " 17 " x2 = " 143 " y2 = " 17 " class = " line " / > < a xlink : href = " # optional_list_elements " xlink : title = " optional_list_elements " > < rect x = " 155 " y = " 3 " width = " 162 " height = " 32 " / > < rect x = " 153 " y = " 1 " width = " 162 " height = " 32 " class = " nonterminal " / > < text class = " nonterminal " x = " 163 " y = " 21 " > optional_list_elements < / text > < / a > < line x1 = " 143 " y1 = " 17 " x2 = " 153 " y2 = " 17 " class = " line " / > < line x1 = " 315 " y1 = " 17 " x2 = " 325 " y2 = " 17 " class = " line " / > < a xlink : href = " # T_LIST_CLOSE " xlink : title = " T_LIST_CLOSE " > < rect x = " 337 " y = " 3 " width = " 112 " height = " 32 " / > < rect x = " 335 " y = " 1 " width = " 112 " height = " 32 " class = " nonterminal " / > < text class = " nonterminal " x = " 345 " y = " 21 " > T_LIST_CLOSE < / text > < / a > < line x1 = " 325 " y1 = " 17 " x2 = " 335 " y2 = " 17 " class = " line " / > < line x1 = " 447 " y1 = " 17 " x2 = " 457 " y2 = " 17 " class = " line " / > < line x1 = " 460 " y1 = " 17 " x2 = " 457 " y2 = " 17 " class = " line " / > < polygon points = " 467 17 475 13 475 21 " class = " filled " / > < polygon points = " 467 17 459 13 459 21 " class = " filled " / > < / svg > < xhtml : p xmlns : xhtml = " http : / / www . w3 . org / 1999 / xhtml " > referenced by : < xhtml : ul > < xhtml : li > < xhtml : a href = " # compound_type " title = " compound_type " > compound_type < / xhtml : a > < / xhtml : li > < / xhtml : ul > < / xhtml : p > < xhtml : br xmlns : xhtml = " http : / / www . w3 . org / 1999 / xhtml " / > < xhtml : p xmlns : xhtml = " http : / / www . w3 . 
org / 1999 / xhtml " style = " font - size : 14px ; font - weight : bold " > < xhtml : a name = " optional_list_elements " > optional_list_elements : < / xhtml : a > < / xhtml : p > < svg xmlns = " http : / / www . w3 . org / 2000 / svg " xmlns : xlink = " http : / / www . w3 . org / 1999 / xlink " width = " 226 " height = " 96 " > < defs > < style type = " text / css " > <nl> + @ namespace " http : / / www . w3 . org / 2000 / svg " ; <nl> + . line { fill : none ; stroke : # 332900 ; } <nl> + filled { fill : # 332900 ; stroke : none ; } <nl> + text { font - family : Verdana , Sans - serif ; <nl> + font - size : 12px ; <nl> + fill : # 4D3D00 ; <nl> + } <nl> + text . terminal { font - family : Verdana , Sans - serif ; <nl> + font - size : 12px ; <nl> + fill : # 4D3D00 ; <nl> + font - weight : bold ; <nl> + } <nl> + rect , circle , polygon { fill : # 332900 ; stroke : # 332900 ; } <nl> + rect . terminal { fill : # FFCC00 ; stroke : # 332900 ; } <nl> + rect . nonterminal { fill : # FFEB99 ; stroke : # 332900 ; } <nl> + rect . text { fill : none ; stroke : none ; } <nl> + polygon . regexp { fill : # FFF5CC ; stroke : # 332900 ; } <nl> + < / style > < / defs > < polygon points = " 9 61 1 57 1 65 " class = " filled " / > < polygon points = " 17 61 9 57 9 65 " class = " filled " / > < line x1 = " 17 " y1 = " 61 " x2 = " 19 " y2 = " 61 " class = " line " / > < a xlink : href = " # expression " xlink : title = " expression " > < rect x = " 71 " y = " 47 " width = " 88 " height = " 32 " / > < rect x = " 69 " y = " 45 " width = " 88 " height = " 32 " class = " nonterminal " / > < text class = " nonterminal " x = " 79 " y = " 65 " > expression < / text > < / a > < line x1 = " 59 " y1 = " 61 " x2 = " 69 " y2 = " 61 " class = " line " / > < line x1 = " 157 " y1 = " 61 " x2 = " 167 " y2 = " 61 " class = " line " / > < path d = " M39 61 L59 61 M58 61 Q49 61 49 51 L49 27 Q49 17 59 17 " class = " line " / > < path d = " M167 61 L187 61 M167 61 Q177 61 177 51 L177 27 Q177 17 167 17 " class = " line " / > < a xlink : href = " # T_COMMA " xlink : title = " T_COMMA " > < rect x = " 71 " y = " 3 " width = " 82 " height = " 32 " / > < rect x = " 69 " y = " 1 " width = " 82 " height = " 32 " class = " nonterminal " / > < text class = " nonterminal " x = " 79 " y = " 21 " > T_COMMA < / text > < / a > < line x1 = " 59 " y1 = " 17 " x2 = " 69 " y2 = " 17 " class = " line " / > < line x1 = " 151 " y1 = " 17 " x2 = " 161 " y2 = " 17 " class = " line " / > < line x1 = " 161 " y1 = " 17 " x2 = " 167 " y2 = " 17 " class = " line " / > < line x1 = " 19 " y1 = " 61 " x2 = " 39 " y2 = " 61 " class = " line " / > < line x1 = " 187 " y1 = " 61 " x2 = " 207 " y2 = " 61 " class = " line " / > < path d = " M19 61 Q29 61 29 71 " class = " line " / > < path d = " M197 71 Q197 61 207 61 " class = " line " / > < line x1 = " 29 " y1 = " 71 " x2 = " 29 " y2 = " 85 " class = " line " / > < line x1 = " 197 " y1 = " 85 " x2 = " 197 " y2 = " 71 " class = " line " / > < path d = " M29 85 Q29 95 39 95 " class = " line " / > < path d = " M187 95 Q197 95 197 85 " class = " line " / > < line x1 = " 39 " y1 = " 95 " x2 = " 49 " y2 = " 95 " class = " line " / > < line x1 = " 49 " y1 = " 95 " x2 = " 187 " y2 = " 95 " class = " line " / > < line x1 = " 210 " y1 = " 61 " x2 = " 207 " y2 = " 61 " class = " line " / > < polygon points = " 217 61 225 57 225 65 " class = " filled " / > < polygon points = " 217 61 209 57 209 65 " class = " filled " / > < / svg > < xhtml : p xmlns : xhtml = " http : / / www . w3 . 
org / 1999 / xhtml " > referenced by : < xhtml : ul > < xhtml : li > < xhtml : a href = " # list " title = " list " > list < / xhtml : a > < / xhtml : li > < / xhtml : ul > < / xhtml : p > < xhtml : br xmlns : xhtml = " http : / / www . w3 . org / 1999 / xhtml " / > < xhtml : p xmlns : xhtml = " http : / / www . w3 . org / 1999 / xhtml " style = " font - size : 14px ; font - weight : bold " > < xhtml : a name = " document " > document : < / xhtml : a > < / xhtml : p > < svg xmlns = " http : / / www . w3 . org / 2000 / svg " xmlns : xlink = " http : / / www . w3 . org / 1999 / xlink " width = " 522 " height = " 36 " > < defs > < style type = " text / css " > <nl> + @ namespace " http : / / www . w3 . org / 2000 / svg " ; <nl> + . line { fill : none ; stroke : # 332900 ; } <nl> + filled { fill : # 332900 ; stroke : none ; } <nl> + text { font - family : Verdana , Sans - serif ; <nl> + font - size : 12px ; <nl> + fill : # 4D3D00 ; <nl> + } <nl> + text . terminal { font - family : Verdana , Sans - serif ; <nl> + font - size : 12px ; <nl> + fill : # 4D3D00 ; <nl> + font - weight : bold ; <nl> + } <nl> + rect , circle , polygon { fill : # 332900 ; stroke : # 332900 ; } <nl> + rect . terminal { fill : # FFCC00 ; stroke : # 332900 ; } <nl> + rect . nonterminal { fill : # FFEB99 ; stroke : # 332900 ; } <nl> + rect . text { fill : none ; stroke : none ; } <nl> + polygon . regexp { fill : # FFF5CC ; stroke : # 332900 ; } <nl> + < / style > < / defs > < polygon points = " 9 17 1 13 1 21 " class = " filled " / > < polygon points = " 17 17 9 13 9 21 " class = " filled " / > < line x1 = " 17 " y1 = " 17 " x2 = " 19 " y2 = " 17 " class = " line " / > < a xlink : href = " # T_DOC_OPEN " xlink : title = " T_DOC_OPEN " > < rect x = " 31 " y = " 3 " width = " 106 " height = " 32 " / > < rect x = " 29 " y = " 1 " width = " 106 " height = " 32 " class = " nonterminal " / > < text class = " nonterminal " x = " 39 " y = " 21 " > T_DOC_OPEN < / text > < / a > < line x1 = " 19 " y1 = " 17 " x2 = " 29 " y2 = " 17 " class = " line " / > < line x1 = " 135 " y1 = " 17 " x2 = " 145 " y2 = " 17 " class = " line " / > < a xlink : href = " # optional_document_elements " xlink : title = " optional_document_elements " > < rect x = " 157 " y = " 3 " width = " 206 " height = " 32 " / > < rect x = " 155 " y = " 1 " width = " 206 " height = " 32 " class = " nonterminal " / > < text class = " nonterminal " x = " 165 " y = " 21 " > optional_document_elements < / text > < / a > < line x1 = " 145 " y1 = " 17 " x2 = " 155 " y2 = " 17 " class = " line " / > < line x1 = " 361 " y1 = " 17 " x2 = " 371 " y2 = " 17 " class = " line " / > < a xlink : href = " # T_DOC_CLOSE " xlink : title = " T_DOC_CLOSE " > < rect x = " 383 " y = " 3 " width = " 112 " height = " 32 " / > < rect x = " 381 " y = " 1 " width = " 112 " height = " 32 " class = " nonterminal " / > < text class = " nonterminal " x = " 391 " y = " 21 " > T_DOC_CLOSE < / text > < / a > < line x1 = " 371 " y1 = " 17 " x2 = " 381 " y2 = " 17 " class = " line " / > < line x1 = " 493 " y1 = " 17 " x2 = " 503 " y2 = " 17 " class = " line " / > < line x1 = " 506 " y1 = " 17 " x2 = " 503 " y2 = " 17 " class = " line " / > < polygon points = " 513 17 521 13 521 21 " class = " filled " / > < polygon points = " 513 17 505 13 505 21 " class = " filled " / > < / svg > < xhtml : p xmlns : xhtml = " http : / / www . w3 . 
org / 1999 / xhtml " > referenced by : < xhtml : ul > < xhtml : li > < xhtml : a href = " # compound_type " title = " compound_type " > compound_type < / xhtml : a > < / xhtml : li > < / xhtml : ul > < / xhtml : p > < xhtml : br xmlns : xhtml = " http : / / www . w3 . org / 1999 / xhtml " / > < xhtml : p xmlns : xhtml = " http : / / www . w3 . org / 1999 / xhtml " style = " font - size : 14px ; font - weight : bold " > < xhtml : a name = " optional_document_elements " > optional_document_elements : < / xhtml : a > < / xhtml : p > < svg xmlns = " http : / / www . w3 . org / 2000 / svg " xmlns : xlink = " http : / / www . w3 . org / 1999 / xlink " width = " 278 " height = " 96 " > < defs > < style type = " text / css " > <nl> + @ namespace " http : / / www . w3 . org / 2000 / svg " ; <nl> + . line { fill : none ; stroke : # 332900 ; } <nl> + filled { fill : # 332900 ; stroke : none ; } <nl> + text { font - family : Verdana , Sans - serif ; <nl> + font - size : 12px ; <nl> + fill : # 4D3D00 ; <nl> + } <nl> + text . terminal { font - family : Verdana , Sans - serif ; <nl> + font - size : 12px ; <nl> + fill : # 4D3D00 ; <nl> + font - weight : bold ; <nl> + } <nl> + rect , circle , polygon { fill : # 332900 ; stroke : # 332900 ; } <nl> + rect . terminal { fill : # FFCC00 ; stroke : # 332900 ; } <nl> + rect . nonterminal { fill : # FFEB99 ; stroke : # 332900 ; } <nl> + rect . text { fill : none ; stroke : none ; } <nl> + polygon . regexp { fill : # FFF5CC ; stroke : # 332900 ; } <nl> + < / style > < / defs > < polygon points = " 9 61 1 57 1 65 " class = " filled " / > < polygon points = " 17 61 9 57 9 65 " class = " filled " / > < line x1 = " 17 " y1 = " 61 " x2 = " 19 " y2 = " 61 " class = " line " / > < a xlink : href = " # document_element " xlink : title = " document_element " > < rect x = " 71 " y = " 47 " width = " 140 " height = " 32 " / > < rect x = " 69 " y = " 45 " width = " 140 " height = " 32 " class = " nonterminal " / > < text class = " nonterminal " x = " 79 " y = " 65 " > document_element < / text > < / a > < line x1 = " 59 " y1 = " 61 " x2 = " 69 " y2 = " 61 " class = " line " / > < line x1 = " 209 " y1 = " 61 " x2 = " 219 " y2 = " 61 " class = " line " / > < path d = " M39 61 L59 61 M58 61 Q49 61 49 51 L49 27 Q49 17 59 17 " class = " line " / > < path d = " M219 61 L239 61 M219 61 Q229 61 229 51 L229 27 Q229 17 219 17 " class = " line " / > < a xlink : href = " # T_COMMA " xlink : title = " T_COMMA " > < rect x = " 71 " y = " 3 " width = " 82 " height = " 32 " / > < rect x = " 69 " y = " 1 " width = " 82 " height = " 32 " class = " nonterminal " / > < text class = " nonterminal " x = " 79 " y = " 21 " > T_COMMA < / text > < / a > < line x1 = " 59 " y1 = " 17 " x2 = " 69 " y2 = " 17 " class = " line " / > < line x1 = " 151 " y1 = " 17 " x2 = " 161 " y2 = " 17 " class = " line " / > < line x1 = " 161 " y1 = " 17 " x2 = " 219 " y2 = " 17 " class = " line " / > < line x1 = " 19 " y1 = " 61 " x2 = " 39 " y2 = " 61 " class = " line " / > < line x1 = " 239 " y1 = " 61 " x2 = " 259 " y2 = " 61 " class = " line " / > < path d = " M19 61 Q29 61 29 71 " class = " line " / > < path d = " M249 71 Q249 61 259 61 " class = " line " / > < line x1 = " 29 " y1 = " 71 " x2 = " 29 " y2 = " 85 " class = " line " / > < line x1 = " 249 " y1 = " 85 " x2 = " 249 " y2 = " 71 " class = " line " / > < path d = " M29 85 Q29 95 39 95 " class = " line " / > < path d = " M239 95 Q249 95 249 85 " class = " line " / > < line x1 = " 39 " y1 = " 95 " x2 = " 49 " y2 = " 95 " class = " line " / > < line x1 = " 49 " 
y1 = " 95 " x2 = " 239 " y2 = " 95 " class = " line " / > < line x1 = " 262 " y1 = " 61 " x2 = " 259 " y2 = " 61 " class = " line " / > < polygon points = " 269 61 277 57 277 65 " class = " filled " / > < polygon points = " 269 61 261 57 261 65 " class = " filled " / > < / svg > < xhtml : p xmlns : xhtml = " http : / / www . w3 . org / 1999 / xhtml " > referenced by : < xhtml : ul > < xhtml : li > < xhtml : a href = " # document " title = " document " > document < / xhtml : a > < / xhtml : li > < / xhtml : ul > < / xhtml : p > < xhtml : br xmlns : xhtml = " http : / / www . w3 . org / 1999 / xhtml " / > < xhtml : p xmlns : xhtml = " http : / / www . w3 . org / 1999 / xhtml " style = " font - size : 14px ; font - weight : bold " > < xhtml : a name = " document_element " > document_element : < / xhtml : a > < / xhtml : p > < svg xmlns = " http : / / www . w3 . org / 2000 / svg " xmlns : xlink = " http : / / www . w3 . org / 1999 / xlink " width = " 450 " height = " 36 " > < defs > < style type = " text / css " > <nl> + @ namespace " http : / / www . w3 . org / 2000 / svg " ; <nl> + . line { fill : none ; stroke : # 332900 ; } <nl> + filled { fill : # 332900 ; stroke : none ; } <nl> + text { font - family : Verdana , Sans - serif ; <nl> + font - size : 12px ; <nl> + fill : # 4D3D00 ; <nl> + } <nl> + text . terminal { font - family : Verdana , Sans - serif ; <nl> + font - size : 12px ; <nl> + fill : # 4D3D00 ; <nl> + font - weight : bold ; <nl> + } <nl> + rect , circle , polygon { fill : # 332900 ; stroke : # 332900 ; } <nl> + rect . terminal { fill : # FFCC00 ; stroke : # 332900 ; } <nl> + rect . nonterminal { fill : # FFEB99 ; stroke : # 332900 ; } <nl> + rect . text { fill : none ; stroke : none ; } <nl> + polygon . regexp { fill : # FFF5CC ; stroke : # 332900 ; } <nl> + < / style > < / defs > < polygon points = " 9 17 1 13 1 21 " class = " filled " / > < polygon points = " 17 17 9 13 9 21 " class = " filled " / > < line x1 = " 17 " y1 = " 17 " x2 = " 19 " y2 = " 17 " class = " line " / > < a xlink : href = " # document_element_name " xlink : title = " document_element_name " > < rect x = " 31 " y = " 3 " width = " 184 " height = " 32 " / > < rect x = " 29 " y = " 1 " width = " 184 " height = " 32 " class = " nonterminal " / > < text class = " nonterminal " x = " 39 " y = " 21 " > document_element_name < / text > < / a > < line x1 = " 19 " y1 = " 17 " x2 = " 29 " y2 = " 17 " class = " line " / > < line x1 = " 213 " y1 = " 17 " x2 = " 223 " y2 = " 17 " class = " line " / > < a xlink : href = " # T_COLON " xlink : title = " T_COLON " > < rect x = " 235 " y = " 3 " width = " 80 " height = " 32 " / > < rect x = " 233 " y = " 1 " width = " 80 " height = " 32 " class = " nonterminal " / > < text class = " nonterminal " x = " 243 " y = " 21 " > T_COLON < / text > < / a > < line x1 = " 223 " y1 = " 17 " x2 = " 233 " y2 = " 17 " class = " line " / > < line x1 = " 313 " y1 = " 17 " x2 = " 323 " y2 = " 17 " class = " line " / > < a xlink : href = " # expression " xlink : title = " expression " > < rect x = " 335 " y = " 3 " width = " 88 " height = " 32 " / > < rect x = " 333 " y = " 1 " width = " 88 " height = " 32 " class = " nonterminal " / > < text class = " nonterminal " x = " 343 " y = " 21 " > expression < / text > < / a > < line x1 = " 323 " y1 = " 17 " x2 = " 333 " y2 = " 17 " class = " line " / > < line x1 = " 421 " y1 = " 17 " x2 = " 431 " y2 = " 17 " class = " line " / > < line x1 = " 434 " y1 = " 17 " x2 = " 431 " y2 = " 17 " class = " line " / > < polygon points = " 441 17 449 13 449 21 " 
class = " filled " / > < polygon points = " 441 17 433 13 433 21 " class = " filled " / > < / svg > < xhtml : p xmlns : xhtml = " http : / / www . w3 . org / 1999 / xhtml " > referenced by : < xhtml : ul > < xhtml : li > < xhtml : a href = " # optional_document_elements " title = " optional_document_elements " > optional_document_elements < / xhtml : a > < / xhtml : li > < / xhtml : ul > < / xhtml : p > < xhtml : br xmlns : xhtml = " http : / / www . w3 . org / 1999 / xhtml " / > < xhtml : p xmlns : xhtml = " http : / / www . w3 . org / 1999 / xhtml " style = " font - size : 14px ; font - weight : bold " > < xhtml : a name = " reference_part " > reference_part : < / xhtml : a > < / xhtml : p > < svg xmlns = " http : / / www . w3 . org / 2000 / svg " xmlns : xlink = " http : / / www . w3 . org / 1999 / xlink " width = " 576 " height = " 80 " > < defs > < style type = " text / css " > <nl> + @ namespace " http : / / www . w3 . org / 2000 / svg " ; <nl> + . line { fill : none ; stroke : # 332900 ; } <nl> + filled { fill : # 332900 ; stroke : none ; } <nl> + text { font - family : Verdana , Sans - serif ; <nl> + font - size : 12px ; <nl> + fill : # 4D3D00 ; <nl> + } <nl> + text . terminal { font - family : Verdana , Sans - serif ; <nl> + font - size : 12px ; <nl> + fill : # 4D3D00 ; <nl> + font - weight : bold ; <nl> + } <nl> + rect , circle , polygon { fill : # 332900 ; stroke : # 332900 ; } <nl> + rect . terminal { fill : # FFCC00 ; stroke : # 332900 ; } <nl> + rect . nonterminal { fill : # FFEB99 ; stroke : # 332900 ; } <nl> + rect . text { fill : none ; stroke : none ; } <nl> + polygon . regexp { fill : # FFF5CC ; stroke : # 332900 ; } <nl> + < / style > < / defs > < polygon points = " 9 17 1 13 1 21 " class = " filled " / > < polygon points = " 17 17 9 13 9 21 " class = " filled " / > < line x1 = " 17 " y1 = " 17 " x2 = " 19 " y2 = " 17 " class = " line " / > < a xlink : href = " # T_STRING " xlink : title = " T_STRING " > < rect x = " 51 " y = " 3 " width = " 80 " height = " 32 " / > < rect x = " 49 " y = " 1 " width = " 80 " height = " 32 " class = " nonterminal " / > < text class = " nonterminal " x = " 59 " y = " 21 " > T_STRING < / text > < / a > < line x1 = " 39 " y1 = " 17 " x2 = " 49 " y2 = " 17 " class = " line " / > < line x1 = " 129 " y1 = " 17 " x2 = " 139 " y2 = " 17 " class = " line " / > < line x1 = " 139 " y1 = " 17 " x2 = " 537 " y2 = " 17 " class = " line " / > < line x1 = " 19 " y1 = " 17 " x2 = " 39 " y2 = " 17 " class = " line " / > < line x1 = " 537 " y1 = " 17 " x2 = " 557 " y2 = " 17 " class = " line " / > < path d = " M19 17 Q29 17 29 27 " class = " line " / > < path d = " M547 27 Q547 17 557 17 " class = " line " / > < line x1 = " 29 " y1 = " 27 " x2 = " 29 " y2 = " 51 " class = " line " / > < line x1 = " 547 " y1 = " 51 " x2 = " 547 " y2 = " 27 " class = " line " / > < path d = " M29 51 Q29 61 39 61 " class = " line " / > < path d = " M537 61 Q547 61 547 51 " class = " line " / > < a xlink : href = " # T_STRING " xlink : title = " T_STRING " > < rect x = " 51 " y = " 47 " width = " 80 " height = " 32 " / > < rect x = " 49 " y = " 45 " width = " 80 " height = " 32 " class = " nonterminal " / > < text class = " nonterminal " x = " 59 " y = " 65 " > T_STRING < / text > < / a > < line x1 = " 39 " y1 = " 61 " x2 = " 49 " y2 = " 61 " class = " line " / > < line x1 = " 129 " y1 = " 61 " x2 = " 139 " y2 = " 61 " class = " line " / > < a xlink : href = " # T_LIST_OPEN " xlink : title = " T_LIST_OPEN " > < rect x = " 151 " y = " 47 " width = " 104 " height = " 32 " / > < 
rect x = " 149 " y = " 45 " width = " 104 " height = " 32 " class = " nonterminal " / > < text class = " nonterminal " x = " 159 " y = " 65 " > T_LIST_OPEN < / text > < / a > < line x1 = " 139 " y1 = " 61 " x2 = " 149 " y2 = " 61 " class = " line " / > < line x1 = " 253 " y1 = " 61 " x2 = " 263 " y2 = " 61 " class = " line " / > < a xlink : href = " # reference_index " xlink : title = " reference_index " > < rect x = " 275 " y = " 47 " width = " 122 " height = " 32 " / > < rect x = " 273 " y = " 45 " width = " 122 " height = " 32 " class = " nonterminal " / > < text class = " nonterminal " x = " 283 " y = " 65 " > reference_index < / text > < / a > < line x1 = " 263 " y1 = " 61 " x2 = " 273 " y2 = " 61 " class = " line " / > < line x1 = " 395 " y1 = " 61 " x2 = " 405 " y2 = " 61 " class = " line " / > < a xlink : href = " # T_LIST_CLOSE " xlink : title = " T_LIST_CLOSE " > < rect x = " 417 " y = " 47 " width = " 112 " height = " 32 " / > < rect x = " 415 " y = " 45 " width = " 112 " height = " 32 " class = " nonterminal " / > < text class = " nonterminal " x = " 425 " y = " 65 " > T_LIST_CLOSE < / text > < / a > < line x1 = " 405 " y1 = " 61 " x2 = " 415 " y2 = " 61 " class = " line " / > < line x1 = " 527 " y1 = " 61 " x2 = " 537 " y2 = " 61 " class = " line " / > < line x1 = " 560 " y1 = " 17 " x2 = " 557 " y2 = " 17 " class = " line " / > < polygon points = " 567 17 575 13 575 21 " class = " filled " / > < polygon points = " 567 17 559 13 559 21 " class = " filled " / > < / svg > < xhtml : p xmlns : xhtml = " http : / / www . w3 . org / 1999 / xhtml " > referenced by : < xhtml : ul > < xhtml : li > < xhtml : a href = " # expression " title = " expression " > expression < / xhtml : a > < / xhtml : li > < / xhtml : ul > < / xhtml : p > < xhtml : br xmlns : xhtml = " http : / / www . w3 . org / 1999 / xhtml " / > < xhtml : p xmlns : xhtml = " http : / / www . w3 . org / 1999 / xhtml " style = " font - size : 14px ; font - weight : bold " > < xhtml : a name = " reference_index " > reference_index : < / xhtml : a > < / xhtml : p > < svg xmlns = " http : / / www . w3 . org / 2000 / svg " xmlns : xlink = " http : / / www . w3 . org / 1999 / xlink " width = " 240 " height = " 124 " > < defs > < style type = " text / css " > <nl> + @ namespace " http : / / www . w3 . org / 2000 / svg " ; <nl> + . line { fill : none ; stroke : # 332900 ; } <nl> + filled { fill : # 332900 ; stroke : none ; } <nl> + text { font - family : Verdana , Sans - serif ; <nl> + font - size : 12px ; <nl> + fill : # 4D3D00 ; <nl> + } <nl> + text . terminal { font - family : Verdana , Sans - serif ; <nl> + font - size : 12px ; <nl> + fill : # 4D3D00 ; <nl> + font - weight : bold ; <nl> + } <nl> + rect , circle , polygon { fill : # 332900 ; stroke : # 332900 ; } <nl> + rect . terminal { fill : # FFCC00 ; stroke : # 332900 ; } <nl> + rect . nonterminal { fill : # FFEB99 ; stroke : # 332900 ; } <nl> + rect . text { fill : none ; stroke : none ; } <nl> + polygon . 
regexp { fill : # FFF5CC ; stroke : # 332900 ; } <nl> + < / style > < / defs > < polygon points = " 9 17 1 13 1 21 " class = " filled " / > < polygon points = " 17 17 9 13 9 21 " class = " filled " / > < line x1 = " 17 " y1 = " 17 " x2 = " 19 " y2 = " 17 " class = " line " / > < a xlink : href = " # T_QUOTED_STRING " xlink : title = " T_QUOTED_STRING " > < rect x = " 51 " y = " 3 " width = " 142 " height = " 32 " / > < rect x = " 49 " y = " 1 " width = " 142 " height = " 32 " class = " nonterminal " / > < text class = " nonterminal " x = " 59 " y = " 21 " > T_QUOTED_STRING < / text > < / a > < line x1 = " 39 " y1 = " 17 " x2 = " 49 " y2 = " 17 " class = " line " / > < line x1 = " 191 " y1 = " 17 " x2 = " 201 " y2 = " 17 " class = " line " / > < line x1 = " 19 " y1 = " 17 " x2 = " 39 " y2 = " 17 " class = " line " / > < line x1 = " 201 " y1 = " 17 " x2 = " 221 " y2 = " 17 " class = " line " / > < path d = " M19 17 Q29 17 29 27 " class = " line " / > < path d = " M211 27 Q211 17 221 17 " class = " line " / > < line x1 = " 29 " y1 = " 27 " x2 = " 29 " y2 = " 51 " class = " line " / > < line x1 = " 211 " y1 = " 51 " x2 = " 211 " y2 = " 27 " class = " line " / > < path d = " M29 51 Q29 61 39 61 " class = " line " / > < path d = " M201 61 Q211 61 211 51 " class = " line " / > < a xlink : href = " # T_NUMBER " xlink : title = " T_NUMBER " > < rect x = " 51 " y = " 47 " width = " 86 " height = " 32 " / > < rect x = " 49 " y = " 45 " width = " 86 " height = " 32 " class = " nonterminal " / > < text class = " nonterminal " x = " 59 " y = " 65 " > T_NUMBER < / text > < / a > < line x1 = " 39 " y1 = " 61 " x2 = " 49 " y2 = " 61 " class = " line " / > < line x1 = " 135 " y1 = " 61 " x2 = " 145 " y2 = " 61 " class = " line " / > < line x1 = " 145 " y1 = " 61 " x2 = " 201 " y2 = " 61 " class = " line " / > < line x1 = " 29 " y1 = " 51 " x2 = " 29 " y2 = " 71 " class = " line " / > < line x1 = " 211 " y1 = " 71 " x2 = " 211 " y2 = " 51 " class = " line " / > < line x1 = " 29 " y1 = " 71 " x2 = " 29 " y2 = " 95 " class = " line " / > < line x1 = " 211 " y1 = " 95 " x2 = " 211 " y2 = " 71 " class = " line " / > < path d = " M29 95 Q29 105 39 105 " class = " line " / > < path d = " M201 105 Q211 105 211 95 " class = " line " / > < a xlink : href = " # T_TIMES " xlink : title = " T_TIMES " > < rect x = " 51 " y = " 91 " width = " 72 " height = " 32 " / > < rect x = " 49 " y = " 89 " width = " 72 " height = " 32 " class = " nonterminal " / > < text class = " nonterminal " x = " 59 " y = " 109 " > T_TIMES < / text > < / a > < line x1 = " 39 " y1 = " 105 " x2 = " 49 " y2 = " 105 " class = " line " / > < line x1 = " 121 " y1 = " 105 " x2 = " 131 " y2 = " 105 " class = " line " / > < line x1 = " 131 " y1 = " 105 " x2 = " 201 " y2 = " 105 " class = " line " / > < line x1 = " 224 " y1 = " 17 " x2 = " 221 " y2 = " 17 " class = " line " / > < polygon points = " 231 17 239 13 239 21 " class = " filled " / > < polygon points = " 231 17 223 13 223 21 " class = " filled " / > < / svg > < xhtml : p xmlns : xhtml = " http : / / www . w3 . org / 1999 / xhtml " > referenced by : < xhtml : ul > < xhtml : li > < xhtml : a href = " # reference_part " title = " reference_part " > reference_part < / xhtml : a > < / xhtml : li > < / xhtml : ul > < / xhtml : p > < xhtml : br xmlns : xhtml = " http : / / www . w3 . org / 1999 / xhtml " / > < xhtml : p xmlns : xhtml = " http : / / www . w3 . 
org / 1999 / xhtml " style = " font - size : 14px ; font - weight : bold " > < xhtml : a name = " atomic_value " > atomic_value : < / xhtml : a > < / xhtml : p > < svg xmlns = " http : / / www . w3 . org / 2000 / svg " xmlns : xlink = " http : / / www . w3 . org / 1999 / xlink " width = " 218 " height = " 80 " > < defs > < style type = " text / css " > <nl> + @ namespace " http : / / www . w3 . org / 2000 / svg " ; <nl> + . line { fill : none ; stroke : # 332900 ; } <nl> + filled { fill : # 332900 ; stroke : none ; } <nl> + text { font - family : Verdana , Sans - serif ; <nl> + font - size : 12px ; <nl> + fill : # 4D3D00 ; <nl> + } <nl> + text . terminal { font - family : Verdana , Sans - serif ; <nl> + font - size : 12px ; <nl> + fill : # 4D3D00 ; <nl> + font - weight : bold ; <nl> + } <nl> + rect , circle , polygon { fill : # 332900 ; stroke : # 332900 ; } <nl> + rect . terminal { fill : # FFCC00 ; stroke : # 332900 ; } <nl> + rect . nonterminal { fill : # FFEB99 ; stroke : # 332900 ; } <nl> + rect . text { fill : none ; stroke : none ; } <nl> + polygon . regexp { fill : # FFF5CC ; stroke : # 332900 ; } <nl> + < / style > < / defs > < polygon points = " 9 17 1 13 1 21 " class = " filled " / > < polygon points = " 17 17 9 13 9 21 " class = " filled " / > < line x1 = " 17 " y1 = " 17 " x2 = " 19 " y2 = " 17 " class = " line " / > < a xlink : href = " # value_literal " xlink : title = " value_literal " > < rect x = " 51 " y = " 3 " width = " 96 " height = " 32 " / > < rect x = " 49 " y = " 1 " width = " 96 " height = " 32 " class = " nonterminal " / > < text class = " nonterminal " x = " 59 " y = " 21 " > value_literal < / text > < / a > < line x1 = " 39 " y1 = " 17 " x2 = " 49 " y2 = " 17 " class = " line " / > < line x1 = " 145 " y1 = " 17 " x2 = " 155 " y2 = " 17 " class = " line " / > < line x1 = " 155 " y1 = " 17 " x2 = " 179 " y2 = " 17 " class = " line " / > < line x1 = " 19 " y1 = " 17 " x2 = " 39 " y2 = " 17 " class = " line " / > < line x1 = " 179 " y1 = " 17 " x2 = " 199 " y2 = " 17 " class = " line " / > < path d = " M19 17 Q29 17 29 27 " class = " line " / > < path d = " M189 27 Q189 17 199 17 " class = " line " / > < line x1 = " 29 " y1 = " 27 " x2 = " 29 " y2 = " 51 " class = " line " / > < line x1 = " 189 " y1 = " 51 " x2 = " 189 " y2 = " 27 " class = " line " / > < path d = " M29 51 Q29 61 39 61 " class = " line " / > < path d = " M179 61 Q189 61 189 51 " class = " line " / > < a xlink : href = " # bind_parameter " xlink : title = " bind_parameter " > < rect x = " 51 " y = " 47 " width = " 120 " height = " 32 " / > < rect x = " 49 " y = " 45 " width = " 120 " height = " 32 " class = " nonterminal " / > < text class = " nonterminal " x = " 59 " y = " 65 " > bind_parameter < / text > < / a > < line x1 = " 39 " y1 = " 61 " x2 = " 49 " y2 = " 61 " class = " line " / > < line x1 = " 169 " y1 = " 61 " x2 = " 179 " y2 = " 61 " class = " line " / > < line x1 = " 202 " y1 = " 17 " x2 = " 199 " y2 = " 17 " class = " line " / > < polygon points = " 209 17 217 13 217 21 " class = " filled " / > < polygon points = " 209 17 201 13 201 21 " class = " filled " / > < / svg > < xhtml : p xmlns : xhtml = " http : / / www . w3 . org / 1999 / xhtml " > referenced by : < xhtml : ul > < xhtml : li > < xhtml : a href = " # expression " title = " expression " > expression < / xhtml : a > < / xhtml : li > < / xhtml : ul > < / xhtml : p > < xhtml : br xmlns : xhtml = " http : / / www . w3 . org / 1999 / xhtml " / > < xhtml : p xmlns : xhtml = " http : / / www . w3 . 
org / 1999 / xhtml " style = " font - size : 14px ; font - weight : bold " > < xhtml : a name = " value_literal " > value_literal : < / xhtml : a > < / xhtml : p > <nl> + [ SVG railroad diagram elided . value_literal : = T_QUOTED_STRING | T_NUMBER | T_NULL | T_TRUE | T_FALSE . Referenced by : atomic_value . ] <nl> + [ SVG railroad diagram elided . bind_parameter : = T_PARAMETER . Referenced by : atomic_value . ] <nl> + [ SVG railroad diagram elided . document_element_name : = T_STRING | T_QUOTED_STRING . Referenced by : document_element . ] <nl> + [ SVG railroad diagram elided . variable_name : = T_STRING . Referenced by : collect_element , for_statement , let_statement , optional_into . ] <nl> + [ SVG railroad diagram elided . signed_number : = T_NUMBER | " - " T_NUMBER . Referenced by : limit_statement . ] <nl> + < / body > < / html > <nl> \ No newline at end of file <nl> new file mode 100644 <nl> index 00000000000 . . d16a87b4ff1 <nl> mmm / dev / null <nl> ppp b / Ahuacatl / main . c <nl> <nl> + <nl> + # include < BasicsC / common . h > <nl> + # include < BasicsC / strings . h > <nl> + <nl> + # include " Ahuacatl / ast - node . h " <nl> + # include " Ahuacatl / parser . h " <nl> + # include " Ahuacatl / ast - dump . h " <nl> + # include " Ahuacatl / ast - codegen - js . h " <nl> + # include " Ahuacatl / grammar .
h " <nl> + <nl> + char * TRI_ParseGetErrorMessage ( const char * const query , const size_t line , const size_t column ) { <nl> + size_t currentLine = 1 ; <nl> + size_t currentColumn = 1 ; <nl> + const char * p = query ; <nl> + size_t offset ; <nl> + char c ; <nl> + <nl> + while ( ( c = * p + + ) ) { <nl> + if ( c = = ' \ n ' ) { <nl> + + + currentLine ; <nl> + currentColumn = 0 ; <nl> + } <nl> + else if ( c = = ' \ r ' ) { <nl> + if ( * p = = ' \ n ' ) { <nl> + + + currentLine ; <nl> + currentColumn = 0 ; <nl> + p + + ; <nl> + } <nl> + } <nl> + <nl> + + + currentColumn ; <nl> + <nl> + if ( currentLine > = line & & currentColumn > = column ) { <nl> + break ; <nl> + } <nl> + } <nl> + <nl> + offset = p - query ; <nl> + if ( strlen ( query ) < offset + 32 ) { <nl> + return TRI_DuplicateString2 ( query + offset , strlen ( query ) - offset ) ; <nl> + } <nl> + <nl> + return TRI_Concatenate2String ( TRI_DuplicateString2 ( query + offset , 32 ) , " . . . " ) ; <nl> + } <nl> + <nl> + int main ( int argc , char * argv [ ] ) { <nl> + TRI_aql_parse_context_t * context ; <nl> + <nl> + if ( argc < = 1 ) { <nl> + printf ( " please specify a query in argv [ 1 ] \ n " ) ; <nl> + return 1 ; <nl> + } <nl> + <nl> + context = TRI_CreateParseContextAql ( argv [ 1 ] ) ; <nl> + if ( ! context ) { <nl> + printf ( " error \ n " ) ; <nl> + return 1 ; <nl> + } <nl> + <nl> + if ( Ahuacatlparse ( context ) ) { <nl> + printf ( " error \ n " ) ; <nl> + } <nl> + <nl> + if ( context - > _first ) { <nl> + TRI_DumpAql ( ( TRI_aql_node_t * ) context - > _first ) ; <nl> + TRI_GenerateCodeAql ( ( TRI_aql_node_t * ) context - > _first ) ; <nl> + } <nl> + <nl> + TRI_FreeParseContextAql ( context ) ; <nl> + <nl> + return 0 ; <nl> + } <nl> mmm a / Makefile . am <nl> ppp b / Makefile . am <nl> if ENABLE_BISON <nl> include Makefile . bison <nl> endif <nl> <nl> - if ENABLE_ERRORS_DEPENDENCY <nl> . PHONY : errorfiles <nl> <nl> + if ENABLE_ERRORS_DEPENDENCY <nl> errorfiles : BasicsC / errors . dat <nl> @ top_srcdir @ / config / build_errorfile . sh @ top_srcdir @ / config / generateErrorfile . py BasicsC / errors . dat BasicsC / voc - errors . h <nl> @ top_srcdir @ / config / build_errorfile . sh @ top_srcdir @ / config / generateErrorfile . py BasicsC / errors . dat BasicsC / voc - errors . c <nl> mmm a / Makefile . in <nl> ppp b / Makefile . in <nl> unittests - http - server : <nl> @ ENABLE_BISON_TRUE @ JsonParserX / % . cpp : @ srcdir @ / JsonParserX / % . yy <nl> @ ENABLE_BISON_TRUE @ @ top_srcdir @ / config / bison - c + + . sh $ ( BISON ) $ @ $ < <nl> <nl> - @ ENABLE_ERRORS_DEPENDENCY_TRUE @ . PHONY : errorfiles <nl> + . PHONY : errorfiles <nl> <nl> @ ENABLE_ERRORS_DEPENDENCY_TRUE @ errorfiles : BasicsC / errors . dat <nl> @ ENABLE_ERRORS_DEPENDENCY_TRUE @ @ top_srcdir @ / config / build_errorfile . sh @ top_srcdir @ / config / generateErrorfile . py BasicsC / errors . dat BasicsC / voc - errors . h <nl> new file mode 100755 <nl> index 00000000000 . . 4bce6979f8b <nl> mmm / dev / null <nl> ppp b / SkipLists / compare . h <nl> <nl> + / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> + / / / @ brief compare methods used for skiplist indexes <nl> + / / / <nl> + / / / @ file <nl> + / / / <nl> + / / / DISCLAIMER <nl> + / / / <nl> + / / / Copyright by triAGENS GmbH - All rights reserved . 
<nl> + / / / <nl> + / / / The Programs ( which include both the software and documentation ) <nl> + / / / contain proprietary information of triAGENS GmbH ; they are <nl> + / / / provided under a license agreement containing restrictions on use and <nl> + / / / disclosure and are also protected by copyright , patent and other <nl> + / / / intellectual and industrial property laws . Reverse engineering , <nl> + / / / disassembly or decompilation of the Programs , except to the extent <nl> + / / / required to obtain interoperability with other independently created <nl> + / / / software or as specified by law , is prohibited . <nl> + / / / <nl> + / / / The Programs are not intended for use in any nuclear , aviation , mass <nl> + / / / transit , medical , or other inherently dangerous applications . It shall <nl> + / / / be the licensee ' s responsibility to take all appropriate fail - safe , <nl> + / / / backup , redundancy , and other measures to ensure the safe use of such <nl> + / / / applications if the Programs are used for such purposes , and triAGENS <nl> + / / / GmbH disclaims liability for any damages caused by such use of <nl> + / / / the Programs . <nl> + / / / <nl> + / / / This software is the confidential and proprietary information of <nl> + / / / triAGENS GmbH . You shall not disclose such confidential and <nl> + / / / proprietary information and shall use it only in accordance with the <nl> + / / / terms of the license agreement you entered into with triAGENS GmbH . <nl> + / / / <nl> + / / / Copyright holder is triAGENS GmbH , Cologne , Germany <nl> + / / / <nl> + / / / @ author Dr . O <nl> + / / / @ author Copyright 2011 , triagens GmbH , Cologne , Germany <nl> + / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> + <nl> + # ifndef TRIAGENS_DURHAM_VOC_BASE_SKIPLIST_COMPARE_H <nl> + # define TRIAGENS_DURHAM_VOC_BASE_SKIPLIST_COMPARE_H 1 <nl> + <nl> + # include " ShapedJson / json - shaper . h " <nl> + # include " ShapedJson / shaped - json . h " <nl> + # include " VocBase / document - collection . 
h " <nl> + <nl> + # define USE_STATIC_SKIPLIST_COMPARE 1 <nl> + <nl> + # define SKIPLIST_ELEMENT_TYPE ( a , b ) \ <nl> + struct a { \ <nl> + size_t numFields ; \ <nl> + TRI_shaped_json_t * fields ; \ <nl> + void * data ; \ <nl> + void * collection ; \ <nl> + } b <nl> + <nl> + <nl> + # ifdef __cplusplus <nl> + extern " C " { <nl> + # endif <nl> + <nl> + <nl> + / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> + / / / @ brief Helper method for recursion for CompareShapedJsonShapedJson <nl> + / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> + <nl> + <nl> + static int CompareShapeTypes ( const TRI_shaped_json_t * left , const TRI_shaped_json_t * right , TRI_shaper_t * leftShaper , TRI_shaper_t * rightShaper ) { <nl> + <nl> + int result ; <nl> + size_t j ; <nl> + TRI_shape_type_t leftType ; <nl> + TRI_shape_type_t rightType ; <nl> + const TRI_shape_t * leftShape ; <nl> + const TRI_shape_t * rightShape ; <nl> + size_t leftListLength ; <nl> + size_t rightListLength ; <nl> + size_t listLength ; <nl> + TRI_shaped_json_t leftElement ; <nl> + TRI_shaped_json_t rightElement ; <nl> + char * leftString ; <nl> + char * rightString ; <nl> + <nl> + <nl> + leftShape = leftShaper - > lookupShapeId ( leftShaper , left - > _sid ) ; <nl> + rightShape = rightShaper - > lookupShapeId ( rightShaper , right - > _sid ) ; <nl> + leftType = leftShape - > _type ; <nl> + rightType = rightShape - > _type ; <nl> + <nl> + switch ( leftType ) { <nl> + <nl> + case TRI_SHAPE_ILLEGAL : { <nl> + switch ( rightType ) { <nl> + case TRI_SHAPE_ILLEGAL : <nl> + { <nl> + return 0 ; <nl> + } <nl> + case TRI_SHAPE_NULL : <nl> + case TRI_SHAPE_BOOLEAN : <nl> + case TRI_SHAPE_NUMBER : <nl> + case TRI_SHAPE_SHORT_STRING : <nl> + case TRI_SHAPE_LONG_STRING : <nl> + case TRI_SHAPE_ARRAY : <nl> + case TRI_SHAPE_LIST : <nl> + case TRI_SHAPE_HOMOGENEOUS_LIST : <nl> + case TRI_SHAPE_HOMOGENEOUS_SIZED_LIST : <nl> + { <nl> + return - 1 ; <nl> + } <nl> + } / / end of switch ( rightType ) <nl> + } <nl> + <nl> + case TRI_SHAPE_NULL : { <nl> + switch ( rightType ) { <nl> + case TRI_SHAPE_ILLEGAL : <nl> + { <nl> + return 1 ; <nl> + } <nl> + case TRI_SHAPE_NULL : <nl> + { <nl> + return 0 ; <nl> + } <nl> + case TRI_SHAPE_BOOLEAN : <nl> + case TRI_SHAPE_NUMBER : <nl> + case TRI_SHAPE_SHORT_STRING : <nl> + case TRI_SHAPE_LONG_STRING : <nl> + case TRI_SHAPE_ARRAY : <nl> + case TRI_SHAPE_LIST : <nl> + case TRI_SHAPE_HOMOGENEOUS_LIST : <nl> + case TRI_SHAPE_HOMOGENEOUS_SIZED_LIST : <nl> + { <nl> + return - 1 ; <nl> + } <nl> + } / / end of switch ( rightType ) <nl> + } <nl> + <nl> + case TRI_SHAPE_BOOLEAN : { <nl> + switch ( rightType ) { <nl> + case TRI_SHAPE_ILLEGAL : <nl> + case TRI_SHAPE_NULL : <nl> + { <nl> + return 1 ; <nl> + } <nl> + case TRI_SHAPE_BOOLEAN : <nl> + { <nl> + / / check which is false and which is true ! <nl> + if ( * ( ( TRI_shape_boolean_t * ) ( left - > _data . data ) ) = = * ( ( TRI_shape_boolean_t * ) ( right - > _data . data ) ) ) { <nl> + return 0 ; <nl> + } <nl> + if ( * ( ( TRI_shape_boolean_t * ) ( left - > _data . data ) ) < * ( ( TRI_shape_boolean_t * ) ( right - > _data . 
data ) ) ) { <nl> + return - 1 ; <nl> + } <nl> + return 1 ; <nl> + } <nl> + case TRI_SHAPE_NUMBER : <nl> + case TRI_SHAPE_SHORT_STRING : <nl> + case TRI_SHAPE_LONG_STRING : <nl> + case TRI_SHAPE_ARRAY : <nl> + case TRI_SHAPE_LIST : <nl> + case TRI_SHAPE_HOMOGENEOUS_LIST : <nl> + case TRI_SHAPE_HOMOGENEOUS_SIZED_LIST : <nl> + { <nl> + return - 1 ; <nl> + } <nl> + } / / end of switch ( rightType ) <nl> + } <nl> + <nl> + case TRI_SHAPE_NUMBER : { <nl> + switch ( rightType ) { <nl> + case TRI_SHAPE_ILLEGAL : <nl> + case TRI_SHAPE_NULL : <nl> + case TRI_SHAPE_BOOLEAN : <nl> + { <nl> + return 1 ; <nl> + } <nl> + case TRI_SHAPE_NUMBER : <nl> + { <nl> + / / compare the numbers . <nl> + if ( * ( ( TRI_shape_number_t * ) ( left - > _data . data ) ) = = * ( ( TRI_shape_number_t * ) ( right - > _data . data ) ) ) { <nl> + return 0 ; <nl> + } <nl> + if ( * ( ( TRI_shape_number_t * ) ( left - > _data . data ) ) < * ( ( TRI_shape_number_t * ) ( right - > _data . data ) ) ) { <nl> + return - 1 ; <nl> + } <nl> + return 1 ; <nl> + } <nl> + case TRI_SHAPE_SHORT_STRING : <nl> + case TRI_SHAPE_LONG_STRING : <nl> + case TRI_SHAPE_ARRAY : <nl> + case TRI_SHAPE_LIST : <nl> + case TRI_SHAPE_HOMOGENEOUS_LIST : <nl> + case TRI_SHAPE_HOMOGENEOUS_SIZED_LIST : <nl> + { <nl> + return - 1 ; <nl> + } <nl> + } / / end of switch ( rightType ) <nl> + } <nl> + <nl> + case TRI_SHAPE_SHORT_STRING : <nl> + case TRI_SHAPE_LONG_STRING : <nl> + { <nl> + switch ( rightType ) { <nl> + case TRI_SHAPE_ILLEGAL : <nl> + case TRI_SHAPE_NULL : <nl> + case TRI_SHAPE_BOOLEAN : <nl> + case TRI_SHAPE_NUMBER : <nl> + { <nl> + return 1 ; <nl> + } <nl> + case TRI_SHAPE_SHORT_STRING : <nl> + case TRI_SHAPE_LONG_STRING : <nl> + { <nl> + / / compare strings <nl> + / / extract the strings <nl> + if ( leftType = = TRI_SHAPE_SHORT_STRING ) { <nl> + leftString = ( char * ) ( sizeof ( TRI_shape_length_short_string_t ) + left - > _data . data ) ; <nl> + } <nl> + else { <nl> + leftString = ( char * ) ( sizeof ( TRI_shape_length_long_string_t ) + left - > _data . data ) ; <nl> + } <nl> + <nl> + if ( rightType = = TRI_SHAPE_SHORT_STRING ) { <nl> + rightString = ( char * ) ( sizeof ( TRI_shape_length_short_string_t ) + right - > _data . data ) ; <nl> + } <nl> + else { <nl> + rightString = ( char * ) ( sizeof ( TRI_shape_length_long_string_t ) + right - > _data . data ) ; <nl> + } <nl> + <nl> + result = strcmp ( leftString , rightString ) ; <nl> + return result ; <nl> + } <nl> + case TRI_SHAPE_ARRAY : <nl> + case TRI_SHAPE_LIST : <nl> + case TRI_SHAPE_HOMOGENEOUS_LIST : <nl> + case TRI_SHAPE_HOMOGENEOUS_SIZED_LIST : <nl> + { <nl> + return - 1 ; <nl> + } <nl> + } / / end of switch ( rightType ) <nl> + } <nl> + <nl> + case TRI_SHAPE_HOMOGENEOUS_LIST : <nl> + case TRI_SHAPE_HOMOGENEOUS_SIZED_LIST : <nl> + case TRI_SHAPE_LIST : <nl> + { <nl> + switch ( rightType ) { <nl> + case TRI_SHAPE_ILLEGAL : <nl> + case TRI_SHAPE_NULL : <nl> + case TRI_SHAPE_BOOLEAN : <nl> + case TRI_SHAPE_NUMBER : <nl> + case TRI_SHAPE_SHORT_STRING : <nl> + case TRI_SHAPE_LONG_STRING : <nl> + { <nl> + return 1 ; <nl> + } <nl> + case TRI_SHAPE_HOMOGENEOUS_LIST : <nl> + case TRI_SHAPE_HOMOGENEOUS_SIZED_LIST : <nl> + case TRI_SHAPE_LIST : <nl> + { <nl> + / / unfortunately recursion : check the types of all the entries <nl> + leftListLength = * ( ( TRI_shape_length_list_t * ) ( left - > _data . data ) ) ; <nl> + rightListLength = * ( ( TRI_shape_length_list_t * ) ( right - > _data . 
data ) ) ; <nl> + <nl> + / / determine the smallest list <nl> + if ( leftListLength > rightListLength ) { <nl> + listLength = rightListLength ; <nl> + } <nl> + else { <nl> + listLength = leftListLength ; <nl> + } <nl> + <nl> + for ( j = 0 ; j < listLength ; + + j ) { <nl> + <nl> + if ( leftType = = TRI_SHAPE_HOMOGENEOUS_LIST ) { <nl> + TRI_AtHomogeneousListShapedJson ( ( const TRI_homogeneous_list_shape_t * ) ( leftShape ) , <nl> + left , j , & leftElement ) ; <nl> + } <nl> + else if ( leftType = = TRI_SHAPE_HOMOGENEOUS_SIZED_LIST ) { <nl> + TRI_AtHomogeneousSizedListShapedJson ( ( const TRI_homogeneous_sized_list_shape_t * ) ( leftShape ) , <nl> + left , j , & leftElement ) ; <nl> + } <nl> + else { <nl> + TRI_AtListShapedJson ( ( const TRI_list_shape_t * ) ( leftShape ) , left , j , & leftElement ) ; <nl> + } <nl> + <nl> + <nl> + if ( rightType = = TRI_SHAPE_HOMOGENEOUS_LIST ) { <nl> + TRI_AtHomogeneousListShapedJson ( ( const TRI_homogeneous_list_shape_t * ) ( rightShape ) , <nl> + right , j , & rightElement ) ; <nl> + } <nl> + else if ( rightType = = TRI_SHAPE_HOMOGENEOUS_SIZED_LIST ) { <nl> + TRI_AtHomogeneousSizedListShapedJson ( ( const TRI_homogeneous_sized_list_shape_t * ) ( rightShape ) , <nl> + right , j , & rightElement ) ; <nl> + } <nl> + else { <nl> + TRI_AtListShapedJson ( ( const TRI_list_shape_t * ) ( rightShape ) , right , j , & rightElement ) ; <nl> + } <nl> + <nl> + result = CompareShapeTypes ( & leftElement , & rightElement , leftShaper , rightShaper ) ; <nl> + if ( result ! = 0 ) { <nl> + return result ; <nl> + } <nl> + } <nl> + <nl> + / / up to listLength everything matches <nl> + if ( leftListLength < rightListLength ) { <nl> + return - 1 ; <nl> + } <nl> + else if ( leftListLength > rightListLength ) { <nl> + return 1 ; <nl> + } <nl> + return 0 ; <nl> + } <nl> + <nl> + <nl> + case TRI_SHAPE_ARRAY : <nl> + { <nl> + return - 1 ; <nl> + } <nl> + } / / end of switch ( rightType ) <nl> + } <nl> + <nl> + case TRI_SHAPE_ARRAY : <nl> + { <nl> + / * start oreste : <nl> + char * shape = ( char * ) ( leftShape ) ; <nl> + uint64_t fixedEntries ; <nl> + uint64_t variableEntries ; <nl> + uint64_t ssid ; <nl> + uint64_t aaid ; <nl> + char * name ; <nl> + TRI_shape_t * newShape ; <nl> + <nl> + shape = shape + sizeof ( TRI_shape_t ) ; <nl> + fixedEntries = * ( ( TRI_shape_size_t * ) ( shape ) ) ; <nl> + shape = shape + sizeof ( TRI_shape_size_t ) ; <nl> + variableEntries = * ( ( TRI_shape_size_t * ) ( shape ) ) ; <nl> + shape = shape + sizeof ( TRI_shape_size_t ) ; <nl> + ssid = * ( ( TRI_shape_sid_t * ) ( shape ) ) ; <nl> + shape = shape + ( sizeof ( TRI_shape_sid_t ) * ( fixedEntries + variableEntries ) ) ; <nl> + aaid = * ( ( TRI_shape_aid_t * ) ( shape ) ) ; <nl> + shape = shape + ( sizeof ( TRI_shape_aid_t ) * ( fixedEntries + variableEntries ) ) ; <nl> + <nl> + name = leftShaper - > lookupAttributeId ( leftShaper , aaid ) ; <nl> + newShape = leftShaper - > lookupShapeId ( leftShaper , ssid ) ; <nl> + <nl> + <nl> + printf ( " % s : % u : _fixedEntries : % u \ n " , __FILE__ , __LINE__ , fixedEntries ) ; <nl> + printf ( " % s : % u : _variableEntries : % u \ n " , __FILE__ , __LINE__ , variableEntries ) ; <nl> + printf ( " % s : % u : _sids [ 0 ] : % u \ n " , __FILE__ , __LINE__ , ssid ) ; <nl> + printf ( " % s : % u : _aids [ 0 ] : % u \ n " , __FILE__ , __LINE__ , aaid ) ; <nl> + printf ( " % s : % u : name : % s \ n " , __FILE__ , __LINE__ , name ) ; <nl> + printf ( " % s : % u : type : % d \ n " , __FILE__ , __LINE__ , newShape - > _type ) ; <nl> + <nl> + end oreste * / <nl> 
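+ / / note : comparison of array ( hash ) shapes is not implemented yet - - the <nl> + / / unconditional assert below aborts whenever the left operand is an array <nl>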
+ assert ( false ) ; <nl> + switch ( rightType ) { <nl> + case TRI_SHAPE_ILLEGAL : <nl> + case TRI_SHAPE_NULL : <nl> + case TRI_SHAPE_BOOLEAN : <nl> + case TRI_SHAPE_NUMBER : <nl> + case TRI_SHAPE_SHORT_STRING : <nl> + case TRI_SHAPE_LONG_STRING : <nl> + case TRI_SHAPE_HOMOGENEOUS_LIST : <nl> + case TRI_SHAPE_HOMOGENEOUS_SIZED_LIST : <nl> + case TRI_SHAPE_LIST : <nl> + { <nl> + return 1 ; <nl> + } <nl> + case TRI_SHAPE_ARRAY : <nl> + { <nl> + assert ( false ) ; <nl> + result = 0 ; <nl> + return result ; <nl> + } <nl> + } / / end of switch ( rightType ) <nl> + } <nl> + <nl> + } <nl> + assert ( false ) ; <nl> + return 0 ; / / unreachable - - avoids a missing - return warning when NDEBUG disables assert <nl> + } <nl> + <nl> + <nl> + <nl> + / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> + / / / @ brief Compare a shaped json object recursively if necessary <nl> + / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> + <nl> + static int CompareShapedJsonShapedJson ( const TRI_shaped_json_t * left , const TRI_shaped_json_t * right , TRI_shaper_t * leftShaper , TRI_shaper_t * rightShaper ) { <nl> + <nl> + int result ; <nl> + <nl> + / / . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . <nl> + / / the following order is currently defined for placing an order on documents <nl> + / / undef < null < boolean < number < strings < lists < hash arrays <nl> + / / note : undefined will be treated as NULL pointer not NULL JSON OBJECT <nl> + / / within each type class we have the following order <nl> + / / boolean : false < true <nl> + / / number : natural order <nl> + / / strings : lexicographical <nl> + / / lists : lexicographically and within each slot according to these rules . <nl> + / / . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . <nl> + <nl> + <nl> + if ( left = = NULL & & right = = NULL ) { <nl> + return 0 ; <nl> + } <nl> + <nl> + if ( left = = NULL & & right ! = NULL ) { <nl> + return - 1 ; <nl> + } <nl> + <nl> + if ( left ! = NULL & & right = = NULL ) { <nl> + return 1 ; <nl> + } <nl> + <nl> + result = CompareShapeTypes ( left , right , leftShaper , rightShaper ) ; <nl> + <nl> + return result ; <nl> + <nl> + } / / end of function CompareShapedJsonShapedJson <nl> + <nl> + <nl> + / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> + / / / @ brief compares two elements in a skip list <nl> + / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> + <nl> + static int IndexStaticCompareElementElement ( struct TRI_skiplist_s * skiplist , void * leftElement , void * rightElement , int defaultEqual ) { <nl> + typedef SKIPLIST_ELEMENT_TYPE ( LocalElement_s , LocalElement_t ) ; <nl> + / / . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . <nl> + / / Compares two elements and determines : <nl> + / / left < right : return - 1 <nl> + / / left = = right : return 0 <nl> + / / left > right : return 1 <nl> + / / . . . . . . . . . . . . . . . . . . . . . . . . . . . .
. . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . <nl> + int compareResult ; <nl> + LocalElement_t * hLeftElement = ( LocalElement_t * ) ( leftElement ) ; <nl> + LocalElement_t * hRightElement = ( LocalElement_t * ) ( rightElement ) ; <nl> + TRI_shaper_t * leftShaper ; <nl> + TRI_shaper_t * rightShaper ; <nl> + size_t j ; <nl> + <nl> + / / . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . <nl> + / / the following order is currently defined for placing an order on documents <nl> + / / undef < null < boolean < number < strings < lists < hash arrays <nl> + / / note : undefined will be treated as NULL pointer not NULL JSON OBJECT <nl> + / / within each type class we have the following order <nl> + / / boolean : false < true <nl> + / / number : natural order <nl> + / / strings : lexicographical <nl> + / / lists : lexicographically and within each slot according to these rules . <nl> + / / . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . <nl> + <nl> + if ( leftElement = = NULL & & rightElement = = NULL ) { <nl> + return 0 ; <nl> + } <nl> + <nl> + if ( leftElement ! = NULL & & rightElement = = NULL ) { <nl> + return 1 ; <nl> + } <nl> + <nl> + if ( leftElement = = NULL & & rightElement ! = NULL ) { <nl> + return - 1 ; <nl> + } <nl> + <nl> + if ( leftElement = = rightElement ) { <nl> + return 0 ; <nl> + } <nl> + <nl> + / / . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . <nl> + / / This call back function is used when we insert and remove unique skip <nl> + / / list entries . <nl> + / / . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . <nl> + <nl> + if ( hLeftElement - > numFields ! = hRightElement - > numFields ) { <nl> + assert ( false ) ; <nl> + } <nl> + <nl> + <nl> + / / . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . <nl> + / / The document could be the same - - so no further comparison is required . <nl> + / / . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . <nl> + if ( hLeftElement - > data = = hRightElement - > data ) { <nl> + return 0 ; <nl> + } <nl> + <nl> + <nl> + leftShaper = ( ( TRI_doc_collection_t * ) ( hLeftElement - > collection ) ) - > _shaper ; <nl> + rightShaper = ( ( TRI_doc_collection_t * ) ( hRightElement - > collection ) ) - > _shaper ; <nl> + <nl> + for ( j = 0 ; j < hLeftElement - > numFields ; j + + ) { <nl> + compareResult = CompareShapedJsonShapedJson ( ( j + hLeftElement - > fields ) , ( j + hRightElement - > fields ) , leftShaper , rightShaper ) ; <nl> + if ( compareResult ! = 0 ) { <nl> + return compareResult ; <nl> + } <nl> + } <nl> + <nl> + / / . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . <nl> + / / This is where the difference between CompareKeyElement ( below ) and <nl> + / / CompareElementElement comes into play . 
Here if the ' keys ' are the same , <nl> + / / but the doc ptr is different ( which it is since we are here ) , then <nl> + / / we return what was requested to be returned : 0 , - 1 or 1 . What is returned <nl> + / / depends on the purpose of calling this callback . <nl> + / / . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . <nl> + <nl> + return defaultEqual ; <nl> + } <nl> + <nl> + <nl> + <nl> + / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> + / / / @ brief compares a key and an element <nl> + / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> + <nl> + static int IndexStaticCompareKeyElement ( struct TRI_skiplist_s * skiplist , void * leftElement , void * rightElement , int defaultEqual ) { <nl> + typedef SKIPLIST_ELEMENT_TYPE ( LocalElement_s , LocalElement_t ) ; <nl> + <nl> + / / . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . <nl> + / / Compares two elements and determines : <nl> + / / left < right : return - 1 <nl> + / / left = = right : return 0 <nl> + / / left > right : return 1 <nl> + / / . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . <nl> + int compareResult ; <nl> + size_t numFields ; <nl> + LocalElement_t * hLeftElement = ( LocalElement_t * ) ( leftElement ) ; <nl> + LocalElement_t * hRightElement = ( LocalElement_t * ) ( rightElement ) ; <nl> + TRI_shaper_t * leftShaper ; <nl> + TRI_shaper_t * rightShaper ; <nl> + size_t j ; <nl> + <nl> + / / . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . <nl> + / / the following order is currently defined for placing an order on documents <nl> + / / undef < null < boolean < number < strings < lists < hash arrays <nl> + / / note : undefined will be treated as NULL pointer not NULL JSON OBJECT <nl> + / / within each type class we have the following order <nl> + / / boolean : false < true <nl> + / / number : natural order <nl> + / / strings : lexicographical <nl> + / / lists : lexicographically and within each slot according to these rules . <nl> + / / associative array : ordered keys followed by value of key <nl> + / / . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . <nl> + <nl> + if ( leftElement = = NULL & & rightElement = = NULL ) { <nl> + return 0 ; <nl> + } <nl> + <nl> + if ( leftElement = = NULL & & rightElement ! = NULL ) { <nl> + return - 1 ; <nl> + } <nl> + <nl> + if ( leftElement ! = NULL & & rightElement = = NULL ) { <nl> + return 1 ; <nl> + } <nl> + <nl> + if ( leftElement = = rightElement ) { <nl> + return 0 ; <nl> + } <nl> + <nl> + / / . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . <nl> + / / The document could be the same - - so no further comparison is required . <nl> + / / . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . <nl>
+ if ( hLeftElement - > data = = hRightElement - > data ) { <nl> + return 0 ; <nl> + } <nl> + <nl> + / / . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . <nl> + / / This callback function is used when we query the index , as such <nl> + / / the number of fields which we are using for the query may be less than <nl> + / / the number of fields that the index is defined with . <nl> + / / . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . <nl> + <nl> + <nl> + if ( hLeftElement - > numFields < hRightElement - > numFields ) { <nl> + numFields = hLeftElement - > numFields ; <nl> + } <nl> + else { <nl> + numFields = hRightElement - > numFields ; <nl> + } <nl> + <nl> + <nl> + leftShaper = ( ( TRI_doc_collection_t * ) ( hLeftElement - > collection ) ) - > _shaper ; <nl> + rightShaper = ( ( TRI_doc_collection_t * ) ( hRightElement - > collection ) ) - > _shaper ; <nl> + <nl> + for ( j = 0 ; j < numFields ; j + + ) { <nl> + / * <nl> + printf ( " % s : % u : % f : % f , % u : % u \ n " , __FILE__ , __LINE__ , <nl> + * ( ( double * ) ( ( j + hLeftElement - > fields ) - > _data . data ) ) , <nl> + * ( ( double * ) ( ( j + hRightElement - > fields ) - > _data . data ) ) , <nl> + ( uint64_t ) ( hLeftElement - > data ) , <nl> + ( uint64_t ) ( hRightElement - > data ) <nl> + ) ; <nl> + * / <nl> + compareResult = CompareShapedJsonShapedJson ( ( j + hLeftElement - > fields ) , <nl> + ( j + hRightElement - > fields ) , <nl> + leftShaper , <nl> + rightShaper ) ; <nl> + if ( compareResult ! = 0 ) { <nl> + return compareResult ; <nl> + } <nl> + } <nl> + <nl> + / / . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . <nl> + / / The ' keys ' match - - however , we may only have a partial match in reality <nl> + / / if not all keys comprising the index have been used . <nl> + / / . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . .
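<nl> + / / e . g . a one - field lookup key run against an index defined on two fields <nl> + / / compares only the first field ; defaultEqual then decides the outcome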
<nl> + return defaultEqual ; <nl> + } <nl> + <nl> + <nl> + <nl> + / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl> + / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl> + / / Non - unique skiplist <nl> + / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl> + / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl> + <nl> + <nl> + <nl> + <nl> + / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> + / / / @ brief used to determine the order of two elements <nl> + / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> + <nl> + static int IndexStaticMultiCompareElementElement ( TRI_skiplist_multi_t * multiSkiplist , void * leftElement , void * rightElement , int defaultEqual ) { <nl> + typedef SKIPLIST_ELEMENT_TYPE ( LocalElement_s , LocalElement_t ) ; <nl> + <nl> + int compareResult ; <nl> + LocalElement_t * hLeftElement = ( LocalElement_t * ) ( leftElement ) ; <nl> + LocalElement_t * hRightElement = ( LocalElement_t * ) ( rightElement ) ; <nl> + TRI_shaper_t * leftShaper ; <nl> + TRI_shaper_t * rightShaper ; <nl> + size_t j ; <nl> + <nl> + <nl> + if ( leftElement = = NULL & & rightElement = = NULL ) { <nl> + return TRI_SKIPLIST_COMPARE_STRICTLY_EQUAL ; <nl> + } <nl> + <nl> + if ( leftElement ! = NULL & & rightElement = = NULL ) { <nl> + return TRI_SKIPLIST_COMPARE_STRICTLY_GREATER ; <nl> + } <nl> + <nl> + if ( leftElement = = NULL & & rightElement ! = NULL ) { <nl> + return TRI_SKIPLIST_COMPARE_STRICTLY_LESS ; <nl> + } <nl> + <nl> + if ( leftElement = = rightElement ) { <nl> + return TRI_SKIPLIST_COMPARE_STRICTLY_EQUAL ; <nl> + } <nl> + <nl> + if ( hLeftElement - > numFields ! = hRightElement - > numFields ) { <nl> + assert ( false ) ; <nl> + } <nl> + <nl> + if ( hLeftElement - > data = = hRightElement - > data ) { <nl> + return TRI_SKIPLIST_COMPARE_STRICTLY_EQUAL ; <nl> + } <nl> + <nl> + <nl> + leftShaper = ( ( TRI_doc_collection_t * ) ( hLeftElement - > collection ) ) - > _shaper ; <nl> + rightShaper = ( ( TRI_doc_collection_t * ) ( hRightElement - > collection ) ) - > _shaper ; <nl> + <nl> + for ( j = 0 ; j < hLeftElement - > numFields ; j + + ) { <nl> + compareResult = CompareShapedJsonShapedJson ( ( j + hLeftElement - > fields ) , ( j + hRightElement - > fields ) , leftShaper , rightShaper ) ; <nl> + if ( compareResult ! 
= 0 ) { <nl> + return compareResult ; <nl> + } <nl> + } <nl> + <nl> + return defaultEqual ; <nl> + } <nl> + <nl> + <nl> + <nl> + / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> + / / / @ brief used to determine the order of two keys <nl> + / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> + <nl> + static int IndexStaticMultiCompareKeyElement ( TRI_skiplist_multi_t * multiSkiplist , void * leftElement , void * rightElement , int defaultEqual ) { <nl> + typedef SKIPLIST_ELEMENT_TYPE ( LocalElement_s , LocalElement_t ) ; <nl> + <nl> + int compareResult ; <nl> + size_t numFields ; <nl> + LocalElement_t * hLeftElement = ( LocalElement_t * ) ( leftElement ) ; <nl> + LocalElement_t * hRightElement = ( LocalElement_t * ) ( rightElement ) ; <nl> + TRI_shaper_t * leftShaper ; <nl> + TRI_shaper_t * rightShaper ; <nl> + size_t j ; <nl> + <nl> + if ( leftElement = = NULL & & rightElement = = NULL ) { <nl> + return 0 ; <nl> + } <nl> + <nl> + if ( leftElement ! = NULL & & rightElement = = NULL ) { <nl> + return 1 ; <nl> + } <nl> + <nl> + if ( leftElement = = NULL & & rightElement ! = NULL ) { <nl> + return - 1 ; <nl> + } <nl> + <nl> + <nl> + / / . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . <nl> + / / The document could be the same - - so no further comparison is required . <nl> + / / . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . <nl> + <nl> + if ( hLeftElement - > data = = hRightElement - > data ) { <nl> + return 0 ; <nl> + } <nl> + <nl> + <nl> + / / . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . <nl> + / / This call back function is used when we query the index , as such <nl> + / / the number of fields which we are using for the query may be less than <nl> + / / the number of fields that the index is defined with . <nl> + / / . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . <nl> + <nl> + if ( hLeftElement - > numFields < hRightElement - > numFields ) { <nl> + numFields = hLeftElement - > numFields ; <nl> + } <nl> + else { <nl> + numFields = hRightElement - > numFields ; <nl> + } <nl> + <nl> + leftShaper = ( ( TRI_doc_collection_t * ) ( hLeftElement - > collection ) ) - > _shaper ; <nl> + rightShaper = ( ( TRI_doc_collection_t * ) ( hRightElement - > collection ) ) - > _shaper ; <nl> + <nl> + for ( j = 0 ; j < numFields ; j + + ) { <nl> + compareResult = CompareShapedJsonShapedJson ( ( j + hLeftElement - > fields ) , ( j + hRightElement - > fields ) , leftShaper , rightShaper ) ; <nl> + if ( compareResult ! 
= 0 ) { <nl> + return compareResult ; <nl> + } <nl> + } <nl> + <nl> + return defaultEqual ; <nl> + } <nl> + <nl> + <nl> + / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> + / / / @ brief used to determine if two elements are equal <nl> + / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> + <nl> + static bool IndexStaticMultiEqualElementElement ( TRI_skiplist_multi_t * multiSkiplist , void * leftElement , void * rightElement ) { <nl> + <nl> + typedef SKIPLIST_ELEMENT_TYPE ( LocalElement_s , LocalElement_t ) ; <nl> + <nl> + LocalElement_t * hLeftElement = ( LocalElement_t * ) ( leftElement ) ; <nl> + LocalElement_t * hRightElement = ( LocalElement_t * ) ( rightElement ) ; <nl> + <nl> + if ( leftElement = = rightElement ) { <nl> + return true ; <nl> + } <nl> + <nl> + / * <nl> + printf ( " % s : % u : % f : % f , % u : % u \ n " , __FILE__ , __LINE__ , <nl> + * ( ( double * ) ( ( hLeftElement - > fields ) - > _data . data ) ) , <nl> + * ( ( double * ) ( ( hRightElement - > fields ) - > _data . data ) ) , <nl> + ( uint64_t ) ( hLeftElement - > data ) , <nl> + ( uint64_t ) ( hRightElement - > data ) <nl> + ) ; <nl> + * / <nl> + return ( hLeftElement - > data = = hRightElement - > data ) ; <nl> + } <nl> + <nl> + <nl> + <nl> + <nl> + <nl> + <nl> + <nl> + <nl> + # ifdef __cplusplus <nl> + } <nl> + # endif <nl> + <nl> + # endif <nl> + <nl> + / / Local Variables : <nl> + / / mode : outline - minor <nl> + / / outline - regexp : " ^ \ \ ( / / / @ brief \ \ | / / / { @ inheritDoc } \ \ | / / / @ addtogroup \ \ | / / - - SECTION - - \ \ | / / / @ \ \ } \ \ ) " <nl> + / / End : <nl> + <nl> mmm a / SkipLists / skiplist . c <nl> ppp b / SkipLists / skiplist . c <nl> <nl> # include " skiplist . h " <nl> # include < BasicsC / random . h > <nl> <nl> + # include " compare . h " <nl> + <nl> # define SKIPLIST_ABSOLUTE_MAX_HEIGHT 100 <nl> <nl> / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl> static void TRI_DestroySkipListNode ( TRI_skiplist_node_t * node ) { <nl> / / / @ brief Grow the node at the height specified . <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> <nl> - static void GrowNodeHeight ( TRI_skiplist_node_t * node , uint32_t newHeight ) { <nl> + static bool GrowNodeHeight ( TRI_skiplist_node_t * node , uint32_t newHeight ) { <nl> <nl> TRI_skiplist_nb_t * oldColumn = node - > _column ; <nl> uint32_t j ; <nl> <nl> if ( node - > _colLength > = newHeight ) { <nl> - return ; <nl> + return true ; <nl> } <nl> <nl> - node - > _column = TRI_Allocate ( sizeof ( TRI_skiplist_node_t ) * newHeight ) ; <nl> - / * FIXME : memory allocation might fail * / <nl> - memcpy ( node - > _column , oldColumn , node - > _colLength * sizeof ( TRI_skiplist_node_t ) ) ; <nl> + node - > _column = TRI_Allocate ( sizeof ( TRI_skiplist_nb_t ) * newHeight ) ; <nl> + <nl> + if ( node - > _column = = NULL ) { <nl> + / / out of memory : restore the old column so the node stays consistent <nl> + node - > _column = oldColumn ; <nl> + return false ; <nl> + } <nl> + <nl> + if ( oldColumn ! = NULL ) { <nl> + memcpy ( node - > _column , oldColumn , node - > _colLength * sizeof ( TRI_skiplist_nb_t ) ) ; <nl> + TRI_Free ( oldColumn ) ; <nl> + } <nl> <nl> / / . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . <nl>
/ / Initialise the storage <nl> static void GrowNodeHeight ( TRI_skiplist_node_t * node , uint32_t newHeight ) { <nl> ( node - > _column ) [ j ] . _next = NULL ; <nl> } <nl> <nl> - TRI_Free ( oldColumn ) ; <nl> node - > _colLength = newHeight ; <nl> + <nl> + return true ; <nl> } <nl> <nl> <nl> void TRI_InitSkipList ( TRI_skiplist_t * skiplist , size_t elementSize , <nl> int ( * compareKeyElement ) ( TRI_skiplist_t * , void * , void * , int ) , <nl> TRI_skiplist_prob_e probability , uint32_t maximumHeight ) { <nl> <nl> + bool growResult ; <nl> if ( skiplist = = NULL ) { <nl> return ; <nl> } <nl> void TRI_InitSkipList ( TRI_skiplist_t * skiplist , size_t elementSize , <nl> skiplist - > compareElementElement = compareElementElement ; <nl> skiplist - > compareKeyElement = compareKeyElement ; <nl> <nl> + # ifndef USE_STATIC_SKIPLIST_COMPARE <nl> + if ( compareElementElement = = NULL | | compareKeyElement = = NULL ) { <nl> + printf ( " % s : % d : Compare function pointers have a value of NULL \ n " , __FILE__ , __LINE__ ) ; <nl> + assert ( 0 ) ; <nl> + } <nl> + # endif <nl> + <nl> / / . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . <nl> / / Assign the maximum height of the skip list . This maximum height must be <nl> / / no greater than the absolute max height defined as a compile time parameter <nl> void TRI_InitSkipList ( TRI_skiplist_t * skiplist , size_t elementSize , <nl> / / each node will have a height of two . So initialise the start and end nodes <nl> / / with this ' average ' height <nl> / / . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . <nl> - GrowNodeHeight ( & ( skiplist - > _base . _startNode ) , 2 ) ; <nl> - GrowNodeHeight ( & ( skiplist - > _base . _endNode ) , 2 ) ; <nl> + growResult = GrowNodeHeight ( & ( skiplist - > _base . _startNode ) , 2 ) ; / / may fail <nl> + growResult = growResult & & GrowNodeHeight ( & ( skiplist - > _base . _endNode ) , 2 ) ; / / may fail <nl> + if ( ! growResult ) { <nl> + / / todo : undo growth by cutting down the node height <nl> + return ; <nl> + } <nl> <nl> / / . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . <nl> / / Join the empty lists together <nl> bool TRI_InsertKeySkipList ( TRI_skiplist_t * skiplist , void * key , void * element , <nl> TRI_skiplist_node_t * tempLeftNode ; <nl> TRI_skiplist_node_t * tempRightNode ; <nl> int compareResult ; <nl> + bool growResult ; <nl> int j ; <nl> <nl> / / . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . <nl> bool TRI_InsertKeySkipList ( TRI_skiplist_t * skiplist , void * key , void * element , <nl> / / . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . <nl> oldColLength = skiplist - > _base . _startNode . _colLength ; <nl> if ( ( uint32_t ) ( newHeight ) > oldColLength ) { <nl> - GrowNodeHeight ( & ( skiplist - > _base . _startNode ) , newHeight ) ; <nl> - GrowNodeHeight ( & ( skiplist - > _base . _endNode ) , newHeight ) ; <nl> + growResult = GrowNodeHeight ( & ( skiplist - > _base .
_startNode ) , newHeight ) ; <nl> + growResult = growResult & & GrowNodeHeight ( & ( skiplist - > _base . _endNode ) , newHeight ) ; <nl> + if ( ! growResult ) { <nl> + / / todo : undo growth by cutting down the node height <nl> + return false ; <nl> + } <nl> JoinNodes ( & ( skiplist - > _base . _startNode ) , & ( skiplist - > _base . _endNode ) , oldColLength , newHeight - 1 ) ; <nl> } <nl> <nl> bool TRI_InsertKeySkipList ( TRI_skiplist_t * skiplist , void * key , void * element , <nl> / / Copy the contents of element into the new node to be inserted . <nl> / / If a duplicate has been found , then we destroy the allocated memory . <nl> / / . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . <nl> - newNode - > _extraData = NULL ; <nl> + newNode - > _column = NULL ; <nl> newNode - > _colLength = 0 ; <nl> + newNode - > _extraData = NULL ; <nl> memcpy ( & ( newNode - > _element ) , element , skiplist - > _base . _elementSize ) ; <nl> - GrowNodeHeight ( newNode , newHeight ) ; <nl> - <nl> + growResult = GrowNodeHeight ( newNode , newHeight ) ; <nl> + if ( ! growResult ) { <nl> + TRI_FreeSkipListNode ( & ( skiplist - > _base ) , newNode ) ; <nl> + return false ; <nl> + } <nl> <nl> / / . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . <nl> / / Determine the path where the new item is to be inserted . If the item <nl> bool TRI_InsertKeySkipList ( TRI_skiplist_t * skiplist , void * key , void * element , <nl> / / Use the callback to determine if the element is less or greater than <nl> / / the next node element . <nl> / / . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . <nl> + # ifdef USE_STATIC_SKIPLIST_COMPARE <nl> + compareResult = IndexStaticCompareKeyElement ( skiplist , key , & ( nextNode - > _element ) , 0 ) ; <nl> + # else <nl> compareResult = skiplist - > compareKeyElement ( skiplist , key , & ( nextNode - > _element ) , 0 ) ; <nl> + # endif <nl> + <nl> <nl> / / . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . <nl> / / The element matches the next element . Overwrite if possible and return . <nl> void * TRI_LeftLookupByKeySkipList ( TRI_skiplist_t * skiplist , void * key ) { <nl> / / Use the callback to determine if the element is less or greater than <nl> / / the next node element . <nl> / / . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . <nl> + # ifdef USE_STATIC_SKIPLIST_COMPARE <nl> + compareResult = IndexStaticCompareKeyElement ( skiplist , key , & ( nextNode - > _element ) , - 1 ) ; <nl> + # else <nl> compareResult = skiplist - > compareKeyElement ( skiplist , key , & ( nextNode - > _element ) , - 1 ) ; <nl> + # endif <nl> + <nl> <nl> / / . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . <nl> / / - 1 is returned if the number of fields ( attributes ) in the key is LESS <nl> void * TRI_LookupByKeySkipList ( TRI_skiplist_t * skiplist , void * key ) { <nl> / / Use the callback to determine if the element is less or greater than <nl> / / the next node element . <nl> / / . . . . . . . . . . . . . . . . . . . . . . . . . . . . . 
. . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . <nl> + # ifdef USE_STATIC_SKIPLIST_COMPARE <nl> + compareResult = IndexStaticCompareKeyElement ( skiplist , key , & ( nextNode - > _element ) , 0 ) ; <nl> + # else <nl> compareResult = skiplist - > compareKeyElement ( skiplist , key , & ( nextNode - > _element ) , 0 ) ; <nl> + # endif <nl> <nl> / / . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . <nl> / / We have found the item ! <nl> bool TRI_RemoveElementSkipList ( TRI_skiplist_t * skiplist , void * element , void * o <nl> / / Use the callback to determine if the element is less or greater than <nl> / / the next node element . <nl> / / . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . <nl> + # ifdef USE_STATIC_SKIPLIST_COMPARE <nl> + compareResult = IndexStaticCompareElementElement ( skiplist , element , & ( nextNode - > _element ) , - 1 ) ; <nl> + # else <nl> compareResult = skiplist - > compareElementElement ( skiplist , element , & ( nextNode - > _element ) , - 1 ) ; <nl> + # endif <nl> <nl> / / . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . <nl> / / We have found the item ! <nl> void * TRI_RightLookupByKeySkipList ( TRI_skiplist_t * skiplist , void * key ) { <nl> / / Use the callback to determine if the element is less or greater than <nl> / / the next node element . <nl> / / . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . <nl> - compareResult = skiplist - > compareKeyElement ( skiplist , key , & ( prevNode - > _element ) , 1 ) ; <nl> + # ifdef USE_STATIC_SKIPLIST_COMPARE <nl> + compareResult = IndexStaticCompareKeyElement ( skiplist , key , & ( prevNode - > _element ) , 1 ) ; <nl> + # else <nl> + compareResult = skiplist - > compareKeyElement ( skiplist , key , & ( prevNode - > _element ) , 1 ) ; <nl> + # endif <nl> <nl> <nl> / / . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . <nl> void TRI_InitSkipListMulti ( TRI_skiplist_multi_t * skiplist , <nl> TRI_skiplist_prob_e probability , <nl> uint32_t maximumHeight ) { <nl> <nl> + bool growResult ; <nl> + <nl> if ( skiplist = = NULL ) { <nl> return ; <nl> } <nl> void TRI_InitSkipListMulti ( TRI_skiplist_multi_t * skiplist , <nl> skiplist - > compareElementElement = compareElementElement ; <nl> skiplist - > compareKeyElement = compareKeyElement ; <nl> skiplist - > equalElementElement = equalElementElement ; <nl> + <nl> + # ifndef USE_STATIC_SKIPLIST_COMPARE <nl> + if ( compareElementElement = = NULL | | compareKeyElement = = NULL | | equalElementElement = = NULL ) { <nl> + printf ( " % s : % d : Compare function pointers have a value of NULL \ n " , __FILE__ , __LINE__ ) ; <nl> + assert ( 0 ) ; <nl> + # endif <nl> <nl> / / . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . <nl> / / Assign the maximum height of the skip list . This maximum height must be <nl> void TRI_InitSkipListMulti ( TRI_skiplist_multi_t * skiplist , <nl> / / each node will have a height of two . So initialise the start and end nodes <nl> / / with this ' average ' height <nl> / / . . 
. . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . <nl> - GrowNodeHeight ( & ( skiplist - > _base . _startNode ) , 2 ) ; <nl> - GrowNodeHeight ( & ( skiplist - > _base . _endNode ) , 2 ) ; <nl> - <nl> + growResult = GrowNodeHeight ( & ( skiplist - > _base . _startNode ) , 2 ) ; <nl> + growResult = growResult & & GrowNodeHeight ( & ( skiplist - > _base . _endNode ) , 2 ) ; <nl> + if ( ! growResult ) { <nl> + / / todo : truncate he nodes and return <nl> + return ; <nl> + } <nl> + <nl> / / . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . <nl> / / Join the empty lists together <nl> / / [ N ] < mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - > [ N ] <nl> void * TRI_LeftLookupByKeySkipListMulti ( TRI_skiplist_multi_t * skiplist , void * key <nl> / / Use the callback to determine if the element is less or greater than <nl> / / the next node element . <nl> / / . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . <nl> + # ifdef USE_STATIC_SKIPLIST_COMPARE <nl> + compareResult = IndexStaticMultiCompareKeyElement ( skiplist , key , & ( nextNode - > _element ) , - 1 ) ; <nl> + # else <nl> compareResult = skiplist - > compareKeyElement ( skiplist , key , & ( nextNode - > _element ) , - 1 ) ; <nl> + # endif <nl> <nl> <nl> / / . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . <nl> bool TRI_InsertElementSkipListMulti ( TRI_skiplist_multi_t * skiplist , void * elemen <nl> TRI_skiplist_node_t * newNode ; <nl> TRI_skiplist_node_t * tempLeftNode ; <nl> TRI_skiplist_node_t * tempRightNode ; <nl> + bool growResult ; <nl> int compareResult ; <nl> int j ; <nl> <nl> bool TRI_InsertElementSkipListMulti ( TRI_skiplist_multi_t * skiplist , void * elemen <nl> / / . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . <nl> oldColLength = skiplist - > _base . _startNode . _colLength ; <nl> if ( ( uint32_t ) ( newHeight ) > oldColLength ) { <nl> - GrowNodeHeight ( & ( skiplist - > _base . _startNode ) , newHeight ) ; <nl> - GrowNodeHeight ( & ( skiplist - > _base . _endNode ) , newHeight ) ; <nl> + growResult = GrowNodeHeight ( & ( skiplist - > _base . _startNode ) , newHeight ) ; <nl> + growResult = growResult & & GrowNodeHeight ( & ( skiplist - > _base . _endNode ) , newHeight ) ; <nl> + if ( ! growResult ) { <nl> + / / todo : truncate the nodes and return ; <nl> + return false ; <nl> + } <nl> JoinNodes ( & ( skiplist - > _base . _startNode ) , & ( skiplist - > _base . _endNode ) , oldColLength , newHeight - 1 ) ; <nl> } <nl> <nl> bool TRI_InsertElementSkipListMulti ( TRI_skiplist_multi_t * skiplist , void * elemen <nl> / / Copy the contents of element into the new node to be inserted . <nl> / / If a duplicate has been found , then we destroy the allocated memory . <nl> / / . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . <nl> - newNode - > _extraData = NULL ; <nl> + newNode - > _column = NULL ; <nl> newNode - > _colLength = 0 ; <nl> + newNode - > _extraData = NULL ; <nl> memcpy ( & ( newNode - > _element ) , element , skiplist - > _base . 
_elementSize ) ; <nl> - GrowNodeHeight ( newNode , newHeight ) ; <nl> - <nl> + growResult = GrowNodeHeight ( newNode , newHeight ) ; <nl> + if ( ! growResult ) { <nl> + TRI_FreeSkipListNode ( & ( skiplist - > _base ) , newNode ) ; <nl> + return false ; <nl> + } <nl> <nl> / / . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . <nl> / / Determine the path where the new item is to be inserted . If the item <nl> bool TRI_InsertElementSkipListMulti ( TRI_skiplist_multi_t * skiplist , void * elemen <nl> / / Use the callback to determine if the element is less or greater than <nl> / / the next node element . <nl> / / . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . <nl> + # ifdef USE_STATIC_SKIPLIST_COMPARE <nl> + compareResult = IndexStaticMultiCompareElementElement ( skiplist , element , & ( nextNode - > _element ) , - 1 ) ; <nl> + # else <nl> compareResult = skiplist - > compareElementElement ( skiplist , element , & ( nextNode - > _element ) , - 1 ) ; <nl> + # endif <nl> <nl> <nl> / / . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . <nl> bool TRI_RemoveElementSkipListMulti ( TRI_skiplist_multi_t * skiplist , void * eleme <nl> / / Use the callback to determine if the element is less or greater than <nl> / / the next node element . <nl> / / . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . <nl> + # ifdef USE_STATIC_SKIPLIST_COMPARE <nl> + compareResult = IndexStaticMultiCompareElementElement ( skiplist , element , & ( nextNode - > _element ) , TRI_SKIPLIST_COMPARE_SLIGHTLY_LESS ) ; <nl> + # else <nl> compareResult = skiplist - > compareElementElement ( skiplist , element , & ( nextNode - > _element ) , TRI_SKIPLIST_COMPARE_SLIGHTLY_LESS ) ; <nl> + # endif <nl> <nl> / / . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . <nl> / / We have found an item which matches the key <nl> bool TRI_RemoveElementSkipListMulti ( TRI_skiplist_multi_t * skiplist , void * eleme <nl> / / . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . <nl> <nl> while ( currentNode ! = NULL ) { <nl> + # ifdef USE_STATIC_SKIPLIST_COMPARE <nl> + if ( IndexStaticMultiEqualElementElement ( skiplist , element , & ( currentNode - > _element ) ) ) { <nl> + break ; <nl> + } <nl> + # else <nl> if ( skiplist - > equalElementElement ( skiplist , element , & ( currentNode - > _element ) ) ) { <nl> break ; <nl> } <nl> + # endif <nl> currentNode = TRI_NextNodeBaseSkipList ( & ( skiplist - > _base ) , currentNode ) ; <nl> } <nl> <nl> void * TRI_RightLookupByKeySkipListMulti ( TRI_skiplist_multi_t * skiplist , void * ke <nl> / / Use the callback to determine if the element is less or greater than <nl> / / the next node element . <nl> / / . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . 
<nl> + # ifdef USE_STATIC_SKIPLIST_COMPARE <nl> + compareResult = IndexStaticMultiCompareKeyElement ( skiplist , key , & ( prevNode - > _element ) , 1 ) ; <nl> + # else <nl> compareResult = skiplist - > compareKeyElement ( skiplist , key , & ( prevNode - > _element ) , 1 ) ; <nl> + # endif <nl> <nl> <nl> / / . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . <nl> mmm a / SkipLists / skiplistIndex . c <nl> ppp b / SkipLists / skiplistIndex . c <nl> <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> <nl> # include " skiplistIndex . h " <nl> - # include " VocBase / document - collection . h " <nl> <nl> <nl> / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm <nl> <nl> static bool skiplistIndex_findHelperIntervalValid ( SkiplistIndex * , TRI_skiplist_iterator_interval_t * ) ; <nl> static bool multiSkiplistIndex_findHelperIntervalValid ( SkiplistIndex * , TRI_skiplist_iterator_interval_t * ) ; <nl> <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - / / / @ brief Helper method for recursion for CompareShapedJsonShapedJson <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - <nl> - static int CompareShapeTypes ( const TRI_shaped_json_t * left , const TRI_shaped_json_t * right , TRI_shaper_t * leftShaper , TRI_shaper_t * rightShaper ) { <nl> - <nl> - int result ; <nl> - size_t j ; <nl> - TRI_shape_type_t leftType ; <nl> - TRI_shape_type_t rightType ; <nl> - const TRI_shape_t * leftShape ; <nl> - const TRI_shape_t * rightShape ; <nl> - size_t leftListLength ; <nl> - size_t rightListLength ; <nl> - size_t listLength ; <nl> - TRI_shaped_json_t leftElement ; <nl> - TRI_shaped_json_t rightElement ; <nl> - char * leftString ; <nl> - char * rightString ; <nl> - <nl> - <nl> - leftShape = leftShaper - > lookupShapeId ( leftShaper , left - > _sid ) ; <nl> - rightShape = rightShaper - > lookupShapeId ( rightShaper , right - > _sid ) ; <nl> - leftType = leftShape - > _type ; <nl> - rightType = rightShape - > _type ; <nl> - <nl> - switch ( leftType ) { <nl> - <nl> - case TRI_SHAPE_ILLEGAL : { <nl> - switch ( rightType ) { <nl> - case TRI_SHAPE_ILLEGAL : <nl> - { <nl> - return 0 ; <nl> - } <nl> - case TRI_SHAPE_NULL : <nl> - case TRI_SHAPE_BOOLEAN : <nl> - case TRI_SHAPE_NUMBER : <nl> - case TRI_SHAPE_SHORT_STRING : <nl> - case TRI_SHAPE_LONG_STRING : <nl> - case TRI_SHAPE_ARRAY : <nl> - case TRI_SHAPE_LIST : <nl> - case TRI_SHAPE_HOMOGENEOUS_LIST : <nl> - case TRI_SHAPE_HOMOGENEOUS_SIZED_LIST : <nl> - { <nl> - return - 1 ; <nl> - } <nl> - } / / end of switch ( rightType ) <nl> - } <nl> - <nl> - case TRI_SHAPE_NULL : { <nl> - switch ( rightType ) { <nl> - case TRI_SHAPE_ILLEGAL : <nl> - { <nl> - return 1 ; <nl> - } <nl> - case TRI_SHAPE_NULL : <nl> - { <nl> - return 0 ; <nl> - } <nl> - case TRI_SHAPE_BOOLEAN : <nl> - case TRI_SHAPE_NUMBER : <nl> - case TRI_SHAPE_SHORT_STRING : <nl> - case TRI_SHAPE_LONG_STRING : <nl> - case TRI_SHAPE_ARRAY : <nl> - case TRI_SHAPE_LIST : <nl> - case TRI_SHAPE_HOMOGENEOUS_LIST : <nl> - case TRI_SHAPE_HOMOGENEOUS_SIZED_LIST : <nl> - { <nl> - return - 1 ; <nl> - } <nl> - } / / end of switch ( rightType ) 
<nl> - } <nl> - <nl> - case TRI_SHAPE_BOOLEAN : { <nl> - switch ( rightType ) { <nl> - case TRI_SHAPE_ILLEGAL : <nl> - case TRI_SHAPE_NULL : <nl> - { <nl> - return 1 ; <nl> - } <nl> - case TRI_SHAPE_BOOLEAN : <nl> - { <nl> - / / check which is false and which is true ! <nl> - if ( * ( ( TRI_shape_boolean_t * ) ( left - > _data . data ) ) = = * ( ( TRI_shape_boolean_t * ) ( right - > _data . data ) ) ) { <nl> - return 0 ; <nl> - } <nl> - if ( * ( ( TRI_shape_boolean_t * ) ( left - > _data . data ) ) < * ( ( TRI_shape_boolean_t * ) ( right - > _data . data ) ) ) { <nl> - return - 1 ; <nl> - } <nl> - return 1 ; <nl> - } <nl> - case TRI_SHAPE_NUMBER : <nl> - case TRI_SHAPE_SHORT_STRING : <nl> - case TRI_SHAPE_LONG_STRING : <nl> - case TRI_SHAPE_ARRAY : <nl> - case TRI_SHAPE_LIST : <nl> - case TRI_SHAPE_HOMOGENEOUS_LIST : <nl> - case TRI_SHAPE_HOMOGENEOUS_SIZED_LIST : <nl> - { <nl> - return - 1 ; <nl> - } <nl> - } / / end of switch ( rightType ) <nl> - } <nl> - <nl> - case TRI_SHAPE_NUMBER : { <nl> - switch ( rightType ) { <nl> - case TRI_SHAPE_ILLEGAL : <nl> - case TRI_SHAPE_NULL : <nl> - case TRI_SHAPE_BOOLEAN : <nl> - { <nl> - return 1 ; <nl> - } <nl> - case TRI_SHAPE_NUMBER : <nl> - { <nl> - / / compare the numbers . <nl> - if ( * ( ( TRI_shape_number_t * ) ( left - > _data . data ) ) = = * ( ( TRI_shape_number_t * ) ( right - > _data . data ) ) ) { <nl> - return 0 ; <nl> - } <nl> - if ( * ( ( TRI_shape_number_t * ) ( left - > _data . data ) ) < * ( ( TRI_shape_number_t * ) ( right - > _data . data ) ) ) { <nl> - return - 1 ; <nl> - } <nl> - return 1 ; <nl> - } <nl> - case TRI_SHAPE_SHORT_STRING : <nl> - case TRI_SHAPE_LONG_STRING : <nl> - case TRI_SHAPE_ARRAY : <nl> - case TRI_SHAPE_LIST : <nl> - case TRI_SHAPE_HOMOGENEOUS_LIST : <nl> - case TRI_SHAPE_HOMOGENEOUS_SIZED_LIST : <nl> - { <nl> - return - 1 ; <nl> - } <nl> - } / / end of switch ( rightType ) <nl> - } <nl> - <nl> - case TRI_SHAPE_SHORT_STRING : <nl> - case TRI_SHAPE_LONG_STRING : <nl> - { <nl> - switch ( rightType ) { <nl> - case TRI_SHAPE_ILLEGAL : <nl> - case TRI_SHAPE_NULL : <nl> - case TRI_SHAPE_BOOLEAN : <nl> - case TRI_SHAPE_NUMBER : <nl> - { <nl> - return 1 ; <nl> - } <nl> - case TRI_SHAPE_SHORT_STRING : <nl> - case TRI_SHAPE_LONG_STRING : <nl> - { <nl> - / / compare strings <nl> - / / extract the strings <nl> - if ( leftType = = TRI_SHAPE_SHORT_STRING ) { <nl> - leftString = ( char * ) ( sizeof ( TRI_shape_length_short_string_t ) + left - > _data . data ) ; <nl> - } <nl> - else { <nl> - leftString = ( char * ) ( sizeof ( TRI_shape_length_long_string_t ) + left - > _data . data ) ; <nl> - } <nl> - <nl> - if ( rightType = = TRI_SHAPE_SHORT_STRING ) { <nl> - rightString = ( char * ) ( sizeof ( TRI_shape_length_short_string_t ) + right - > _data . data ) ; <nl> - } <nl> - else { <nl> - rightString = ( char * ) ( sizeof ( TRI_shape_length_long_string_t ) + right - > _data . 
data ) ; <nl> - } <nl> - <nl> - result = strcmp ( leftString , rightString ) ; <nl> - return result ; <nl> - } <nl> - case TRI_SHAPE_ARRAY : <nl> - case TRI_SHAPE_LIST : <nl> - case TRI_SHAPE_HOMOGENEOUS_LIST : <nl> - case TRI_SHAPE_HOMOGENEOUS_SIZED_LIST : <nl> - { <nl> - return - 1 ; <nl> - } <nl> - } / / end of switch ( rightType ) <nl> - } <nl> - <nl> - case TRI_SHAPE_HOMOGENEOUS_LIST : <nl> - case TRI_SHAPE_HOMOGENEOUS_SIZED_LIST : <nl> - case TRI_SHAPE_LIST : <nl> - { <nl> - switch ( rightType ) { <nl> - case TRI_SHAPE_ILLEGAL : <nl> - case TRI_SHAPE_NULL : <nl> - case TRI_SHAPE_BOOLEAN : <nl> - case TRI_SHAPE_NUMBER : <nl> - case TRI_SHAPE_SHORT_STRING : <nl> - case TRI_SHAPE_LONG_STRING : <nl> - { <nl> - return 1 ; <nl> - } <nl> - case TRI_SHAPE_HOMOGENEOUS_LIST : <nl> - case TRI_SHAPE_HOMOGENEOUS_SIZED_LIST : <nl> - case TRI_SHAPE_LIST : <nl> - { <nl> - / / unfortunately recursion : check the types of all the entries <nl> - leftListLength = * ( ( TRI_shape_length_list_t * ) ( left - > _data . data ) ) ; <nl> - rightListLength = * ( ( TRI_shape_length_list_t * ) ( right - > _data . data ) ) ; <nl> - <nl> - / / determine the smallest list <nl> - if ( leftListLength > rightListLength ) { <nl> - listLength = rightListLength ; <nl> - } <nl> - else { <nl> - listLength = leftListLength ; <nl> - } <nl> - <nl> - for ( j = 0 ; j < listLength ; + + j ) { <nl> - <nl> - if ( leftType = = TRI_SHAPE_HOMOGENEOUS_LIST ) { <nl> - TRI_AtHomogeneousListShapedJson ( ( const TRI_homogeneous_list_shape_t * ) ( leftShape ) , <nl> - left , j , & leftElement ) ; <nl> - } <nl> - else if ( leftType = = TRI_SHAPE_HOMOGENEOUS_SIZED_LIST ) { <nl> - TRI_AtHomogeneousSizedListShapedJson ( ( const TRI_homogeneous_sized_list_shape_t * ) ( leftShape ) , <nl> - left , j , & leftElement ) ; <nl> - } <nl> - else { <nl> - TRI_AtListShapedJson ( ( const TRI_list_shape_t * ) ( leftShape ) , left , j , & leftElement ) ; <nl> - } <nl> - <nl> - <nl> - if ( rightType = = TRI_SHAPE_HOMOGENEOUS_LIST ) { <nl> - TRI_AtHomogeneousListShapedJson ( ( const TRI_homogeneous_list_shape_t * ) ( rightShape ) , <nl> - right , j , & rightElement ) ; <nl> - } <nl> - else if ( rightType = = TRI_SHAPE_HOMOGENEOUS_SIZED_LIST ) { <nl> - TRI_AtHomogeneousSizedListShapedJson ( ( const TRI_homogeneous_sized_list_shape_t * ) ( rightShape ) , <nl> - right , j , & rightElement ) ; <nl> - } <nl> - else { <nl> - TRI_AtListShapedJson ( ( const TRI_list_shape_t * ) ( rightShape ) , right , j , & rightElement ) ; <nl> - } <nl> - <nl> - result = CompareShapeTypes ( & leftElement , & rightElement , leftShaper , rightShaper ) ; <nl> - if ( result ! 
= 0 ) { <nl> - return result ; <nl> - } <nl> - } <nl> - <nl> - / / up to listLength everything matches <nl> - if ( leftListLength < rightListLength ) { <nl> - return - 1 ; <nl> - } <nl> - else if ( leftListLength > rightListLength ) { <nl> - return 1 ; <nl> - } <nl> - return 0 ; <nl> - } <nl> - <nl> - <nl> - case TRI_SHAPE_ARRAY : <nl> - { <nl> - return - 1 ; <nl> - } <nl> - } / / end of switch ( rightType ) <nl> - } <nl> - <nl> - case TRI_SHAPE_ARRAY : <nl> - { <nl> - / * start oreste : <nl> - char * shape = ( char * ) ( leftShape ) ; <nl> - uint64_t fixedEntries ; <nl> - uint64_t variableEntries ; <nl> - uint64_t ssid ; <nl> - uint64_t aaid ; <nl> - char * name ; <nl> - TRI_shape_t * newShape ; <nl> - <nl> - shape = shape + sizeof ( TRI_shape_t ) ; <nl> - fixedEntries = * ( ( TRI_shape_size_t * ) ( shape ) ) ; <nl> - shape = shape + sizeof ( TRI_shape_size_t ) ; <nl> - variableEntries = * ( ( TRI_shape_size_t * ) ( shape ) ) ; <nl> - shape = shape + sizeof ( TRI_shape_size_t ) ; <nl> - ssid = * ( ( TRI_shape_sid_t * ) ( shape ) ) ; <nl> - shape = shape + ( sizeof ( TRI_shape_sid_t ) * ( fixedEntries + variableEntries ) ) ; <nl> - aaid = * ( ( TRI_shape_aid_t * ) ( shape ) ) ; <nl> - shape = shape + ( sizeof ( TRI_shape_aid_t ) * ( fixedEntries + variableEntries ) ) ; <nl> - <nl> - name = leftShaper - > lookupAttributeId ( leftShaper , aaid ) ; <nl> - newShape = leftShaper - > lookupShapeId ( leftShaper , ssid ) ; <nl> - <nl> - <nl> - printf ( " % s : % u : _fixedEntries : % u \ n " , __FILE__ , __LINE__ , fixedEntries ) ; <nl> - printf ( " % s : % u : _variableEntries : % u \ n " , __FILE__ , __LINE__ , variableEntries ) ; <nl> - printf ( " % s : % u : _sids [ 0 ] : % u \ n " , __FILE__ , __LINE__ , ssid ) ; <nl> - printf ( " % s : % u : _aids [ 0 ] : % u \ n " , __FILE__ , __LINE__ , aaid ) ; <nl> - printf ( " % s : % u : name : % s \ n " , __FILE__ , __LINE__ , name ) ; <nl> - printf ( " % s : % u : type : % d \ n " , __FILE__ , __LINE__ , newShape - > _type ) ; <nl> - <nl> - end oreste * / <nl> - assert ( false ) ; <nl> - switch ( rightType ) { <nl> - case TRI_SHAPE_ILLEGAL : <nl> - case TRI_SHAPE_NULL : <nl> - case TRI_SHAPE_BOOLEAN : <nl> - case TRI_SHAPE_NUMBER : <nl> - case TRI_SHAPE_SHORT_STRING : <nl> - case TRI_SHAPE_LONG_STRING : <nl> - case TRI_SHAPE_HOMOGENEOUS_LIST : <nl> - case TRI_SHAPE_HOMOGENEOUS_SIZED_LIST : <nl> - case TRI_SHAPE_LIST : <nl> - { <nl> - return 1 ; <nl> - } <nl> - case TRI_SHAPE_ARRAY : <nl> - { <nl> - assert ( false ) ; <nl> - result = 0 ; <nl> - return result ; <nl> - } <nl> - } / / end of switch ( rightType ) <nl> - } <nl> - <nl> - } <nl> - assert ( false ) ; <nl> - } <nl> - <nl> - <nl> - <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - / / / @ brief Compare a shapded json object recursively if necessary <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - <nl> - static int CompareShapedJsonShapedJson ( const TRI_shaped_json_t * left , const TRI_shaped_json_t * right , TRI_shaper_t * leftShaper , TRI_shaper_t * rightShaper ) { <nl> - <nl> - int result ; <nl> - <nl> - / / . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . 
<nl> - / / the following order is currently defined for placing an order on documents <nl> - / / undef < null < boolean < number < strings < lists < hash arrays <nl> - / / note : undefined will be treated as NULL pointer not NULL JSON OBJECT <nl> - / / within each type class we have the following order <nl> - / / boolean : false < true <nl> - / / number : natural order <nl> - / / strings : lexicographical <nl> - / / lists : lexicorgraphically and within each slot according to these rules . <nl> - / / . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . <nl> - <nl> - <nl> - if ( left = = NULL & & right = = NULL ) { <nl> - return 0 ; <nl> - } <nl> - <nl> - if ( left = = NULL & & right ! = NULL ) { <nl> - return - 1 ; <nl> - } <nl> - <nl> - if ( left ! = NULL & & right = = NULL ) { <nl> - return 1 ; <nl> - } <nl> - <nl> - result = CompareShapeTypes ( left , right , leftShaper , rightShaper ) ; <nl> - <nl> - return result ; <nl> - <nl> - } / / end of function CompareShapedJsonShapedJson <nl> - <nl> <nl> <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> void SkiplistIndexFree ( SkiplistIndex * slIndex ) { <nl> / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm <nl> <nl> <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - / / / @ brief compares two elements in a skip list <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - <nl> - static int CompareElementElement ( struct TRI_skiplist_s * skiplist , void * leftElement , void * rightElement , int defaultEqual ) { <nl> - / / . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . <nl> - / / Compare two elements and determines : <nl> - / / left < right : return - 1 <nl> - / / left = = right : return 0 <nl> - / / left > right : return 1 <nl> - / / . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . <nl> - int compareResult ; <nl> - SkiplistIndexElement * hLeftElement = ( SkiplistIndexElement * ) ( leftElement ) ; <nl> - SkiplistIndexElement * hRightElement = ( SkiplistIndexElement * ) ( rightElement ) ; <nl> - TRI_shaper_t * leftShaper ; <nl> - TRI_shaper_t * rightShaper ; <nl> - size_t j ; <nl> - <nl> - / / . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . <nl> - / / the following order is currently defined for placing an order on documents <nl> - / / undef < null < boolean < number < strings < lists < hash arrays <nl> - / / note : undefined will be treated as NULL pointer not NULL JSON OBJECT <nl> - / / within each type class we have the following order <nl> - / / boolean : false < true <nl> - / / number : natural order <nl> - / / strings : lexicographical <nl> - / / lists : lexicorgraphically and within each slot according to these rules . <nl> - / / . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . 
. . . . . . . . . . . . . . . . . <nl> - <nl> - if ( leftElement = = NULL & & rightElement = = NULL ) { <nl> - return 0 ; <nl> - } <nl> - <nl> - if ( leftElement ! = NULL & & rightElement = = NULL ) { <nl> - return 1 ; <nl> - } <nl> - <nl> - if ( leftElement = = NULL & & rightElement ! = NULL ) { <nl> - return - 1 ; <nl> - } <nl> - <nl> - if ( leftElement = = rightElement ) { <nl> - return 0 ; <nl> - } <nl> - <nl> - / / . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . <nl> - / / This call back function is used when we insert and remove unique skip <nl> - / / list entries . <nl> - / / . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . <nl> - <nl> - if ( hLeftElement - > numFields ! = hRightElement - > numFields ) { <nl> - assert ( false ) ; <nl> - } <nl> - <nl> - <nl> - / / . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . <nl> - / / The document could be the same - - so no further comparison is required . <nl> - / / . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . <nl> - if ( hLeftElement - > data = = hRightElement - > data ) { <nl> - return 0 ; <nl> - } <nl> - <nl> - <nl> - leftShaper = ( ( TRI_doc_collection_t * ) ( hLeftElement - > collection ) ) - > _shaper ; <nl> - rightShaper = ( ( TRI_doc_collection_t * ) ( hRightElement - > collection ) ) - > _shaper ; <nl> - <nl> - for ( j = 0 ; j < hLeftElement - > numFields ; j + + ) { <nl> - compareResult = CompareShapedJsonShapedJson ( ( j + hLeftElement - > fields ) , ( j + hRightElement - > fields ) , leftShaper , rightShaper ) ; <nl> - if ( compareResult ! = 0 ) { <nl> - return compareResult ; <nl> - } <nl> - } <nl> - <nl> - / / . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . <nl> - / / This is where the difference between CompareKeyElement ( below ) and <nl> - / / CompareElementElement comes into play . Here if the ' keys ' are the same , <nl> - / / but the doc ptr is different ( which it is since we are here ) , then <nl> - / / we return what was requested to be returned : 0 , - 1 or 1 . What is returned <nl> - / / depends on the purpose of calling this callback . <nl> - / / . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . <nl> - <nl> - return defaultEqual ; <nl> - } <nl> - <nl> - <nl> - <nl> - <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - / / / @ brief compares a key and an element <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - <nl> - static int CompareKeyElement ( struct TRI_skiplist_s * skiplist , void * leftElement , void * rightElement , int defaultEqual ) { <nl> - / / . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . 
<nl> - / / Compare two elements and determines : <nl> - / / left < right : return - 1 <nl> - / / left = = right : return 0 <nl> - / / left > right : return 1 <nl> - / / . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . <nl> - int compareResult ; <nl> - size_t numFields ; <nl> - SkiplistIndexElement * hLeftElement = ( SkiplistIndexElement * ) ( leftElement ) ; <nl> - SkiplistIndexElement * hRightElement = ( SkiplistIndexElement * ) ( rightElement ) ; <nl> - TRI_shaper_t * leftShaper ; <nl> - TRI_shaper_t * rightShaper ; <nl> - size_t j ; <nl> - <nl> - / / . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . <nl> - / / the following order is currently defined for placing an order on documents <nl> - / / undef < null < boolean < number < strings < lists < hash arrays <nl> - / / note : undefined will be treated as NULL pointer not NULL JSON OBJECT <nl> - / / within each type class we have the following order <nl> - / / boolean : false < true <nl> - / / number : natural order <nl> - / / strings : lexicographical <nl> - / / lists : lexicorgraphically and within each slot according to these rules . <nl> - / / associative array : ordered keys followed by value of key <nl> - / / . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . <nl> - <nl> - if ( leftElement = = NULL & & rightElement = = NULL ) { <nl> - return 0 ; <nl> - } <nl> - <nl> - if ( leftElement = = NULL & & rightElement ! = NULL ) { <nl> - return - 1 ; <nl> - } <nl> - <nl> - if ( leftElement ! = NULL & & rightElement = = NULL ) { <nl> - return 1 ; <nl> - } <nl> - <nl> - if ( leftElement = = rightElement ) { <nl> - return 0 ; <nl> - } <nl> - <nl> - / / . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . <nl> - / / The document could be the same - - so no further comparison is required . <nl> - / / . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . <nl> - if ( hLeftElement - > data = = hRightElement - > data ) { <nl> - return 0 ; <nl> - } <nl> - <nl> - / / . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . <nl> - / / This call back function is used when we query the index , as such <nl> - / / the number of fields which we are using for the query may be less than <nl> - / / the number of fields that the index is defined with . <nl> - / / . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . 
<nl> - <nl> - <nl> - if ( hLeftElement - > numFields < hRightElement - > numFields ) { <nl> - numFields = hLeftElement - > numFields ; <nl> - } <nl> - else { <nl> - numFields = hRightElement - > numFields ; <nl> - } <nl> - <nl> - <nl> - leftShaper = ( ( TRI_doc_collection_t * ) ( hLeftElement - > collection ) ) - > _shaper ; <nl> - rightShaper = ( ( TRI_doc_collection_t * ) ( hRightElement - > collection ) ) - > _shaper ; <nl> - <nl> - for ( j = 0 ; j < numFields ; j + + ) { <nl> - / * <nl> - printf ( " % s : % u : % f : % f , % u : % u \ n " , __FILE__ , __LINE__ , <nl> - * ( ( double * ) ( ( j + hLeftElement - > fields ) - > _data . data ) ) , <nl> - * ( ( double * ) ( ( j + hRightElement - > fields ) - > _data . data ) ) , <nl> - ( uint64_t ) ( hLeftElement - > data ) , <nl> - ( uint64_t ) ( hRightElement - > data ) <nl> - ) ; <nl> - * / <nl> - compareResult = CompareShapedJsonShapedJson ( ( j + hLeftElement - > fields ) , <nl> - ( j + hRightElement - > fields ) , <nl> - leftShaper , <nl> - rightShaper ) ; <nl> - if ( compareResult ! = 0 ) { <nl> - return compareResult ; <nl> - } <nl> - } <nl> - <nl> - / / . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . <nl> - / / The ' keys ' match - - however , we may only have a partial match in reality <nl> - / / if not all keys comprising index have been used . <nl> - / / . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . <nl> - return defaultEqual ; <nl> - } <nl> - <nl> - <nl> <nl> <nl> / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm <nl> SkiplistIndex * SkiplistIndex_new ( ) { <nl> return NULL ; <nl> } <nl> <nl> + TRI_InitSkipList ( skiplistIndex - > skiplist . uniqueSkiplist , <nl> + sizeof ( SkiplistIndexElement ) , <nl> + NULL , NULL , TRI_SKIPLIST_PROB_HALF , 40 ) ; <nl> + / * <nl> TRI_InitSkipList ( skiplistIndex - > skiplist . uniqueSkiplist , <nl> sizeof ( SkiplistIndexElement ) , <nl> CompareElementElement , <nl> CompareKeyElement , <nl> TRI_SKIPLIST_PROB_HALF , 40 ) ; <nl> - <nl> + * / <nl> return skiplistIndex ; <nl> } <nl> <nl> bool SkiplistIndex_update ( SkiplistIndex * skiplistIndex , const SkiplistIndexEleme <nl> <nl> <nl> <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - / / / @ brief used to determine the order of two elements <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - <nl> - static int MultiCompareElementElement ( TRI_skiplist_multi_t * multiSkiplist , void * leftElement , void * rightElement , int defaultEqual ) { <nl> - <nl> - int compareResult ; <nl> - SkiplistIndexElement * hLeftElement = ( SkiplistIndexElement * ) ( leftElement ) ; <nl> - SkiplistIndexElement * hRightElement = ( SkiplistIndexElement * ) ( rightElement ) ; <nl> - TRI_shaper_t * leftShaper ; <nl> - TRI_shaper_t * rightShaper ; <nl> - size_t j ; <nl> - <nl> - <nl> - if ( leftElement = = NULL & & rightElement = = NULL ) { <nl> - return TRI_SKIPLIST_COMPARE_STRICTLY_EQUAL ; <nl> - } <nl> - <nl> - if ( leftElement ! = NULL & & rightElement = = NULL ) { <nl> - return TRI_SKIPLIST_COMPARE_STRICTLY_GREATER ; <nl> - } <nl> - <nl> - if ( leftElement = = NULL & & rightElement ! 
= NULL ) { <nl> - return TRI_SKIPLIST_COMPARE_STRICTLY_LESS ; <nl> - } <nl> - <nl> - if ( leftElement = = rightElement ) { <nl> - return TRI_SKIPLIST_COMPARE_STRICTLY_EQUAL ; <nl> - } <nl> - <nl> - if ( hLeftElement - > numFields ! = hRightElement - > numFields ) { <nl> - assert ( false ) ; <nl> - } <nl> - <nl> - if ( hLeftElement - > data = = hRightElement - > data ) { <nl> - return TRI_SKIPLIST_COMPARE_STRICTLY_EQUAL ; <nl> - } <nl> - <nl> - <nl> - leftShaper = ( ( TRI_doc_collection_t * ) ( hLeftElement - > collection ) ) - > _shaper ; <nl> - rightShaper = ( ( TRI_doc_collection_t * ) ( hRightElement - > collection ) ) - > _shaper ; <nl> - <nl> - for ( j = 0 ; j < hLeftElement - > numFields ; j + + ) { <nl> - compareResult = CompareShapedJsonShapedJson ( ( j + hLeftElement - > fields ) , ( j + hRightElement - > fields ) , leftShaper , rightShaper ) ; <nl> - if ( compareResult ! = 0 ) { <nl> - return compareResult ; <nl> - } <nl> - } <nl> - <nl> - return defaultEqual ; <nl> - } <nl> - <nl> - <nl> - <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - / / / @ brief used to determine the order of two keys <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - <nl> - static int MultiCompareKeyElement ( TRI_skiplist_multi_t * multiSkiplist , void * leftElement , void * rightElement , int defaultEqual ) { <nl> - <nl> - int compareResult ; <nl> - size_t numFields ; <nl> - SkiplistIndexElement * hLeftElement = ( SkiplistIndexElement * ) ( leftElement ) ; <nl> - SkiplistIndexElement * hRightElement = ( SkiplistIndexElement * ) ( rightElement ) ; <nl> - TRI_shaper_t * leftShaper ; <nl> - TRI_shaper_t * rightShaper ; <nl> - size_t j ; <nl> - <nl> - if ( leftElement = = NULL & & rightElement = = NULL ) { <nl> - return 0 ; <nl> - } <nl> - <nl> - if ( leftElement ! = NULL & & rightElement = = NULL ) { <nl> - return 1 ; <nl> - } <nl> - <nl> - if ( leftElement = = NULL & & rightElement ! = NULL ) { <nl> - return - 1 ; <nl> - } <nl> - <nl> - <nl> - / / . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . <nl> - / / The document could be the same - - so no further comparison is required . <nl> - / / . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . <nl> - <nl> - if ( hLeftElement - > data = = hRightElement - > data ) { <nl> - return 0 ; <nl> - } <nl> - <nl> - <nl> - / / . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . <nl> - / / This call back function is used when we query the index , as such <nl> - / / the number of fields which we are using for the query may be less than <nl> - / / the number of fields that the index is defined with . <nl> - / / . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . 
<nl> - <nl> - if ( hLeftElement - > numFields < hRightElement - > numFields ) { <nl> - numFields = hLeftElement - > numFields ; <nl> - } <nl> - else { <nl> - numFields = hRightElement - > numFields ; <nl> - } <nl> - <nl> - leftShaper = ( ( TRI_doc_collection_t * ) ( hLeftElement - > collection ) ) - > _shaper ; <nl> - rightShaper = ( ( TRI_doc_collection_t * ) ( hRightElement - > collection ) ) - > _shaper ; <nl> - <nl> - for ( j = 0 ; j < numFields ; j + + ) { <nl> - compareResult = CompareShapedJsonShapedJson ( ( j + hLeftElement - > fields ) , ( j + hRightElement - > fields ) , leftShaper , rightShaper ) ; <nl> - if ( compareResult ! = 0 ) { <nl> - return compareResult ; <nl> - } <nl> - } <nl> - <nl> - return defaultEqual ; <nl> - } <nl> - <nl> - <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - / / / @ brief used to determine the order of two keys <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - <nl> - static bool MultiEqualElementElement ( TRI_skiplist_multi_t * multiSkiplist , void * leftElement , void * rightElement ) { <nl> - <nl> - SkiplistIndexElement * hLeftElement = ( SkiplistIndexElement * ) ( leftElement ) ; <nl> - SkiplistIndexElement * hRightElement = ( SkiplistIndexElement * ) ( rightElement ) ; <nl> - <nl> - if ( leftElement = = rightElement ) { <nl> - return true ; <nl> - } <nl> - <nl> - / * <nl> - printf ( " % s : % u : % f : % f , % u : % u \ n " , __FILE__ , __LINE__ , <nl> - * ( ( double * ) ( ( hLeftElement - > fields ) - > _data . data ) ) , <nl> - * ( ( double * ) ( ( hRightElement - > fields ) - > _data . data ) ) , <nl> - ( uint64_t ) ( hLeftElement - > data ) , <nl> - ( uint64_t ) ( hRightElement - > data ) <nl> - ) ; <nl> - * / <nl> - return ( hLeftElement - > data = = hRightElement - > data ) ; <nl> - } <nl> - <nl> <nl> <nl> <nl> SkiplistIndex * MultiSkiplistIndex_new ( ) { <nl> return NULL ; <nl> } <nl> <nl> + TRI_InitSkipListMulti ( skiplistIndex - > skiplist . nonUniqueSkiplist , <nl> + sizeof ( SkiplistIndexElement ) , <nl> + NULL , NULL , NULL , TRI_SKIPLIST_PROB_HALF , 40 ) ; <nl> + / * <nl> TRI_InitSkipListMulti ( skiplistIndex - > skiplist . nonUniqueSkiplist , <nl> sizeof ( SkiplistIndexElement ) , <nl> MultiCompareElementElement , <nl> MultiCompareKeyElement , <nl> MultiEqualElementElement , <nl> TRI_SKIPLIST_PROB_HALF , 40 ) ; <nl> - <nl> + * / <nl> return skiplistIndex ; <nl> } <nl> <nl> mmm a / SkipLists / sl - operator . c <nl> ppp b / SkipLists / sl - operator . 
c <nl> <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - / / / @ brief removes allocated memory <nl> + / / / @ brief Creates a new Skiplist operator <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> <nl> TRI_sl_operator_t * CreateSLOperator ( TRI_sl_operator_type_e operatorType , <nl> TRI_sl_operator_t * CreateSLOperator ( TRI_sl_operator_type_e operatorType , <nl> switch ( operatorType ) { <nl> case TRI_SL_AND_OPERATOR : <nl> case TRI_SL_NOT_OPERATOR : <nl> - case TRI_SL_OR_OPERATOR : <nl> - { <nl> - newLogicalOperator = ( TRI_sl_logical_operator_t * ) TRI_Allocate ( sizeof ( TRI_sl_logical_operator_t ) ) ; <nl> + case TRI_SL_OR_OPERATOR : { <nl> + <nl> + newLogicalOperator = ( TRI_sl_logical_operator_t * ) TRI_Allocate ( sizeof ( TRI_sl_logical_operator_t ) ) ; <nl> if ( ! newLogicalOperator ) { <nl> return NULL ; <nl> } <nl> TRI_sl_operator_t * CreateSLOperator ( TRI_sl_operator_type_e operatorType , <nl> case TRI_SL_GT_OPERATOR : <nl> case TRI_SL_NE_OPERATOR : <nl> case TRI_SL_LE_OPERATOR : <nl> - case TRI_SL_LT_OPERATOR : <nl> - { <nl> - newRelationOperator = ( TRI_sl_relation_operator_t * ) TRI_Allocate ( sizeof ( TRI_sl_relation_operator_t ) ) ; <nl> + case TRI_SL_LT_OPERATOR : { <nl> + <nl> + newRelationOperator = ( TRI_sl_relation_operator_t * ) TRI_Allocate ( sizeof ( TRI_sl_relation_operator_t ) ) ; <nl> if ( ! newRelationOperator ) { <nl> return NULL ; <nl> } <nl> <nl> - / * FIXME : memory allocation might fail * / <nl> newRelationOperator - > _base . 
_type = operatorType ; <nl> newRelationOperator - > _parameters = parameters ; <nl> newRelationOperator - > _fields = fields ; <nl> TRI_sl_operator_t * CreateSLOperator ( TRI_sl_operator_type_e operatorType , <nl> return newOperator ; <nl> } <nl> <nl> + <nl> + <nl> + / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> + / / / @ brief Destroys and frees any memory associated with a Skiplist operator <nl> + / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> + <nl> void ClearSLOperator ( TRI_sl_operator_t * slOperator ) { <nl> <nl> TRI_sl_logical_operator_t * logicalOperator ; <nl> void ClearSLOperator ( TRI_sl_operator_t * slOperator ) { <nl> switch ( slOperator - > _type ) { <nl> case TRI_SL_AND_OPERATOR : <nl> case TRI_SL_NOT_OPERATOR : <nl> - case TRI_SL_OR_OPERATOR : <nl> - { <nl> + case TRI_SL_OR_OPERATOR : { <nl> + <nl> logicalOperator = ( TRI_sl_logical_operator_t * ) ( slOperator ) ; <nl> ClearSLOperator ( logicalOperator - > _left ) ; <nl> ClearSLOperator ( logicalOperator - > _right ) ; <nl> - <nl> TRI_Free ( logicalOperator ) ; <nl> break ; <nl> + <nl> } <nl> <nl> + <nl> case TRI_SL_EQ_OPERATOR : <nl> case TRI_SL_GE_OPERATOR : <nl> case TRI_SL_GT_OPERATOR : <nl> case TRI_SL_NE_OPERATOR : <nl> case TRI_SL_LE_OPERATOR : <nl> - case TRI_SL_LT_OPERATOR : <nl> - { <nl> + case TRI_SL_LT_OPERATOR : { <nl> size_t i ; <nl> <nl> relationOperator = ( TRI_sl_relation_operator_t * ) ( slOperator ) ; <nl> if ( relationOperator - > _parameters ! = NULL ) { <nl> TRI_FreeJson ( relationOperator - > _parameters ) ; <nl> } <nl> - <nl> - / / relationOperator - > _fields contains _numFields shapedJson objects <nl> - for ( i = 0 ; i < relationOperator - > _numFields ; + + i ) { <nl> - / / destroy each individual shapedJson object <nl> - TRI_shaped_json_t * shaped = relationOperator - > _fields + i ; <nl> - TRI_DestroyShapedJson ( shaped ) ; <nl> - } <nl> - / / free the memory pointer <nl> - TRI_Free ( relationOperator - > _fields ) ; <nl> + <nl> + if ( relationOperator - > _fields ! 
= NULL ) { <nl> + / / relationOperator - > _fields contains _numFields shapedJson objects <nl> + for ( i = 0 ; i < relationOperator - > _numFields ; + + i ) { <nl> + / / destroy each individual shapedJson object <nl> + TRI_shaped_json_t * shaped = relationOperator - > _fields + i ; <nl> + TRI_DestroyShapedJson ( shaped ) ; <nl> + } <nl> + / / free the memory pointer <nl> + TRI_Free ( relationOperator - > _fields ) ; <nl> + } <nl> TRI_Free ( relationOperator ) ; <nl> break ; <nl> } <nl> } / / end of switch statement <nl> } <nl> <nl> + / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> + / / / @ brief Makes a deep copy of a Skiplist operator <nl> + / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> <nl> TRI_sl_operator_t * CopySLOperator ( TRI_sl_operator_t * slOperator ) { <nl> <nl> TRI_sl_operator_t * CopySLOperator ( TRI_sl_operator_t * slOperator ) { <nl> switch ( slOperator - > _type ) { <nl> case TRI_SL_AND_OPERATOR : <nl> case TRI_SL_NOT_OPERATOR : <nl> - case TRI_SL_OR_OPERATOR : <nl> - { <nl> - oldLogicalOperator = ( TRI_sl_logical_operator_t * ) ( slOperator ) ; <nl> - newLogicalOperator = ( TRI_sl_logical_operator_t * ) ( TRI_Allocate ( sizeof ( TRI_sl_logical_operator_t ) ) ) ; <nl> - / * FIXME : memory allocation might fail * / <nl> - newLogicalOperator - > _base . _type = slOperator - > _type ; <nl> - newLogicalOperator - > _left = CopySLOperator ( oldLogicalOperator - > _left ) ; <nl> - newLogicalOperator - > _right = CopySLOperator ( oldLogicalOperator - > _right ) ; <nl> - newOperator = & ( newLogicalOperator - > _base ) ; <nl> + case TRI_SL_OR_OPERATOR : { <nl> + <nl> + oldLogicalOperator = ( TRI_sl_logical_operator_t * ) ( slOperator ) ; <nl> + newLogicalOperator = ( TRI_sl_logical_operator_t * ) ( TRI_Allocate ( sizeof ( TRI_sl_logical_operator_t ) ) ) ; <nl> + if ( newLogicalOperator ! = NULL ) { <nl> + newLogicalOperator - > _base . _type = slOperator - > _type ; <nl> + newLogicalOperator - > _left = CopySLOperator ( oldLogicalOperator - > _left ) ; <nl> + newLogicalOperator - > _right = CopySLOperator ( oldLogicalOperator - > _right ) ; <nl> + newOperator = & ( newLogicalOperator - > _base ) ; <nl> + } <nl> break ; <nl> + <nl> } <nl> <nl> case TRI_SL_EQ_OPERATOR : <nl> TRI_sl_operator_t * CopySLOperator ( TRI_sl_operator_t * slOperator ) { <nl> case TRI_SL_GT_OPERATOR : <nl> case TRI_SL_NE_OPERATOR : <nl> case TRI_SL_LE_OPERATOR : <nl> - case TRI_SL_LT_OPERATOR : <nl> - { <nl> - oldRelationOperator = ( TRI_sl_relation_operator_t * ) ( slOperator ) ; <nl> - newRelationOperator = ( TRI_sl_relation_operator_t * ) ( TRI_Allocate ( sizeof ( TRI_sl_relation_operator_t ) ) ) ; <nl> - / * FIXME : memory allocation might fail * / <nl> - newRelationOperator - > _base . 
_type = slOperator - > _type ; <nl> - newRelationOperator - > _parameters = TRI_CopyJson ( oldRelationOperator - > _parameters ) ; <nl> - newRelationOperator - > _fields = TRI_CopyShapedJson ( oldRelationOperator - > _fields ) ; <nl> - newRelationOperator - > _numFields = oldRelationOperator - > _numFields ; <nl> - newRelationOperator - > _collection = oldRelationOperator - > _collection ; <nl> - newOperator = & ( newRelationOperator - > _base ) ; <nl> + case TRI_SL_LT_OPERATOR : { <nl> + <nl> + oldRelationOperator = ( TRI_sl_relation_operator_t * ) ( slOperator ) ; <nl> + newRelationOperator = ( TRI_sl_relation_operator_t * ) ( TRI_Allocate ( sizeof ( TRI_sl_relation_operator_t ) ) ) ; <nl> + if ( newRelationOperator ! = NULL ) { <nl> + newRelationOperator - > _base . _type = slOperator - > _type ; <nl> + newRelationOperator - > _parameters = TRI_CopyJson ( oldRelationOperator - > _parameters ) ; <nl> + if ( newRelationOperator - > _fields ! = NULL ) { <nl> + newRelationOperator - > _fields = TRI_CopyShapedJson ( oldRelationOperator - > _fields ) ; <nl> + } <nl> + newRelationOperator - > _numFields = oldRelationOperator - > _numFields ; <nl> + newRelationOperator - > _collection = oldRelationOperator - > _collection ; <nl> + newOperator = & ( newRelationOperator - > _base ) ; <nl> + } <nl> break ; <nl> + <nl> } <nl> } <nl> <nl> return newOperator ; <nl> } <nl> <nl> + <nl> + <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - / / / @ brief a skiplist operator with all its linked sub information <nl> + / / / @ brief Destroys and frees any memory associated with a Skiplist operator <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> <nl> void TRI_FreeSLOperator ( TRI_sl_operator_t * slOperator ) { <nl> mmm a / VocBase / index . c <nl> ppp b / VocBase / index . c <nl> static void FillLookupSLOperator ( TRI_sl_operator_t * slOperator , TRI_doc_collecti <nl> switch ( slOperator - > _type ) { <nl> case TRI_SL_AND_OPERATOR : <nl> case TRI_SL_NOT_OPERATOR : <nl> - case TRI_SL_OR_OPERATOR : <nl> - { <nl> + case TRI_SL_OR_OPERATOR : { <nl> logicalOperator = ( TRI_sl_logical_operator_t * ) ( slOperator ) ; <nl> FillLookupSLOperator ( logicalOperator - > _left , collection ) ; <nl> FillLookupSLOperator ( logicalOperator - > _right , collection ) ; <nl> static void FillLookupSLOperator ( TRI_sl_operator_t * slOperator , TRI_doc_collecti <nl> case TRI_SL_GT_OPERATOR : <nl> case TRI_SL_NE_OPERATOR : <nl> case TRI_SL_LE_OPERATOR : <nl> - case TRI_SL_LT_OPERATOR : <nl> - { <nl> + case TRI_SL_LT_OPERATOR : { <nl> relationOperator = ( TRI_sl_relation_operator_t * ) ( slOperator ) ; <nl> relationOperator - > _numFields = relationOperator - > _parameters - > _value . _objects . _length ; <nl> - relationOperator - > _fields = TRI_Allocate ( sizeof ( TRI_shaped_json_t ) * relationOperator - > _numFields ) ; <nl> - / * TODO FIXME : memory allocation might fail * / <nl> relationOperator - > _collection = collection ; <nl> - <nl> - for ( j = 0 ; j < relationOperator - > _numFields ; + + j ) { <nl> - jsonObject = ( TRI_json_t * ) ( TRI_AtVector ( & ( relationOperator - > _parameters - > _value . 
_objects ) , j ) ) ; <nl> - shapedObject = TRI_ShapedJsonJson ( collection - > _shaper , jsonObject ) ; <nl> - if ( shapedObject ) { <nl> - relationOperator - > _fields [ j ] = * shapedObject ; / / shallow copy here is ok <nl> - TRI_Free ( shapedObject ) ; / / don ' t require storage anymore <nl> + relationOperator - > _fields = TRI_Allocate ( sizeof ( TRI_shaped_json_t ) * relationOperator - > _numFields ) ; <nl> + if ( relationOperator - > _fields ! = NULL ) { <nl> + for ( j = 0 ; j < relationOperator - > _numFields ; + + j ) { <nl> + jsonObject = ( TRI_json_t * ) ( TRI_AtVector ( & ( relationOperator - > _parameters - > _value . _objects ) , j ) ) ; <nl> + shapedObject = TRI_ShapedJsonJson ( collection - > _shaper , jsonObject ) ; <nl> + if ( shapedObject ) { <nl> + relationOperator - > _fields [ j ] = * shapedObject ; / / shallow copy here is ok <nl> + TRI_Free ( shapedObject ) ; / / don ' t require storage anymore <nl> + } <nl> } <nl> - } <nl> + } <nl> + else { <nl> + relationOperator - > _numFields = 0 ; <nl> + } <nl> break ; <nl> } <nl> } <nl> | merged with oreste | arangodb/arangodb | 88843a497208592eb530ceaa56353ec414faaf8c | 2012-04-27T12:39:42Z |
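The ArangoDB hunks above share one theme: GrowNodeHeight changes from void to bool so a failed TRI_Allocate propagates to the caller instead of being ignored, callers chain the results (growResult = ... && GrowNodeHeight(...)), and a freshly allocated node is freed again when its tower cannot be grown. The hunk also fixes the per-slot allocation size (sizeof(TRI_skiplist_nb_t) rather than sizeof(TRI_skiplist_node_t)) and only copies and frees the old column when one exists. Below is a minimal self-contained sketch of that pattern under hypothetical names (node_t, grow_node_height) rather than the TRI_* APIs:

#include <stdbool.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>

typedef struct node_s {
  struct node_s **column;  /* tower of next pointers, one per level */
  uint32_t col_length;     /* current tower height */
} node_t;

/* Grow the node's pointer tower to new_height. Returns false on
   allocation failure and leaves the node untouched in that case. */
static bool grow_node_height(node_t *node, uint32_t new_height) {
  struct node_s **old_column = node->column;
  struct node_s **new_column;
  uint32_t j;

  if (node->col_length >= new_height) {
    return true;  /* already tall enough */
  }

  /* per-slot element size, the bug the hunk fixes */
  new_column = malloc(sizeof(struct node_s *) * new_height);
  if (new_column == NULL) {
    return false;  /* caller decides whether to roll back */
  }

  if (old_column != NULL) {  /* a fresh node starts with column == NULL */
    memcpy(new_column, old_column, node->col_length * sizeof(struct node_s *));
    free(old_column);
  }
  for (j = node->col_length; j < new_height; ++j) {
    new_column[j] = NULL;  /* initialise the newly added levels */
  }

  node->column = new_column;
  node->col_length = new_height;
  return true;
}

Two caveats the diff itself records: when growing the start node succeeds but growing the end node fails, the code only returns (the "todo" comments admit the first growth should be rolled back), and the #ifndef USE_STATIC_SKIPLIST_COMPARE guard blocks as committed open an if with "{" but never close it before #endif.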
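The same diff threads a compile-time switch through every comparison site: with USE_STATIC_SKIPLIST_COMPARE defined, calls go to IndexStaticCompareKeyElement and friends; otherwise they go through the function pointers stored in the skiplist. The following is an illustrative reduction of that dispatch pattern, not ArangoDB's actual code; all names here are invented:

/* Illustrative only: selecting a statically known comparator at compile
   time versus dispatching through a function pointer at run time. */
typedef struct list_s {
  int (*compare)(const void *a, const void *b);  /* used in the dynamic build */
} list_t;

#ifdef USE_STATIC_COMPARE
/* Statically known: the compiler can inline it at each call site in the
   hot search/insert paths, avoiding one indirect call per visited node. */
static int static_compare(const void *a, const void *b) {
  int x = *(const int *)a;
  int y = *(const int *)b;
  return (x > y) - (x < y);
}
#endif

static int dispatch_compare(list_t *list, const void *a, const void *b) {
#ifdef USE_STATIC_COMPARE
  (void)list;                  /* pointer field stays unused in this build */
  return static_compare(a, b);
#else
  return list->compare(a, b);  /* dynamic dispatch through the struct */
#endif
}

This matches the SkiplistIndex_new and MultiSkiplistIndex_new hunks, which now pass NULL for the comparator callbacks and leave the old pointer-based registration commented out.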
mmm a / cocos / 2d / CCParticleSystemQuad . cpp <nl> ppp b / cocos / 2d / CCParticleSystemQuad . cpp <nl> void ParticleSystemQuad : : setTotalParticles ( int tp ) <nl> size_t indicesSize = sizeof ( _indices [ 0 ] ) * tp * 6 * 1 ; <nl> <nl> _particleData . release ( ) ; <nl> - if ( _particleData . init ( tp ) ) <nl> + if ( ! _particleData . init ( tp ) ) <nl> { <nl> CCLOG ( " Particle system : not enough memory " ) ; <nl> return ; <nl>
 | ParticleSystemQuad : Fixed incorrectly generated " Particle system : not enough memory " errors | cocos2d/cocos2d-x | 05c235f9fe00f46daeb5c259ccb76ca194878f18 | 2015-10-10T09:55:05Z |
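The single-character cocos2d-x fix above is easy to misread: with the original `if ( _particleData . init ( tp ) )`, the "not enough memory" message was logged and the function returned early exactly when initialization *succeeded*, while genuine failures fell through into the quad setup. A small self-contained sketch of the corrected guard; initParticleData and its failure rule are hypothetical, and only the negation mirrors the patch.

```cpp
#include <cstdio>

// Hypothetical stand-in for ParticleData::init(): returns true on success.
// (Here oversized requests "fail"; the real method returns false when an
// internal allocation fails.)
static bool initParticleData(int totalParticles) {
  return totalParticles <= 1'000'000;
}

void setTotalParticles(int tp) {
  // The fix: test the negated result, so the error branch runs only on
  // failure. The unnegated test logged the error and bailed out on success.
  if (!initParticleData(tp)) {
    std::printf("Particle system: not enough memory\n");
    return;
  }
  // ... continue building quads and indices for tp particles
}

int main() {
  setTotalParticles(500);        // succeeds silently
  setTotalParticles(2'000'000);  // prints the error and returns early
}
```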
mmm a / plugins / producer_api_plugin / producer_api_plugin . cpp <nl> ppp b / plugins / producer_api_plugin / producer_api_plugin . cpp <nl> void producer_api_plugin : : plugin_startup ( ) { <nl> INVOKE_V_V ( producer , resume ) , 201 ) , <nl> CALL ( producer , producer , paused , <nl> INVOKE_R_V ( producer , paused ) , 201 ) , <nl> + CALL ( producer , producer , get_runtime_options , <nl> + INVOKE_R_V ( producer , get_runtime_options ) , 201 ) , <nl> + CALL ( producer , producer , update_runtime_options , <nl> + INVOKE_V_R ( producer , update_runtime_options , producer_plugin : : runtime_options ) , 201 ) , <nl> } ) ; <nl> } <nl> <nl> mmm a / plugins / producer_plugin / include / eosio / producer_plugin / producer_plugin . hpp <nl> ppp b / plugins / producer_plugin / include / eosio / producer_plugin / producer_plugin . hpp <nl> class producer_plugin : public appbase : : plugin < producer_plugin > { <nl> public : <nl> APPBASE_PLUGIN_REQUIRES ( ( chain_plugin ) ) <nl> <nl> + struct runtime_options { <nl> + fc : : optional < int32_t > max_transaction_time ; <nl> + fc : : optional < int32_t > max_irreversible_block_age ; <nl> + } ; <nl> + <nl> producer_plugin ( ) ; <nl> virtual ~ producer_plugin ( ) ; <nl> <nl> class producer_plugin : public appbase : : plugin < producer_plugin > { <nl> void pause ( ) ; <nl> void resume ( ) ; <nl> bool paused ( ) const ; <nl> + void update_runtime_options ( const runtime_options & options ) ; <nl> + runtime_options get_runtime_options ( ) const ; <nl> <nl> signal < void ( const chain : : producer_confirmation & ) > confirmed_block ; <nl> private : <nl> std : : shared_ptr < class producer_plugin_impl > my ; <nl> } ; <nl> <nl> - } / / eosiio <nl> + } / / eosio <nl> + <nl> + FC_REFLECT ( eosio : : producer_plugin : : runtime_options , ( max_transaction_time ) ( max_irreversible_block_age ) ) ; <nl> mmm a / plugins / producer_plugin / producer_plugin . cpp <nl> ppp b / plugins / producer_plugin / producer_plugin . cpp <nl> class producer_plugin_impl : public std : : enable_shared_from_this < producer_plugin <nl> boost : : program_options : : variables_map _options ; <nl> bool _production_enabled = false ; <nl> bool _pause_production = false ; <nl> - uint32_t _required_producer_participation = uint32_t ( config : : required_producer_participation ) ; <nl> uint32_t _production_skip_flags = 0 ; / / eosio : : chain : : skip_nothing ; <nl> <nl> std : : map < chain : : public_key_type , chain : : private_key_type > _private_keys ; <nl> class producer_plugin_impl : public std : : enable_shared_from_this < producer_plugin <nl> <nl> auto deadline = fc : : time_point : : now ( ) + fc : : milliseconds ( _max_transaction_time_ms ) ; <nl> bool deadline_is_subjective = false ; <nl> - if ( _pending_block_mode = = pending_block_mode : : producing & & block_time < deadline ) { <nl> + if ( _max_transaction_time_ms < 0 | | ( _pending_block_mode = = pending_block_mode : : producing & & block_time < deadline ) ) { <nl> deadline_is_subjective = true ; <nl> deadline = block_time ; <nl> } <nl> class producer_plugin_impl : public std : : enable_shared_from_this < producer_plugin <nl> } <nl> <nl> bool production_disabled_by_policy ( ) { <nl> - return ! _production_enabled | | _pause_production | | get_irreversible_block_age ( ) > = _max_irreversible_block_age_us ; <nl> + return ! _production_enabled | | _pause_production | | ( _max_irreversible_block_age_us . 
count ( ) > = 0 & & get_irreversible_block_age ( ) > = _max_irreversible_block_age_us ) ; <nl> } <nl> <nl> enum class start_block_result { <nl> void producer_plugin : : set_program_options ( <nl> " Limits the maximum time ( in milliseconds ) that is allowed a pushed transaction ' s code to execute before being considered invalid " ) <nl> ( " max - irreversible - block - age " , bpo : : value < int32_t > ( ) - > default_value ( 30 * 60 ) , <nl> " Limits the maximum age ( in seconds ) of the DPOS Irreversible Block for a chain this node will produce blocks on " ) <nl> - ( " required - participation " , boost : : program_options : : value < uint32_t > ( ) <nl> - - > default_value ( uint32_t ( config : : required_producer_participation / config : : percent_1 ) ) <nl> - - > notifier ( [ this ] ( uint32_t e ) { <nl> - my - > _required_producer_participation = std : : min ( e , 100u ) * config : : percent_1 ; <nl> - } ) , <nl> - " Percent of producers ( 0 - 100 ) that must be participating in order to produce blocks " ) <nl> ( " producer - name , p " , boost : : program_options : : value < vector < string > > ( ) - > composing ( ) - > multitoken ( ) , <nl> " ID of producer controlled by this node ( e . g . inita ; may specify multiple times ) " ) <nl> ( " private - key " , boost : : program_options : : value < vector < string > > ( ) - > composing ( ) - > multitoken ( ) - > default_value ( { fc : : json : : to_string ( private_key_default ) } , <nl> bool producer_plugin : : paused ( ) const { <nl> return my - > _pause_production ; <nl> } <nl> <nl> + void producer_plugin : : update_runtime_options ( const runtime_options & options ) { <nl> + bool check_speculating = false ; <nl> + <nl> + if ( options . max_transaction_time ) { <nl> + my - > _max_transaction_time_ms = * options . max_transaction_time ; <nl> + } <nl> + <nl> + if ( options . max_irreversible_block_age ) { <nl> + my - > _max_irreversible_block_age_us = fc : : seconds ( * options . max_irreversible_block_age ) ; <nl> + check_speculating = true ; <nl> + } <nl> + <nl> + if ( check_speculating & & my - > _pending_block_mode = = pending_block_mode : : speculating ) { <nl> + chain : : controller & chain = app ( ) . get_plugin < chain_plugin > ( ) . chain ( ) ; <nl> + chain . abort_block ( ) ; <nl> + my - > schedule_production_loop ( ) ; <nl> + } <nl> + } <nl> + <nl> + producer_plugin : : runtime_options producer_plugin : : get_runtime_options ( ) const { <nl> + return { <nl> + my - > _max_transaction_time_ms , <nl> + my - > _max_irreversible_block_age_us . count ( ) < 0 ? - 1 : my - > _max_irreversible_block_age_us . count ( ) / 1 ' 000 ' 000 <nl> + } ; <nl> + } <nl> + <nl> + <nl> <nl> optional < fc : : time_point > producer_plugin_impl : : calculate_next_block_time ( const account_name & producer_name ) const { <nl> chain : : controller & chain = app ( ) . get_plugin < chain_plugin > ( ) . chain ( ) ; <nl> producer_plugin_impl : : start_block_result producer_plugin_impl : : start_block ( ) { <nl> } else if ( _pause_production ) { <nl> elog ( " Not producing block because production is explicitly paused " ) ; <nl> _pending_block_mode = pending_block_mode : : speculating ; <nl> + } else if ( irreversible_block_age > = _max_irreversible_block_age_us ) { <nl> + elog ( " Not producing block because the irreversible block is too old [ age : $ { age } s , max : $ { max } s ] " , ( " age " , irreversible_block_age . count ( ) / 1 ' 000 ' 000 ) ( " max " , _max_irreversible_block_age_us . 
count ( ) / 1 ' 000 ' 000 ) ) ; <nl> + _pending_block_mode = pending_block_mode : : speculating ; <nl> + } <nl> } <nl> 
 if ( _pending_block_mode = = pending_block_mode : : producing ) { <nl> producer_plugin_impl : : start_block_result producer_plugin_impl : : start_block ( ) { <nl> try { <nl> auto deadline = fc : : time_point : : now ( ) + fc : : milliseconds ( _max_transaction_time_ms ) ; <nl> bool deadline_is_subjective = false ; <nl> - if ( _pending_block_mode = = pending_block_mode : : producing & & block_time < deadline ) { <nl> + if ( _max_transaction_time_ms < 0 | | ( _pending_block_mode = = pending_block_mode : : producing & & block_time < deadline ) ) { <nl> deadline_is_subjective = true ; <nl> deadline = block_time ; <nl> } <nl> producer_plugin_impl : : start_block_result producer_plugin_impl : : start_block ( ) { <nl> try { <nl> auto deadline = fc : : time_point : : now ( ) + fc : : milliseconds ( _max_transaction_time_ms ) ; <nl> bool deadline_is_subjective = false ; <nl> - if ( _pending_block_mode = = pending_block_mode : : producing & & block_time < deadline ) { <nl> + if ( _max_transaction_time_ms < 0 | | ( _pending_block_mode = = pending_block_mode : : producing & & block_time < deadline ) ) { <nl> deadline_is_subjective = true ; <nl> deadline = block_time ; <nl> } <nl>
 | Expose runtime options in the producer api plugin | EOSIO/eos | c6540cf89568a49f91d109264515a39371aa0323 | 2018-05-29T15:14:01Z |
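The producer-plugin patch above threads two knobs through a new runtime_options struct whose fields are fc::optional, so an update only touches what the caller actually set, and a negative max_irreversible_block_age reads back as -1 (meaning "disabled"). Here is a self-contained C++17 model of those semantics, using std::optional in place of fc::optional; the field names mirror the diff, but ProducerModel and main() are illustrative scaffolding, not EOSIO code.

```cpp
#include <chrono>
#include <cstdint>
#include <iostream>
#include <optional>

// Models runtime_options from the diff, with std::optional for fc::optional.
struct RuntimeOptions {
  std::optional<int32_t> max_transaction_time;        // ms; negative = unlimited
  std::optional<int32_t> max_irreversible_block_age;  // s; negative = disabled
};

// Illustrative holder for the plugin's internal state.
struct ProducerModel {
  int32_t max_transaction_time_ms = 30;
  std::chrono::microseconds max_irreversible_block_age_us =
      std::chrono::minutes(30);

  // Partial update: unset optionals leave current values untouched,
  // exactly as in the patched update_runtime_options().
  void update_runtime_options(const RuntimeOptions& o) {
    if (o.max_transaction_time)
      max_transaction_time_ms = *o.max_transaction_time;
    if (o.max_irreversible_block_age)
      max_irreversible_block_age_us =
          std::chrono::seconds(*o.max_irreversible_block_age);
  }

  // Mirrors get_runtime_options(): a negative age limit reads back as -1.
  RuntimeOptions get_runtime_options() const {
    const auto us = max_irreversible_block_age_us.count();
    return {max_transaction_time_ms,
            static_cast<int32_t>(us < 0 ? -1 : us / 1'000'000)};
  }
};

int main() {
  ProducerModel p;
  p.update_runtime_options({50, std::nullopt});  // age limit preserved
  const RuntimeOptions cur = p.get_runtime_options();
  std::cout << *cur.max_transaction_time << ' '          // 50
            << *cur.max_irreversible_block_age << '\n';  // 1800
}
```

If the CALL macro follows the usual nodeos convention, the two methods also surface over HTTP at /v1/producer/get_runtime_options and /v1/producer/update_runtime_options; the diff registers them but does not spell out the URLs, so treat those paths as inferred rather than confirmed.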
mmm a / vnpy / app / cta_strategy / backtesting . py <nl> ppp b / vnpy / app / cta_strategy / backtesting . py <nl> def send_limit_order ( <nl> offset = offset , <nl> price = price , <nl> volume = volume , <nl> - status = Status . NOTTRADED , <nl> + status = Status . SUBMITTING , <nl> gateway_name = self . gateway_name , <nl> ) <nl> order . datetime = self . datetime <nl> | [ Fix ] closes | vnpy/vnpy | 5d4d5129a92628701f9ebec2e1b17a8ef35b5af7 | 2019-07-19T13:16:06Z |
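The one-line vnpy change above makes the backtester mimic a live gateway: a freshly sent limit order starts in SUBMITTING (sent but not yet acknowledged) rather than jumping straight to NOTTRADED (accepted and resting in the book). A small C++ sketch of that lifecycle; the Status names come from the diff and vnpy's order-status enum, while the Order scaffolding and the acknowledge step are hypothetical.

```cpp
#include <cassert>

// Status values mirror vnpy's Status enum (SUBMITTING, NOTTRADED, ...).
enum class Status { SUBMITTING, NOTTRADED, PARTTRADED, ALLTRADED, CANCELLED };

struct Order {
  double price = 0.0;
  double volume = 0.0;
  Status status = Status::SUBMITTING;  // the fix: born SUBMITTING, not NOTTRADED
};

// Hypothetical matching step: the engine acknowledges the order first;
// only then can it rest (NOTTRADED) or start filling.
void acknowledge(Order& order) {
  if (order.status == Status::SUBMITTING) order.status = Status::NOTTRADED;
}

int main() {
  Order o{10.5, 2.0};
  assert(o.status == Status::SUBMITTING);  // as created by send_limit_order()
  acknowledge(o);                          // next engine cycle
  assert(o.status == Status::NOTTRADED);
  return 0;
}
```

Starting in SUBMITTING matters because strategies often key logic off on_order status transitions; if the backtester skips that state, such logic is never exercised offline and behaves differently in live trading.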
mmm a / tensorflow / tensorboard / components / vz_projector / util . ts <nl> ppp b / tensorflow / tensorboard / components / vz_projector / util . ts <nl> export function getURLParams ( url : string ) : { [ key : string ] : string } { <nl> } <nl> <nl> const queryEntries = queryString . split ( ' & ' ) ; <nl> - let queryParams = { } ; <nl> + let queryParams : { [ key : string ] : string } = { } ; <nl> for ( let i = 0 ; i < queryEntries . length ; i + + ) { <nl> let queryEntryComponents = queryEntries [ i ] . split ( ' = ' ) ; <nl> queryParams [ queryEntryComponents [ 0 ] . toLowerCase ( ) ] = <nl> mmm a / tensorflow / tensorboard / dist / tf - tensorboard . html <nl> ppp b / tensorflow / tensorboard / dist / tf - tensorboard . html <nl> <nl> var Globals ; <nl> ( function ( Globals ) { <nl> / / The names of TensorBoard tabs . <nl> - Globals . TABS = [ ' scalars ' , ' images ' , ' audio ' , ' graphs ' , ' distributions ' , <nl> - ' histograms ' , ' embeddings ' ] ; <nl> + Globals . TABS = [ <nl> + ' scalars ' , ' images ' , ' audio ' , ' graphs ' , ' distributions ' , ' histograms ' , <nl> + ' embeddings ' <nl> + ] ; <nl> / / If true , TensorBoard stores its hash in the URI state . <nl> / / If false , tab switching in TensorBoard will not update location hash , <nl> / / because hash updates interfere with wct_tests . <nl> < h3 > [ [ name ] ] < / h3 > <nl> < template is = " dom - repeat " items = " [ [ namesMatchingRegex ] ] " > <nl> < div class = " run - row " > <nl> < div class = " icon - container checkbox - container vertical - align - container " > <nl> - < paper - checkbox class = " checkbox vertical - align - center " name = " [ [ item ] ] " checked $ = " [ [ _isChecked ( item , runToIsCheckedMapping . * ) ] ] " on - change = " _checkboxChange " > < / paper - checkbox > <nl> + < paper - checkbox class = " checkbox vertical - align - center " name = " [ [ item ] ] " checked $ = " [ [ _isChecked ( item , runsDisabled . * ) ] ] " on - change = " _checkboxChange " > < / paper - checkbox > <nl> <nl> < / div > <nl> < div class = " icon - container isolator - container vertical - align - container " > <nl> < h3 > [ [ name ] ] < / h3 > <nl> Polymer ( { <nl> is : " tf - multi - checkbox " , <nl> properties : { <nl> - names : Array , / / All the runs in consideration <nl> + names : { <nl> + type : Array , <nl> + value : function ( ) { return [ ] ; } , <nl> + } , / / All the runs in consideration <nl> regexInput : { <nl> type : String , <nl> value : TF . URIStorage . getStringInitializer ( " regexInput " , " " ) , <nl> < h3 > [ [ name ] ] < / h3 > <nl> type : Array , <nl> computed : " computeNamesMatchingRegex ( names . * , regex ) " <nl> } , / / Runs that match the regex <nl> - runToIsCheckedMapping : { <nl> + runsDisabled : { <nl> type : Object , <nl> - value : TF . URIStorage . getObjectInitializer ( ' runToIsCheckedMapping ' , { } ) , <nl> - } , / / run name - > Boolean ( if its enabled ) <nl> + value : TF . URIStorage . getObjectInitializer ( ' runsDisabled ' , { } ) , <nl> + } , / / Every run that is disabled is stored in the map ( with value true ) <nl> / / ( Allows state to persist across regex filtering ) <nl> outSelected : { <nl> type : Array , <nl> notify : true , <nl> - computed : ' computeOutSelected ( namesMatchingRegex . * , runToIsCheckedMapping . * ) ' <nl> + computed : ' computeOutSelected ( namesMatchingRegex . * , runsDisabled . 
* ) ' <nl> } , <nl> colorScale : { <nl> type : Object , <nl> < h3 > [ [ name ] ] < / h3 > <nl> ' dom - change ' : ' synchronizeColors ' , <nl> } , <nl> observers : [ <nl> - " _initializeRunToIsCheckedMapping ( names . * ) " , <nl> - " _setIsolatorIcon ( runToIsCheckedMapping ) " , <nl> - " _storeRunToIsCheckedMapping ( runToIsCheckedMapping ) " , <nl> + " _setIsolatorIcon ( runsDisabled , names ) " , <nl> + " _storeRunToIsCheckedMapping ( runsDisabled ) " , <nl> ] , <nl> - _storeRunToIsCheckedMapping : TF . URIStorage . getObjectObserver ( ' runToIsCheckedMapping ' , { } ) , <nl> + _storeRunToIsCheckedMapping : TF . URIStorage . getObjectObserver ( ' runsDisabled ' , { } ) , <nl> _makeRegex : function ( regex ) { <nl> try { <nl> return new RegExp ( regex ) <nl> < h3 > [ [ name ] ] < / h3 > <nl> } <nl> } , <nl> _setIsolatorIcon : function ( ) { <nl> - var runMap = this . runToIsCheckedMapping ; <nl> - var numChecked = _ . filter ( _ . values ( runMap ) ) . length ; <nl> + var runMap = this . runsDisabled ; <nl> + var numChecked = this . names . length - _ . filter ( _ . values ( runMap ) ) . length ; <nl> var buttons = Array . prototype . slice . call ( this . querySelectorAll ( " . isolator " ) ) ; <nl> <nl> buttons . forEach ( function ( b ) { <nl> - if ( numChecked = = = 1 & & runMap [ b . name ] ) { <nl> + if ( numChecked = = = 1 & & ! runMap [ b . name ] ) { <nl> b . icon = " radio - button - checked " ; <nl> } else { <nl> b . icon = " radio - button - unchecked " ; <nl> } <nl> } ) ; <nl> } , <nl> - _initializeRunToIsCheckedMapping : function ( change ) { <nl> - var runToIsCheckedMapping = _ . clone ( this . runToIsCheckedMapping ) ; <nl> - <nl> - this . names . forEach ( function ( n ) { <nl> - if ( runToIsCheckedMapping [ n ] = = null ) { <nl> - / / runs default to on <nl> - runToIsCheckedMapping [ n ] = true ; <nl> - } <nl> - } ) ; <nl> - this . runToIsCheckedMapping = runToIsCheckedMapping ; <nl> - } , <nl> computeNamesMatchingRegex : function ( __ , ___ ) { <nl> var regex = this . regex ; <nl> return this . names . filter ( function ( n ) { <nl> < h3 > [ [ name ] ] < / h3 > <nl> } ) ; <nl> } , <nl> computeOutSelected : function ( __ , ___ ) { <nl> - var runToIsCheckedMapping = this . runToIsCheckedMapping ; <nl> + var runsDisabled = this . runsDisabled ; <nl> return this . namesMatchingRegex . filter ( function ( n ) { <nl> - return runToIsCheckedMapping [ n ] ; <nl> + return ! runsDisabled [ n ] ; <nl> } ) ; <nl> } , <nl> synchronizeColors : function ( e ) { <nl> < h3 > [ [ name ] ] < / h3 > <nl> / / If user clicks on the label for one run , enable it and disable all other runs . <nl> <nl> var name = Polymer . dom ( e ) . localTarget . name ; <nl> - var _this = this ; <nl> - _ . keys ( this . runToIsCheckedMapping ) . forEach ( function ( k ) { <nl> - _this . runToIsCheckedMapping [ k ] = false ; <nl> - } ) ; <nl> - this . runToIsCheckedMapping [ name ] = true ; <nl> - / / we can ' t use notifyPath because the run names may have periods <nl> - this . runToIsCheckedMapping = _ . clone ( this . runToIsCheckedMapping ) ; <nl> + var newDisabled = { } ; <nl> + this . names . forEach ( function ( n ) { <nl> + newDisabled [ n ] = true ; <nl> + } ) <nl> + delete newDisabled [ name ] ; <nl> + this . runsDisabled = newDisabled ; <nl> } , <nl> _checkboxChange : function ( e ) { <nl> var target = Polymer . dom ( e ) . localTarget ; <nl> - var name = target . name ; <nl> - var checked = target . checked ; <nl> - this . runToIsCheckedMapping [ name ] = checked ; <nl> + if ( target . 
checked ) { <nl> + delete this . runsDisabled [ target . name ] ; <nl> + } else { <nl> + this . runsDisabled [ target . name ] = true ; <nl> + } <nl> / / n . b . notifyPath won ' t work because run names may have periods . <nl> - this . runToIsCheckedMapping = _ . clone ( this . runToIsCheckedMapping ) ; <nl> + this . runsDisabled = _ . clone ( this . runsDisabled ) ; <nl> } , <nl> _isChecked : function ( item , outSelectedChange ) { <nl> - return this . runToIsCheckedMapping [ item ] ; <nl> + return this . runsDisabled [ item ] = = undefined ; <nl> } , <nl> _initializeRuns : function ( change ) { <nl> this . outSelected = change . base . slice ( ) ; <nl> < h3 > [ [ name ] ] < / h3 > <nl> toggleAll : function ( ) { <nl> var _this = this ; <nl> var allOn = this . namesMatchingRegex <nl> - . filter ( function ( n ) { return ! _this . runToIsCheckedMapping [ n ] } ) <nl> + . filter ( function ( n ) { return _this . runsDisabled [ n ] } ) <nl> . length = = = 0 ; <nl> - this . namesMatchingRegex . forEach ( function ( n ) { _this . runToIsCheckedMapping [ n ] = ! allOn } ) ; <nl> - this . runToIsCheckedMapping = _ . clone ( this . runToIsCheckedMapping ) ; <nl> + let newRunsDisabled = { } <nl> + if ( allOn ) { <nl> + this . names . forEach ( function ( n ) { <nl> + newRunsDisabled [ n ] = true ; <nl> + } ) <nl> + } <nl> + this . runsDisabled = newRunsDisabled ; <nl> } , <nl> } ) ; <nl> < / script > <nl> < h3 id = " tooltip - help " class = " tooltip - container " > <nl> datum : null , <nl> dataset : null , <nl> } ; <nl> - var centerBBox = _this . gridlines . content ( ) . node ( ) . getBBox ( ) ; <nl> - var points = plot . datasets ( ) . map ( function ( dataset ) { return _this . findClosestPoint ( target , dataset ) ; } ) ; <nl> - var pointsToCircle = points . filter ( function ( p ) { return p ! = null & & <nl> - Plottable . Utils . DOM . intersectsBBox ( p . x , p . y , centerBBox ) ; } ) ; <nl> - var pts = pointsComponent . content ( ) . selectAll ( ' . point ' ) . data ( pointsToCircle , function ( p ) { return p . dataset . metadata ( ) . name ; } ) ; <nl> - if ( points . length ! = = 0 ) { <nl> - pts . enter ( ) . append ( ' circle ' ) . classed ( ' point ' , true ) ; <nl> - pts . attr ( ' r ' , VZ . ChartHelpers . TOOLTIP_CIRCLE_SIZE ) <nl> + var bbox = _this . gridlines . content ( ) . node ( ) . getBBox ( ) ; <nl> + / / pts is the closets point to the tooltip for each dataset <nl> + var pts = plot . datasets ( ) <nl> + . map ( function ( dataset ) { return _this . findClosestPoint ( target , dataset ) ; } ) <nl> + . filter ( function ( x ) { return x ! = null ; } ) ; <nl> + var intersectsBBox = Plottable . Utils . DOM . intersectsBBox ; <nl> + / / We draw tooltips for points that are NaN , or are currently visible <nl> + var ptsForTooltips = pts . filter ( function ( p ) { return intersectsBBox ( p . x , p . y , bbox ) | | isNaN ( p . datum . scalar ) ; } ) ; <nl> + / / Only draw little indicator circles for the non - NaN points <nl> + var ptsToCircle = ptsForTooltips . filter ( function ( p ) { return ! isNaN ( p . datum . scalar ) ; } ) ; <nl> + var ptsSelection = pointsComponent . content ( ) . selectAll ( ' . point ' ) . data ( ptsToCircle , function ( p ) { return p . dataset . metadata ( ) . name ; } ) ; <nl> + if ( pts . length ! = = 0 ) { <nl> + ptsSelection . enter ( ) . append ( ' circle ' ) . classed ( ' point ' , true ) ; <nl> + ptsSelection . attr ( ' r ' , VZ . ChartHelpers . TOOLTIP_CIRCLE_SIZE ) <nl> . attr ( ' cx ' , function ( p ) { return p . x ; } ) <nl> . 
attr ( ' cy ' , function ( p ) { return p . y ; } ) <nl> . style ( ' stroke ' , ' none ' ) <nl> . attr ( ' fill ' , function ( p ) { return _this . colorScale . scale ( p . dataset . metadata ( ) . name ) ; } ) ; <nl> - pts . exit ( ) . remove ( ) ; <nl> - _this . drawTooltips ( points , target ) ; <nl> + ptsSelection . exit ( ) . remove ( ) ; <nl> + _this . drawTooltips ( ptsForTooltips , target ) ; <nl> } <nl> else { <nl> hideTooltips ( ) ; <nl> < h3 id = " tooltip - help " class = " tooltip - container " > <nl> _ . sortBy ( points , function ( d ) { return valueSortMethod ( d . datum , - 1 , d . dataset ) ; } ) <nl> . reverse ( ) ; <nl> } <nl> + else if ( this . tooltipSortingMethod = = = ' nearest ' ) { <nl> + points = _ . sortBy ( points , dist ) ; <nl> + } <nl> else { <nl> / / The ' default ' sorting method maintains the order of names passed to <nl> / / setVisibleSeries ( ) . However we reverse that order when defining the <nl> < h3 id = " tooltip - help " class = " tooltip - container " > <nl> . append ( ' tr ' ) ; <nl> / / Grey out the point if any of the following are true : <nl> / / - The cursor is outside of the x - extent of the dataset <nl> - / / - The point is rendered above or below the screen <nl> / / - The point ' s y value is NaN <nl> rows . classed ( ' distant ' , function ( d ) { <nl> var firstPoint = d . dataset . data ( ) [ 0 ] ; <nl> < h3 id = " tooltip - help " class = " tooltip - container " > <nl> var firstX = _this . xScale . scale ( _this . xAccessor ( firstPoint , 0 , d . dataset ) ) ; <nl> var lastX = _this . xScale . scale ( _this . xAccessor ( lastPoint , 0 , d . dataset ) ) ; <nl> var s = _this . smoothingEnabled ? d . datum . smoothed : d . datum . scalar ; <nl> - var yD = _this . yScale . domain ( ) ; <nl> - return target . x < firstX | | target . x > lastX | | s < yD [ 0 ] | | <nl> - s > yD [ 1 ] | | isNaN ( s ) ; <nl> + return target . x < firstX | | target . x > lastX | | isNaN ( s ) ; <nl> } ) ; <nl> rows . classed ( ' closest ' , function ( p ) { return dist ( p ) = = = closestDist ; } ) ; <nl> / / It is a bit hacky that we are manually applying the width to the swatch <nl> < h3 id = " tooltip - help " class = " tooltip - container " > <nl> * - " default " - Sort the tooltip by input order . <nl> * - " ascending " - Sort the tooltip by ascending value . <nl> * - " descending " - Sort the tooltip by descending value . <nl> + * - " nearest " - Sort the tooltip by closest to cursor . <nl> * / <nl> tooltipSortingMethod : { <nl> type : String , <nl> < h3 id = " tooltip - help " class = " tooltip - container " > <nl> < paper - item > default < / paper - item > <nl> < paper - item > descending < / paper - item > <nl> < paper - item > ascending < / paper - item > <nl> + < paper - item > nearest < / paper - item > <nl> < / paper - menu > <nl> < / paper - dropdown - menu > <nl> < / div > <nl> < h3 id = " tooltip - help " class = " tooltip - container " > <nl> } <nl> <nl> img { <nl> + image - rendering : - moz - crisp - edges ; <nl> image - rendering : pixelated ; <nl> display : block ; <nl> width : 100 % ; <nl> < h3 id = " tooltip - help " class = " tooltip - container " > <nl> <nl> < dom - module id = " tf - image - dashboard " assetpath = " . . 
/ tf - image - dashboard / " > <nl> < template > <nl> + < paper - dialog with - backdrop = " " id = " actual - image - size - dialog " > < / paper - dialog > <nl> < div id = " plumbing " > <nl> < tf - color - scale id = " colorScale " runs = " [ [ runs ] ] " out - color - scale = " { { _colorScale } } " > < / tf - color - scale > <nl> < / div > <nl> < h3 id = " tooltip - help " class = " tooltip - container " > <nl> < tf - panes - helper categories = " [ [ _categories ] ] " color - scale = " [ [ _colorScale ] ] " data - type = " [ [ dataType ] ] " data - provider = " [ [ dataProvider ] ] " run2tag = " [ [ run2tag ] ] " selected - runs = " [ [ _selectedRuns ] ] " repeat - for - runs = " " > <nl> < template > <nl> < tf - image - loader color - scale = " [ [ _colorScale ] ] " > < / tf - image - loader > <nl> + < paper - icon - button class = " actual - size - button " icon = " aspect - ratio " on - tap = " _showActualSize " title = " Show the image at its true pixel size " > < / paper - icon - button > <nl> < / template > <nl> < / tf - panes - helper > <nl> < / div > <nl> < h3 id = " tooltip - help " class = " tooltip - container " > <nl> - - card - expanded - width : 700px ; <nl> - - card - expanded - height : auto ; <nl> } <nl> + <nl> + . actual - size - button { <nl> + background : # fff ; <nl> + border - radius : 100 % ; <nl> + bottom : - 35px ; <nl> + color : # 2196f3 ; <nl> + height : 32px ; <nl> + left : 35px ; <nl> + padding : 4px ; <nl> + pointer - events : auto ; <nl> + position : absolute ; <nl> + width : 32px ; <nl> + } <nl> + <nl> + . actual - size - button - selected { <nl> + background : var ( - - tb - ui - light - accent ) ; <nl> + } <nl> + <nl> + # actual - image - size - dialog { <nl> + overflow : auto ; <nl> + } <nl> < / style > <nl> < / template > <nl> < script > <nl> < h3 id = " tooltip - help " class = " tooltip - container " > <nl> this . fire ( " rendered " ) ; <nl> } ) ; <nl> } , <nl> + _showActualSize : function ( e ) { <nl> + var currentTarget = Polymer . dom ( e . currentTarget ) ; <nl> + var card = currentTarget . node . closest ( ' . card ' ) ; <nl> + <nl> + / / Create a full - size copy of the image . <nl> + var newImage = card . querySelector ( ' # img ' ) . cloneNode ( ) ; <nl> + newImage . style . height = ' auto ' ; <nl> + newImage . style . width = ' auto ' ; <nl> + newImage . style . margin = 0 ; <nl> + newImage . style . padding = 0 ; <nl> + newImage . classList . add ( " actual - size - image " ) ; <nl> + <nl> + / / When the user clicks on the image , empty and close the dialog . <nl> + var dialog = this . $ $ ( ' # actual - image - size - dialog ' ) ; <nl> + newImage . addEventListener ( ' click ' , function ( ) { <nl> + dialog . close ( ) ; <nl> + } ) ; <nl> + <nl> + / / Update dialog content . Show the dialog . <nl> + dialog . innerHTML = ' ' ; <nl> + dialog . appendChild ( newImage ) ; <nl> + dialog . open ( ) ; <nl> + } <nl> } ) ; <nl> < / script > <nl> < / dom - module > <nl> < h3 id = " tooltip - help " class = " tooltip - container " > <nl> * displayed . <nl> * / <nl> maxControlDegree : 4 , <nl> + / * * <nl> + * Maximum in ( for outbound bridge paths ) or out ( for inbound bridge paths ) <nl> + * degree of a node allowed for a bridge path to be rendered to it from a <nl> + * subhierarchy of nodes . Having a max prevents having too many nodes emanate <nl> + * from a subhierarchy and crowding up . 
<nl> + * / <nl> + maxBridgePathDegree : 4 , <nl> / * * <nl> * Types patterns for predefined out - extract nodes , which are <nl> * sink - like nodes that will be extracted from the main graph . <nl> < h3 id = " tooltip - help " class = " tooltip - container " > <nl> var _b = inbound ? <nl> [ renderNodeInfo . inAnnotations , childRenderInfo . inAnnotations ] : <nl> [ renderNodeInfo . outAnnotations , childRenderInfo . outAnnotations ] , childAnnotations = _b [ 1 ] ; <nl> - / / Do not render a bridge path to a node if the node is extracted into the <nl> - / / auxiliary graph for having a high degree . If we are not sure now , <nl> - / / default to not rendering a bridge path . <nl> - var isOtherHighDegree = true ; <nl> - if ( otherRenderInfo ) { <nl> - isOtherHighDegree = inbound ? otherRenderInfo . isOutExtract : <nl> - otherRenderInfo . isInExtract ; <nl> - } <nl> + / / Don ' t render a bridge path if the other node has in or out degree above <nl> + / / a threshold , lest bridge paths emanating out of a metagraph crowd up , <nl> + / / as was the case for the Fatcat LSTM lstm_1 > lstm_1 metagraph . <nl> + var otherDegreeCount = ( inbound ? otherCounts . out : otherCounts . in ) [ otherName ] ; <nl> + var isOtherHighDegree = otherDegreeCount > PARAMS . maxBridgePathDegree ; <nl> / / The adjoining render metaedge info from the parent ' s coreGraph , if any . <nl> / / It will either be a Metaedge involving this node directly , if it <nl> / / previously came from a metagraph , or it ' ll be a Metaedge involving <nl> < h2 > [ [ title ] ] < / h2 > <nl> } ; <nl> } <nl> <nl> + paper - button { <nl> + background : # e3e3e3 ; <nl> + margin - left : 0 ; <nl> + text - transform : none ; <nl> + } <nl> + <nl> paper - dropdown - menu paper - item { <nl> font - size : 13px ; <nl> } <nl> < h2 > [ [ title ] ] < / h2 > <nl> color : black ; <nl> font - weight : 500 ; <nl> } <nl> + <nl> + h4 { <nl> + margin : 36px 0 10px 0 ; <nl> + } <nl> + <nl> + . dismiss - dialog - note { <nl> + margin - top : 25px ; <nl> + font - size : 11px ; <nl> + text - align : right ; <nl> + } <nl> < / style > <nl> < / template > <nl> < / dom - module > <nl> < h2 > [ [ title ] ] < / h2 > <nl> top : 60px ; <nl> max - height : 50vh ; <nl> } <nl> - paper - button { <nl> - border : 1px solid # ccc ; <nl> - } <nl> <nl> # save - container { <nl> text - align : center ; <nl> < h2 > [ [ title ] ] < / h2 > <nl> BOOKMARKS ( [ [ savedStates . length ] ] ) <nl> < paper - icon - button icon = " help " class = " help - icon " > < / paper - icon - button > <nl> < paper - tooltip animation - delay = " 0 " position = " top " offset = " 0 " > <nl> - The bookmarks drawer allows you to bookmark a set of views into the <nl> - projection , saving camera position , selected points , as well as any <nl> - highlighting you may have . You can then download the file to save it <nl> - permanently , and later upload it to again view your bookmarks . <nl> + Open this drawer to save a set of views of the projection , including <nl> + selected points . A file containing the bookmarks can then be saved and <nl> + later loaded to view them . 
<nl> < / paper - tooltip > <nl> < / div > <nl> < div id = " icon - container " > <nl> < h2 > [ [ title ] ] < / h2 > <nl> < / template > <nl> <nl> < div id = " action - buttons - container " > <nl> - < paper - icon - button class = " upload - download - icon - button " icon = " file - download " title = " Download bookmarks " on - tap = " _downloadFile " > < / paper - icon - button > <nl> - < paper - icon - button class = " upload - download - icon - button " icon = " file - upload " title = " Upload bookmarks " on - tap = " _uploadFile " > < / paper - icon - button > <nl> + < paper - icon - button class = " upload - download - icon - button " icon = " save " title = " Save bookmarks " disabled = " [ [ ! hasStates ] ] " on - tap = " _downloadFile " > < / paper - icon - button > <nl> + < paper - icon - button class = " upload - download - icon - button " icon = " file - upload " title = " Load bookmarks " on - tap = " _uploadFile " > < / paper - icon - button > <nl> < paper - icon - button class = " add - icon - button ink - fab " icon = " add " title = " Add bookmark " on - tap = " _addBookmark " > < / paper - icon - button > <nl> < input type = " file " id = " state - file " name = " state - file " > <nl> < / div > <nl> < h2 > [ [ title ] ] < / h2 > <nl> < / template > <nl> <nl> < / template > <nl> - < / dom - module > < dom - module id = " vz - projector - data - panel " assetpath = " . . / vz - projector / " > <nl> + < / dom - module > < link rel = " import " href = " . . / paper - dialog - scrollable / paper - dialog - scrollable . html " > <nl> + < dom - module id = " vz - projector - data - panel " assetpath = " . . / vz - projector / " > <nl> < template > <nl> < style include = " vz - projector - styles " > < / style > <nl> < style > <nl> < h2 > [ [ title ] ] < / h2 > <nl> padding - left : 20px ; <nl> } <nl> <nl> - # upload - buttons { <nl> + # normalize - data - checkbox { <nl> + margin : 10px 0 ; <nl> + } <nl> + <nl> + # projector - config - template { <nl> + - - paper - input - container - input : { <nl> + font - family : monospace ; <nl> + font - size : 12px ; <nl> + } ; <nl> + } <nl> + <nl> + # generate - share - url { <nl> + padding : 16px ; <nl> + margin - left : 24px ; <nl> + } <nl> + <nl> + # projector - share - button - container { <nl> + margin : 10px 0 ; <nl> + } <nl> + <nl> + . config - checkbox { <nl> + display : inline - block ; <nl> + font - size : 11px ; <nl> + margin - left : 10px ; <nl> + } <nl> + <nl> + . projector - config - options { <nl> + margin - top : 24px ; <nl> + } <nl> + <nl> + . projector - config - dialog - container { <nl> + padding : 24px ; <nl> + } <nl> + <nl> + . code { <nl> + background - color : # f7f7f7 ; <nl> + display : table ; <nl> + font - family : monospace ; <nl> + margin - top : 7px ; <nl> + padding : 15px ; <nl> + } <nl> + <nl> + . delimiter { <nl> + color : # B71C1C ; <nl> + } <nl> + <nl> + . upload - step { <nl> display : flex ; <nl> justify - content : space - between ; <nl> } <nl> <nl> - # normalize - data - checkbox { <nl> - margin : 10px 0 ; <nl> + . upload - step paper - button { <nl> + margin - left : 30px ; <nl> } <nl> + <nl> < / style > <nl> < div class = " title " > DATA < / div > <nl> < div class = " container " > <nl> < h2 > [ [ title ] ] < / h2 > <nl> it unit norm . 
<nl> < / paper - tooltip > <nl> < / paper - checkbox > <nl> - <nl> - < div id = " upload - buttons " > <nl> - <nl> - < div > <nl> - < button id = " upload " title = " Upload a TSV file " class = " ink - button " > Upload data < / button > <nl> - < input type = " file " id = " file " name = " file " > <nl> - < / div > <nl> - <nl> - < div > <nl> - < button id = " upload - metadata " title = " Upload a TSV metadata file " class = " ink - button " > Upload Metadata < / button > <nl> - < input type = " file " id = " file - metadata " name = " file - metadata " > <nl> - < / div > <nl> + < div > <nl> + < span > <nl> + < paper - tooltip position = " bottom " animation - delay = " 0 " fit - to - visible - bounds = " " > <nl> + Upload data from your computer <nl> + < / paper - tooltip > <nl> + < paper - button id = " upload " title = " Upload data " class = " ink - button " onclick = " dataDialog . open ( ) " > Upload data < / paper - button > <nl> + < / span > <nl> + < span > <nl> + < paper - tooltip position = " bottom " animation - delay = " 0 " fit - to - visible - bounds = " " > <nl> + Publish your embedding visualization and data <nl> + < / paper - tooltip > <nl> + < paper - button id = " host - embedding " class = " ink - button " onclick = " projectorConfigDialog . open ( ) " > Publish < / paper - button > <nl> + < / span > <nl> < / div > <nl> - < div class = " dirs " > <nl> + < div class = " dirs " > <nl> < div class = " dir " > Checkpoint : < span id = " checkpoint - file " > < / span > < / div > <nl> < div class = " dir " > Metadata : < span id = " metadata - file " > < / span > < / div > <nl> < / div > <nl> + < div > <nl> + < paper - dialog id = " dataDialog " with - backdrop = " " > <nl> + < h2 > Upload data from your computer < / h2 > <nl> + < paper - dialog - scrollable > <nl> + < div class = " data - step " > <nl> + < div class = " upload - step " > <nl> + < div > < b > Step 1 : Upload a TSV file of vectors . < / b > < / div > <nl> + < div > <nl> + < paper - button id = " upload - tensors " title = " Upload a TSV tensor file " > Upload < / paper - button > <nl> + < input type = " file " id = " file " name = " file " > <nl> + < / div > <nl> + < / div > <nl> + Example of 3 vectors with dimension 4 : <nl> + < div class = " code " > <nl> + 0 . 1 < span class = " delimiter " > \ t < / span > 0 . 2 < span class = " delimiter " > \ t < / span > 0 . 5 < span class = " delimiter " > \ t < / span > 0 . 9 < br > <nl> + 0 . 2 < span class = " delimiter " > \ t < / span > 0 . 1 < span class = " delimiter " > \ t < / span > 5 . 0 < span class = " delimiter " > \ t < / span > 0 . 2 < br > <nl> + 0 . 4 < span class = " delimiter " > \ t < / span > 0 . 1 < span class = " delimiter " > \ t < / span > 7 . 0 < span class = " delimiter " > \ t < / span > 0 . 8 <nl> + < / div > <nl> + < p > <nl> + <nl> + < / p > <nl> + < / div > <nl> + < div style = " height : 60px ; " > < / div > <nl> + < div class = " data - step " > <nl> + < div class = " upload - step " > <nl> + < div > < b > Step 2 ( optional ) : Upload a TSV file of metadata . < / b > < / div > <nl> + < div > <nl> + < paper - button id = " upload - metadata " title = " Upload a TSV metadata file " class = " ink - button " > Upload < / paper - button > <nl> + < input type = " file " id = " file - metadata " name = " file - metadata " > <nl> + < / div > <nl> + < / div > <nl> + Example of 3 data points and 2 columns . < br > <nl> + Note that the first row is a header . 
<nl> + < div class = " code " > <nl> + < b > Pokémon < span class = " delimiter " > \ t < / span > Species < / b > < br > <nl> + Wartortle < span class = " delimiter " > \ t < / span > Turtle < br > <nl> + Venusaur < span class = " delimiter " > \ t < / span > Seed < br > <nl> + Charmeleon < span class = " delimiter " > \ t < / span > Flame <nl> + < / div > <nl> + < / div > <nl> + < / paper - dialog - scrollable > <nl> + < div class = " dismiss - dialog - note " > Click outside to dismiss . < / div > <nl> + < / paper - dialog > <nl> + < paper - dialog id = " projectorConfigDialog " with - backdrop = " " > <nl> + < h2 > Publish your embedding visualization and data < / h2 > <nl> + < paper - dialog - scrollable > <nl> + < div > <nl> + < p > <nl> + If you ' d like to share your visualization with the world , follow these simple steps . <nl> + See < a target = " _blank " href = " https : / / www . tensorflow . org / versions / master / how_tos / embedding_viz / index . md " > this tutorial < / a > for more . <nl> + < / p > <nl> + < h4 > Step 1 : Make your data public . < / h4 > <nl> + < p > <nl> + Host your tensors , metadata ( optional ) , sprite image ( optional ) , and bookmarks ( optional ) TSV files < i > publicly < / i > on the web . <nl> + < / p > <nl> + < p > <nl> + One option is using a < a target = " _blank " href = " https : / / gist . github . com / " > github gist < / a > . <nl> + < / p > <nl> + < / div > <nl> + < div > <nl> + < h4 > Step 2 : Fill out the projector config from the template below . < / h4 > <nl> + < div class = " projector - config - options " > <nl> + < i > Optional : < / i > <nl> + < div class = " config - checkbox " > <nl> + < paper - checkbox id = " config - metadata - checkbox " checked = " " > Metadata < / paper - checkbox > <nl> + < / div > <nl> + < div class = " config - checkbox " > <nl> + < paper - checkbox id = " config - sprite - checkbox " > Sprite < / paper - checkbox > <nl> + < / div > <nl> + < div class = " config - checkbox " > <nl> + < paper - checkbox id = " config - bookmarks - checkbox " > Bookmarks < / paper - checkbox > <nl> + < / div > <nl> + < / div > <nl> + < / div > <nl> + < paper - textarea id = " projector - config - template " label = " template_projector_config . json " > < / paper - textarea > <nl> + < div > <nl> + < h4 > Step 3 : Host the projector config and paste the URL below . < / h4 > <nl> + After you have hosted the projector config JSON file you build above , paste the URL to the config below . <nl> + < / div > <nl> + < paper - input id = " projector - config - url " label = " Path to projector config " > < / paper - input > <nl> + < paper - input id = " projector - share - url " label = " Your shareable URL " readonly = " " > < / paper - input > <nl> + < div id = " projector - share - button - container " > <nl> + < a target = " _blank " id = " projector - share - url - link " > <nl> + < paper - button title = " Test your shareable URL " class = " ink - button " > Test your shareable URL < / paper - button > <nl> + < / a > <nl> + < / div > <nl> + < / paper - dialog - scrollable > <nl> + < div class = " dismiss - dialog - note " > Click outside to dismiss . < / div > <nl> + < / paper - dialog > <nl> + < / div > <nl> < / div > <nl> <nl> < / template > <nl> < h2 > [ [ title ] ] < / h2 > <nl> <nl> . dropdown - item { <nl> justify - content : space - between ; <nl> + min - height : 35px ; <nl> } <nl> <nl> # z - container { <nl> < h2 > [ [ title ] ] < / h2 > <nl> height : 20px ; <nl> color : rgba ( 0 , 0 , 0 , 0 . 
7 ) ; <nl> } <nl> + <nl> + . item - details { <nl> + color : gray ; <nl> + font - size : 12px ; <nl> + margin - left : 5px ; <nl> + } <nl> + <nl> + . pca - dropdown { <nl> + width : 100 % ; <nl> + } <nl> + <nl> + . pca - dropdown paper - listbox { <nl> + width : 135px ; <nl> + } <nl> + <nl> + . dropdown - item . header { <nl> + border - bottom : 1px solid # aaa ; <nl> + color : # 333 ; <nl> + font - weight : bold ; <nl> + } <nl> + <nl> + # total - variance { <nl> + color : rgba ( 0 , 0 , 0 , 0 . 7 ) ; <nl> + } <nl> < / style > <nl> < div id = " main " > <nl> < div class = " ink - panel - header " > <nl> < h2 > [ [ title ] ] < / h2 > <nl> < div data - panel = " pca " class = " ink - panel - content " > <nl> < div class = " two - columns " > <nl> < div > <nl> - < paper - dropdown - menu style = " width : 100 % " vertical - align = " bottom " no - animations = " " label = " X " > <nl> + < paper - dropdown - menu class = " pca - dropdown " vertical - align = " bottom " no - animations = " " label = " X " > <nl> < paper - listbox attr - for - selected = " value " class = " dropdown - content " selected = " { { pcaX } } " > <nl> + < paper - item disabled = " " class = " dropdown - item header " > <nl> + < div > # < / div > <nl> + < div > Variance ( % ) < / div > <nl> + < / paper - item > <nl> < template is = " dom - repeat " items = " [ [ pcaComponents ] ] " > <nl> - < paper - item class = " dropdown - item " value = " [ [ item ] ] " label = " Component # [ [ _addOne ( item ) ] ] " > <nl> - Component # [ [ _addOne ( item ) ] ] <nl> + < paper - item class = " dropdown - item " value = " [ [ item . id ] ] " label = " Component # [ [ item . componentNumber ] ] " > <nl> + < div > [ [ item . componentNumber ] ] < / div > <nl> + < div class = " item - details " > [ [ item . percVariance ] ] < / div > <nl> < / paper - item > <nl> < / template > <nl> < / paper - listbox > <nl> < / paper - dropdown - menu > <nl> - < paper - dropdown - menu no - animations = " " vertical - align = " bottom " label = " Z " disabled = " [ [ ! hasPcaZ ] ] " id = " z - dropdown " > <nl> + < paper - dropdown - menu class = " pca - dropdown " no - animations = " " vertical - align = " bottom " label = " Z " disabled = " [ [ ! hasPcaZ ] ] " id = " z - dropdown " > <nl> < paper - listbox attr - for - selected = " value " class = " dropdown - content " selected = " { { pcaZ } } " > <nl> + < paper - item disabled = " " class = " dropdown - item header " > <nl> + < div > # < / div > <nl> + < div > Variance ( % ) < / div > <nl> + < / paper - item > <nl> < template is = " dom - repeat " items = " [ [ pcaComponents ] ] " > <nl> - < paper - item class = " dropdown - item " value = " [ [ item ] ] " label = " Component # [ [ _addOne ( item ) ] ] " > <nl> - Component # [ [ _addOne ( item ) ] ] <nl> + < paper - item class = " dropdown - item " value = " [ [ item . id ] ] " label = " Component # [ [ item . componentNumber ] ] " > <nl> + < div > [ [ item . componentNumber ] ] < / div > <nl> + < div class = " item - details " > [ [ item . 
percVariance ] ] < / div > <nl> < / paper - item > <nl> < / template > <nl> < / paper - listbox > <nl> < / paper - dropdown - menu > <nl> < / div > <nl> < div > <nl> - < paper - dropdown - menu style = " width : 100 % " vertical - align = " bottom " no - animations = " " label = " Y " > <nl> + < paper - dropdown - menu class = " pca - dropdown " vertical - align = " bottom " no - animations = " " label = " Y " > <nl> < paper - listbox attr - for - selected = " value " class = " dropdown - content " selected = " { { pcaY } } " > <nl> + < paper - item disabled = " " class = " dropdown - item header " > <nl> + < div > # < / div > <nl> + < div > Variance ( % ) < / div > <nl> + < / paper - item > <nl> < template is = " dom - repeat " items = " [ [ pcaComponents ] ] " > <nl> - < paper - item class = " dropdown - item " value = " [ [ item ] ] " label = " Component # [ [ _addOne ( item ) ] ] " > <nl> - Component # [ [ _addOne ( item ) ] ] <nl> + < paper - item class = " dropdown - item " value = " [ [ item . id ] ] " label = " Component # [ [ item . componentNumber ] ] " > <nl> + < div > [ [ item . componentNumber ] ] < / div > <nl> + < div class = " item - details " > [ [ item . percVariance ] ] < / div > <nl> < / paper - item > <nl> < / template > <nl> < / paper - listbox > <nl> < h2 > [ [ title ] ] < / h2 > <nl> PCA is approximate . <nl> < paper - icon - button icon = " help " class = " help - icon " > < / paper - icon - button > <nl> < / p > <nl> - < paper - tooltip for = " pca - sampling " position = " bottom " animation - delay = " 0 " fit - to - visible - bounds = " " > <nl> + < div id = " total - variance " > Total variance < / div > <nl> + < paper - tooltip for = " pca - sampling " position = " top " animation - delay = " 0 " fit - to - visible - bounds = " " > <nl> For fast results , the data was randomly projected down to [ [ getPcaSampledDim ( ) ] ] dimensions . <nl> < / paper - tooltip > <nl> < / div > <nl> < h2 > [ [ title ] ] < / h2 > <nl> < / dom - module > <nl> < link rel = " import " href = " . . / paper - listbox / paper - listbox . html " > <nl> < link rel = " import " href = " . . / iron - icons / image - icons . html " > <nl> + < link rel = " import " href = " . . / paper - toast / paper - toast . html " > <nl> + < link rel = " import " href = " . . / paper - styles / typography . html " > <nl> + <nl> < dom - module id = " vz - projector " assetpath = " . . / vz - projector / " > <nl> < template > <nl> < style include = " vz - projector - styles " > < / style > <nl> < h2 > [ [ title ] ] < / h2 > <nl> transition : opacity 0 . 3s ease - out , height 0 . 3s ease - out ; <nl> } <nl> <nl> - # warning - msg { <nl> - display : none ; <nl> - position : fixed ; <nl> - top : 10px ; <nl> - left : 10px ; <nl> - padding : 5px ; <nl> - font - weight : 500 ; <nl> - color : black ; <nl> - background - color : # ffb3b6 ; <nl> - border : 1px solid black ; <nl> - } <nl> - <nl> . brush . extent { <nl> stroke : # fff ; <nl> fill - opacity : . 125 ; <nl> < h2 > [ [ title ] ] < / h2 > <nl> padding : 20px ; <nl> } <nl> <nl> - # help3dDialog . note { <nl> - margin - top : 25px ; <nl> - font - size : 11px ; <nl> - text - align : right ; <nl> - } <nl> - <nl> # help3dDialog h3 { <nl> margin - top : 20px ; <nl> margin - bottom : 5px ; <nl> < h2 > [ [ title ] ] < / h2 > <nl> border - top : 1px solid rgba ( 0 , 0 , 0 , 0 . 
1 ) ; <nl> overflow - y : auto ; <nl> } <nl> + <nl> + # toast { <nl> + display : flex ; <nl> + align - items : center ; <nl> + - - paper - toast - color : # eeff41 ; <nl> + } <nl> < / style > <nl> < paper - dialog id = " wrapper - notify - msg " modal = " " > <nl> < div id = " notify - msgs " > < / div > <nl> < / paper - dialog > <nl> - < div id = " warning - msg " > < / div > <nl> < div id = " container " > <nl> < div id = " left - pane " class = " ink - panel " > <nl> < vz - projector - data - panel id = " data - panel " > < / vz - projector - data - panel > <nl> < h3 > 3D controls < / h3 > <nl> < h3 > 2D controls < / h3 > <nl> < b > Pan < / b > Mouse left click . < br > <nl> < b > Zoom < / b > Mouse wheel . <nl> - < div class = " note " > Click anywhere to dismiss . < / div > <nl> + < div class = " dismiss - dialog - note " > Click anywhere to dismiss . < / div > <nl> < / div > <nl> < / paper - dialog > <nl> < / div > <nl> < h3 > 2D controls < / h3 > <nl> < / div > <nl> < / div > <nl> < / div > <nl> + < paper - toast id = " toast " always - on - top = " " > < / paper - toast > <nl> <nl> < / template > <nl> < / dom - module > <nl> < h2 > Settings < / h2 > <nl> } ( ) ) ; <nl> exports . TSNE = TSNE ; <nl> <nl> - } , { " . / sptree " : 23 } ] , 2 : [ function ( require , module , exports ) { <nl> + } , { " . / sptree " : 21 } ] , 2 : [ function ( require , module , exports ) { <nl> / * Copyright 2016 The TensorFlow Authors . All Rights Reserved . <nl> <nl> Licensed under the Apache License , Version 2 . 0 ( the " License " ) ; <nl> < h2 > Settings < / h2 > <nl> var logging = require ( ' . / logging ' ) ; <nl> / * * Data provider that loads data from a demo folder . * / <nl> var DemoDataProvider = ( function ( ) { <nl> - function DemoDataProvider ( ) { <nl> - / * * List of demo datasets for showing the capabilities of the tool . * / <nl> - this . DEMO_CONFIG = { <nl> - embeddings : [ <nl> - { <nl> - tensorName : ' Word2Vec 5K ' , <nl> - tensorShape : [ 5000 , 200 ] , <nl> - tensorPath : ' word2vec_5000_200d_tensors . tsv ' , <nl> - metadataPath : ' word2vec_5000_200d_labels . tsv ' <nl> - } , <nl> - { <nl> - tensorName : ' Word2Vec 10K ' , <nl> - tensorShape : [ 10000 , 200 ] , <nl> - tensorPath : ' word2vec_10000_200d_tensors . tsv ' , <nl> - metadataPath : ' word2vec_10000_200d_labels . tsv ' <nl> - } , <nl> - { <nl> - tensorName : ' Word2Vec All ' , <nl> - tensorShape : [ 71291 , 200 ] , <nl> - tensorPath : ' word2vec_full_200d_tensors . tsv ' , <nl> - metadataPath : ' word2vec_full_200d_labels . tsv ' <nl> - } , <nl> - { <nl> - tensorName : ' SmartReply 5K ' , <nl> - tensorShape : [ 5000 , 256 ] , <nl> - tensorPath : ' smartreply_5000_256d_tensors . tsv ' , <nl> - metadataPath : ' smartreply_5000_256d_labels . tsv ' <nl> - } , <nl> - { <nl> - tensorName : ' SmartReply All ' , <nl> - tensorShape : [ 35860 , 256 ] , <nl> - tensorPath : ' smartreply_full_256d_tensors . tsv ' , <nl> - metadataPath : ' smartreply_full_256d_labels . tsv ' <nl> - } , <nl> - { <nl> - tensorName : ' Mnist with images 10K ' , <nl> - tensorShape : [ 10000 , 784 ] , <nl> - tensorPath : ' mnist_10k_784d_tensors . tsv ' , <nl> - metadataPath : ' mnist_10k_784d_labels . tsv ' , <nl> - sprite : { <nl> - imagePath : ' mnist_10k_sprite . png ' , <nl> - singleImageDim : [ 28 , 28 ] <nl> - } <nl> - } , <nl> - { <nl> - tensorName : ' Iris ' , <nl> - tensorShape : [ 150 , 4 ] , <nl> - tensorPath : ' iris_tensors . tsv ' , <nl> - metadataPath : ' iris_labels . 
tsv ' <nl> - } , <nl> - { <nl> - tensorName : ' Unit Cube ' , <nl> - tensorShape : [ 8 , 3 ] , <nl> - tensorPath : ' cube_tensors . tsv ' , <nl> - metadataPath : ' cube_metadata . tsv ' <nl> - } <nl> - ] , <nl> - modelCheckpointPath : ' Demo datasets ' <nl> - } ; <nl> - / * * Name of the folder where the demo datasets are stored . * / <nl> - this . DEMO_FOLDER = ' data ' ; <nl> + function DemoDataProvider ( projectorConfigPath ) { <nl> + this . projectorConfigPath = projectorConfigPath ; <nl> } <nl> DemoDataProvider . prototype . getEmbeddingInfo = function ( tensorName ) { <nl> - var embeddings = this . DEMO_CONFIG . embeddings ; <nl> + var embeddings = this . projectorConfig . embeddings ; <nl> for ( var i = 0 ; i < embeddings . length ; i + + ) { <nl> var embedding = embeddings [ i ] ; <nl> if ( embedding . tensorName = = = tensorName ) { <nl> < h2 > Settings < / h2 > <nl> callback ( [ ' Demo ' ] ) ; <nl> } ; <nl> DemoDataProvider . prototype . retrieveProjectorConfig = function ( run , callback ) { <nl> - callback ( this . DEMO_CONFIG ) ; <nl> + var _this = this ; <nl> + var msgId = logging . setModalMessage ( ' Fetching projector config . . . ' ) ; <nl> + d3 . json ( this . projectorConfigPath , function ( err , projectorConfig ) { <nl> + if ( err ) { <nl> + logging . setModalMessage ( ' Error : ' + err . responseText ) ; <nl> + return ; <nl> + } <nl> + logging . setModalMessage ( null , msgId ) ; <nl> + _this . projectorConfig = projectorConfig ; <nl> + callback ( projectorConfig ) ; <nl> + } ) ; <nl> } ; <nl> DemoDataProvider . prototype . getDefaultTensor = function ( run , callback ) { <nl> - callback ( ' SmartReply 5K ' ) ; <nl> + / / Return the first tensor as the default tensor . <nl> + callback ( this . projectorConfig . embeddings [ 0 ] . tensorName ) ; <nl> } ; <nl> DemoDataProvider . prototype . retrieveTensor = function ( run , tensorName , callback ) { <nl> var embedding = this . getEmbeddingInfo ( tensorName ) ; <nl> var separator = embedding . tensorPath . substr ( - 3 ) = = = ' tsv ' ? ' \ t ' : ' ' ; <nl> - var url = this . DEMO_FOLDER + " / " + embedding . tensorPath ; <nl> + var url = " " + embedding . tensorPath ; <nl> logging . setModalMessage ( ' Fetching tensors . . . ' , data_provider_1 . TENSORS_MSG_ID ) ; <nl> d3 . text ( url , function ( error , dataString ) { <nl> if ( error ) { <nl> < h2 > Settings < / h2 > <nl> } ; <nl> DemoDataProvider . prototype . retrieveSpriteAndMetadata = function ( run , tensorName , callback ) { <nl> var embedding = this . getEmbeddingInfo ( tensorName ) ; <nl> - var metadataPath = null ; <nl> - if ( embedding . metadataPath ) { <nl> - metadataPath = this . DEMO_FOLDER + " / " + embedding . metadataPath ; <nl> - } <nl> var spriteImagePath = null ; <nl> if ( embedding . sprite & & embedding . sprite . imagePath ) { <nl> - spriteImagePath = this . DEMO_FOLDER + " / " + embedding . sprite . imagePath ; <nl> + spriteImagePath = embedding . sprite . imagePath ; <nl> } <nl> - dataProvider . retrieveSpriteAndMetadataInfo ( metadataPath , spriteImagePath , embedding . sprite , callback ) ; <nl> + dataProvider . retrieveSpriteAndMetadataInfo ( embedding . metadataPath , spriteImagePath , embedding . sprite , callback ) ; <nl> } ; <nl> DemoDataProvider . prototype . getBookmarks = function ( run , tensorName , callback ) { <nl> - callback ( [ ] ) ; <nl> + var embedding = this . getEmbeddingInfo ( tensorName ) ; <nl> + var msgId = logging . setModalMessage ( ' Fetching bookmarks . . . ' ) ; <nl> + d3 . json ( embedding . 
bookmarksPath , function ( err , bookmarks ) { <nl> + if ( err ) { <nl> + logging . setModalMessage ( ' Error : ' + err . responseText ) ; <nl> + return ; <nl> + } <nl> + logging . setModalMessage ( null , msgId ) ; <nl> + callback ( bookmarks ) ; <nl> + } ) ; <nl> } ; <nl> return DemoDataProvider ; <nl> } ( ) ) ; <nl> exports . DemoDataProvider = DemoDataProvider ; <nl> <nl> - } , { " . / data " : 6 , " . / data - provider " : 5 , " . / logging " : 12 } ] , 3 : [ function ( require , module , exports ) { <nl> + } , { " . / data " : 6 , " . / data - provider " : 5 , " . / logging " : 11 } ] , 3 : [ function ( require , module , exports ) { <nl> / * Copyright 2016 The TensorFlow Authors . All Rights Reserved . <nl> <nl> Licensed under the Apache License , Version 2 . 0 ( the " License " ) ; <nl> < h2 > Settings < / h2 > <nl> throw ' The shape doesn \ ' t match the length of the flattened array ' ; <nl> } <nl> for ( var i = 0 ; i < n ; i + + ) { <nl> - var vector = [ ] ; <nl> var offset = i * d ; <nl> - for ( var j = 0 ; j < d ; j + + ) { <nl> - vector . push ( tensor [ offset + + ] ) ; <nl> - } <nl> points . push ( { <nl> - vector : vector , <nl> + vector : new Float32Array ( tensor . slice ( offset , offset + d ) ) , <nl> metadata : { } , <nl> projections : null , <nl> - projectedPoint : null , <nl> index : i <nl> } ) ; <nl> } <nl> < h2 > Settings < / h2 > <nl> var data_provider_1 = require ( ' . / data - provider ' ) ; <nl> var dataProvider = require ( ' . / data - provider ' ) ; <nl> var logging = require ( ' . / logging ' ) ; <nl> + / / Limit for the number of data points we receive from the server . <nl> + var LIMIT_NUM_POINTS = 100000 ; <nl> / * * <nl> * Data provider that loads data provided by a python server ( usually backed <nl> * by a checkpoint file ) . <nl> < h2 > Settings < / h2 > <nl> } ) ; <nl> } ; <nl> ServerDataProvider . prototype . retrieveTensor = function ( run , tensorName , callback ) { <nl> + var _this = this ; <nl> / / Get the tensor . <nl> logging . setModalMessage ( ' Fetching tensor values . . . ' , data_provider_1 . TENSORS_MSG_ID ) ; <nl> - d3 . text ( this . routePrefix + " / tensor ? run = " + run + " & name = " + tensorName , function ( err , tsv ) { <nl> - if ( err ) { <nl> - logging . setModalMessage ( ' Error : ' + err . responseText ) ; <nl> - return ; <nl> + var xhr = new XMLHttpRequest ( ) ; <nl> + xhr . open ( ' GET ' , ( this . routePrefix + " / tensor ? " ) + <nl> + ( " run = " + run + " & name = " + tensorName + " & num_rows = " + LIMIT_NUM_POINTS ) ) ; <nl> + xhr . responseType = ' arraybuffer ' ; <nl> + xhr . onprogress = function ( ev ) { <nl> + if ( ev . lengthComputable ) { <nl> + var percent = ( ev . loaded * 100 / ev . total ) . toFixed ( 1 ) ; <nl> + logging . setModalMessage ( ' Fetching tensor values : ' + percent + ' % ' , data_provider_1 . TENSORS_MSG_ID ) ; <nl> } <nl> - dataProvider . parseTensors ( tsv ) . then ( function ( dataPoints ) { <nl> - callback ( new data_1 . DataSet ( dataPoints ) ) ; <nl> + } ; <nl> + xhr . onload = function ( ) { <nl> + var data = new Float32Array ( xhr . response ) ; <nl> + _this . getEmbeddingInfo ( run , tensorName , function ( embedding ) { <nl> + if ( embedding . tensorShape [ 0 ] > LIMIT_NUM_POINTS ) { <nl> + logging . setWarningMessage ( ( " Showing the first " + LIMIT_NUM_POINTS . toLocaleString ( ) ) + <nl> + ( " of " + embedding . tensorShape [ 0 ] . toLocaleString ( ) + " data points " ) ) ; <nl> + } <nl> + var dim = embedding . tensorShape [ 1 ] ; <nl> + dataProvider . 
parseTensorsFromFloat32Array ( data , dim ) . then ( function ( dataPoints ) { <nl> + callback ( new data_1 . DataSet ( dataPoints ) ) ; <nl> + } ) ; <nl> } ) ; <nl> - } ) ; <nl> + } ; <nl> + xhr . onerror = function ( ) { <nl> + logging . setModalMessage ( ' Error : ' + xhr . responseText ) ; <nl> + } ; <nl> + xhr . send ( null ) ; <nl> } ; <nl> ServerDataProvider . prototype . retrieveSpriteAndMetadata = function ( run , tensorName , callback ) { <nl> var _this = this ; <nl> < h2 > Settings < / h2 > <nl> var metadataPath = null ; <nl> if ( embedding . metadataPath ) { <nl> metadataPath = <nl> - _this . routePrefix + " / metadata ? run = " + run + " & name = " + tensorName ; <nl> + ( _this . routePrefix + " / metadata ? " ) + <nl> + ( " run = " + run + " & name = " + tensorName + " & num_rows = " + LIMIT_NUM_POINTS ) ; <nl> } <nl> var spriteImagePath = null ; <nl> if ( embedding . sprite & & embedding . sprite . imagePath ) { <nl> < h2 > Settings < / h2 > <nl> } ( ) ) ; <nl> exports . ServerDataProvider = ServerDataProvider ; <nl> <nl> - } , { " . / data " : 6 , " . / data - provider " : 5 , " . / logging " : 12 } ] , 5 : [ function ( require , module , exports ) { <nl> + } , { " . / data " : 6 , " . / data - provider " : 5 , " . / logging " : 11 } ] , 5 : [ function ( require , module , exports ) { <nl> / * Copyright 2016 The TensorFlow Authors . All Rights Reserved . <nl> <nl> Licensed under the Apache License , Version 2 . 0 ( the " License " ) ; <nl> < h2 > Settings < / h2 > <nl> var util_1 = require ( ' . / util ' ) ; <nl> / * * Maximum number of colors supported in the color map . * / <nl> var NUM_COLORS_COLOR_MAP = 50 ; <nl> + var MAX_SPRITE_IMAGE_SIZE_PX = 8192 ; <nl> exports . METADATA_MSG_ID = ' metadata ' ; <nl> exports . TENSORS_MSG_ID = ' tensors ' ; <nl> function parseRawTensors ( content , callback ) { <nl> < h2 > Settings < / h2 > <nl> vector : null , <nl> index : data . length , <nl> projections : null , <nl> - projectedPoint : null <nl> } ; <nl> / / If the first label is not a number , take it as the label . <nl> if ( isNaN ( row [ 0 ] ) | | numDim = = = row . length - 1 ) { <nl> dataPoint . metadata [ ' label ' ] = row [ 0 ] ; <nl> - dataPoint . vector = row . slice ( 1 ) . map ( Number ) ; <nl> + dataPoint . vector = new Float32Array ( row . slice ( 1 ) . map ( Number ) ) ; <nl> } <nl> else { <nl> - dataPoint . vector = row . map ( Number ) ; <nl> + dataPoint . vector = new Float32Array ( row . map ( Number ) ) ; <nl> } <nl> data . push ( dataPoint ) ; <nl> if ( numDim = = null ) { <nl> < h2 > Settings < / h2 > <nl> } ) ; <nl> } <nl> exports . parseTensors = parseTensors ; <nl> + / * * Parses a tsv text file . * / <nl> + function parseTensorsFromFloat32Array ( data , dim ) { <nl> + return util_1 . runAsyncTask ( ' Parsing tensors . . . ' , function ( ) { <nl> + var N = data . length / dim ; <nl> + var dataPoints = [ ] ; <nl> + var offset = 0 ; <nl> + for ( var i = 0 ; i < N ; + + i ) { <nl> + dataPoints . push ( { <nl> + metadata : { } , <nl> + vector : data . subarray ( offset , offset + dim ) , <nl> + index : i , <nl> + projections : null , <nl> + } ) ; <nl> + offset + = dim ; <nl> + } <nl> + return dataPoints ; <nl> + } , exports . TENSORS_MSG_ID ) . then ( function ( dataPoints ) { <nl> + logging . setModalMessage ( null , exports . TENSORS_MSG_ID ) ; <nl> + return dataPoints ; <nl> + } ) ; <nl> + } <nl> + exports . 
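
parseTensorsFromFloat32Array above turns one flat float buffer into N data points whose vectors are subarray views over the same buffer, so no per-point copy is made (note that its doc comment still reads "Parses a tsv text file", a leftover from parseTensors). A dependency-free sketch of the core loop, without the runAsyncTask progress wrapper:

interface DataPoint {
  vector: Float32Array;
  index: number;
  metadata: {[key: string]: string | number};
}

function pointsFromPackedArray(data: Float32Array, dim: number): DataPoint[] {
  if (data.length % dim !== 0) {
    throw new Error('Packed array length is not a multiple of dim');
  }
  const n = data.length / dim;
  const points: DataPoint[] = [];
  for (let i = 0; i < n; ++i) {
    // subarray() shares the underlying buffer, avoiding a per-point copy.
    points.push({
      vector: data.subarray(i * dim, (i + 1) * dim),
      index: i,
      metadata: {},
    });
  }
  return points;
}
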
parseTensorsFromFloat32Array = parseTensorsFromFloat32Array ; <nl> function analyzeMetadata ( columnNames , pointsMetadata ) { <nl> var columnStats = columnNames . map ( function ( name ) { <nl> return { <nl> < h2 > Settings < / h2 > <nl> logging . setModalMessage ( null , spriteMsgId ) ; <nl> } <nl> var metadata = values [ 0 ] , spriteImage = values [ 1 ] ; <nl> - metadata . spriteImage = spriteImage ; <nl> - metadata . spriteMetadata = spriteMetadata ; <nl> - callback ( metadata ) ; <nl> + if ( spriteImage & & ( spriteImage . height > MAX_SPRITE_IMAGE_SIZE_PX | | <nl> + spriteImage . width > MAX_SPRITE_IMAGE_SIZE_PX ) ) { <nl> + logging . setModalMessage ( ( " Error : Sprite image of dimensions " + spriteImage . width + " px x " ) + <nl> + ( spriteImage . height + " px exceeds maximum dimensions " ) + <nl> + ( MAX_SPRITE_IMAGE_SIZE_PX + " px x " + MAX_SPRITE_IMAGE_SIZE_PX + " px " ) ) ; <nl> + } <nl> + else { <nl> + metadata . spriteImage = spriteImage ; <nl> + metadata . spriteMetadata = spriteMetadata ; <nl> + callback ( metadata ) ; <nl> + } <nl> } ) ; <nl> } <nl> exports . retrieveSpriteAndMetadataInfo = retrieveSpriteAndMetadataInfo ; <nl> <nl> - } , { " . / data " : 6 , " . / logging " : 12 , " . / util " : 24 } ] , 6 : [ function ( require , module , exports ) { <nl> + } , { " . / data " : 6 , " . / logging " : 11 , " . / util " : 22 } ] , 6 : [ function ( require , module , exports ) { <nl> / * Copyright 2016 The TensorFlow Authors . All Rights Reserved . <nl> <nl> Licensed under the Apache License , Version 2 . 0 ( the " License " ) ; <nl> < h2 > Settings < / h2 > <nl> " use strict " ; <nl> var bh_tsne_1 = require ( ' . / bh_tsne ' ) ; <nl> var knn = require ( ' . / knn ' ) ; <nl> - var util_1 = require ( ' . / util ' ) ; <nl> var logging = require ( ' . / logging ' ) ; <nl> + var util_1 = require ( ' . / util ' ) ; <nl> var vector = require ( ' . / vector ' ) ; <nl> / * * Checks to see if the browser supports webgl . * / <nl> function hasWebGLSupport ( ) { <nl> < h2 > Settings < / h2 > <nl> } <nl> return accessors ; <nl> } ; <nl> - DataSet . prototype . hasMeaningfulVisualization = function ( projection ) { <nl> + DataSet . prototype . projectionCanBeRendered = function ( projection ) { <nl> if ( projection ! = = ' tsne ' ) { <nl> return true ; <nl> } <nl> < h2 > Settings < / h2 > <nl> metadata : dp . metadata , <nl> index : dp . index , <nl> vector : dp . vector . slice ( ) , <nl> - projectedPoint : [ 0 , 0 , 0 ] , <nl> projections : { } <nl> } ; <nl> } ) ; <nl> < h2 > Settings < / h2 > <nl> vectors = vector . projectRandom ( vectors , exports . PCA_SAMPLE_DIM ) ; <nl> } <nl> var sigma = numeric . div ( numeric . dot ( numeric . transpose ( vectors ) , vectors ) , vectors . length ) ; <nl> - var U ; <nl> - U = numeric . svd ( sigma ) . U ; <nl> + var svd = numeric . svd ( sigma ) ; <nl> + var variances = svd . S ; <nl> + var totalVariance = 0 ; <nl> + for ( var i = 0 ; i < variances . length ; + + i ) { <nl> + totalVariance + = variances [ i ] ; <nl> + } <nl> + for ( var i = 0 ; i < variances . length ; + + i ) { <nl> + variances [ i ] / = totalVariance ; <nl> + } <nl> + _this . fracVariancesExplained = variances ; <nl> + var U = svd . U ; <nl> var pcaVectors = vectors . map ( function ( vector ) { <nl> var newV = [ ] ; <nl> for ( var d = 0 ; d < NUM_PCA_COMPONENTS ; d + + ) { <nl> < h2 > Settings < / h2 > <nl> } <nl> exports . stateGetAccessorDimensions = stateGetAccessorDimensions ; <nl> <nl> - } , { " . / bh_tsne " : 1 , " . / knn " : 10 , " . / logging " : 12 , " . 
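
The PCA path above now keeps the singular values from numeric.svd(sigma) and normalizes them into fracVariancesExplained. The bookkeeping is just each singular value divided by their sum, as in this sketch:

// Normalize singular values of the covariance matrix into fractions of
// total variance explained per principal component.
function fractionOfVarianceExplained(singularValues: number[]): number[] {
  const total = singularValues.reduce((sum, s) => sum + s, 0);
  return singularValues.map(s => s / total);
}

// e.g. fractionOfVarianceExplained([4, 3, 2, 1]) -> [0.4, 0.3, 0.2, 0.1]
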
/ util " : 24 , " . / vector " : 25 } ] , 7 : [ function ( require , module , exports ) { <nl> + } , { " . / bh_tsne " : 1 , " . / knn " : 9 , " . / logging " : 11 , " . / util " : 22 , " . / vector " : 23 } ] , 7 : [ function ( require , module , exports ) { <nl> <nl> } , { } ] , 8 : [ function ( require , module , exports ) { <nl> / * Copyright 2016 The TensorFlow Authors . All Rights Reserved . <nl> < h2 > Settings < / h2 > <nl> <nl> http : / / www . apache . org / licenses / LICENSE - 2 . 0 <nl> <nl> - Unless required by applicable law or agreed to in writing , software <nl> - distributed under the License is distributed on an " AS IS " BASIS , <nl> - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND , either express or implied . <nl> - See the License for the specific language governing permissions and <nl> - limitations under the License . <nl> - = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = * / <nl> - " use strict " ; <nl> - <nl> - } , { } ] , 10 : [ function ( require , module , exports ) { <nl> - / * Copyright 2016 The TensorFlow Authors . All Rights Reserved . <nl> - <nl> - Licensed under the Apache License , Version 2 . 0 ( the " License " ) ; <nl> - you may not use this file except in compliance with the License . <nl> - You may obtain a copy of the License at <nl> - <nl> - http : / / www . apache . org / licenses / LICENSE - 2 . 0 <nl> - <nl> Unless required by applicable law or agreed to in writing , software <nl> distributed under the License is distributed on an " AS IS " BASIS , <nl> WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND , either express or implied . <nl> < h2 > Settings < / h2 > <nl> } <nl> exports . findKNNofPoint = findKNNofPoint ; <nl> <nl> - } , { " . / heap " : 8 , " . / logging " : 12 , " . / util " : 24 , " . / vector " : 25 } ] , 11 : [ function ( require , module , exports ) { <nl> + } , { " . / heap " : 8 , " . / logging " : 11 , " . / util " : 22 , " . / vector " : 23 } ] , 10 : [ function ( require , module , exports ) { <nl> / * Copyright 2016 The TensorFlow Authors . All Rights Reserved . <nl> <nl> Licensed under the Apache License , Version 2 . 0 ( the " License " ) ; <nl> < h2 > Settings < / h2 > <nl> } ( ) ) ; <nl> exports . CollisionGrid = CollisionGrid ; <nl> <nl> - } , { } ] , 12 : [ function ( require , module , exports ) { <nl> + } , { } ] , 11 : [ function ( require , module , exports ) { <nl> / * Copyright 2016 The TensorFlow Authors . All Rights Reserved . <nl> <nl> Licensed under the Apache License , Version 2 . 0 ( the " License " ) ; <nl> < h2 > Settings < / h2 > <nl> = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = * / <nl> " use strict " ; <nl> / * * Duration in ms for showing warning messages to the user * / <nl> - var WARNING_DURATION_MS = 5000 ; <nl> + var WARNING_DURATION_MS = 10000 ; <nl> / * * <nl> * Animation duration for the user message which should be + 20ms more than the <nl> * ` transition ` css property in ` . notify - msg ` in ` vz - projector . html ` . <nl> < h2 > Settings < / h2 > <nl> * Shows a warning message to the user for a certain amount of time . <nl> * / <nl> function setWarningMessage ( msg ) { <nl> - var warningMsg = dom . querySelector ( ' # warning - msg ' ) ; <nl> - var warningDiv = d3 . select ( warningMsg ) ; <nl> - warningDiv . style ( ' display ' , ' block ' ) . 
text ( ' Warning : ' + msg ) ; <nl> - / / Hide the warning message after a certain timeout . <nl> - setTimeout ( function ( ) { <nl> - warningDiv . style ( ' display ' , ' none ' ) ; <nl> - } , WARNING_DURATION_MS ) ; <nl> + var toast = dom . querySelector ( ' # toast ' ) ; <nl> + toast . text = msg ; <nl> + toast . duration = WARNING_DURATION_MS ; <nl> + toast . open ( ) ; <nl> } <nl> exports . setWarningMessage = setWarningMessage ; <nl> <nl> + } , { } ] , 12 : [ function ( require , module , exports ) { <nl> + / * Copyright 2016 The TensorFlow Authors . All Rights Reserved . <nl> + <nl> + Licensed under the Apache License , Version 2 . 0 ( the " License " ) ; <nl> + you may not use this file except in compliance with the License . <nl> + You may obtain a copy of the License at <nl> + <nl> + http : / / www . apache . org / licenses / LICENSE - 2 . 0 <nl> + <nl> + Unless required by applicable law or agreed to in writing , software <nl> + distributed under the License is distributed on an " AS IS " BASIS , <nl> + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND , either express or implied . <nl> + See the License for the specific language governing permissions and <nl> + limitations under the License . <nl> + = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = * / <nl> + " use strict " ; <nl> + <nl> } , { } ] , 13 : [ function ( require , module , exports ) { <nl> / * Copyright 2016 The TensorFlow Authors . All Rights Reserved . <nl> <nl> < h2 > Settings < / h2 > <nl> var TRACE_END_HUE = 360 ; <nl> var TRACE_SATURATION = 1 ; <nl> var TRACE_LIGHTNESS = . 3 ; <nl> + var TRACE_DEFAULT_OPACITY = . 2 ; <nl> + var TRACE_DEFAULT_LINEWIDTH = 2 ; <nl> + var TRACE_SELECTED_OPACITY = . 9 ; <nl> + var TRACE_SELECTED_LINEWIDTH = 3 ; <nl> + var TRACE_DESELECTED_OPACITY = . 05 ; <nl> + var SCATTER_PLOT_CUBE_LENGTH = 2 ; <nl> / * * <nl> * Interprets projector events and assembes the arrays and commands necessary <nl> * to use the ScatterPlot to render the current projected data set . <nl> < h2 > Settings < / h2 > <nl> var ProjectorScatterPlotAdapter = ( function ( ) { <nl> function ProjectorScatterPlotAdapter ( ) { <nl> } <nl> + ProjectorScatterPlotAdapter . prototype . generatePointPositionArray = function ( ds , pointAccessors ) { <nl> + if ( ds = = null ) { <nl> + return new Float32Array ( 0 ) ; <nl> + } <nl> + var xScaler = d3 . scale . linear ( ) ; <nl> + var yScaler = d3 . scale . linear ( ) ; <nl> + var zScaler = null ; <nl> + { <nl> + / / Determine max and min of each axis of our data . <nl> + var xExtent = d3 . extent ( ds . points , function ( p , i ) { return pointAccessors [ 0 ] ( i ) ; } ) ; <nl> + var yExtent = d3 . extent ( ds . points , function ( p , i ) { return pointAccessors [ 1 ] ( i ) ; } ) ; <nl> + var range = [ - SCATTER_PLOT_CUBE_LENGTH / 2 , SCATTER_PLOT_CUBE_LENGTH / 2 ] ; <nl> + xScaler . domain ( xExtent ) . range ( range ) ; <nl> + yScaler . domain ( yExtent ) . range ( range ) ; <nl> + if ( pointAccessors [ 2 ] ! = null ) { <nl> + var zExtent = d3 . extent ( ds . points , function ( p , i ) { return pointAccessors [ 2 ] ( i ) ; } ) ; <nl> + zScaler = d3 . scale . linear ( ) ; <nl> + zScaler . domain ( zExtent ) . range ( range ) ; <nl> + } <nl> + } <nl> + var positions = new Float32Array ( ds . points . length * 3 ) ; <nl> + var dst = 0 ; <nl> + ds . points . 
forEach ( function ( d , i ) { <nl> + positions [ dst + + ] = xScaler ( pointAccessors [ 0 ] ( i ) ) ; <nl> + positions [ dst + + ] = yScaler ( pointAccessors [ 1 ] ( i ) ) ; <nl> + positions [ dst + + ] = 0 . 0 ; <nl> + } ) ; <nl> + if ( zScaler ) { <nl> + dst = 2 ; <nl> + ds . points . forEach ( function ( d , i ) { <nl> + positions [ dst ] = zScaler ( pointAccessors [ 2 ] ( i ) ) ; <nl> + dst + = 3 ; <nl> + } ) ; <nl> + } <nl> + return positions ; <nl> + } ; <nl> ProjectorScatterPlotAdapter . prototype . generateVisibleLabelRenderParams = function ( ds , selectedPointIndices , neighborsOfFirstPoint , hoverPointIndex ) { <nl> if ( ds = = null ) { <nl> return null ; <nl> < h2 > Settings < / h2 > <nl> } <nl> return traceColorArrayMap ; <nl> } ; <nl> + ProjectorScatterPlotAdapter . prototype . generateLineSegmentOpacityArray = function ( ds , selectedPoints ) { <nl> + if ( ds = = null ) { <nl> + return new Float32Array ( 0 ) ; <nl> + } <nl> + var opacities = new Float32Array ( ds . traces . length ) ; <nl> + if ( selectedPoints . length > 0 ) { <nl> + opacities . fill ( TRACE_DESELECTED_OPACITY ) ; <nl> + var i = ds . points [ selectedPoints [ 0 ] ] . traceIndex ; <nl> + opacities [ i ] = TRACE_SELECTED_OPACITY ; <nl> + } <nl> + else { <nl> + opacities . fill ( TRACE_DEFAULT_OPACITY ) ; <nl> + } <nl> + return opacities ; <nl> + } ; <nl> + ProjectorScatterPlotAdapter . prototype . generateLineSegmentWidthArray = function ( ds , selectedPoints ) { <nl> + if ( ds = = null ) { <nl> + return new Float32Array ( 0 ) ; <nl> + } <nl> + var widths = new Float32Array ( ds . traces . length ) ; <nl> + widths . fill ( TRACE_DEFAULT_LINEWIDTH ) ; <nl> + if ( selectedPoints . length > 0 ) { <nl> + var i = ds . points [ selectedPoints [ 0 ] ] . traceIndex ; <nl> + widths [ i ] = TRACE_SELECTED_LINEWIDTH ; <nl> + } <nl> + return widths ; <nl> + } ; <nl> ProjectorScatterPlotAdapter . prototype . getDefaultPointInTraceColor = function ( index , totalPoints ) { <nl> var hue = TRACE_START_HUE + <nl> ( TRACE_END_HUE - TRACE_START_HUE ) * index / totalPoints ; <nl> < h2 > Settings < / h2 > <nl> return LabelRenderParams ; <nl> } ( ) ) ; <nl> exports . LabelRenderParams = LabelRenderParams ; <nl> + / * * Details about the camera projection being used to render the scene . * / <nl> + ( function ( CameraType ) { <nl> + CameraType [ CameraType [ " Perspective " ] = 0 ] = " Perspective " ; <nl> + CameraType [ CameraType [ " Orthographic " ] = 1 ] = " Orthographic " ; <nl> + } ) ( exports . CameraType | | ( exports . CameraType = { } ) ) ; <nl> + var CameraType = exports . CameraType ; <nl> / * * <nl> * RenderContext contains all of the state required to color and render the data <nl> * set . ScatterPlot passes this to every attached visualizer as part of the <nl> * render callback . <nl> * TODO ( nicholsonc ) : This should only contain the data that ' s changed between <nl> - * each frame . Data like colors / scale factors / labels should be recomputed <nl> + * each frame . Data like colors / scale factors / labels should be reapplied <nl> * only when they change . 
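
generatePointPositionArray above maps each axis extent linearly onto [-SCATTER_PLOT_CUBE_LENGTH/2, +SCATTER_PLOT_CUBE_LENGTH/2] and packs xyz triples into one Float32Array, filling z in a second pass only when a third accessor exists. A dependency-free sketch of the same normalization; the hand-rolled affine map stands in for d3.scale.linear, and the zero-extent guard is an added assumption:

function extentOf(values: number[]): [number, number] {
  let lo = Infinity, hi = -Infinity;
  for (const v of values) {
    if (v < lo) lo = v;
    if (v > hi) hi = v;
  }
  return [lo, hi];
}

function linearMap(domain: [number, number], range: [number, number]) {
  // Guard against a degenerate (zero-width) domain; d3 handles this case
  // internally, so the `|| 1` is an assumption of this sketch.
  const scale = (range[1] - range[0]) / (domain[1] - domain[0] || 1);
  return (x: number) => range[0] + (x - domain[0]) * scale;
}

function packPositions(xs: number[], ys: number[], cubeLength = 2): Float32Array {
  const range: [number, number] = [-cubeLength / 2, cubeLength / 2];
  const xMap = linearMap(extentOf(xs), range);
  const yMap = linearMap(extentOf(ys), range);
  const out = new Float32Array(xs.length * 3);
  let dst = 0;
  for (let i = 0; i < xs.length; ++i) {
    out[dst++] = xMap(xs[i]);
    out[dst++] = yMap(ys[i]);
    out[dst++] = 0; // z stays 0 for 2D projections; a second pass fills it in 3D.
  }
  return out;
}
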
<nl> * / <nl> var RenderContext = ( function ( ) { <nl> - function RenderContext ( camera , cameraTarget , screenWidth , screenHeight , nearestCameraSpacePointZ , farthestCameraSpacePointZ , pointColors , pointScaleFactors , labelAccessor , labels , traceColors ) { <nl> + function RenderContext ( camera , cameraType , cameraTarget , screenWidth , screenHeight , nearestCameraSpacePointZ , farthestCameraSpacePointZ , backgroundColor , pointColors , pointScaleFactors , labelAccessor , labels , traceColors , traceOpacities , traceWidths ) { <nl> this . camera = camera ; <nl> + this . cameraType = cameraType ; <nl> this . cameraTarget = cameraTarget ; <nl> this . screenWidth = screenWidth ; <nl> this . screenHeight = screenHeight ; <nl> this . nearestCameraSpacePointZ = nearestCameraSpacePointZ ; <nl> this . farthestCameraSpacePointZ = farthestCameraSpacePointZ ; <nl> + this . backgroundColor = backgroundColor ; <nl> this . pointColors = pointColors ; <nl> this . pointScaleFactors = pointScaleFactors ; <nl> this . labelAccessor = labelAccessor ; <nl> this . labels = labels ; <nl> this . traceColors = traceColors ; <nl> + this . traceOpacities = traceOpacities ; <nl> + this . traceWidths = traceWidths ; <nl> } <nl> return RenderContext ; <nl> } ( ) ) ; <nl> < h2 > Settings < / h2 > <nl> = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = * / <nl> " use strict " ; <nl> var renderContext_1 = require ( ' . / renderContext ' ) ; <nl> - var scatterPlotVisualizerAxes_1 = require ( ' . / scatterPlotVisualizerAxes ' ) ; <nl> - var util_1 = require ( ' . / util ' ) ; <nl> + var util = require ( ' . / util ' ) ; <nl> var vector_1 = require ( ' . / vector ' ) ; <nl> var BACKGROUND_COLOR = 0xffffff ; <nl> / * * <nl> < h2 > Settings < / h2 > <nl> var CTRL_KEY = 17 ; <nl> var START_CAMERA_POS_3D = new THREE . Vector3 ( 0 . 45 , 0 . 9 , 1 . 6 ) ; <nl> var START_CAMERA_TARGET_3D = new THREE . Vector3 ( 0 , 0 , 0 ) ; <nl> - var START_CAMERA_POS_2D = new THREE . Vector3 ( 0 , 0 , 1 ) ; <nl> + var START_CAMERA_POS_2D = new THREE . Vector3 ( 0 , 0 , 4 ) ; <nl> var START_CAMERA_TARGET_2D = new THREE . Vector3 ( 0 , 0 , 0 ) ; <nl> var ORBIT_MOUSE_ROTATION_SPEED = 1 ; <nl> var ORBIT_ANIMATION_ROTATION_CYCLE_IN_SECONDS = 7 ; <nl> < h2 > Settings < / h2 > <nl> * array of visualizers and dispatches application events to them . <nl> * / <nl> var ScatterPlot = ( function ( ) { <nl> - function ScatterPlot ( container , labelAccessor , selectionContext , hoverContext ) { <nl> + function ScatterPlot ( container , labelAccessor , projectorEventContext ) { <nl> this . visualizers = [ ] ; <nl> this . onCameraMoveListeners = [ ] ; <nl> this . backgroundColor = BACKGROUND_COLOR ; <nl> < h2 > Settings < / h2 > <nl> this . mouseIsDown = false ; <nl> this . isDragSequence = false ; <nl> this . containerNode = container . node ( ) ; <nl> - this . selectionContext = selectionContext ; <nl> - this . hoverContext = hoverContext ; <nl> + this . projectorEventContext = projectorEventContext ; <nl> this . getLayoutValues ( ) ; <nl> this . labelAccessor = labelAccessor ; <nl> - this . xScale = d3 . scale . linear ( ) ; <nl> - this . yScale = d3 . scale . linear ( ) ; <nl> - this . zScale = d3 . scale . linear ( ) ; <nl> this . scene = new THREE . Scene ( ) ; <nl> this . renderer = <nl> new THREE . WebGLRenderer ( { alpha : true , premultipliedAlpha : false } ) ; <nl> < h2 > Settings < / h2 > <nl> this . setDimensions ( 3 ) ; <nl> this . 
recreateCamera ( this . makeDefaultCameraDef ( this . dimensionality ) ) ; <nl> this . renderer . render ( this . scene , this . camera ) ; <nl> - this . addAxesToScene ( ) ; <nl> this . addInteractionListeners ( ) ; <nl> } <nl> ScatterPlot . prototype . addInteractionListeners = function ( ) { <nl> < h2 > Settings < / h2 > <nl> / / controls ( e . g . on mouse up , after dragging ) . <nl> cameraControls . addEventListener ( ' end ' , function ( ) { } ) ; <nl> } ; <nl> + ScatterPlot . prototype . makeOrbitControls = function ( camera , cameraDef , cameraIs3D ) { <nl> + if ( this . orbitCameraControls ! = null ) { <nl> + this . orbitCameraControls . dispose ( ) ; <nl> + } <nl> + var occ = new THREE . OrbitControls ( camera , this . renderer . domElement ) ; <nl> + occ . target0 = new THREE . Vector3 ( cameraDef . target [ 0 ] , cameraDef . target [ 1 ] , cameraDef . target [ 2 ] ) ; <nl> + occ . position0 = new THREE . Vector3 ( ) . copy ( camera . position ) ; <nl> + occ . zoom0 = cameraDef . zoom ; <nl> + occ . enableRotate = cameraIs3D ; <nl> + occ . autoRotate = false ; <nl> + occ . rotateSpeed = ORBIT_MOUSE_ROTATION_SPEED ; <nl> + if ( cameraIs3D ) { <nl> + occ . mouseButtons . ORBIT = THREE . MOUSE . LEFT ; <nl> + occ . mouseButtons . PAN = THREE . MOUSE . RIGHT ; <nl> + } <nl> + else { <nl> + occ . mouseButtons . ORBIT = null ; <nl> + occ . mouseButtons . PAN = THREE . MOUSE . LEFT ; <nl> + } <nl> + occ . reset ( ) ; <nl> + this . camera = camera ; <nl> + this . orbitCameraControls = occ ; <nl> + this . addCameraControlsEventListeners ( this . orbitCameraControls ) ; <nl> + } ; <nl> ScatterPlot . prototype . makeCamera3D = function ( cameraDef , w , h ) { <nl> var camera ; <nl> { <nl> < h2 > Settings < / h2 > <nl> var at = new THREE . Vector3 ( cameraDef . target [ 0 ] , cameraDef . target [ 1 ] , cameraDef . target [ 2 ] ) ; <nl> camera . lookAt ( at ) ; <nl> camera . zoom = cameraDef . zoom ; <nl> - } <nl> - var occ = new THREE . OrbitControls ( camera , this . renderer . domElement ) ; <nl> - occ . enableRotate = true ; <nl> - occ . rotateSpeed = ORBIT_MOUSE_ROTATION_SPEED ; <nl> - occ . mouseButtons . ORBIT = THREE . MOUSE . LEFT ; <nl> - occ . mouseButtons . PAN = THREE . MOUSE . RIGHT ; <nl> - if ( this . orbitCameraControls ! = null ) { <nl> - this . orbitCameraControls . dispose ( ) ; <nl> + camera . updateProjectionMatrix ( ) ; <nl> } <nl> this . camera = camera ; <nl> - this . orbitCameraControls = occ ; <nl> - this . addCameraControlsEventListeners ( this . orbitCameraControls ) ; <nl> + this . makeOrbitControls ( camera , cameraDef , true ) ; <nl> } ; <nl> ScatterPlot . prototype . makeCamera2D = function ( cameraDef , w , h ) { <nl> var camera ; <nl> < h2 > Settings < / h2 > <nl> camera . up = new THREE . Vector3 ( 0 , 1 , 0 ) ; <nl> camera . lookAt ( target ) ; <nl> camera . zoom = cameraDef . zoom ; <nl> - } <nl> - var occ = new THREE . OrbitControls ( camera , this . renderer . domElement ) ; <nl> - occ . target = target ; <nl> - occ . enableRotate = false ; <nl> - occ . enableDamping = false ; <nl> - occ . autoRotate = false ; <nl> - occ . mouseButtons . ORBIT = null ; <nl> - occ . mouseButtons . PAN = THREE . MOUSE . LEFT ; <nl> - if ( this . orbitCameraControls ! = null ) { <nl> - this . orbitCameraControls . dispose ( ) ; <nl> + camera . updateProjectionMatrix ( ) ; <nl> } <nl> this . camera = camera ; <nl> - this . orbitCameraControls = occ ; <nl> - this . addCameraControlsEventListeners ( occ ) ; <nl> + this . 
makeOrbitControls ( camera , cameraDef , false ) ; <nl> } ; <nl> ScatterPlot . prototype . makeDefaultCameraDef = function ( dimensionality ) { <nl> var def = new CameraDef ( ) ; <nl> < h2 > Settings < / h2 > <nl> } <nl> / / Only call event handlers if the click originated from the scatter plot . <nl> if ( ! this . isDragSequence & & notify ) { <nl> - var selection = this . nearestPoint ? [ this . nearestPoint ] : [ ] ; <nl> - this . selectionContext . notifySelectionChanged ( selection ) ; <nl> + var selection = ( this . nearestPoint ! = null ) ? [ this . nearestPoint ] : [ ] ; <nl> + this . projectorEventContext . notifySelectionChanged ( selection ) ; <nl> } <nl> this . isDragSequence = false ; <nl> this . render ( ) ; <nl> < h2 > Settings < / h2 > <nl> } <nl> else if ( ! this . mouseIsDown ) { <nl> this . setNearestPointToMouse ( e ) ; <nl> - this . hoverContext . notifyHoverOverPoint ( this . nearestPoint ) ; <nl> + this . projectorEventContext . notifyHoverOverPoint ( this . nearestPoint ) ; <nl> } <nl> } ; <nl> / * * For using ctrl + left click as right click , and for circle select * / <nl> < h2 > Settings < / h2 > <nl> } ; <nl> / * * Returns the squared distance to the mouse for the i - th point . * / <nl> ScatterPlot . prototype . getDist2ToMouse = function ( i , e ) { <nl> - var point = util_1 . getProjectedPointFromIndex ( this . dataSet , i ) ; <nl> - var screenCoords = util_1 . vector3DToScreenCoords ( this . camera , this . width , this . height , point ) ; <nl> + var p = util . vector3FromPackedArray ( this . worldSpacePointPositions , i ) ; <nl> + var screenCoords = util . vector3DToScreenCoords ( this . camera , this . width , this . height , p ) ; <nl> var dpr = window . devicePixelRatio | | 1 ; <nl> return vector_1 . dist_2D ( [ e . offsetX * dpr , e . offsetY * dpr ] , [ screenCoords [ 0 ] , screenCoords [ 1 ] ] ) ; <nl> } ; <nl> ScatterPlot . prototype . adjustSelectionSphere = function ( e ) { <nl> - var _this = this ; <nl> var dist = this . getDist2ToMouse ( this . nearestPoint , e ) / 100 ; <nl> this . selectionSphere . scale . set ( dist , dist , dist ) ; <nl> var selectedPoints = [ ] ; <nl> - this . dataSet . points . forEach ( function ( point ) { <nl> - var pt = point . projectedPoint ; <nl> - var pointVect = new THREE . Vector3 ( pt [ 0 ] , pt [ 1 ] , pt [ 2 ] ) ; <nl> - var distPointToSphereOrigin = _this . selectionSphere . position . clone ( ) . sub ( pointVect ) . length ( ) ; <nl> + var n = this . worldSpacePointPositions . length ; <nl> + for ( var i = 0 ; i < n ; + + i ) { <nl> + var p = util . vector3FromPackedArray ( this . worldSpacePointPositions , i ) ; <nl> + var distPointToSphereOrigin = this . selectionSphere . position . clone ( ) . sub ( p ) . length ( ) ; <nl> if ( distPointToSphereOrigin < dist ) { <nl> - selectedPoints . push ( _this . dataSet . points . indexOf ( point ) ) ; <nl> + selectedPoints . push ( i ) ; <nl> } <nl> - } ) ; <nl> - this . selectionContext . notifySelectionChanged ( selectedPoints ) ; <nl> - } ; <nl> - ScatterPlot . prototype . removeAll = function ( ) { <nl> - var _this = this ; <nl> - this . visualizers . forEach ( function ( v ) { <nl> - v . removeAllFromScene ( _this . scene ) ; <nl> - } ) ; <nl> + } <nl> + this . projectorEventContext . notifySelectionChanged ( selectedPoints ) ; <nl> } ; <nl> ScatterPlot . prototype . createSelectionSphere = function ( ) { <nl> var geometry = new THREE . SphereGeometry ( 1 , 300 , 100 ) ; <nl> < h2 > Settings < / h2 > <nl> } ) ; <nl> this . selectionSphere = new THREE . 
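
The selection and hover code above stops reading point.projectedPoint and instead pulls coordinates out of the packed world-space array via util.vector3FromPackedArray. That helper's implementation is not shown in this diff; the sketch below is the index arithmetic implied by the call sites, with a plain tuple standing in for THREE.Vector3:

// Read point i's xyz out of a packed [x,y,z, x,y,z, ...] position array.
function vector3FromPackedArray(
    positions: Float32Array, index: number): [number, number, number] {
  const src = index * 3;
  return [positions[src], positions[src + 1], positions[src + 2]];
}
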
Mesh ( geometry , material ) ; <nl> this . selectionSphere . scale . set ( 0 , 0 , 0 ) ; <nl> - var pos = this . dataSet . points [ this . nearestPoint ] . projectedPoint ; <nl> + var p = util . vector3FromPackedArray ( this . worldSpacePointPositions , this . nearestPoint ) ; <nl> this . scene . add ( this . selectionSphere ) ; <nl> - this . selectionSphere . position . set ( pos [ 0 ] , pos [ 1 ] , pos [ 2 ] ) ; <nl> + this . selectionSphere . position . copy ( p ) ; <nl> } ; <nl> ScatterPlot . prototype . getLayoutValues = function ( ) { <nl> this . width = this . containerNode . offsetWidth ; <nl> this . height = Math . max ( 1 , this . containerNode . offsetHeight ) ; <nl> return [ this . width , this . height ] ; <nl> } ; <nl> - / * * <nl> - * Returns an x , y , z value for each item of our data based on the accessor <nl> - * methods . <nl> - * / <nl> - ScatterPlot . prototype . getPointsCoordinates = function ( ) { <nl> - var _this = this ; <nl> - var xAccessor = this . pointAccessors [ 0 ] ; <nl> - var yAccessor = this . pointAccessors [ 1 ] ; <nl> - var zAccessor = this . pointAccessors [ 2 ] ; <nl> - / / Determine max and min of each axis of our data . <nl> - var xExtent = d3 . extent ( this . dataSet . points , function ( p , i ) { return xAccessor ( i ) ; } ) ; <nl> - var yExtent = d3 . extent ( this . dataSet . points , function ( p , i ) { return yAccessor ( i ) ; } ) ; <nl> - var range = [ - CUBE_LENGTH / 2 , CUBE_LENGTH / 2 ] ; <nl> - this . xScale . domain ( xExtent ) . range ( range ) ; <nl> - this . yScale . domain ( yExtent ) . range ( range ) ; <nl> - if ( zAccessor ) { <nl> - var zExtent = d3 . extent ( this . dataSet . points , function ( p , i ) { return zAccessor ( i ) ; } ) ; <nl> - this . zScale . domain ( zExtent ) . range ( range ) ; <nl> - } <nl> - / / Determine 3d coordinates of each data point . <nl> - this . dataSet . points . forEach ( function ( d , i ) { <nl> - d . projectedPoint [ 0 ] = _this . xScale ( xAccessor ( i ) ) ; <nl> - d . projectedPoint [ 1 ] = _this . yScale ( yAccessor ( i ) ) ; <nl> - } ) ; <nl> - if ( zAccessor ) { <nl> - this . dataSet . points . forEach ( function ( d , i ) { <nl> - d . projectedPoint [ 2 ] = _this . zScale ( zAccessor ( i ) ) ; <nl> - } ) ; <nl> - } <nl> - else { <nl> - this . dataSet . points . forEach ( function ( d , i ) { <nl> - d . projectedPoint [ 2 ] = 0 ; <nl> - } ) ; <nl> - } <nl> - } ; <nl> - ScatterPlot . prototype . addAxesToScene = function ( ) { <nl> - this . addVisualizer ( new scatterPlotVisualizerAxes_1 . ScatterPlotVisualizerAxes ( ) ) ; <nl> - } ; <nl> ScatterPlot . prototype . sceneIs3D = function ( ) { <nl> return this . dimensionality = = = 3 ; <nl> } ; <nl> + ScatterPlot . prototype . remove3dAxis = function ( ) { <nl> + var axes = this . scene . getObjectByName ( ' axes ' ) ; <nl> + if ( axes ! = null ) { <nl> + this . scene . remove ( axes ) ; <nl> + } <nl> + } ; <nl> + ScatterPlot . prototype . add3dAxis = function ( ) { <nl> + var axes = new THREE . AxisHelper ( ) ; <nl> + axes . name = ' axes ' ; <nl> + this . scene . add ( axes ) ; <nl> + } ; <nl> / * * Set 2d vs 3d mode . * / <nl> ScatterPlot . prototype . setDimensions = function ( dimensionality ) { <nl> if ( ( dimensionality ! = = 2 ) & & ( dimensionality ! = = 3 ) ) { <nl> < h2 > Settings < / h2 > <nl> this . dimensionality = dimensionality ; <nl> var def = this . cameraDef | | this . makeDefaultCameraDef ( dimensionality ) ; <nl> this . recreateCamera ( def ) ; <nl> + this . 
remove3dAxis ( ) ; <nl> + if ( dimensionality = = = 3 ) { <nl> + this . add3dAxis ( ) ; <nl> + } <nl> } ; <nl> / * * Gets the current camera information , suitable for serialization . * / <nl> ScatterPlot . prototype . getCameraDef = function ( ) { <nl> < h2 > Settings < / h2 > <nl> } ; <nl> / * * Adds a visualizer to the set , will start dispatching events to it * / <nl> ScatterPlot . prototype . addVisualizer = function ( visualizer ) { <nl> - this . visualizers . push ( visualizer ) ; <nl> - if ( this . dataSet ) { <nl> - visualizer . onDataSet ( this . dataSet ) ; <nl> + if ( this . scene ) { <nl> + visualizer . setScene ( this . scene ) ; <nl> } <nl> if ( this . labelAccessor ) { <nl> visualizer . onSetLabelAccessor ( this . labelAccessor ) ; <nl> } <nl> - if ( this . scene ) { <nl> - visualizer . onRecreateScene ( this . scene , this . sceneIs3D ( ) , this . backgroundColor ) ; <nl> + visualizer . onResize ( this . width , this . height ) ; <nl> + if ( this . dataSet ) { <nl> + visualizer . onPointPositionsChanged ( this . worldSpacePointPositions , this . dataSet ) ; <nl> } <nl> + this . visualizers . push ( visualizer ) ; <nl> } ; <nl> / * * Removes all visualizers attached to this scatter plot . * / <nl> ScatterPlot . prototype . removeAllVisualizers = function ( ) { <nl> - this . removeAll ( ) ; <nl> + this . visualizers . forEach ( function ( v ) { return v . dispose ( ) ; } ) ; <nl> this . visualizers = [ ] ; <nl> - this . addAxesToScene ( ) ; <nl> } ; <nl> - ScatterPlot . prototype . recreateScene = function ( ) { <nl> + / * * Update scatter plot with a new array of packed xyz point positions . * / <nl> + ScatterPlot . prototype . setPointPositions = function ( dataSet , worldSpacePointPositions ) { <nl> var _this = this ; <nl> - this . removeAll ( ) ; <nl> - this . visualizers . forEach ( function ( v ) { <nl> - v . onRecreateScene ( _this . scene , _this . sceneIs3D ( ) , _this . backgroundColor ) ; <nl> - } ) ; <nl> - this . resize ( false ) ; <nl> - this . render ( ) ; <nl> - } ; <nl> - / * * Sets the data for the scatter plot . * / <nl> - ScatterPlot . prototype . setDataSet = function ( dataSet ) { <nl> - this . removeAll ( ) ; <nl> this . dataSet = dataSet ; <nl> - this . nearestPoint = null ; <nl> - this . visualizers . forEach ( function ( v ) { <nl> - v . onDataSet ( dataSet ) ; <nl> - } ) ; <nl> - this . render ( ) ; <nl> - } ; <nl> - ScatterPlot . prototype . update = function ( ) { <nl> - var _this = this ; <nl> - this . getPointsCoordinates ( ) ; <nl> + this . worldSpacePointPositions = worldSpacePointPositions ; <nl> this . visualizers . forEach ( function ( v ) { <nl> - v . onUpdate ( _this . dataSet ) ; <nl> + v . onPointPositionsChanged ( worldSpacePointPositions , _this . dataSet ) ; <nl> } ) ; <nl> - this . render ( ) ; <nl> } ; <nl> ScatterPlot . prototype . render = function ( ) { <nl> - if ( ! this . dataSet ) { <nl> + if ( this . dataSet = = null ) { <nl> return ; <nl> } <nl> - / / place the light near the camera <nl> { <nl> var lightPos = this . camera . position . clone ( ) ; <nl> lightPos . x + = 1 ; <nl> lightPos . y + = 1 ; <nl> this . light . position . set ( lightPos . x , lightPos . y , lightPos . z ) ; <nl> } <nl> - var cameraSpacePointExtents = util_1 . getNearFarPoints ( this . dataSet , this . camera . position , this . orbitCameraControls . target ) ; <nl> - var rc = new renderContext_1 . RenderContext ( this . camera , this . orbitCameraControls . target , this . width , this . 
height , cameraSpacePointExtents [ 0 ] , cameraSpacePointExtents [ 1 ] , this . pointColors , this . pointScaleFactors , this . labelAccessor , this . labels , this . traceColors ) ; <nl> + var cameraType = ( this . camera instanceof THREE . PerspectiveCamera ) ? <nl> + renderContext_1 . CameraType . Perspective : <nl> + renderContext_1 . CameraType . Orthographic ; <nl> + var cameraSpacePointExtents = util . getNearFarPoints ( this . worldSpacePointPositions , this . camera . position , this . orbitCameraControls . target ) ; <nl> + var rc = new renderContext_1 . RenderContext ( this . camera , cameraType , this . orbitCameraControls . target , this . width , this . height , cameraSpacePointExtents [ 0 ] , cameraSpacePointExtents [ 1 ] , this . backgroundColor , this . pointColors , this . pointScaleFactors , this . labelAccessor , this . labels , this . traceColors , this . traceOpacities , this . traceWidths ) ; <nl> / / Render first pass to picking target . This render fills pickingTexture <nl> / / with colors that are actually point ids , so that sampling the texture at <nl> / / the mouse ' s current x , y coordinates will reveal the data point that the <nl> < h2 > Settings < / h2 > <nl> } ) ; <nl> this . renderer . render ( this . scene , this . camera ) ; <nl> } ; <nl> - ScatterPlot . prototype . setPointAccessors = function ( pointAccessors ) { <nl> - this . pointAccessors = pointAccessors ; <nl> - } ; <nl> ScatterPlot . prototype . setLabelAccessor = function ( labelAccessor ) { <nl> this . labelAccessor = labelAccessor ; <nl> this . visualizers . forEach ( function ( v ) { <nl> < h2 > Settings < / h2 > <nl> ScatterPlot . prototype . setTraceColors = function ( colors ) { <nl> this . traceColors = colors ; <nl> } ; <nl> - ScatterPlot . prototype . getMode = function ( ) { return this . mode ; } ; <nl> + ScatterPlot . prototype . setTraceOpacities = function ( opacities ) { <nl> + this . traceOpacities = opacities ; <nl> + } ; <nl> + ScatterPlot . prototype . setTraceWidths = function ( widths ) { <nl> + this . traceWidths = widths ; <nl> + } ; <nl> + ScatterPlot . prototype . getMode = function ( ) { <nl> + return this . mode ; <nl> + } ; <nl> ScatterPlot . prototype . resetZoom = function ( ) { <nl> this . recreateCamera ( this . makeDefaultCameraDef ( this . dimensionality ) ) ; <nl> this . render ( ) ; <nl> < h2 > Settings < / h2 > <nl> . selectAll ( ' canvas ' ) <nl> . style ( ' filter ' , isNight ? ' invert ( 100 % ) ' : null ) ; <nl> } ; <nl> - ScatterPlot . prototype . showAxes = function ( show ) { } ; <nl> - ScatterPlot . prototype . showTickLabels = function ( show ) { } ; <nl> ScatterPlot . prototype . resize = function ( render ) { <nl> if ( render = = = void 0 ) { render = true ; } <nl> var _a = [ this . width , this . height ] , oldW = _a [ 0 ] , oldH = _a [ 1 ] ; <nl> < h2 > Settings < / h2 > <nl> } ( ) ) ; <nl> exports . ScatterPlot = ScatterPlot ; <nl> <nl> - } , { " . / renderContext " : 14 , " . / scatterPlotVisualizerAxes " : 18 , " . / util " : 24 , " . / vector " : 25 } ] , 16 : [ function ( require , module , exports ) { <nl> - arguments [ 4 ] [ 9 ] [ 0 ] . apply ( exports , arguments ) <nl> - } , { " dup " : 9 } ] , 17 : [ function ( require , module , exports ) { <nl> + } , { " . / renderContext " : 14 , " . / util " : 22 , " . / vector " : 23 } ] , 16 : [ function ( require , module , exports ) { <nl> + arguments [ 4 ] [ 12 ] [ 0 ] . 
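
render() above asks util.getNearFarPoints for the nearest and farthest camera-space point distances and hands them to the RenderContext, whose fog logic consumes them. The actual helper is not shown in this diff, so the following is only a plausible reading of what it computes, inferred from how it is called: project each packed point onto the camera-to-target direction and track the extremes.

// Assumed semantics: signed distances of all points along the view axis.
function getNearFarPoints(
    positions: Float32Array,
    camPos: [number, number, number],
    camTarget: [number, number, number]): [number, number] {
  // Unit vector from the camera toward its target.
  const dir = [
    camTarget[0] - camPos[0], camTarget[1] - camPos[1], camTarget[2] - camPos[2]
  ];
  const len = Math.hypot(dir[0], dir[1], dir[2]) || 1;
  dir[0] /= len; dir[1] /= len; dir[2] /= len;
  let near = Infinity, far = -Infinity;
  for (let src = 0; src < positions.length; src += 3) {
    // Signed distance of this point along the view direction.
    const d = (positions[src] - camPos[0]) * dir[0] +
              (positions[src + 1] - camPos[1]) * dir[1] +
              (positions[src + 2] - camPos[2]) * dir[2];
    if (d < near) near = d;
    if (d > far) far = d;
  }
  return [near, far];
}
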
apply ( exports , arguments ) <nl> + } , { " dup " : 12 } ] , 17 : [ function ( require , module , exports ) { <nl> / * Copyright 2016 The TensorFlow Authors . All Rights Reserved . <nl> <nl> Licensed under the Apache License , Version 2 . 0 ( the " License " ) ; <nl> < h2 > Settings < / h2 > <nl> limitations under the License . <nl> = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = * / <nl> " use strict " ; <nl> - var util_1 = require ( ' . / util ' ) ; <nl> + var util = require ( ' . / util ' ) ; <nl> var FONT_SIZE = 80 ; <nl> var ONE_OVER_FONT_SIZE = 1 / FONT_SIZE ; <nl> var LABEL_SCALE = 2 . 2 ; / / at 1 : 1 texel / pixel ratio <nl> < h2 > Settings < / h2 > <nl> * / <nl> var ScatterPlotVisualizer3DLabels = ( function ( ) { <nl> function ScatterPlotVisualizer3DLabels ( ) { <nl> - this . glyphTexture = this . createGlyphTexture ( ) ; <nl> - this . uniforms = { <nl> - texture : { type : ' t ' , value : this . glyphTexture . texture } , <nl> - picking : { type : ' bool ' , value : false } , <nl> - } ; <nl> - this . material = new THREE . ShaderMaterial ( { <nl> - uniforms : this . uniforms , <nl> - transparent : true , <nl> - vertexShader : VERTEX_SHADER , <nl> - fragmentShader : FRAGMENT_SHADER , <nl> - } ) ; <nl> } <nl> ScatterPlotVisualizer3DLabels . prototype . createGlyphTexture = function ( ) { <nl> - if ( this . glyphTexture ) { <nl> - this . glyphTexture . texture . dispose ( ) ; <nl> - } <nl> var canvas = document . createElement ( ' canvas ' ) ; <nl> canvas . width = MAX_CANVAS_DIMENSION ; <nl> canvas . height = FONT_SIZE ; <nl> < h2 > Settings < / h2 > <nl> ctx . fillText ( text , leftCoord - spaceOffset , 0 ) ; <nl> leftCoord + = textLength ; <nl> } <nl> - var tex = util_1 . createTexture ( canvas ) ; <nl> + var tex = util . createTexture ( canvas ) ; <nl> return { texture : tex , lengths : glyphLengths , offsets : glyphOffset } ; <nl> } ; <nl> ScatterPlotVisualizer3DLabels . prototype . processLabelVerts = function ( ) { <nl> < h2 > Settings < / h2 > <nl> _loop_1 ( i ) ; <nl> } <nl> } ; <nl> - ScatterPlotVisualizer3DLabels . prototype . createLabelGeometry = function ( dataSet ) { <nl> + ScatterPlotVisualizer3DLabels . prototype . createLabels = function ( dataSet ) { <nl> var _this = this ; <nl> + this . glyphTexture = this . createGlyphTexture ( ) ; <nl> + this . uniforms = { <nl> + texture : { type : ' t ' } , <nl> + picking : { type : ' bool ' } , <nl> + } ; <nl> + this . material = new THREE . ShaderMaterial ( { <nl> + uniforms : this . uniforms , <nl> + transparent : true , <nl> + vertexShader : VERTEX_SHADER , <nl> + fragmentShader : FRAGMENT_SHADER , <nl> + } ) ; <nl> this . processLabelVerts ( ) ; <nl> this . createColorBuffers ( ) ; <nl> var positionArray = new Float32Array ( this . totalVertexCount * XYZ_ELEMENTS_PER_ENTRY ) ; <nl> < h2 > Settings < / h2 > <nl> leftOffset + = letterWidth ; <nl> } <nl> } <nl> + var n = dataSet . points . length ; <nl> var _loop_2 = function ( i ) { <nl> - var pp = dataSet . points [ i ] . projectedPoint ; <nl> + var p = util . vector3FromPackedArray ( this_2 . worldSpacePointPositions , i ) ; <nl> this_2 . labelVertexMap [ i ] . forEach ( function ( j ) { <nl> - _this . positions . setXYZ ( j , pp [ 0 ] , pp [ 1 ] , pp [ 2 ] ) ; <nl> + _this . positions . setXYZ ( j , p . x , p . y , p . z ) ; <nl> } ) ; <nl> } ; <nl> var this_2 = this ; <nl> - for ( var i = 0 ; i < dataSet . points . 
length ; i + + ) { <nl> + for ( var i = 0 ; i < n ; i + + ) { <nl> _loop_2 ( i ) ; <nl> } <nl> ; <nl> this . labelsMesh = new THREE . Mesh ( this . geometry , this . material ) ; <nl> + this . labelsMesh . frustumCulled = false ; <nl> + this . scene . add ( this . labelsMesh ) ; <nl> } ; <nl> - ScatterPlotVisualizer3DLabels . prototype . destroyLabels = function ( ) { <nl> - if ( this . labelsMesh ) { <nl> - if ( this . scene ) { <nl> - this . scene . remove ( this . labelsMesh ) ; <nl> - } <nl> - this . geometry . dispose ( ) ; <nl> - this . labelsMesh = null ; <nl> - } <nl> - } ; <nl> - ScatterPlotVisualizer3DLabels . prototype . createLabels = function ( dataSet ) { <nl> - this . destroyLabels ( ) ; <nl> - if ( this . labelAccessor ) { <nl> - this . createLabelGeometry ( dataSet ) ; <nl> - } <nl> - } ; <nl> - ScatterPlotVisualizer3DLabels . prototype . colorSprites = function ( pointColors ) { <nl> + ScatterPlotVisualizer3DLabels . prototype . colorLabels = function ( pointColors ) { <nl> if ( this . labelAccessor = = null | | this . geometry = = null | | <nl> this . dataSet = = null | | pointColors = = null ) { <nl> return ; <nl> < h2 > Settings < / h2 > <nl> colors . array = this . renderColors ; <nl> var n = this . dataSet . points . length ; <nl> var src = 0 ; <nl> - var _loop_3 = function ( i ) { <nl> - var c = new THREE . Color ( pointColors [ src + + ] , pointColors [ src + + ] , pointColors [ src + + ] ) ; <nl> - this_3 . labelVertexMap [ i ] . forEach ( function ( j ) { <nl> - colors . setXYZ ( j , c . r , c . g , c . b ) ; <nl> - } ) ; <nl> - } ; <nl> - var this_3 = this ; <nl> for ( var i = 0 ; i < n ; + + i ) { <nl> - _loop_3 ( i ) ; <nl> + var c = new THREE . Color ( pointColors [ src ] , pointColors [ src + 1 ] , pointColors [ src + 2 ] ) ; <nl> + var m = this . labelVertexMap [ i ] . length ; <nl> + for ( var j = 0 ; j < m ; + + j ) { <nl> + colors . setXYZ ( this . labelVertexMap [ i ] [ j ] , c . r , c . g , c . b ) ; <nl> + } <nl> + src + = RGB_ELEMENTS_PER_ENTRY ; <nl> } <nl> colors . needsUpdate = true ; <nl> } ; <nl> - ScatterPlotVisualizer3DLabels . prototype . onRecreateScene = function ( scene , sceneIs3D , backgroundColor ) { <nl> + ScatterPlotVisualizer3DLabels . prototype . setScene = function ( scene ) { <nl> this . scene = scene ; <nl> - if ( this . labelsMesh = = null ) { <nl> - this . createLabels ( this . dataSet ) ; <nl> - } <nl> + } ; <nl> + ScatterPlotVisualizer3DLabels . prototype . dispose = function ( ) { <nl> if ( this . labelsMesh ) { <nl> - scene . add ( this . labelsMesh ) ; <nl> + if ( this . scene ) { <nl> + this . scene . remove ( this . labelsMesh ) ; <nl> + } <nl> + this . labelsMesh = null ; <nl> + } <nl> + if ( this . geometry ) { <nl> + this . geometry . dispose ( ) ; <nl> + this . geometry = null ; <nl> + } <nl> + if ( ( this . glyphTexture ! = null ) & & ( this . glyphTexture . texture ! = null ) ) { <nl> + this . glyphTexture . texture . dispose ( ) ; <nl> + this . glyphTexture . texture = null ; <nl> } <nl> - } ; <nl> - ScatterPlotVisualizer3DLabels . prototype . removeAllFromScene = function ( scene ) { <nl> - this . destroyLabels ( ) ; <nl> } ; <nl> ScatterPlotVisualizer3DLabels . prototype . onSetLabelAccessor = function ( labelAccessor ) { <nl> this . labelAccessor = labelAccessor ; <nl> - this . onUpdate ( this . dataSet ) ; <nl> - } ; <nl> - ScatterPlotVisualizer3DLabels . prototype . onDataSet = function ( dataSet ) { <nl> - this . dataSet = dataSet ; <nl> - this . labelAccessor = null ; <nl> + this . 
dispose ( ) ; <nl> + this . onPointPositionsChanged ( this . worldSpacePointPositions , this . dataSet ) ; <nl> } ; <nl> ScatterPlotVisualizer3DLabels . prototype . onPickingRender = function ( rc ) { <nl> this . material . uniforms . texture . value = this . glyphTexture . texture ; <nl> < h2 > Settings < / h2 > <nl> colors . needsUpdate = true ; <nl> } ; <nl> ScatterPlotVisualizer3DLabels . prototype . onRender = function ( rc ) { <nl> - this . colorSprites ( rc . pointColors ) ; <nl> + this . colorLabels ( rc . pointColors ) ; <nl> this . material . uniforms . texture . value = this . glyphTexture . texture ; <nl> this . material . uniforms . picking . value = false ; <nl> var colors = this . geometry . getAttribute ( ' color ' ) ; <nl> colors . array = this . renderColors ; <nl> colors . needsUpdate = true ; <nl> } ; <nl> - ScatterPlotVisualizer3DLabels . prototype . onUpdate = function ( dataSet ) { <nl> - this . createLabels ( dataSet ) ; <nl> - if ( this . labelsMesh & & this . scene ) { <nl> - this . scene . add ( this . labelsMesh ) ; <nl> + ScatterPlotVisualizer3DLabels . prototype . onPointPositionsChanged = function ( newPositions , dataSet ) { <nl> + this . worldSpacePointPositions = newPositions ; <nl> + this . dataSet = dataSet ; <nl> + this . dispose ( ) ; <nl> + if ( ( this . dataSet ! = null ) & & ( this . labelAccessor ! = null ) & & <nl> + ( this . worldSpacePointPositions ! = null ) ) { <nl> + this . createLabels ( this . dataSet ) ; <nl> } <nl> } ; <nl> ScatterPlotVisualizer3DLabels . prototype . onResize = function ( newWidth , newHeight ) { } ; <nl> < h2 > Settings < / h2 > <nl> } ( ) ) ; <nl> exports . ScatterPlotVisualizer3DLabels = ScatterPlotVisualizer3DLabels ; <nl> <nl> - } , { " . / util " : 24 } ] , 18 : [ function ( require , module , exports ) { <nl> - / * Copyright 2016 The TensorFlow Authors . All Rights Reserved . <nl> - <nl> - Licensed under the Apache License , Version 2 . 0 ( the " License " ) ; <nl> - you may not use this file except in compliance with the License . <nl> - You may obtain a copy of the License at <nl> - <nl> - http : / / www . apache . org / licenses / LICENSE - 2 . 0 <nl> - <nl> - Unless required by applicable law or agreed to in writing , software <nl> - distributed under the License is distributed on an " AS IS " BASIS , <nl> - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND , either express or implied . <nl> - See the License for the specific language governing permissions and <nl> - limitations under the License . <nl> - = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = * / <nl> - " use strict " ; <nl> - / * * <nl> - * Maintains and renders 3d axes for the scatter plot . <nl> - * / <nl> - var ScatterPlotVisualizerAxes = ( function ( ) { <nl> - function ScatterPlotVisualizerAxes ( ) { <nl> - this . axis = new THREE . AxisHelper ( ) ; <nl> - } <nl> - ScatterPlotVisualizerAxes . prototype . onDataSet = function ( dataSet ) { } ; <nl> - ScatterPlotVisualizerAxes . prototype . onRecreateScene = function ( scene , sceneIs3D , backgroundColor ) { <nl> - if ( sceneIs3D ) { <nl> - scene . add ( this . axis ) ; <nl> - } <nl> - } ; <nl> - ScatterPlotVisualizerAxes . prototype . removeAllFromScene = function ( scene ) { <nl> - scene . remove ( this . axis ) ; <nl> - } ; <nl> - ScatterPlotVisualizerAxes . prototype . onPickingRender = function ( renderContext ) { } ; <nl> - ScatterPlotVisualizerAxes . prototype . 
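
colorLabels above fans one packed RGB triple per point out to every vertex that point owns in labelVertexMap, replacing the earlier per-point closure with a flat double loop. The same loop in isolation:

// pointColors is packed [r,g,b, r,g,b, ...]; labelVertexMap maps a point
// index to the vertex indices of its label glyphs; vertexColors is the
// packed per-vertex [r,g,b] attribute array being rewritten.
function fanOutColors(
    pointColors: Float32Array,
    labelVertexMap: number[][],
    vertexColors: Float32Array) {
  let src = 0;
  for (let i = 0; i < labelVertexMap.length; ++i) {
    const r = pointColors[src], g = pointColors[src + 1], b = pointColors[src + 2];
    for (const j of labelVertexMap[i]) {
      vertexColors[j * 3] = r;
      vertexColors[j * 3 + 1] = g;
      vertexColors[j * 3 + 2] = b;
    }
    src += 3; // RGB_ELEMENTS_PER_ENTRY
  }
}
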
onRender = function ( renderContext ) { } ; <nl> - ScatterPlotVisualizerAxes . prototype . onUpdate = function ( dataSet ) { } ; <nl> - ScatterPlotVisualizerAxes . prototype . onResize = function ( newWidth , newHeight ) { } ; <nl> - ScatterPlotVisualizerAxes . prototype . onSetLabelAccessor = function ( labelAccessor ) { } ; <nl> - return ScatterPlotVisualizerAxes ; <nl> - } ( ) ) ; <nl> - exports . ScatterPlotVisualizerAxes = ScatterPlotVisualizerAxes ; <nl> - <nl> - } , { } ] , 19 : [ function ( require , module , exports ) { <nl> + } , { " . / util " : 22 } ] , 18 : [ function ( require , module , exports ) { <nl> / * Copyright 2016 The TensorFlow Authors . All Rights Reserved . <nl> <nl> Licensed under the Apache License , Version 2 . 0 ( the " License " ) ; <nl> < h2 > Settings < / h2 > <nl> = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = * / <nl> " use strict " ; <nl> var label_1 = require ( ' . / label ' ) ; <nl> - var util_1 = require ( ' . / util ' ) ; <nl> + var renderContext_1 = require ( ' . / renderContext ' ) ; <nl> + var util = require ( ' . / util ' ) ; <nl> var MAX_LABELS_ON_SCREEN = 10000 ; <nl> / * * <nl> * Creates and maintains a 2d canvas on top of the GL canvas . All labels , when <nl> < h2 > Settings < / h2 > <nl> var ScatterPlotVisualizerCanvasLabels = ( function ( ) { <nl> function ScatterPlotVisualizerCanvasLabels ( container ) { <nl> this . labelsActive = true ; <nl> - this . sceneIs3D = true ; <nl> this . canvas = container . append ( ' canvas ' ) . node ( ) ; <nl> this . gc = this . canvas . getContext ( ' 2d ' ) ; <nl> d3 . select ( this . canvas ) . style ( { position : ' absolute ' , left : 0 , top : 0 } ) ; <nl> < h2 > Settings < / h2 > <nl> if ( ( rc . labels = = null ) | | ( rc . labels . pointIndices . length = = = 0 ) ) { <nl> return ; <nl> } <nl> + var sceneIs3D = ( rc . cameraType = = = renderContext_1 . CameraType . Perspective ) ; <nl> var strokeStylePrefix ; <nl> var fillStylePrefix ; <nl> { <nl> < h2 > Settings < / h2 > <nl> . range ( [ 0 . 1 , 1 ] ) ; <nl> var camPos = rc . camera . position ; <nl> var camToTarget = camPos . clone ( ) . sub ( rc . cameraTarget ) ; <nl> + var camToPoint = new THREE . Vector3 ( ) ; <nl> this . gc . lineWidth = 6 ; <nl> this . gc . textBaseline = ' middle ' ; <nl> this . gc . miterLimit = 2 ; <nl> < h2 > Settings < / h2 > <nl> var n = Math . min ( MAX_LABELS_ON_SCREEN , rc . labels . pointIndices . length ) ; <nl> for ( var i = 0 ; i < n ; + + i ) { <nl> var index = rc . labels . pointIndices [ i ] ; <nl> - var point = util_1 . getProjectedPointFromIndex ( this . dataSet , index ) ; <nl> + var point = util . vector3FromPackedArray ( this . worldSpacePointPositions , index ) ; <nl> / / discard points that are behind the camera <nl> - var camToPoint = camPos . clone ( ) . sub ( point ) ; <nl> + camToPoint . copy ( camPos ) . sub ( point ) ; <nl> if ( camToTarget . dot ( camToPoint ) < 0 ) { <nl> continue ; <nl> } <nl> - var _a = util_1 . vector3DToScreenCoords ( rc . camera , rc . screenWidth , rc . screenHeight , point ) , x = _a [ 0 ] , y = _a [ 1 ] ; <nl> + var _a = util . vector3DToScreenCoords ( rc . camera , rc . screenWidth , rc . screenHeight , point ) , x = _a [ 0 ] , y = _a [ 1 ] ; <nl> x + = xShift ; <nl> / / Computing the width of the font is expensive , <nl> / / so we assume width of 1 at first . Then , if the label doesn ' t <nl> < h2 > Settings < / h2 > <nl> textBoundingBox . hiX + = this . gc . 
measureText ( text ) . width - 1 ; <nl> if ( grid . insert ( textBoundingBox ) ) { <nl> var opacity = 1 ; <nl> - if ( this . sceneIs3D & & ( rc . labels . useSceneOpacityFlags [ i ] = = = 1 ) ) { <nl> + if ( sceneIs3D & & ( rc . labels . useSceneOpacityFlags [ i ] = = = 1 ) ) { <nl> opacity = opacityMap ( camToPoint . length ( ) ) ; <nl> } <nl> this . gc . strokeStyle = strokeStylePrefix + opacity + ' ) ' ; <nl> < h2 > Settings < / h2 > <nl> } <nl> } <nl> } ; <nl> - ScatterPlotVisualizerCanvasLabels . prototype . onDataSet = function ( dataSet ) { <nl> - this . labelsActive = ( dataSet . spriteAndMetadataInfo . spriteImage = = null ) ; <nl> - this . dataSet = dataSet ; <nl> - } ; <nl> ScatterPlotVisualizerCanvasLabels . prototype . onResize = function ( newWidth , newHeight ) { <nl> var dpr = window . devicePixelRatio ; <nl> d3 . select ( this . canvas ) <nl> < h2 > Settings < / h2 > <nl> . attr ( ' height ' , newHeight * dpr ) <nl> . style ( { width : newWidth + ' px ' , height : newHeight + ' px ' } ) ; <nl> } ; <nl> - ScatterPlotVisualizerCanvasLabels . prototype . onRecreateScene = function ( scene , sceneIs3D , backgroundColor ) { <nl> - this . sceneIs3D = sceneIs3D ; <nl> - } ; <nl> - ScatterPlotVisualizerCanvasLabels . prototype . removeAllFromScene = function ( scene ) { <nl> + ScatterPlotVisualizerCanvasLabels . prototype . dispose = function ( ) { <nl> this . removeAllLabels ( ) ; <nl> + this . canvas = null ; <nl> + this . gc = null ; <nl> } ; <nl> - ScatterPlotVisualizerCanvasLabels . prototype . onUpdate = function ( ) { <nl> + ScatterPlotVisualizerCanvasLabels . prototype . onPointPositionsChanged = function ( newPositions , dataSet ) { <nl> + this . worldSpacePointPositions = newPositions ; <nl> this . removeAllLabels ( ) ; <nl> } ; <nl> ScatterPlotVisualizerCanvasLabels . prototype . onRender = function ( rc ) { <nl> < h2 > Settings < / h2 > <nl> this . removeAllLabels ( ) ; <nl> this . makeLabels ( rc ) ; <nl> } ; <nl> + ScatterPlotVisualizerCanvasLabels . prototype . setScene = function ( scene ) { } ; <nl> ScatterPlotVisualizerCanvasLabels . prototype . onPickingRender = function ( renderContext ) { } ; <nl> ScatterPlotVisualizerCanvasLabels . prototype . onSetLabelAccessor = function ( labelAccessor ) { } ; <nl> return ScatterPlotVisualizerCanvasLabels ; <nl> } ( ) ) ; <nl> exports . ScatterPlotVisualizerCanvasLabels = ScatterPlotVisualizerCanvasLabels ; <nl> <nl> - } , { " . / label " : 11 , " . / util " : 24 } ] , 20 : [ function ( require , module , exports ) { <nl> + } , { " . / label " : 10 , " . / renderContext " : 14 , " . / util " : 22 } ] , 19 : [ function ( require , module , exports ) { <nl> / * Copyright 2016 The TensorFlow Authors . All Rights Reserved . <nl> <nl> Licensed under the Apache License , Version 2 . 0 ( the " License " ) ; <nl> < h2 > Settings < / h2 > <nl> limitations under the License . <nl> = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = * / <nl> " use strict " ; <nl> - var util_1 = require ( ' . / util ' ) ; <nl> + var renderContext_1 = require ( ' . / renderContext ' ) ; <nl> + var util = require ( ' . / util ' ) ; <nl> var NUM_POINTS_FOG_THRESHOLD = 5000 ; <nl> var MIN_POINT_SIZE = 5 . 
0 ; <nl> var IMAGE_SIZE = 30 ; <nl> < h2 > Settings < / h2 > <nl> var INDEX_NUM_ELEMENTS = 1 ; <nl> var XYZ_NUM_ELEMENTS = 3 ; <nl> var VERTEX_SHADER = " \ n / / Index of the specific vertex ( passed in as bufferAttribute ) , and the \ n / / variable that will be used to pass it to the fragment shader . \ n attribute float vertexIndex ; \ n attribute vec3 color ; \ n attribute float scaleFactor ; \ n \ n varying vec2 xyIndex ; \ n varying vec3 vColor ; \ n \ n uniform bool sizeAttenuation ; \ n uniform float pointSize ; \ n uniform float imageWidth ; \ n uniform float imageHeight ; \ n \ n void main ( ) { \ n / / Pass index and color values to fragment shader . \ n vColor = color ; \ n xyIndex = vec2 ( mod ( vertexIndex , imageWidth ) , \ n floor ( vertexIndex / imageWidth ) ) ; \ n \ n / / Transform current vertex by modelViewMatrix ( model world position and \ n / / camera world position matrix ) . \ n vec4 cameraSpacePos = modelViewMatrix * vec4 ( position , 1 . 0 ) ; \ n \ n / / Project vertex in camera - space to screen coordinates using the camera ' s \ n / / projection matrix . \ n gl_Position = projectionMatrix * cameraSpacePos ; \ n \ n / / Create size attenuation ( if we ' re in 3D mode ) by making the size of \ n / / each point inversly proportional to its distance to the camera . \ n float outputPointSize = pointSize ; \ n if ( sizeAttenuation ) { \ n outputPointSize = - pointSize / cameraSpacePos . z ; \ n } \ n \ n gl_PointSize = \ n max ( outputPointSize * scaleFactor , " + MIN_POINT_SIZE . toFixed ( 1 ) + " ) ; \ n } " ; <nl> - var FRAGMENT_SHADER_POINT_TEST_CHUNK = " \ n bool point_in_unit_circle ( vec2 spriteCoord ) { \ n vec2 centerToP = spriteCoord - vec2 ( 0 . 5 , 0 . 5 ) ; \ n return dot ( centerToP , centerToP ) < ( 0 . 5 * 0 . 5 ) ; \ n } \ n \ n bool point_in_unit_equilateral_triangle ( vec2 spriteCoord ) { \ n vec3 v0 = vec3 ( 0 , 1 , 0 ) ; \ n vec3 v1 = vec3 ( 0 . 5 , 0 , 0 ) ; \ n vec3 v2 = vec3 ( 1 , 1 , 0 ) ; \ n vec3 p = vec3 ( spriteCoord , 0 ) ; \ n float p_in_v0_v1 = cross ( v1 - v0 , p - v0 ) . z ; \ n float p_in_v1_v2 = cross ( v2 - v1 , p - v1 ) . z ; \ n return ( p_in_v0_v1 > 0 . 0 ) & & ( p_in_v1_v2 > 0 . 0 ) ; \ n } \ n " ; <nl> - var FRAGMENT_SHADER = " \ n varying vec2 xyIndex ; \ n varying vec3 vColor ; \ n \ n uniform sampler2D texture ; \ n uniform float imageWidth ; \ n uniform float imageHeight ; \ n uniform bool isImage ; \ n \ n " + THREE . ShaderChunk [ ' common ' ] + " \ n " + THREE . ShaderChunk [ ' fog_pars_fragment ' ] + " \ n " + FRAGMENT_SHADER_POINT_TEST_CHUNK + " \ n \ n void main ( ) { \ n if ( isImage ) { \ n / / Coordinates of the vertex within the entire sprite image . \ n vec2 coords = ( gl_PointCoord + xyIndex ) / vec2 ( imageWidth , imageHeight ) ; \ n gl_FragColor = vec4 ( vColor , 1 . 0 ) * texture2D ( texture , coords ) ; \ n } else { \ n bool inside = point_in_unit_circle ( gl_PointCoord ) ; \ n vec3 c = mix ( vec3 ( 1 , 1 , 1 ) , vColor , float ( inside ) ) ; \ n gl_FragColor = vec4 ( c , 1 ) ; \ n } \ n " + THREE . ShaderChunk [ ' fog_fragment ' ] + " \ n } " ; <nl> + var FRAGMENT_SHADER_POINT_TEST_CHUNK = " \ n bool point_in_unit_circle ( vec2 spriteCoord ) { \ n vec2 centerToP = spriteCoord - vec2 ( 0 . 5 , 0 . 5 ) ; \ n return dot ( centerToP , centerToP ) < ( 0 . 5 * 0 . 5 ) ; \ n } \ n \ n bool point_in_unit_equilateral_triangle ( vec2 spriteCoord ) { \ n vec3 v0 = vec3 ( 0 , 1 , 0 ) ; \ n vec3 v1 = vec3 ( 0 . 
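
The fragment shader strings above keep fragments only inside the unit circle inscribed in the point sprite (and the new render shader discards the rest instead of blending them toward white). The circle test, transcribed from the GLSL chunk into plain TypeScript:

// gl_PointCoord spans [0,1]^2 across the sprite; keep a fragment only if it
// lies inside the inscribed circle of radius 0.5 centered at (0.5, 0.5).
function pointInUnitCircle(u: number, v: number): boolean {
  const dx = u - 0.5;
  const dy = v - 0.5;
  return dx * dx + dy * dy < 0.25; // 0.5 * 0.5
}
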
5 , 0 , 0 ) ; \ n vec3 v2 = vec3 ( 1 , 1 , 0 ) ; \ n vec3 p = vec3 ( spriteCoord , 0 ) ; \ n float p_in_v0_v1 = cross ( v1 - v0 , p - v0 ) . z ; \ n float p_in_v1_v2 = cross ( v2 - v1 , p - v1 ) . z ; \ n return ( p_in_v0_v1 > 0 . 0 ) & & ( p_in_v1_v2 > 0 . 0 ) ; \ n } \ n \ n bool point_in_unit_square ( vec2 spriteCoord ) { \ n return true ; \ n } \ n " ; <nl> + var FRAGMENT_SHADER = " \ n varying vec2 xyIndex ; \ n varying vec3 vColor ; \ n \ n uniform sampler2D texture ; \ n uniform float imageWidth ; \ n uniform float imageHeight ; \ n uniform bool isImage ; \ n \ n " + THREE . ShaderChunk [ ' common ' ] + " \ n " + THREE . ShaderChunk [ ' fog_pars_fragment ' ] + " \ n " + FRAGMENT_SHADER_POINT_TEST_CHUNK + " \ n \ n void main ( ) { \ n if ( isImage ) { \ n / / Coordinates of the vertex within the entire sprite image . \ n vec2 coords = ( gl_PointCoord + xyIndex ) / vec2 ( imageWidth , imageHeight ) ; \ n gl_FragColor = vec4 ( vColor , 1 . 0 ) * texture2D ( texture , coords ) ; \ n } else { \ n bool inside = point_in_unit_circle ( gl_PointCoord ) ; \ n if ( ! inside ) { \ n discard ; \ n } \ n gl_FragColor = vec4 ( vColor , 1 ) ; \ n } \ n " + THREE . ShaderChunk [ ' fog_fragment ' ] + " \ n } " ; <nl> var FRAGMENT_SHADER_PICKING = " \ n varying vec2 xyIndex ; \ n varying vec3 vColor ; \ n uniform bool isImage ; \ n \ n " + FRAGMENT_SHADER_POINT_TEST_CHUNK + " \ n \ n void main ( ) { \ n xyIndex ; / / Silence ' unused variable ' warning . \ n if ( isImage ) { \ n gl_FragColor = vec4 ( vColor , 1 ) ; \ n } else { \ n bool inside = point_in_unit_circle ( gl_PointCoord ) ; \ n if ( ! inside ) { \ n discard ; \ n } \ n gl_FragColor = vec4 ( vColor , 1 ) ; \ n } \ n } " ; <nl> / * * <nl> * Uses GL point sprites to render the dataset . <nl> * / <nl> var ScatterPlotVisualizerSprites = ( function ( ) { <nl> function ScatterPlotVisualizerSprites ( ) { <nl> - this . sceneIs3D = true ; <nl> + this . texture = null ; <nl> } <nl> / * * <nl> * Create points , set their locations and actually instantiate the <nl> * geometry . <nl> * / <nl> - ScatterPlotVisualizerSprites . prototype . addSprites = function ( scene ) { <nl> - this . geometry = new THREE . BufferGeometry ( ) ; <nl> - this . createBufferAttributes ( ) ; <nl> - var canvas = document . createElement ( ' canvas ' ) ; <nl> - var image = this . image | | canvas ; <nl> - var tex = util_1 . createTexture ( image ) ; <nl> - var pointSize = ( this . sceneIs3D ? this . pointSize3D : this . pointSize2D ) ; <nl> - var imageDim = [ 0 , 0 ] ; <nl> - if ( this . image ) { <nl> - pointSize = IMAGE_SIZE ; <nl> - imageDim = <nl> - this . dataSet . spriteAndMetadataInfo . spriteMetadata . singleImageDim ; <nl> + ScatterPlotVisualizerSprites . prototype . createPointSprites = function ( scene , positions , dataSet ) { <nl> + var geometry = this . createGeometry ( positions . length / XYZ_NUM_ELEMENTS , dataSet ) ; <nl> + var haveImage = ( this . image ! = null ) ; <nl> + this . fog = new THREE . Fog ( 0xFFFFFF ) ; / / unused value , gets overwritten . <nl> + { <nl> + var image = this . image | | document . createElement ( ' canvas ' ) ; <nl> + this . texture = util . createTexture ( image ) ; <nl> } <nl> - this . uniforms = { <nl> - texture : { type : ' t ' , value : tex } , <nl> - imageWidth : { type : ' f ' , value : image . width / imageDim [ 0 ] } , <nl> - imageHeight : { type : ' f ' , value : image . height / imageDim [ 1 ] } , <nl> - fogColor : { type : ' c ' , value : this . fog . color } , <nl> - fogNear : { type : ' f ' , value : this . fog . 
near } , <nl> - fogFar : { type : ' f ' , value : this . fog . far } , <nl> - sizeAttenuation : { type : ' bool ' , value : this . sceneIs3D } , <nl> - isImage : { type : ' bool ' , value : ( this . image ! = null ) } , <nl> - pointSize : { type : ' f ' , value : pointSize } <nl> + var imageDim = [ 1 , 1 ] ; <nl> + { <nl> + var spriteMetadata = dataSet . spriteAndMetadataInfo . spriteMetadata ; <nl> + if ( haveImage & & spriteMetadata ) { <nl> + imageDim [ 0 ] = this . image . width / spriteMetadata . singleImageDim [ 0 ] ; <nl> + imageDim [ 1 ] = this . image . height / spriteMetadata . singleImageDim [ 1 ] ; <nl> + } <nl> + } <nl> + var uniforms = { <nl> + texture : { type : ' t ' } , <nl> + imageWidth : { type : ' f ' , value : imageDim [ 0 ] } , <nl> + imageHeight : { type : ' f ' , value : imageDim [ 1 ] } , <nl> + fogColor : { type : ' c ' } , <nl> + fogNear : { type : ' f ' } , <nl> + fogFar : { type : ' f ' } , <nl> + isImage : { type : ' bool ' , value : haveImage } , <nl> + sizeAttenuation : { type : ' bool ' } , <nl> + pointSize : { type : ' f ' } <nl> } ; <nl> - var haveImage = ( this . image ! = null ) ; <nl> this . renderMaterial = new THREE . ShaderMaterial ( { <nl> - uniforms : this . uniforms , <nl> + uniforms : THREE . UniformsUtils . clone ( uniforms ) , <nl> vertexShader : VERTEX_SHADER , <nl> fragmentShader : FRAGMENT_SHADER , <nl> transparent : ! haveImage , <nl> depthTest : haveImage , <nl> depthWrite : haveImage , <nl> fog : true , <nl> - blending : ( this . image ? THREE . NormalBlending : THREE . MultiplyBlending ) , <nl> + blending : THREE . MultiplyBlending , <nl> } ) ; <nl> this . pickingMaterial = new THREE . ShaderMaterial ( { <nl> - uniforms : this . uniforms , <nl> + uniforms : THREE . UniformsUtils . clone ( uniforms ) , <nl> vertexShader : VERTEX_SHADER , <nl> fragmentShader : FRAGMENT_SHADER_PICKING , <nl> transparent : true , <nl> < h2 > Settings < / h2 > <nl> fog : false , <nl> blending : THREE . NormalBlending , <nl> } ) ; <nl> - this . points = new THREE . Points ( this . geometry , this . renderMaterial ) ; <nl> + this . points = new THREE . Points ( geometry , this . renderMaterial ) ; <nl> + this . points . frustumCulled = false ; <nl> scene . add ( this . points ) ; <nl> } ; <nl> - ScatterPlotVisualizerSprites . prototype . calibratePointSize = function ( ) { <nl> - var numPts = this . dataSet . points . length ; <nl> - var scaleConstant = 200 ; <nl> - var logBase = 8 ; <nl> - / / Scale point size inverse - logarithmically to the number of points . <nl> - this . pointSize3D = scaleConstant / Math . log ( numPts ) / Math . log ( logBase ) ; <nl> - this . pointSize2D = this . pointSize3D / 1 . 5 ; <nl> - } ; <nl> - ScatterPlotVisualizerSprites . prototype . setFogDistances = function ( nearestPointZ , farthestPointZ ) { <nl> - if ( this . sceneIs3D ) { <nl> - this . fog . near = nearestPointZ ; <nl> - / / If there are fewer points we want less fog . We do this <nl> - / / by making the " far " value ( that is , the distance from the camera to the <nl> - / / far edge of the fog ) proportional to the number of points . <nl> - var multiplier = 2 - <nl> - Math . min ( this . dataSet . points . length , NUM_POINTS_FOG_THRESHOLD ) / <nl> - NUM_POINTS_FOG_THRESHOLD ; <nl> - this . fog . far = farthestPointZ * multiplier ; <nl> - } <nl> - else { <nl> - this . fog . near = Infinity ; <nl> - this . fog . far = Infinity ; <nl> + ScatterPlotVisualizerSprites . prototype . calculatePointSize = function ( sceneIs3D ) { <nl> + if ( this . image ! 
= null ) { <nl> + return IMAGE_SIZE ; <nl> } <nl> + var n = this . worldSpacePointPositions . length / XYZ_NUM_ELEMENTS ; <nl> + var SCALE = 200 ; <nl> + var LOG_BASE = 8 ; <nl> + var DIVISOR = 1 . 5 ; <nl> + / / Scale point size inverse - logarithmically to the number of points . <nl> + var pointSize = SCALE / Math . log ( n ) / Math . log ( LOG_BASE ) ; <nl> + return sceneIs3D ? pointSize : ( pointSize / DIVISOR ) ; <nl> } ; <nl> / * * <nl> * Set up buffer attributes to be used for the points / images . <nl> * / <nl> - ScatterPlotVisualizerSprites . prototype . createBufferAttributes = function ( ) { <nl> - var numPoints = this . dataSet . points . length ; <nl> + ScatterPlotVisualizerSprites . prototype . createGeometry = function ( pointCount , dataSet ) { <nl> + var n = pointCount ; <nl> / / Fill pickingColors with each point ' s unique id as its color . <nl> - this . pickingColors = new Float32Array ( numPoints * RGB_NUM_ELEMENTS ) ; <nl> + this . pickingColors = new Float32Array ( n * RGB_NUM_ELEMENTS ) ; <nl> { <nl> var dst = 0 ; <nl> - for ( var i = 0 ; i < numPoints ; i + + ) { <nl> + for ( var i = 0 ; i < n ; i + + ) { <nl> var c = new THREE . Color ( i ) ; <nl> this . pickingColors [ dst + + ] = c . r ; <nl> this . pickingColors [ dst + + ] = c . g ; <nl> this . pickingColors [ dst + + ] = c . b ; <nl> } <nl> } <nl> - var colors = new THREE . BufferAttribute ( this . pickingColors , RGB_NUM_ELEMENTS ) ; <nl> - var scaleFactors = new THREE . BufferAttribute ( new Float32Array ( numPoints ) , INDEX_NUM_ELEMENTS ) ; <nl> - var positions = new THREE . BufferAttribute ( new Float32Array ( numPoints * XYZ_NUM_ELEMENTS ) , XYZ_NUM_ELEMENTS ) ; <nl> - / * * <nl> - * The actual indices of the points which we use for sizeAttenuation in <nl> - * the shader . <nl> - * / <nl> - var indicesShader = new THREE . BufferAttribute ( new Float32Array ( numPoints ) , 1 ) ; <nl> - / / Create the array of indices . <nl> - for ( var i = 0 ; i < numPoints ; i + + ) { <nl> - indicesShader . setX ( i , this . dataSet . points [ i ] . index ) ; <nl> + var spriteIndexes = new THREE . BufferAttribute ( new Float32Array ( n ) , INDEX_NUM_ELEMENTS ) ; <nl> + for ( var i = 0 ; i < n ; i + + ) { <nl> + spriteIndexes . setX ( i , dataSet . points [ i ] . index ) ; <nl> } <nl> - this . geometry . addAttribute ( ' position ' , positions ) ; <nl> - this . geometry . addAttribute ( ' color ' , colors ) ; <nl> - this . geometry . addAttribute ( ' vertexIndex ' , indicesShader ) ; <nl> - this . geometry . addAttribute ( ' scaleFactor ' , scaleFactors ) ; <nl> + var geometry = new THREE . BufferGeometry ( ) ; <nl> + geometry . addAttribute ( ' position ' , new THREE . BufferAttribute ( null , XYZ_NUM_ELEMENTS ) ) ; <nl> + geometry . addAttribute ( ' color ' , new THREE . BufferAttribute ( null , RGB_NUM_ELEMENTS ) ) ; <nl> + geometry . addAttribute ( ' scaleFactor ' , new THREE . BufferAttribute ( null , INDEX_NUM_ELEMENTS ) ) ; <nl> + geometry . addAttribute ( ' vertexIndex ' , spriteIndexes ) ; <nl> + return geometry ; <nl> } ; <nl> - ScatterPlotVisualizerSprites . prototype . updatePositionsArray = function ( dataSet ) { <nl> - if ( this . geometry = = null ) { <nl> - return ; <nl> + ScatterPlotVisualizerSprites . prototype . setFogDistances = function ( sceneIs3D , nearestPointZ , farthestPointZ ) { <nl> + if ( sceneIs3D ) { <nl> + var n = this . worldSpacePointPositions . length / XYZ_NUM_ELEMENTS ; <nl> + this . fog . near = nearestPointZ ; <nl> + / / If there are fewer points we want less fog . 
We do this <nl> + / / by making the " far " value ( that is , the distance from the camera to the <nl> + / / far edge of the fog ) proportional to the number of points . <nl> + var multiplier = 2 - Math . min ( n , NUM_POINTS_FOG_THRESHOLD ) / NUM_POINTS_FOG_THRESHOLD ; <nl> + this . fog . far = farthestPointZ * multiplier ; <nl> } <nl> - var n = dataSet . points . length ; <nl> - var positions = this . geometry . getAttribute ( ' position ' ) ; <nl> - positions . array = new Float32Array ( n * XYZ_NUM_ELEMENTS ) ; <nl> - for ( var i = 0 ; i < n ; i + + ) { <nl> - var pp = dataSet . points [ i ] . projectedPoint ; <nl> - positions . setXYZ ( i , pp [ 0 ] , pp [ 1 ] , pp [ 2 ] ) ; <nl> + else { <nl> + this . fog . near = Infinity ; <nl> + this . fog . far = Infinity ; <nl> } <nl> - positions . needsUpdate = true ; <nl> - } ; <nl> - ScatterPlotVisualizerSprites . prototype . removeAllFromScene = function ( scene ) { <nl> - scene . remove ( this . points ) ; <nl> } ; <nl> - ScatterPlotVisualizerSprites . prototype . onDataSet = function ( dataSet ) { <nl> - this . dataSet = dataSet ; <nl> - this . image = this . dataSet . spriteAndMetadataInfo . spriteImage ; <nl> - this . points = null ; <nl> - if ( this . geometry ) { <nl> - this . geometry . dispose ( ) ; <nl> + ScatterPlotVisualizerSprites . prototype . dispose = function ( ) { <nl> + this . scene . remove ( this . points ) ; <nl> + this . points . geometry . dispose ( ) ; <nl> + if ( this . renderMaterial . uniforms . texture . value ) { <nl> + this . renderMaterial . uniforms . texture . value . dispose ( ) ; <nl> } <nl> - this . geometry = null ; <nl> - this . calibratePointSize ( ) ; <nl> + this . points = null ; <nl> + this . renderMaterial = null ; <nl> + this . pickingMaterial = null ; <nl> + this . worldSpacePointPositions = null ; <nl> + this . image = null ; <nl> } ; <nl> - ScatterPlotVisualizerSprites . prototype . onRecreateScene = function ( scene , sceneIs3D , backgroundColor ) { <nl> - this . sceneIs3D = sceneIs3D ; <nl> - this . fog = new THREE . Fog ( backgroundColor ) ; <nl> - scene . fog = this . fog ; <nl> - if ( this . dataSet ) { <nl> - this . addSprites ( scene ) ; <nl> - this . updatePositionsArray ( this . dataSet ) ; <nl> - } <nl> + ScatterPlotVisualizerSprites . prototype . setScene = function ( scene ) { <nl> + this . scene = scene ; <nl> } ; <nl> - ScatterPlotVisualizerSprites . prototype . onUpdate = function ( dataSet ) { <nl> - this . updatePositionsArray ( dataSet ) ; <nl> + ScatterPlotVisualizerSprites . prototype . onPointPositionsChanged = function ( newPositions , dataSet ) { <nl> + if ( this . points ! = null ) { <nl> + var notEnoughSpace = ( this . pickingColors . length < newPositions . length ) ; <nl> + var newImage = ( this . image ! = = dataSet . spriteAndMetadataInfo . spriteImage ) ; <nl> + if ( notEnoughSpace | | newImage ) { <nl> + this . dispose ( ) ; <nl> + } <nl> + } <nl> + this . image = dataSet . spriteAndMetadataInfo . spriteImage ; <nl> + this . worldSpacePointPositions = newPositions ; <nl> + if ( this . points = = null ) { <nl> + this . createPointSprites ( this . scene , newPositions , dataSet ) ; <nl> + } <nl> + if ( newPositions ) { <nl> + var positions = this . points . geometry <nl> + . getAttribute ( ' position ' ) ; <nl> + positions . array = newPositions ; <nl> + positions . needsUpdate = true ; <nl> + } <nl> } ; <nl> - ScatterPlotVisualizerSprites . prototype . onResize = function ( newWidth , newHeight ) { } ; <nl> - ScatterPlotVisualizerSprites . prototype . 
onSetLabelAccessor = function ( labelAccessor ) { } ; <nl> ScatterPlotVisualizerSprites . prototype . onPickingRender = function ( rc ) { <nl> - if ( ! this . geometry ) { <nl> + if ( ! this . points ) { <nl> return ; <nl> } <nl> + var sceneIs3D = ( rc . cameraType = = = renderContext_1 . CameraType . Perspective ) ; <nl> + this . pickingMaterial . uniforms . sizeAttenuation . value = sceneIs3D ; <nl> + this . pickingMaterial . uniforms . pointSize . value = <nl> + this . calculatePointSize ( sceneIs3D ) ; <nl> this . points . material = this . pickingMaterial ; <nl> - var colors = this . geometry . getAttribute ( ' color ' ) ; <nl> + var colors = this . points . geometry <nl> + . getAttribute ( ' color ' ) ; <nl> colors . array = this . pickingColors ; <nl> colors . needsUpdate = true ; <nl> - var scaleFactors = this . geometry . getAttribute ( ' scaleFactor ' ) ; <nl> + var scaleFactors = this . points . geometry <nl> + . getAttribute ( ' scaleFactor ' ) ; <nl> scaleFactors . array = rc . pointScaleFactors ; <nl> scaleFactors . needsUpdate = true ; <nl> } ; <nl> ScatterPlotVisualizerSprites . prototype . onRender = function ( rc ) { <nl> - if ( ! this . geometry ) { <nl> + if ( ! this . points ) { <nl> return ; <nl> } <nl> - this . setFogDistances ( rc . nearestCameraSpacePointZ , rc . farthestCameraSpacePointZ ) ; <nl> + var sceneIs3D = ( rc . camera instanceof THREE . PerspectiveCamera ) ; <nl> + this . setFogDistances ( sceneIs3D , rc . nearestCameraSpacePointZ , rc . farthestCameraSpacePointZ ) ; <nl> + this . scene . fog = this . fog ; <nl> + this . scene . fog . color = new THREE . Color ( rc . backgroundColor ) ; <nl> + this . renderMaterial . uniforms . fogColor . value = this . scene . fog . color ; <nl> + this . renderMaterial . uniforms . fogNear . value = this . fog . near ; <nl> + this . renderMaterial . uniforms . fogFar . value = this . fog . far ; <nl> + this . renderMaterial . uniforms . texture . value = this . texture ; <nl> + this . renderMaterial . uniforms . sizeAttenuation . value = sceneIs3D ; <nl> + this . renderMaterial . uniforms . pointSize . value = <nl> + this . calculatePointSize ( sceneIs3D ) ; <nl> this . points . material = this . renderMaterial ; <nl> - this . renderMaterial . uniforms . isImage . value = ! ! this . image ; <nl> - var colors = this . geometry . getAttribute ( ' color ' ) ; <nl> + var colors = this . points . geometry <nl> + . getAttribute ( ' color ' ) ; <nl> this . renderColors = rc . pointColors ; <nl> colors . array = this . renderColors ; <nl> colors . needsUpdate = true ; <nl> - var scaleFactors = this . geometry . getAttribute ( ' scaleFactor ' ) ; <nl> + var scaleFactors = this . points . geometry <nl> + . getAttribute ( ' scaleFactor ' ) ; <nl> scaleFactors . array = rc . pointScaleFactors ; <nl> scaleFactors . needsUpdate = true ; <nl> } ; <nl> + ScatterPlotVisualizerSprites . prototype . onResize = function ( newWidth , newHeight ) { } ; <nl> + ScatterPlotVisualizerSprites . prototype . onSetLabelAccessor = function ( labelAccessor ) { } ; <nl> return ScatterPlotVisualizerSprites ; <nl> } ( ) ) ; <nl> exports . ScatterPlotVisualizerSprites = ScatterPlotVisualizerSprites ; <nl> <nl> - } , { " . / util " : 24 } ] , 21 : [ function ( require , module , exports ) { <nl> + } , { " . / renderContext " : 14 , " . / util " : 22 } ] , 20 : [ function ( require , module , exports ) { <nl> / * Copyright 2016 The TensorFlow Authors . All Rights Reserved . <nl> <nl> Licensed under the Apache License , Version 2 . 
0 ( the " License " ) ; <nl> < h2 > Settings < / h2 > <nl> limitations under the License . <nl> = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = * / <nl> " use strict " ; <nl> - var TRACE_DEFAULT_OPACITY = . 2 ; <nl> - var TRACE_DEFAULT_LINEWIDTH = 2 ; <nl> - var TRACE_SELECTED_OPACITY = . 9 ; <nl> - var TRACE_SELECTED_LINEWIDTH = 3 ; <nl> - var TRACE_DESELECTED_OPACITY = . 05 ; <nl> + var util = require ( ' . / util ' ) ; <nl> var RGB_NUM_ELEMENTS = 3 ; <nl> var XYZ_NUM_ELEMENTS = 3 ; <nl> / * * <nl> * Renders ' traces ' ( polylines ) that connect multiple points in the dataset <nl> * / <nl> var ScatterPlotVisualizerTraces = ( function ( ) { <nl> - function ScatterPlotVisualizerTraces ( selectionContext ) { <nl> - var _this = this ; <nl> + function ScatterPlotVisualizerTraces ( ) { <nl> this . tracePositionBuffer = { } ; <nl> this . traceColorBuffer = { } ; <nl> - selectionContext . registerSelectionChangedListener ( function ( s ) { return _this . onSelectionChanged ( s ) ; } ) ; <nl> } <nl> - / * * <nl> - * Create line traces between connected points and instantiate the geometry . <nl> - * / <nl> - ScatterPlotVisualizerTraces . prototype . addTraces = function ( scene ) { <nl> + ScatterPlotVisualizerTraces . prototype . updateTraceIndicesInDataSet = function ( ds ) { <nl> + for ( var i = 0 ; i < ds . traces . length ; i + + ) { <nl> + var trace = ds . traces [ i ] ; <nl> + for ( var j = 0 ; j < trace . pointIndices . length - 1 ; j + + ) { <nl> + ds . points [ trace . pointIndices [ j ] ] . traceIndex = i ; <nl> + ds . points [ trace . pointIndices [ j + 1 ] ] . traceIndex = i ; <nl> + } <nl> + } <nl> + } ; <nl> + ScatterPlotVisualizerTraces . prototype . createTraces = function ( scene ) { <nl> if ( ! this . dataSet | | ! this . dataSet . traces ) { <nl> return ; <nl> } <nl> + this . updateTraceIndicesInDataSet ( this . dataSet ) ; <nl> this . traces = [ ] ; <nl> for ( var i = 0 ; i < this . dataSet . traces . length ; i + + ) { <nl> - var dataTrace = this . dataSet . traces [ i ] ; <nl> - for ( var j = 0 ; j < dataTrace . pointIndices . length - 1 ; j + + ) { <nl> - this . dataSet . points [ dataTrace . pointIndices [ j ] ] . traceIndex = i ; <nl> - this . dataSet . points [ dataTrace . pointIndices [ j + 1 ] ] . traceIndex = i ; <nl> - } <nl> var geometry = new THREE . BufferGeometry ( ) ; <nl> geometry . addAttribute ( ' position ' , this . tracePositionBuffer [ i ] ) ; <nl> - this . tracePositionBuffer [ i ] . needsUpdate = true ; <nl> geometry . addAttribute ( ' color ' , this . traceColorBuffer [ i ] ) ; <nl> - this . traceColorBuffer [ i ] . needsUpdate = true ; <nl> - / / We use the same material for every line . <nl> var material = new THREE . LineBasicMaterial ( { <nl> - linewidth : TRACE_DEFAULT_LINEWIDTH , <nl> - opacity : TRACE_DEFAULT_OPACITY , <nl> + linewidth : 1 , <nl> + opacity : 1 . 0 , <nl> transparent : true , <nl> vertexColors : THREE . VertexColors <nl> } ) ; <nl> var trace = new THREE . LineSegments ( geometry , material ) ; <nl> + trace . frustumCulled = false ; <nl> this . traces . push ( trace ) ; <nl> scene . add ( trace ) ; <nl> } <nl> } ; <nl> - ScatterPlotVisualizerTraces . prototype . resetTraces = function ( ) { <nl> + ScatterPlotVisualizerTraces . prototype . dispose = function ( ) { <nl> if ( ! this . traces ) { <nl> return ; <nl> } <nl> for ( var i = 0 ; i < this . traces . length ; i + + ) { <nl> - this . traces [ i ] . material . 
opacity = TRACE_DEFAULT_OPACITY ; <nl> - this . traces [ i ] . material . linewidth = <nl> - TRACE_DEFAULT_LINEWIDTH ; <nl> - this . traces [ i ] . material . needsUpdate = true ; <nl> + this . scene . remove ( this . traces [ i ] ) ; <nl> + this . traces [ i ] . geometry . dispose ( ) ; <nl> } <nl> + this . traces = null ; <nl> + this . tracePositionBuffer = { } ; <nl> + this . traceColorBuffer = { } ; <nl> } ; <nl> - ScatterPlotVisualizerTraces . prototype . removeAllFromScene = function ( scene ) { <nl> - if ( ! this . traces ) { <nl> - return ; <nl> - } <nl> - for ( var i = 0 ; i < this . traces . length ; i + + ) { <nl> - scene . remove ( this . traces [ i ] ) ; <nl> - } <nl> - this . traces = [ ] ; <nl> + ScatterPlotVisualizerTraces . prototype . setScene = function ( scene ) { <nl> + this . scene = scene ; <nl> } ; <nl> - ScatterPlotVisualizerTraces . prototype . onDataSet = function ( dataSet ) { <nl> + ScatterPlotVisualizerTraces . prototype . onPointPositionsChanged = function ( newPositions , dataSet ) { <nl> this . dataSet = dataSet ; <nl> - if ( dataSet ) { <nl> + if ( dataSet = = null ) { <nl> + return ; <nl> + } <nl> + if ( ( this . traces = = null ) | | <nl> + ( this . traces . length ! = = dataSet . traces . length ) ) { <nl> + if ( this . traces ! = null ) { <nl> + this . dispose ( ) ; <nl> + } <nl> / / Set up the position buffer arrays for each trace . <nl> for ( var i = 0 ; i < this . dataSet . traces . length ; i + + ) { <nl> var dataTrace = this . dataSet . traces [ i ] ; <nl> < h2 > Settings < / h2 > <nl> new THREE . BufferAttribute ( colors , RGB_NUM_ELEMENTS ) ; <nl> } <nl> } <nl> - } ; <nl> - ScatterPlotVisualizerTraces . prototype . onSelectionChanged = function ( selection ) { <nl> - this . resetTraces ( ) ; <nl> - if ( selection . length > 0 ) { <nl> - var selectedIndex = selection [ 0 ] ; <nl> - var traceIndex = this . dataSet . points [ selectedIndex ] . traceIndex ; <nl> - if ( traceIndex ) { <nl> - for ( var i = 0 ; i < this . traces . length ; i + + ) { <nl> - this . traces [ i ] . material . opacity = TRACE_DESELECTED_OPACITY ; <nl> - this . traces [ i ] . material . needsUpdate = true ; <nl> - } <nl> - this . traces [ traceIndex ] . material . opacity = TRACE_SELECTED_OPACITY ; <nl> - this . traces [ traceIndex ] . material <nl> - . linewidth = TRACE_SELECTED_LINEWIDTH ; <nl> - this . traces [ traceIndex ] . material . needsUpdate = true ; <nl> - } <nl> - } <nl> - } ; <nl> - ScatterPlotVisualizerTraces . prototype . onRecreateScene = function ( scene , sceneIs3D , backgroundColor ) { <nl> - this . addTraces ( scene ) ; <nl> - } ; <nl> - ScatterPlotVisualizerTraces . prototype . onUpdate = function ( ) { <nl> - if ( ! this . dataSet ) { <nl> - return ; <nl> - } <nl> for ( var i = 0 ; i < this . dataSet . traces . length ; i + + ) { <nl> var dataTrace = this . dataSet . traces [ i ] ; <nl> - var vertexCount = 0 ; <nl> + var src = 0 ; <nl> for ( var j = 0 ; j < dataTrace . pointIndices . length - 1 ; j + + ) { <nl> - var point1 = this . dataSet . points [ dataTrace . pointIndices [ j ] ] ; <nl> - var point2 = this . dataSet . points [ dataTrace . pointIndices [ j + 1 ] ] ; <nl> - this . tracePositionBuffer [ i ] . setXYZ ( vertexCount , point1 . projectedPoint [ 0 ] , point1 . projectedPoint [ 1 ] , point1 . projectedPoint [ 2 ] ) ; <nl> - this . tracePositionBuffer [ i ] . setXYZ ( vertexCount + 1 , point2 . projectedPoint [ 0 ] , point2 . projectedPoint [ 1 ] , point2 . projectedPoint [ 2 ] ) ; <nl> - vertexCount + = 2 ; <nl> + var p1Index = dataTrace . 
pointIndices [ j ] ; <nl> + var p2Index = dataTrace . pointIndices [ j + 1 ] ; <nl> + var p1 = util . vector3FromPackedArray ( newPositions , p1Index ) ; <nl> + var p2 = util . vector3FromPackedArray ( newPositions , p2Index ) ; <nl> + this . tracePositionBuffer [ i ] . setXYZ ( src , p1 . x , p1 . y , p1 . z ) ; <nl> + this . tracePositionBuffer [ i ] . setXYZ ( src + 1 , p2 . x , p2 . y , p2 . z ) ; <nl> + src + = 2 ; <nl> } <nl> - } <nl> - for ( var i = 0 ; i < this . dataSet . traces . length ; i + + ) { <nl> this . tracePositionBuffer [ i ] . needsUpdate = true ; <nl> } <nl> + if ( this . traces = = null ) { <nl> + this . createTraces ( this . scene ) ; <nl> + } <nl> } ; <nl> ScatterPlotVisualizerTraces . prototype . onRender = function ( renderContext ) { <nl> - for ( var i = 0 ; i < this . dataSet . traces . length ; i + + ) { <nl> + if ( this . traces = = null ) { <nl> + return ; <nl> + } <nl> + for ( var i = 0 ; i < this . traces . length ; i + + ) { <nl> + this . traces [ i ] . material . opacity = renderContext . traceOpacities [ i ] ; <nl> + this . traces [ i ] . material . linewidth = <nl> + renderContext . traceWidths [ i ] ; <nl> this . traceColorBuffer [ i ] . array = renderContext . traceColors [ i ] ; <nl> this . traceColorBuffer [ i ] . needsUpdate = true ; <nl> } <nl> < h2 > Settings < / h2 > <nl> } ( ) ) ; <nl> exports . ScatterPlotVisualizerTraces = ScatterPlotVisualizerTraces ; <nl> <nl> - } , { } ] , 22 : [ function ( require , module , exports ) { <nl> - arguments [ 4 ] [ 9 ] [ 0 ] . apply ( exports , arguments ) <nl> - } , { " dup " : 9 } ] , 23 : [ function ( require , module , exports ) { <nl> + } , { " . / util " : 22 } ] , 21 : [ function ( require , module , exports ) { <nl> / * Copyright 2016 The TensorFlow Authors . All Rights Reserved . <nl> <nl> Licensed under the Apache License , Version 2 . 0 ( the " License " ) ; <nl> < h2 > Settings < / h2 > <nl> } <nl> } <nl> <nl> - } , { } ] , 24 : [ function ( require , module , exports ) { <nl> + } , { } ] , 22 : [ function ( require , module , exports ) { <nl> / * Copyright 2016 The TensorFlow Authors . All Rights Reserved . <nl> <nl> Licensed under the Apache License , Version 2 . 0 ( the " License " ) ; <nl> < h2 > Settings < / h2 > <nl> return array ; <nl> } <nl> exports . shuffle = shuffle ; <nl> - / * * Retrieves a projected point from the data set as a THREE . js vector * / <nl> - function getProjectedPointFromIndex ( dataSet , i ) { <nl> - var pp = dataSet . points [ i ] . projectedPoint ; <nl> - var v = new THREE . Vector3 ( pp [ 0 ] , pp [ 1 ] , pp [ 2 ] ) ; <nl> - return v ; <nl> - } <nl> - exports . getProjectedPointFromIndex = getProjectedPointFromIndex ; <nl> / * * Projects a 3d point into screen space * / <nl> function vector3DToScreenCoords ( cam , w , h , v ) { <nl> var dpr = window . devicePixelRatio ; <nl> < h2 > Settings < / h2 > <nl> return coords ; <nl> } <nl> exports . vector3DToScreenCoords = vector3DToScreenCoords ; <nl> + / * * Loads 3 contiguous elements from a packed xyz array into a Vector3 . * / <nl> + function vector3FromPackedArray ( a , pointIndex ) { <nl> + var offset = pointIndex * 3 ; <nl> + return new THREE . Vector3 ( a [ offset ] , a [ offset + 1 ] , a [ offset + 2 ] ) ; <nl> + } <nl> + exports . vector3FromPackedArray = vector3FromPackedArray ; <nl> / * * <nl> * Gets the camera - space z coordinates of the nearest and farthest points . <nl> * Ignores points that are behind the camera . 
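<nl> * <nl> * Illustrative usage ( editor ' s sketch , not part of this commit ) , assuming <nl> * positions is a packed Float32Array of world - space xyz triples : <nl> * <nl> * var nearFar = getNearFarPoints ( positions , camera . position , cameraTarget ) ; <nl> * var shortestDist = nearFar [ 0 ] , furthestDist = nearFar [ 1 ] ;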
<nl> * / <nl> - function getNearFarPoints ( dataSet , cameraPos , cameraTarget ) { <nl> + function getNearFarPoints ( worldSpacePoints , cameraPos , cameraTarget ) { <nl> var shortestDist = Infinity ; <nl> var furthestDist = 0 ; <nl> var camToTarget = new THREE . Vector3 ( ) . copy ( cameraTarget ) . sub ( cameraPos ) ; <nl> - for ( var i = 0 ; i < dataSet . points . length ; i + + ) { <nl> - var point = getProjectedPointFromIndex ( dataSet , i ) ; <nl> - var camToPoint = new THREE . Vector3 ( ) . copy ( point ) . sub ( cameraPos ) ; <nl> - if ( camToTarget . dot ( camToPoint ) < 0 ) { <nl> + var camPlaneNormal = new THREE . Vector3 ( ) . copy ( camToTarget ) . normalize ( ) ; <nl> + var n = worldSpacePoints . length / 3 ; <nl> + var src = 0 ; <nl> + var p = new THREE . Vector3 ( ) ; <nl> + var camToPoint = new THREE . Vector3 ( ) ; <nl> + for ( var i = 0 ; i < n ; i + + ) { <nl> + p . x = worldSpacePoints [ src ] ; <nl> + p . y = worldSpacePoints [ src + 1 ] ; <nl> + p . z = worldSpacePoints [ src + 2 ] ; <nl> + src + = 3 ; <nl> + camToPoint . copy ( p ) . sub ( cameraPos ) ; <nl> + var dist = camPlaneNormal . dot ( camToPoint ) ; <nl> + if ( dist < 0 ) { <nl> continue ; <nl> } <nl> - var distToCam = cameraPos . distanceToSquared ( point ) ; <nl> - furthestDist = Math . max ( furthestDist , distToCam ) ; <nl> - shortestDist = Math . min ( shortestDist , distToCam ) ; <nl> + furthestDist = ( dist > furthestDist ) ? dist : furthestDist ; <nl> + shortestDist = ( dist < shortestDist ) ? dist : shortestDist ; <nl> } <nl> - furthestDist = Math . sqrt ( furthestDist ) ; <nl> - shortestDist = Math . sqrt ( shortestDist ) ; <nl> return [ shortestDist , furthestDist ] ; <nl> } <nl> exports . getNearFarPoints = getNearFarPoints ; <nl>
< h2 > Settings < / h2 > <nl>
 } ) ; <nl> } <nl> exports . runAsyncTask = runAsyncTask ; <nl> + / * * <nl> + * Parses the URL for query parameters , e . g . ? foo = 1 & bar = 2 will return <nl> + * { ' foo ' : ' 1 ' , ' bar ' : ' 2 ' } . <nl> + * @ param url The URL to parse . <nl> + * @ return A map of queryParam key to its value . <nl> + * / <nl> + function getURLParams ( url ) { <nl> + if ( ! url ) { <nl> + return { } ; <nl> + } <nl> + var queryString = url . indexOf ( ' ? ' ) ! = = - 1 ? url . split ( ' ? ' ) [ 1 ] : url ; <nl> + if ( queryString . indexOf ( ' # ' ) ! = = - 1 ) { <nl> + queryString = queryString . split ( ' # ' ) [ 0 ] ; <nl> + } <nl> + var queryEntries = queryString . split ( ' & ' ) ; <nl> + var queryParams = { } ; <nl> + for ( var i = 0 ; i < queryEntries . length ; i + + ) { <nl> + var queryEntryComponents = queryEntries [ i ] . split ( ' = ' ) ; <nl> + queryParams [ queryEntryComponents [ 0 ] . toLowerCase ( ) ] = <nl> + decodeURIComponent ( queryEntryComponents [ 1 ] ) ; <nl> + } <nl> + return queryParams ; <nl> + } <nl> + exports . getURLParams = getURLParams ; <nl> <nl> - } , { " . / logging " : 12 } ] , 25 : [ function ( require , module , exports ) { <nl> + } , { " . / logging " : 11 } ] , 23 : [ function ( require , module , exports ) { <nl> / * Copyright 2016 The TensorFlow Authors . All Rights Reserved . <nl> <nl> Licensed under the Apache License , Version 2 . 0 ( the " License " ) ; <nl>
< h2 > Settings < / h2 > <nl>
 / * * Returns the sum of two vectors , i . e . a + b * / <nl> function add ( a , b ) { <nl> util_1 . assert ( a . length = = = b . length , ' Vectors a and b must be of same length ' ) ; <nl> - var result = new Array ( a . length ) ; <nl> + var result = new Float32Array ( a . length ) ; <nl> for ( var i = 0 ; i < a .
length ; + + i ) { <nl> result [ i ] = a [ i ] + b [ i ] ; <nl> } <nl>
< h2 > Settings < / h2 > <nl>
 / * * Subtracts vector b from vector a , i . e . returns a - b * / <nl> function sub ( a , b ) { <nl> util_1 . assert ( a . length = = = b . length , ' Vectors a and b must be of same length ' ) ; <nl> - var result = new Array ( a . length ) ; <nl> + var result = new Float32Array ( a . length ) ; <nl> for ( var i = 0 ; i < a . length ; + + i ) { <nl> result [ i ] = a [ i ] - b [ i ] ; <nl> } <nl>
< h2 > Settings < / h2 > <nl>
 var N = vectors . length ; <nl> var newVectors = new Array ( N ) ; <nl> for ( var i = 0 ; i < N ; + + i ) { <nl> - newVectors [ i ] = new Array ( newDim ) ; <nl> + newVectors [ i ] = new Float32Array ( newDim ) ; <nl> } <nl> / / Make nDim projections . <nl> for ( var k = 0 ; k < newDim ; + + k ) { <nl>
< h2 > Settings < / h2 > <nl>
 return [ dot ( a , dir1 ) , dot ( a , dir2 ) ] ; <nl> } <nl> exports . project2d = project2d ; <nl> - / * * Returns a vector filled with zeros * / <nl> - function zeros ( length ) { <nl> - var result = new Array ( length ) ; <nl> - for ( var i = 0 ; i < length ; + + i ) { <nl> - result [ i ] = 0 ; <nl> - } <nl> - return result ; <nl> - } <nl> - exports . zeros = zeros ; <nl> / * * <nl> * Computes the centroid of the data points . If the provided data points are not <nl> * vectors , an accessor function needs to be provided . <nl>
< h2 > Settings < / h2 > <nl>
 accessor = function ( a ) { return a ; } ; <nl> } <nl> util_1 . assert ( dataPoints . length > = 1 , ' ` vectors ` must be of length > = 1 ' ) ; <nl> - var centroid = zeros ( accessor ( dataPoints [ 0 ] ) . length ) ; <nl> + var centroid = new Float32Array ( accessor ( dataPoints [ 0 ] ) . length ) ; <nl> for ( var i = 0 ; i < dataPoints . length ; + + i ) { <nl> var dataPoint = dataPoints [ i ] ; <nl> var vector = accessor ( dataPoint ) ; <nl>
< h2 > Settings < / h2 > <nl>
 * / <nl> function rn ( size ) { <nl> var normal = d3 . random . normal ( ) ; <nl> - var result = new Array ( size ) ; <nl> + var result = new Float32Array ( size ) ; <nl> for ( var i = 0 ; i < size ; + + i ) { <nl> result [ i ] = normal ( ) ; <nl> } <nl>
< h2 > Settings < / h2 > <nl>
 } <nl> exports . transposeTypedArray = transposeTypedArray ; <nl> <nl> - } , { " . / util " : 24 } ] , 26 : [ function ( require , module , exports ) { <nl> + } , { " . / util " : 22 } ] , 24 : [ function ( require , module , exports ) { <nl> " use strict " ; <nl> var __extends = ( this & & this . __extends ) | | function ( d , b ) { <nl> for ( var p in b ) if ( b . hasOwnProperty ( p ) ) d [ p ] = b [ p ] ; <nl> function __ ( ) { this . constructor = d ; } <nl> d . prototype = b = = = null ? Object . create ( b ) : ( __ . prototype = b . prototype , new __ ( ) ) ; <nl> } ; <nl> + var logging = require ( ' . / logging ' ) ; <nl> / / tslint : disable - next - line : no - unused - variable <nl> var vz_projector_util_1 = require ( ' . / vz - projector - util ' ) ; <nl> / / tslint : disable - next - line <nl> exports . BookmarkPanelPolymer = vz_projector_util_1 . PolymerElement ( { <nl> is : ' vz - projector - bookmark - panel ' , <nl> - properties : { savedStates : Object , selectedState : Number } <nl> + properties : { <nl> + savedStates : Object , <nl> + / / Keep a separate polymer property because the savedStates doesn ' t change <nl> + / / when adding and removing states .
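<nl> + / / ( Editor ' s note : push ( ) and splice ( ) mutate the savedStates array in <nl> + / / place , so its object reference never changes ; an explicit boolean flag <nl> + / / is the simplest way to keep the empty - state UI in sync . )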
<nl> + hasStates : { type : Boolean , value : false } , <nl> + selectedState : Number <nl> + } <nl> } ) ; <nl> var BookmarkPanel = ( function ( _super ) { <nl> __extends ( BookmarkPanel , _super ) ; <nl> function BookmarkPanel ( ) { <nl> _super . apply ( this , arguments ) ; <nl> + this . hasStates = false ; <nl> } <nl> BookmarkPanel . prototype . ready = function ( ) { <nl> this . dom = d3 . select ( this ) ; <nl> this . savedStates = [ ] ; <nl> this . setupUploadButton ( ) ; <nl> + this . ignoreNextProjectionEvent = false ; <nl> } ; <nl> - BookmarkPanel . prototype . initialize = function ( projector , dataProvider ) { <nl> + BookmarkPanel . prototype . initialize = function ( projector , projectorEventContext ) { <nl> + var _this = this ; <nl> this . projector = projector ; <nl> - this . dataProvider = dataProvider ; <nl> + projectorEventContext . registerProjectionChangedListener ( function ( ) { <nl> + if ( _this . ignoreNextProjectionEvent ) { <nl> + _this . ignoreNextProjectionEvent = false ; <nl> + } <nl> + else { <nl> + _this . clearStateSelection ( ) ; <nl> + } <nl> + } ) ; <nl> } ; <nl> - BookmarkPanel . prototype . setSelectedTensor = function ( run , tensorInfo ) { <nl> + BookmarkPanel . prototype . setSelectedTensor = function ( run , tensorInfo , dataProvider ) { <nl> var _this = this ; <nl> if ( tensorInfo & & tensorInfo . bookmarksPath ) { <nl> this . loadAllStates ( [ ] ) ; <nl> / / Get any bookmarks that may come when the projector starts up . <nl> - this . dataProvider . getBookmarks ( run , tensorInfo . tensorName , function ( bookmarks ) { <nl> + dataProvider . getBookmarks ( run , tensorInfo . tensorName , function ( bookmarks ) { <nl> _this . loadAllStates ( bookmarks ) ; <nl> } ) ; <nl> } <nl>
< h2 > Settings < / h2 > <nl>
 this . notifyPath ( ' savedStates . ' + i + ' . isSelected ' , false , false ) ; <nl> } <nl> this . push ( ' savedStates ' , currentState ) ; <nl> + this . updateHasStates ( ) ; <nl> } ; <nl> / * * Handles a click on the download bookmarks button . * / <nl> BookmarkPanel . prototype . _downloadFile = function ( ) { <nl>
< h2 > Settings < / h2 > <nl>
 fileInput . node ( ) . click ( ) ; <nl> } ; <nl> BookmarkPanel . prototype . setupUploadButton = function ( ) { <nl> + var _this = this ; <nl> / / Show and setup the load view button . <nl> var fileInput = this . dom . select ( ' # state - file ' ) ; <nl> fileInput . on ( ' change ' , function ( ) { <nl>
< h2 > Settings < / h2 > <nl>
 fileReader . onload = function ( evt ) { <nl> var str = evt . target . result ; <nl> var savedStates = JSON . parse ( str ) ; <nl> - this . loadAllStates ( savedStates ) ; <nl> - this . loadSavedState ( 0 ) ; <nl> - } . bind ( this ) ; <nl> + / / Verify the bookmarks match . <nl> + if ( _this . savedStatesValid ( savedStates ) ) { <nl> + _this . loadAllStates ( savedStates ) ; <nl> + _this . loadSavedState ( 0 ) ; <nl> + } <nl> + else { <nl> + logging . setWarningMessage ( " Unable to load bookmarks : wrong dataset , expected dataset " + <nl> + ( " with shape ( " + savedStates [ 0 ] . dataSetDimensions + " ) . " ) ) ; <nl> + } <nl> + } ; <nl> fileReader . readAsText ( file ) ; <nl> - } . bind ( this ) ) ; <nl> + } ) ; <nl> } ; <nl> BookmarkPanel . prototype . loadAllStates = function ( savedStates ) { <nl> for ( var i = 0 ; i < savedStates . length ; i + + ) { <nl> savedStates [ i ] . isSelected = false ; <nl> this . push ( ' savedStates ' , savedStates [ i ] ) ; <nl> } <nl> + this . updateHasStates ( ) ; <nl> } ; <nl> / * * Deselects any selected state . * / <nl> BookmarkPanel . prototype . clearStateSelection = function ( ) { <nl>
< h2 > Settings < / h2 > <nl>
 } ; <nl> / * * Handles a radio button click on a saved state . * / <nl> BookmarkPanel . prototype . _radioButtonHandler = function ( evt ) { <nl> - var index = + evt . target . parentElement . getAttribute ( ' data - index ' ) ; <nl> - this . loadSavedState ( index ) ; <nl> + this . loadSavedState ( this . getParentDataIndex ( evt ) ) ; <nl> } ; <nl> BookmarkPanel . prototype . loadSavedState = function ( index ) { <nl> for ( var i = 0 ; i < this . savedStates . length ; i + + ) { <nl>
< h2 > Settings < / h2 > <nl>
 else if ( index = = = i ) { <nl> this . savedStates [ i ] . isSelected = true ; <nl> this . notifyPath ( ' savedStates . ' + i + ' . isSelected ' , true , false ) ; <nl> - / / Update the world to this state . <nl> + this . ignoreNextProjectionEvent = true ; <nl> this . projector . loadState ( this . savedStates [ i ] ) ; <nl> } <nl> } <nl>
< h2 > Settings < / h2 > <nl>
 * Crawls up the DOM to find an ancestor with a data - index attribute . This is <nl> * used to match events to their bookmark index . <nl> * / <nl> - BookmarkPanel . prototype . _getParentDataIndex = function ( evt ) { <nl> + BookmarkPanel . prototype . getParentDataIndex = function ( evt ) { <nl> for ( var i = 0 ; i < evt . path . length ; i + + ) { <nl> var dataIndex = evt . path [ i ] . getAttribute ( ' data - index ' ) ; <nl> if ( dataIndex ! = null ) { <nl>
< h2 > Settings < / h2 > <nl>
 } ; <nl> / * * Handles a clear button click on a bookmark . * / <nl> BookmarkPanel . prototype . _clearButtonHandler = function ( evt ) { <nl> - var index = this . _getParentDataIndex ( evt ) ; <nl> + var index = this . getParentDataIndex ( evt ) ; <nl> this . splice ( ' savedStates ' , index , 1 ) ; <nl> + this . updateHasStates ( ) ; <nl> } ; <nl> / * * Handles a label change event on a bookmark . * / <nl> BookmarkPanel . prototype . _labelChange = function ( evt ) { <nl> - var index = this . _getParentDataIndex ( evt ) ; <nl> + var index = this . getParentDataIndex ( evt ) ; <nl> this . savedStates [ index ] . label = evt . target . value ; <nl> } ; <nl> / * * <nl>
< h2 > Settings < / h2 > <nl>
 * / <nl> BookmarkPanel . prototype . loadSavedStates = function ( serializedStates ) { <nl> this . savedStates = JSON . parse ( serializedStates ) ; <nl> + this . updateHasStates ( ) ; <nl> + } ; <nl> + / * * <nl> + * Updates the hasStates polymer property . <nl> + * / <nl> + BookmarkPanel . prototype . updateHasStates = function ( ) { <nl> + this . hasStates = ( this . savedStates . length ! = = 0 ) ; <nl> + } ; <nl> + / * * Sanity checks a State array to ensure it matches the current dataset . * / <nl> + BookmarkPanel . prototype . savedStatesValid = function ( states ) { <nl> + for ( var i = 0 ; i < states . length ; i + + ) { <nl> + if ( states [ i ] . dataSetDimensions [ 0 ] ! = = this . projector . dataSet . dim [ 0 ] | | <nl> + states [ i ] . dataSetDimensions [ 1 ] ! = = this . projector . dataSet . dim [ 1 ] ) { <nl> + return false ; <nl> + } <nl> + } <nl> + return true ; <nl> + } ; <nl> return BookmarkPanel ; <nl> } ( exports . BookmarkPanelPolymer ) ) ; <nl> exports . BookmarkPanel = BookmarkPanel ; <nl> document . registerElement ( BookmarkPanel . prototype . is , BookmarkPanel ) ; <nl> <nl> - } , { " . / vz - projector - util " : 33 } ] , 27 : [ function ( require , module , exports ) { <nl>
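/ / [ Editor ' s note : annotation added for this write - up , not part of the <nl>
/ / commit . The ignoreNextProjectionEvent flag above is a one - shot guard : <nl>
/ / loadSavedState ( ) raises it before calling projector . loadState ( ) , so the <nl>
/ / projection - changed event the panel itself triggers does not clear the <nl>
/ / bookmark selection it just made . The pattern in isolation : <nl>
/ / var suppressNext = false ; <nl>
/ / listener : if ( suppressNext ) { suppressNext = false ; return ; } handle ( ) ; <nl>
/ / caller : suppressNext = true ; fireEvent ( ) ; ] <nl>
 + } , { " . / logging " : 11 , " .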
/ vz - projector - util " : 31 } ] , 25 : [ function ( require , module , exports ) { <nl> / * Copyright 2016 The TensorFlow Authors . All Rights Reserved . <nl> <nl> Licensed under the Apache License , Version 2 . 0 ( the " License " ) ; <nl> < h2 > Settings < / h2 > <nl> } ; <nl> fileReader . readAsText ( file ) ; <nl> } ) ; <nl> - var uploadButton = this . dom . select ( ' # upload ' ) ; <nl> + var uploadButton = this . dom . select ( ' # upload - tensors ' ) ; <nl> uploadButton . on ( ' click ' , function ( ) { fileInput . node ( ) . click ( ) ; } ) ; <nl> / / Show and setup the upload metadata button . <nl> var fileMetadataInput = this . dom . select ( ' # file - metadata ' ) ; <nl> < h2 > Settings < / h2 > <nl> uploadMetadataButton . on ( ' click ' , function ( ) { <nl> fileMetadataInput . node ( ) . click ( ) ; <nl> } ) ; <nl> + / / Fill out the projector config . <nl> + var projectorConfigTemplate = this . $ $ ( ' # projector - config - template ' ) ; <nl> + var projectorConfigTemplateJson = { <nl> + embeddings : [ { <nl> + tensorName : ' My tensor ' , <nl> + tensorShape : [ 1000 , 50 ] , <nl> + tensorPath : ' https : / / gist . github . com / . . . / tensors . tsv ' , <nl> + metadataPath : ' https : / / gist . github . com / . . . / optional . metadata . tsv ' , <nl> + } ] , <nl> + } ; <nl> + this . setProjectorConfigTemplateJson ( projectorConfigTemplate , projectorConfigTemplateJson ) ; <nl> + / / Set up optional field checkboxes . <nl> + var spriteFieldCheckbox = this . $ $ ( ' # config - sprite - checkbox ' ) ; <nl> + spriteFieldCheckbox . addEventListener ( ' change ' , function ( ) { <nl> + if ( spriteFieldCheckbox . checked ) { <nl> + projectorConfigTemplateJson . embeddings [ 0 ] . sprite = { <nl> + imagePath : ' https : / / github . com / . . . / optional . sprite . png ' , <nl> + singleImageDim : [ 32 , 32 ] <nl> + } ; <nl> + } <nl> + else { <nl> + delete projectorConfigTemplateJson . embeddings [ 0 ] . sprite ; <nl> + } <nl> + _this . setProjectorConfigTemplateJson ( projectorConfigTemplate , projectorConfigTemplateJson ) ; <nl> + } ) ; <nl> + var bookmarksFieldCheckbox = this . $ $ ( ' # config - bookmarks - checkbox ' ) ; <nl> + bookmarksFieldCheckbox . addEventListener ( ' change ' , function ( ) { <nl> + if ( bookmarksFieldCheckbox . checked ) { <nl> + projectorConfigTemplateJson . embeddings [ 0 ] . bookmarksPath = <nl> + ' https : / / gist . github . com / . . . / bookmarks . txt ' ; <nl> + } <nl> + else { <nl> + delete projectorConfigTemplateJson . embeddings [ 0 ] . bookmarksPath ; <nl> + } <nl> + _this . setProjectorConfigTemplateJson ( projectorConfigTemplate , projectorConfigTemplateJson ) ; <nl> + } ) ; <nl> + var metadataFieldCheckbox = this . $ $ ( ' # config - metadata - checkbox ' ) ; <nl> + metadataFieldCheckbox . addEventListener ( ' change ' , function ( ) { <nl> + if ( metadataFieldCheckbox . checked ) { <nl> + projectorConfigTemplateJson . embeddings [ 0 ] . metadataPath = <nl> + ' https : / / gist . github . com / . . . / optional . metadata . tsv ' ; <nl> + } <nl> + else { <nl> + delete projectorConfigTemplateJson . embeddings [ 0 ] . metadataPath ; <nl> + } <nl> + _this . setProjectorConfigTemplateJson ( projectorConfigTemplate , projectorConfigTemplateJson ) ; <nl> + } ) ; <nl> + / / Update the link and the readonly shareable URL . <nl> + var projectorConfigUrlInput = this . $ $ ( ' # projector - config - url ' ) ; <nl> + var projectorConfigDemoUrlInput = this . 
$ $ ( ' # projector - share - url ' ) ; <nl> + var projectorConfigDemoUrlLink = this . $ $ ( ' # projector - share - url - link ' ) ; <nl> + projectorConfigUrlInput . addEventListener ( ' input ' , function ( ) { <nl> + var projectorDemoUrl = location . protocol + ' / / ' + location . host + <nl> + location . pathname + ' ? config = ' + <nl> + projectorConfigUrlInput . value ; <nl> + projectorConfigDemoUrlInput . value = projectorDemoUrl ; <nl> + projectorConfigDemoUrlLink . href = projectorDemoUrl ; <nl> + } ) ; <nl> + } ; <nl> + DataPanel . prototype . setProjectorConfigTemplateJson = function ( projectorConfigTemplate , config ) { <nl> + projectorConfigTemplate . value = <nl> + JSON . stringify ( config , null , / * * replacer * / 2 / * * white space * / ) ; <nl> } ; <nl> DataPanel . prototype . _getNumTensorsLabel = function ( ) { <nl> return this . tensorNames . length = = = 1 ? ' 1 tensor ' : <nl> < h2 > Settings < / h2 > <nl> exports . DataPanel = DataPanel ; <nl> document . registerElement ( DataPanel . prototype . is , DataPanel ) ; <nl> <nl> - } , { " . / data - provider " : 5 , " . / vz - projector - util " : 33 } ] , 28 : [ function ( require , module , exports ) { <nl> + } , { " . / data - provider " : 5 , " . / vz - projector - util " : 31 } ] , 26 : [ function ( require , module , exports ) { <nl> / * Copyright 2016 The TensorFlow Authors . All Rights Reserved . <nl> <nl> Licensed under the Apache License , Version 2 . 0 ( the " License " ) ; <nl> < h2 > Settings < / h2 > <nl> exports . ProjectorInput = ProjectorInput ; <nl> document . registerElement ( ProjectorInput . prototype . is , ProjectorInput ) ; <nl> <nl> - } , { " . / vz - projector - util " : 33 } ] , 29 : [ function ( require , module , exports ) { <nl> + } , { " . / vz - projector - util " : 31 } ] , 27 : [ function ( require , module , exports ) { <nl> / * Copyright 2016 The TensorFlow Authors . All Rights Reserved . <nl> <nl> Licensed under the Apache License , Version 2 . 0 ( the " License " ) ; <nl> < h2 > Settings < / h2 > <nl> / / tslint : disable - next - line <nl> exports . PolymerClass = vz_projector_util_1 . PolymerElement ( { <nl> is : ' vz - projector - inspector - panel ' , <nl> - properties : { <nl> - selectedMetadataField : String , <nl> - metadataFields : Array <nl> - } <nl> + properties : { selectedMetadataField : String , metadataFields : Array } <nl> } ) ; <nl> var InspectorPanel = ( function ( _super ) { <nl> __extends ( InspectorPanel , _super ) ; <nl> < h2 > Settings < / h2 > <nl> / / https : / / www . polymer - project . org / 1 . 0 / docs / devguide / styling # scope - subtree <nl> this . scopeSubtree ( this , true ) ; <nl> } ; <nl> - InspectorPanel . prototype . initialize = function ( projector ) { <nl> + InspectorPanel . prototype . initialize = function ( projector , projectorEventContext ) { <nl> + var _this = this ; <nl> this . projector = projector ; <nl> - this . setupUI ( ) ; <nl> + this . projectorEventContext = projectorEventContext ; <nl> + this . setupUI ( projector ) ; <nl> + projectorEventContext . registerSelectionChangedListener ( function ( selection , neighbors ) { <nl> + return _this . updateInspectorPane ( selection , neighbors ) ; <nl> + } ) ; <nl> } ; <nl> / * * Updates the nearest neighbors list in the inspector . * / <nl> InspectorPanel . prototype . updateInspectorPane = function ( indices , neighbors ) { <nl> + this . neighborsOfFirstPoint = neighbors ; <nl> if ( neighbors . length > 0 ) { <nl> - this . selectedPointIndex = indices [ 0 ] ; <nl> + this . 
selectedPointIndices = indices ; <nl> } <nl> else { <nl> - this . selectedPointIndex = null ; <nl> + this . selectedPointIndices = null ; <nl> } <nl> this . updateFilterButtons ( indices . length + neighbors . length ) ; <nl> this . updateNeighborsList ( neighbors ) ; <nl> < h2 > Settings < / h2 > <nl> this . updateSearchResults ( [ ] ) ; <nl> } <nl> } ; <nl> + InspectorPanel . prototype . enableResetFilterButton = function ( enabled ) { <nl> + this . resetFilterButton . attr ( ' disabled ' , enabled ? null : true ) ; <nl> + } ; <nl> + InspectorPanel . prototype . restoreUIFromBookmark = function ( bookmark ) { <nl> + this . enableResetFilterButton ( bookmark . filteredPoints ! = null ) ; <nl> + } ; <nl> InspectorPanel . prototype . metadataChanged = function ( spriteAndMetadata ) { <nl> var labelIndex = - 1 ; <nl> this . metadataFields = spriteAndMetadata . stats . map ( function ( stats , i ) { <nl> < h2 > Settings < / h2 > <nl> this . selectedMetadataField = spriteAndMetadata . stats [ labelIndex ] . name ; <nl> } ; <nl> InspectorPanel . prototype . datasetChanged = function ( ) { <nl> - this . resetFilterButton . attr ( ' disabled ' , true ) ; <nl> + this . enableResetFilterButton ( false ) ; <nl> } ; <nl> InspectorPanel . prototype . updateSearchResults = function ( indices ) { <nl> var _this = this ; <nl> < h2 > Settings < / h2 > <nl> } <nl> this . limitMessage . style ( ' display ' , indices . length < = LIMIT_RESULTS ? ' none ' : null ) ; <nl> indices = indices . slice ( 0 , LIMIT_RESULTS ) ; <nl> - var rows = list . selectAll ( ' . row ' ) <nl> - . data ( indices ) <nl> - . enter ( ) <nl> - . append ( ' div ' ) . attr ( ' class ' , ' row ' ) ; <nl> + var rows = list . selectAll ( ' . row ' ) . data ( indices ) . enter ( ) . append ( ' div ' ) . attr ( ' class ' , ' row ' ) ; <nl> rows . append ( ' a ' ) <nl> . attr ( ' class ' , ' label ' ) <nl> . attr ( ' title ' , function ( index ) { return _this . getLabelFromIndex ( index ) ; } ) <nl> . text ( function ( index ) { return _this . getLabelFromIndex ( index ) ; } ) ; <nl> rows . on ( ' mouseenter ' , function ( index ) { <nl> - _this . projector . notifyHoverOverPoint ( index ) ; <nl> + _this . projectorEventContext . notifyHoverOverPoint ( index ) ; <nl> } ) ; <nl> rows . on ( ' mouseleave ' , function ( ) { <nl> - _this . projector . notifyHoverOverPoint ( null ) ; <nl> + _this . projectorEventContext . notifyHoverOverPoint ( null ) ; <nl> } ) ; <nl> rows . on ( ' click ' , function ( index ) { <nl> - _this . projector . notifySelectionChanged ( [ index ] ) ; <nl> + _this . projectorEventContext . notifySelectionChanged ( [ index ] ) ; <nl> } ) ; <nl> } ; <nl> InspectorPanel . prototype . getLabelFromIndex = function ( pointIndex ) { <nl> < h2 > Settings < / h2 > <nl> . style ( ' border - top - color ' , function ( d ) { <nl> return dist2color ( _this . distFunc , d . dist , minDist ) ; <nl> } ) <nl> - . style ( ' width ' , function ( d ) { <nl> - return normalizeDist ( _this . distFunc , d . dist , minDist ) * 100 + ' % ' ; <nl> - } ) ; <nl> + . style ( ' width ' , function ( d ) { return normalizeDist ( _this . distFunc , d . dist , minDist ) * 100 + ' % ' ; } ) ; <nl> bar . selectAll ( ' . tick ' ) <nl> . data ( d3 . range ( 1 , 4 ) ) <nl> . enter ( ) <nl> < h2 > Settings < / h2 > <nl> . attr ( ' class ' , ' tick ' ) <nl> . style ( ' left ' , function ( d ) { return d * 100 / 4 + ' % ' ; } ) ; <nl> n . on ( ' mouseenter ' , function ( d ) { <nl> - _this . projector . notifyHoverOverPoint ( d . index ) ; <nl> + _this . 
projectorEventContext . notifyHoverOverPoint ( d . index ) ; <nl> } ) ; <nl> n . on ( ' mouseleave ' , function ( ) { <nl> - _this . projector . notifyHoverOverPoint ( null ) ; <nl> + _this . projectorEventContext . notifyHoverOverPoint ( null ) ; <nl> } ) ; <nl> n . on ( ' click ' , function ( d ) { <nl> - _this . projector . notifySelectionChanged ( [ d . index ] ) ; <nl> + _this . projectorEventContext . notifySelectionChanged ( [ d . index ] ) ; <nl> } ) ; <nl> } ; <nl> InspectorPanel . prototype . updateFilterButtons = function ( numPoints ) { <nl> < h2 > Settings < / h2 > <nl> this . clearSelectionButton . attr ( ' disabled ' , true ) ; <nl> } <nl> } ; <nl> - InspectorPanel . prototype . setupUI = function ( ) { <nl> + InspectorPanel . prototype . setupUI = function ( projector ) { <nl> var _this = this ; <nl> this . distFunc = vector . cosDist ; <nl> var eucDist = this . dom . select ( ' . distance a . euclidean ' ) ; <nl> < h2 > Settings < / h2 > <nl> _this . dom . selectAll ( ' . distance a ' ) . classed ( ' selected ' , false ) ; <nl> eucDist . classed ( ' selected ' , true ) ; <nl> _this . distFunc = vector . dist ; <nl> - var neighbors = _this . projector . dataSet . findNeighbors ( _this . selectedPointIndex , _this . distFunc , _this . numNN ) ; <nl> + var neighbors = projector . dataSet . findNeighbors ( _this . selectedPointIndices [ 0 ] , _this . distFunc , _this . numNN ) ; <nl> _this . updateNeighborsList ( neighbors ) ; <nl> } ) ; <nl> var cosDist = this . dom . select ( ' . distance a . cosine ' ) ; <nl> < h2 > Settings < / h2 > <nl> _this . dom . selectAll ( ' . distance a ' ) . classed ( ' selected ' , false ) ; <nl> cosDist . classed ( ' selected ' , true ) ; <nl> _this . distFunc = vector . cosDist ; <nl> - var neighbors = _this . projector . dataSet . findNeighbors ( _this . selectedPointIndex , _this . distFunc , _this . numNN ) ; <nl> + var neighbors = projector . dataSet . findNeighbors ( _this . selectedPointIndices [ 0 ] , _this . distFunc , _this . numNN ) ; <nl> _this . updateNeighborsList ( neighbors ) ; <nl> } ) ; <nl> / / Called whenever the search text input changes . <nl> var updateInput = function ( value , inRegexMode ) { <nl> if ( value = = null | | value . trim ( ) = = = ' ' ) { <nl> _this . searchBox . message = ' ' ; <nl> - _this . projector . notifySelectionChanged ( [ ] ) ; <nl> + _this . projectorEventContext . notifySelectionChanged ( [ ] ) ; <nl> return ; <nl> } <nl> - var indices = _this . projector . dataSet . query ( value , inRegexMode , _this . selectedMetadataField ) ; <nl> + var indices = projector . dataSet . query ( value , inRegexMode , _this . selectedMetadataField ) ; <nl> if ( indices . length = = = 0 ) { <nl> _this . searchBox . message = ' 0 matches . ' ; <nl> } <nl> else { <nl> _this . searchBox . message = indices . length + " matches . " ; <nl> } <nl> - _this . projector . notifySelectionChanged ( indices ) ; <nl> + _this . projectorEventContext . notifySelectionChanged ( indices ) ; <nl> } ; <nl> this . searchBox . registerInputChangedListener ( function ( value , inRegexMode ) { <nl> updateInput ( value , inRegexMode ) ; <nl> < h2 > Settings < / h2 > <nl> var updateNumNN = function ( ) { <nl> _this . numNN = + numNNInput . value ; <nl> _this . dom . select ( ' . num - nn . nn - count ' ) . text ( _this . numNN ) ; <nl> - if ( _this . selectedPointIndex ! = null ) { <nl> - _this . projector . notifySelectionChanged ( [ _this . selectedPointIndex ] ) ; <nl> + if ( _this . selectedPointIndices ! = null ) { <nl> + _this . 
projectorEventContext . notifySelectionChanged ( [ _this . selectedPointIndices [ 0 ] ] ) ; <nl> } <nl> } ; <nl> numNNInput . addEventListener ( ' change ' , updateNumNN ) ; <nl> updateNumNN ( ) ; <nl> / / Filtering dataset . <nl> this . setFilterButton . on ( ' click ' , function ( ) { <nl> - _this . projector . filterDataset ( ) ; <nl> - _this . resetFilterButton . attr ( ' disabled ' , null ) ; <nl> + var indices = _this . selectedPointIndices . concat ( _this . neighborsOfFirstPoint . map ( function ( n ) { return n . index ; } ) ) ; <nl> + projector . filterDataset ( indices ) ; <nl> + _this . enableResetFilterButton ( true ) ; <nl> _this . updateFilterButtons ( 0 ) ; <nl> } ) ; <nl> this . resetFilterButton . on ( ' click ' , function ( ) { <nl> - _this . projector . resetFilterDataset ( ) ; <nl> - _this . resetFilterButton . attr ( ' disabled ' , true ) ; <nl> + projector . resetFilterDataset ( ) ; <nl> + _this . enableResetFilterButton ( false ) ; <nl> } ) ; <nl> this . clearSelectionButton . on ( ' click ' , function ( ) { <nl> - _this . projector . adjustSelectionAndHover ( [ ] ) ; <nl> + projector . adjustSelectionAndHover ( [ ] ) ; <nl> } ) ; <nl> - this . resetFilterButton . attr ( ' disabled ' , true ) ; <nl> + this . enableResetFilterButton ( false ) ; <nl> } ; <nl> return InspectorPanel ; <nl> } ( exports . PolymerClass ) ) ; <nl> < h2 > Settings < / h2 > <nl> } <nl> document . registerElement ( InspectorPanel . prototype . is , InspectorPanel ) ; <nl> <nl> - } , { " . / vector " : 25 , " . / vz - projector - util " : 33 } ] , 30 : [ function ( require , module , exports ) { <nl> + } , { " . / vector " : 23 , " . / vz - projector - util " : 31 } ] , 28 : [ function ( require , module , exports ) { <nl> / * Copyright 2016 The TensorFlow Authors . All Rights Reserved . <nl> <nl> Licensed under the Apache License , Version 2 . 0 ( the " License " ) ; <nl> < h2 > Settings < / h2 > <nl> exports . Legend = Legend ; <nl> document . registerElement ( Legend . prototype . is , Legend ) ; <nl> <nl> - } , { " . / vz - projector - util " : 33 } ] , 31 : [ function ( require , module , exports ) { <nl> + } , { " . / vz - projector - util " : 31 } ] , 29 : [ function ( require , module , exports ) { <nl> / * Copyright 2016 The TensorFlow Authors . All Rights Reserved . <nl> <nl> Licensed under the Apache License , Version 2 . 0 ( the " License " ) ; <nl> < h2 > Settings < / h2 > <nl> exports . MetadataCard = MetadataCard ; <nl> document . registerElement ( MetadataCard . prototype . is , MetadataCard ) ; <nl> <nl> - } , { " . / vz - projector - util " : 33 } ] , 32 : [ function ( require , module , exports ) { <nl> + } , { " . / vz - projector - util " : 31 } ] , 30 : [ function ( require , module , exports ) { <nl> / * Copyright 2016 The TensorFlow Authors . All Rights Reserved . <nl> <nl> Licensed under the Apache License , Version 2 . 0 ( the " License " ) ; <nl> < h2 > Settings < / h2 > <nl> var vector = require ( ' . / vector ' ) ; <nl> / / tslint : disable - next - line : no - unused - variable <nl> var vz_projector_util_1 = require ( ' . / vz - projector - util ' ) ; <nl> + var NUM_PCA_COMPONENTS = 10 ; <nl> / / tslint : disable - next - line <nl> exports . ProjectionsPanelPolymer = vz_projector_util_1 . 
PolymerElement ( { <nl> is : ' vz - projector - projections - panel ' , <nl> < h2 > Settings < / h2 > <nl> pcaIs3d : { type : Boolean , value : true , observer : ' _pcaDimensionToggleObserver ' } , <nl> tSNEis3d : { type : Boolean , value : true , observer : ' _tsneDimensionToggleObserver ' } , <nl> / / PCA projection . <nl> - pcaComponents : { type : Array , value : d3 . range ( 0 , 10 ) } , <nl> + pcaComponents : Array , <nl> pcaX : { type : Number , value : 0 , observer : ' showPCAIfEnabled ' } , <nl> pcaY : { type : Number , value : 1 , observer : ' showPCAIfEnabled ' } , <nl> pcaZ : { type : Number , value : 2 , observer : ' showPCAIfEnabled ' } , <nl> < h2 > Settings < / h2 > <nl> ProjectionsPanel . prototype . enablePolymerChangesTriggerReprojection = function ( ) { <nl> this . polymerChangesTriggerReprojection = true ; <nl> } ; <nl> - ProjectionsPanel . prototype . updateTSNEPerplexityFromUIChange = function ( ) { <nl> + ProjectionsPanel . prototype . updateTSNEPerplexityFromSliderChange = function ( ) { <nl> if ( this . perplexitySlider ) { <nl> this . perplexity = + this . perplexitySlider . value ; <nl> } <nl> < h2 > Settings < / h2 > <nl> this . runTsneButton . on ( ' click ' , function ( ) { return _this . runTSNE ( ) ; } ) ; <nl> this . stopTsneButton . on ( ' click ' , function ( ) { return _this . dataSet . stopTSNE ( ) ; } ) ; <nl> this . perplexitySlider . value = this . perplexity . toString ( ) ; <nl> - this . perplexitySlider . addEventListener ( ' change ' , function ( ) { return _this . updateTSNEPerplexityFromUIChange ( ) ; } ) ; <nl> - this . updateTSNEPerplexityFromUIChange ( ) ; <nl> + this . perplexitySlider . addEventListener ( ' change ' , function ( ) { return _this . updateTSNEPerplexityFromSliderChange ( ) ; } ) ; <nl> + this . updateTSNEPerplexityFromSliderChange ( ) ; <nl> this . learningRateInput . addEventListener ( ' change ' , function ( ) { return _this . updateTSNELearningRateFromUIChange ( ) ; } ) ; <nl> this . updateTSNELearningRateFromUIChange ( ) ; <nl> this . setupCustomProjectionInputFields ( ) ; <nl> < h2 > Settings < / h2 > <nl> } <nl> this . computeAllCentroids ( ) ; <nl> this . setZDropdownEnabled ( this . pcaIs3d ) ; <nl> - this . updateTSNEPerplexityFromUIChange ( ) ; <nl> + this . updateTSNEPerplexityFromSliderChange ( ) ; <nl> this . updateTSNELearningRateFromUIChange ( ) ; <nl> if ( this . iterationLabel ) { <nl> this . iterationLabel . text ( bookmark . tSNEIteration . toString ( ) ) ; <nl> < h2 > Settings < / h2 > <nl> this . dataSet = dataSet ; <nl> this . originalDataSet = originalDataSet ; <nl> this . dim = dim ; <nl> + var perplexity = Math . max ( 5 , Math . ceil ( Math . sqrt ( dataSet . points . length ) / 4 ) ) ; <nl> + this . perplexitySlider . value = perplexity . toString ( ) ; <nl> + this . updateTSNEPerplexityFromSliderChange ( ) ; <nl> this . clearCentroids ( ) ; <nl> this . dom . select ( ' # tsne - sampling ' ) <nl> . style ( ' display ' , dataSet . points . length > data_1 . SAMPLE_SIZE ? null : ' none ' ) ; <nl> < h2 > Settings < / h2 > <nl> this . showPCA ( ) ; <nl> } <nl> } ; <nl> + ProjectionsPanel . prototype . updateTotalVarianceMessage = function ( ) { <nl> + var variances = this . dataSet . fracVariancesExplained ; <nl> + var totalVariance = variances [ this . pcaX ] + variances [ this . pcaY ] ; <nl> + var msg = ' Total variance described : ' ; <nl> + if ( this . pcaIs3d ) { <nl> + totalVariance + = variances [ this . pcaZ ] ; <nl> + } <nl> + msg + = ( totalVariance * 100 ) . toFixed ( 1 ) + ' % . 
' ; <nl> + this . dom . select ( ' # total - variance ' ) . html ( msg ) ; <nl> + } ; <nl> ProjectionsPanel . prototype . showPCA = function ( ) { <nl> var _this = this ; <nl> if ( this . dataSet = = null ) { <nl> < h2 > Settings < / h2 > <nl> / / Polymer properties are 1 - based . <nl> var accessors = _this . dataSet . getPointAccessors ( ' pca ' , [ _this . pcaX , _this . pcaY , _this . pcaZ ] ) ; <nl> _this . projector . setProjection ( ' pca ' , _this . pcaIs3d ? 3 : 2 , accessors ) ; <nl> + var numComponents = Math . min ( NUM_PCA_COMPONENTS , _this . dataSet . dim [ 1 ] ) ; <nl> + _this . updateTotalVarianceMessage ( ) ; <nl> + _this . pcaComponents = d3 . range ( 0 , numComponents ) . map ( function ( i ) { <nl> + var fracVariance = _this . dataSet . fracVariancesExplained [ i ] ; <nl> + return { <nl> + id : i , <nl> + componentNumber : i + 1 , <nl> + percVariance : ( fracVariance * 100 ) . toFixed ( 1 ) <nl> + } ; <nl> + } ) ; <nl> } ) ; <nl> } ; <nl> ProjectionsPanel . prototype . reprojectCustom = function ( ) { <nl> < h2 > Settings < / h2 > <nl> ProjectionsPanel . prototype . getTsneSampleSize = function ( ) { <nl> return data_1 . SAMPLE_SIZE . toLocaleString ( ) ; <nl> } ; <nl> - ProjectionsPanel . prototype . _addOne = function ( value ) { <nl> - return value + 1 ; <nl> - } ; <nl> return ProjectionsPanel ; <nl> } ( exports . ProjectionsPanelPolymer ) ) ; <nl> exports . ProjectionsPanel = ProjectionsPanel ; <nl> document . registerElement ( ProjectionsPanel . prototype . is , ProjectionsPanel ) ; <nl> <nl> - } , { " . / data " : 6 , " . / vector " : 25 , " . / vz - projector - util " : 33 } ] , 33 : [ function ( require , module , exports ) { <nl> + } , { " . / data " : 6 , " . / vector " : 23 , " . / vz - projector - util " : 31 } ] , 31 : [ function ( require , module , exports ) { <nl> / * Copyright 2016 The TensorFlow Authors . All Rights Reserved . <nl> <nl> Licensed under the Apache License , Version 2 . 0 ( the " License " ) ; <nl> < h2 > Settings < / h2 > <nl> } <nl> exports . PolymerElement = PolymerElement ; <nl> <nl> - } , { } ] , 34 : [ function ( require , module , exports ) { <nl> + } , { } ] , 32 : [ function ( require , module , exports ) { <nl> / * Copyright 2016 The TensorFlow Authors . All Rights Reserved . <nl> <nl> Licensed under the Apache License , Version 2 . 0 ( the " License " ) ; <nl> < h2 > Settings < / h2 > <nl> var scatterPlotVisualizerCanvasLabels_1 = require ( ' . / scatterPlotVisualizerCanvasLabels ' ) ; <nl> var scatterPlotVisualizerSprites_1 = require ( ' . / scatterPlotVisualizerSprites ' ) ; <nl> var scatterPlotVisualizerTraces_1 = require ( ' . / scatterPlotVisualizerTraces ' ) ; <nl> + var util = require ( ' . / util ' ) ; <nl> / / tslint : disable - next - line : no - unused - variable <nl> var vz_projector_util_1 = require ( ' . / vz - projector - util ' ) ; <nl> / * * <nl> < h2 > Settings < / h2 > <nl> properties : { <nl> routePrefix : String , <nl> dataProto : { type : String , observer : ' _dataProtoChanged ' } , <nl> - servingMode : String <nl> + servingMode : String , <nl> + projectorConfigJsonPath : String <nl> } <nl> } ) ; <nl> var INDEX_METADATA_FIELD = ' __index__ ' ; <nl> < h2 > Settings < / h2 > <nl> Projector . prototype . ready = function ( ) { <nl> this . selectionChangedListeners = [ ] ; <nl> this . hoverListeners = [ ] ; <nl> + this . projectionChangedListeners = [ ] ; <nl> this . selectedPointIndices = [ ] ; <nl> this . neighborsOfFirstPoint = [ ] ; <nl> this . dom = d3 . select ( this ) ; <nl> logging . 
setDomContainer ( this ) ; <nl> this . dataPanel = this . $ [ ' data - panel ' ] ; <nl> this . inspectorPanel = this . $ [ ' inspector - panel ' ] ; <nl> - this . inspectorPanel . initialize ( this ) ; <nl> + this . inspectorPanel . initialize ( this , this ) ; <nl> this . projectionsPanel = this . $ [ ' projections - panel ' ] ; <nl> this . projectionsPanel . initialize ( this ) ; <nl> + this . bookmarkPanel = this . $ [ ' bookmark - panel ' ] ; <nl> + this . bookmarkPanel . initialize ( this , this ) ; <nl> this . metadataCard = this . $ [ ' metadata - card ' ] ; <nl> this . statusBar = this . dom . select ( ' # status - bar ' ) ; <nl> - this . bookmarkPanel = this . $ [ ' bookmark - panel ' ] ; <nl> this . scopeSubtree ( this . $ $ ( ' # wrapper - notify - msg ' ) , true ) ; <nl> this . setupUIControls ( ) ; <nl> this . initializeDataProvider ( ) ; <nl> < h2 > Settings < / h2 > <nl> return _this . dataSet . points [ i ] <nl> . metadata [ _this . selectedLabelOption ] ; <nl> } ; <nl> - this . scatterPlot . setLabelAccessor ( labelAccessor ) ; <nl> this . metadataCard . setLabelOption ( this . selectedLabelOption ) ; <nl> + this . scatterPlot . setLabelAccessor ( labelAccessor ) ; <nl> + this . scatterPlot . render ( ) ; <nl> } ; <nl> Projector . prototype . setSelectedColorOption = function ( colorOption ) { <nl> this . selectedColorOption = colorOption ; <nl> - this . updateScatterPlot ( ) ; <nl> + this . updateScatterPlotAttributes ( ) ; <nl> + this . scatterPlot . render ( ) ; <nl> } ; <nl> Projector . prototype . setNormalizeData = function ( normalizeData ) { <nl> this . normalizeData = normalizeData ; <nl> this . setCurrentDataSet ( this . originalDataSet . getSubset ( ) ) ; <nl> } ; <nl> Projector . prototype . updateDataSet = function ( ds , spriteAndMetadata , metadataFile ) { <nl> + this . dataSetFilterIndices = null ; <nl> this . originalDataSet = ds ; <nl> - if ( this . scatterPlot = = null | | this . originalDataSet = = null ) { <nl> + if ( this . scatterPlot = = null | | ds = = null ) { <nl> / / We are not ready yet . <nl> return ; <nl> } <nl> < h2 > Settings < / h2 > <nl> / / height can grow indefinitely . <nl> var container = this . dom . select ( ' # container ' ) ; <nl> container . style ( ' height ' , container . property ( ' clientHeight ' ) + ' px ' ) ; <nl> + this . scatterPlot . resize ( ) ; <nl> + this . scatterPlot . render ( ) ; <nl> } ; <nl> Projector . prototype . setSelectedTensor = function ( run , tensorInfo ) { <nl> - this . bookmarkPanel . setSelectedTensor ( run , tensorInfo ) ; <nl> + this . bookmarkPanel . setSelectedTensor ( run , tensorInfo , this . dataProvider ) ; <nl> } ; <nl> / * * <nl> * Registers a listener to be called any time the selected point set changes . <nl> < h2 > Settings < / h2 > <nl> Projector . prototype . registerSelectionChangedListener = function ( listener ) { <nl> this . selectionChangedListeners . push ( listener ) ; <nl> } ; <nl> - Projector . prototype . filterDataset = function ( ) { <nl> - var indices = this . selectedPointIndices . concat ( this . neighborsOfFirstPoint . map ( function ( n ) { return n . index ; } ) ) ; <nl> + Projector . prototype . filterDataset = function ( pointIndices ) { <nl> var selectionSize = this . selectedPointIndices . length ; <nl> - this . setCurrentDataSet ( this . dataSet . getSubset ( indices ) ) ; <nl> + this . setCurrentDataSet ( this . dataSet . getSubset ( pointIndices ) ) ; <nl> + this . dataSetFilterIndices = pointIndices ; <nl> this . adjustSelectionAndHover ( d3 . 
range ( selectionSize ) ) ; <nl> - this . scatterPlot . recreateScene ( ) ; <nl> } ; <nl> Projector . prototype . resetFilterDataset = function ( ) { <nl> var _this = this ; <nl> < h2 > Settings < / h2 > <nl> return _this . dataSet . points [ localIndex ] . index ; <nl> } ) ; <nl> this . setCurrentDataSet ( this . originalDataSet . getSubset ( ) ) ; <nl> + this . updateScatterPlotPositions ( ) ; <nl> + this . dataSetFilterIndices = [ ] ; <nl> this . adjustSelectionAndHover ( originalPointIndices ) ; <nl> } ; <nl> / * * <nl> < h2 > Settings < / h2 > <nl> Projector . prototype . notifyHoverOverPoint = function ( pointIndex ) { <nl> this . hoverListeners . forEach ( function ( l ) { return l ( pointIndex ) ; } ) ; <nl> } ; <nl> + Projector . prototype . registerProjectionChangedListener = function ( listener ) { <nl> + this . projectionChangedListeners . push ( listener ) ; <nl> + } ; <nl> + Projector . prototype . notifyProjectionChanged = function ( dataSet ) { <nl> + this . projectionChangedListeners . forEach ( function ( l ) { return l ( dataSet ) ; } ) ; <nl> + } ; <nl> Projector . prototype . _dataProtoChanged = function ( dataProtoString ) { <nl> - var dataProto = dataProtoString ? <nl> - JSON . parse ( dataProtoString ) : null ; <nl> + var dataProto = dataProtoString ? JSON . parse ( dataProtoString ) : null ; <nl> this . initializeDataProvider ( dataProto ) ; <nl> } ; <nl> Projector . prototype . makeDefaultPointsInfoAndStats = function ( points ) { <nl> < h2 > Settings < / h2 > <nl> } ; <nl> Projector . prototype . initializeDataProvider = function ( dataProto ) { <nl> if ( this . servingMode = = = ' demo ' ) { <nl> - this . dataProvider = new data_provider_demo_1 . DemoDataProvider ( ) ; <nl> + var projectorConfigUrl = void 0 ; <nl> + / / Only in demo mode do we allow the config being passed via URL . <nl> + var urlParams = util . getURLParams ( window . location . search ) ; <nl> + if ( ' config ' in urlParams ) { <nl> + projectorConfigUrl = urlParams [ ' config ' ] ; <nl> + } <nl> + else { <nl> + projectorConfigUrl = this . projectorConfigJsonPath ; <nl> + } <nl> + this . dataProvider = new data_provider_demo_1 . DemoDataProvider ( projectorConfigUrl ) ; <nl> } <nl> else if ( this . servingMode = = = ' server ' ) { <nl> if ( ! this . routePrefix ) { <nl> < h2 > Settings < / h2 > <nl> this . dataProvider = new data_provider_proto_1 . ProtoDataProvider ( dataProto ) ; <nl> } <nl> this . dataPanel . initialize ( this , this . dataProvider ) ; <nl> - this . bookmarkPanel . initialize ( this , this . dataProvider ) ; <nl> } ; <nl> Projector . prototype . getLegendPointColorer = function ( colorOption ) { <nl> var _this = this ; <nl> < h2 > Settings < / h2 > <nl> return label3DModeButton . active ; <nl> } ; <nl> Projector . prototype . getSpriteImageMode = function ( ) { <nl> - return this . dataSet & & <nl> - this . dataSet . spriteAndMetadataInfo & & <nl> + return this . dataSet & & this . dataSet . spriteAndMetadataInfo & & <nl> this . dataSet . spriteAndMetadataInfo . spriteImage ! = null ; <nl> } ; <nl> Projector . prototype . adjustSelectionAndHover = function ( selectedPointIndices , hoverIndex ) { <nl> < h2 > Settings < / h2 > <nl> this . notifyHoverOverPoint ( hoverIndex ) ; <nl> this . scatterPlot . setMode ( scatterPlot_1 . Mode . HOVER ) ; <nl> } ; <nl> - Projector . prototype . unsetCurrentDataSet = function ( ) { <nl> - this . dataSet . stopTSNE ( ) ; <nl> - } ; <nl> Projector . prototype . setCurrentDataSet = function ( ds ) { <nl> this . 
adjustSelectionAndHover ( [ ] ) ; <nl> if ( this . dataSet ! = null ) { <nl> - this . unsetCurrentDataSet ( ) ; <nl> + this . dataSet . stopTSNE ( ) ; <nl> } <nl> this . dataSet = ds ; <nl> if ( this . normalizeData ) { <nl> < h2 > Settings < / h2 > <nl> this . dim = this . dataSet . dim [ 1 ] ; <nl> this . dom . select ( ' span . numDataPoints ' ) . text ( this . dataSet . dim [ 0 ] ) ; <nl> this . dom . select ( ' span . dim ' ) . text ( this . dataSet . dim [ 1 ] ) ; <nl> + this . selectedProjectionPointAccessors = null ; <nl> this . projectionsPanel . dataSetUpdated ( this . dataSet , this . originalDataSet , this . dim ) ; <nl> this . scatterPlot . setCameraParametersForNextCameraCreation ( null , true ) ; <nl> - this . scatterPlot . setDataSet ( this . dataSet ) ; <nl> - this . updateScatterPlot ( ) ; <nl> } ; <nl> Projector . prototype . setupUIControls = function ( ) { <nl> var _this = this ; <nl> < h2 > Settings < / h2 > <nl> } ) ; <nl> var labels3DModeButton = this . get3DLabelModeButton ( ) ; <nl> labels3DModeButton . addEventListener ( ' click ' , function ( ) { <nl> - _this . createVisualizers ( labels3DModeButton . active ) ; <nl> - _this . scatterPlot . recreateScene ( ) ; <nl> - _this . updateScatterPlot ( ) ; <nl> - _this . scatterPlot . update ( ) ; <nl> + _this . createVisualizers ( _this . get3DLabelMode ( ) ) ; <nl> + _this . updateScatterPlotAttributes ( ) ; <nl> + _this . scatterPlot . render ( ) ; <nl> } ) ; <nl> window . addEventListener ( ' resize ' , function ( ) { <nl> var container = _this . dom . select ( ' # container ' ) ; <nl> < h2 > Settings < / h2 > <nl> _this . scatterPlot . resize ( ) ; <nl> } ) ; <nl> this . projectorScatterPlotAdapter = new projectorScatterPlotAdapter_1 . ProjectorScatterPlotAdapter ( ) ; <nl> - this . scatterPlot = new scatterPlot_1 . ScatterPlot ( this . getScatterContainer ( ) , function ( i ) { return ' ' + <nl> - _this . dataSet . points [ i ] . metadata [ _this . selectedLabelOption ] ; } , this , this ) ; <nl> + this . scatterPlot = new scatterPlot_1 . ScatterPlot ( this . getScatterContainer ( ) , function ( i ) { return ' ' + _this . dataSet . points [ i ] . metadata [ _this . selectedLabelOption ] ; } , this ) ; <nl> this . createVisualizers ( false ) ; <nl> this . scatterPlot . onCameraMove ( function ( cameraPosition , cameraTarget ) { <nl> return _this . bookmarkPanel . clearStateSelection ( ) ; <nl> < h2 > Settings < / h2 > <nl> hoverText = point . metadata [ this . selectedLabelOption ] . toString ( ) ; <nl> } <nl> } <nl> - this . updateScatterPlot ( ) ; <nl> + this . updateScatterPlotAttributes ( ) ; <nl> + this . scatterPlot . render ( ) ; <nl> if ( this . selectedPointIndices . length = = = 0 ) { <nl> this . statusBar . style ( ' display ' , hoverText ? null : ' none ' ) ; <nl> this . statusBar . text ( hoverText ) ; <nl> } <nl> } ; <nl> - Projector . prototype . updateScatterPlot = function ( ) { <nl> + Projector . prototype . updateScatterPlotPositions = function ( ) { <nl> + if ( this . dataSet = = null ) { <nl> + return ; <nl> + } <nl> + if ( this . selectedProjectionPointAccessors = = null ) { <nl> + return ; <nl> + } <nl> + var newPositions = this . projectorScatterPlotAdapter . generatePointPositionArray ( this . dataSet , this . selectedProjectionPointAccessors ) ; <nl> + this . scatterPlot . setPointPositions ( this . dataSet , newPositions ) ; <nl> + } ; <nl> + Projector . prototype . updateScatterPlotAttributes = function ( ) { <nl> var dataSet = this . dataSet ; <nl> var selectedSet = this . 
selectedPointIndices ; <nl> var hoverIndex = this . hoverPointIndex ; <nl> var neighbors = this . neighborsOfFirstPoint ; <nl> var pointColorer = this . getLegendPointColorer ( this . selectedColorOption ) ; <nl> - var pointColors = this . projectorScatterPlotAdapter . generatePointColorArray ( dataSet , pointColorer , selectedSet , neighbors , hoverIndex , this . get3DLabelMode ( ) , this . getSpriteImageMode ( ) ) ; <nl> - var pointScaleFactors = this . projectorScatterPlotAdapter . generatePointScaleFactorArray ( dataSet , selectedSet , neighbors , hoverIndex ) ; <nl> - var labels = this . projectorScatterPlotAdapter . generateVisibleLabelRenderParams ( dataSet , selectedSet , neighbors , hoverIndex ) ; <nl> - var traceColors = this . projectorScatterPlotAdapter . generateLineSegmentColorMap ( dataSet , pointColorer ) ; <nl> + var adapter = this . projectorScatterPlotAdapter ; <nl> + var pointColors = adapter . generatePointColorArray ( dataSet , pointColorer , selectedSet , neighbors , hoverIndex , this . get3DLabelMode ( ) , this . getSpriteImageMode ( ) ) ; <nl> + var pointScaleFactors = adapter . generatePointScaleFactorArray ( dataSet , selectedSet , neighbors , hoverIndex ) ; <nl> + var labels = adapter . generateVisibleLabelRenderParams ( dataSet , selectedSet , neighbors , hoverIndex ) ; <nl> + var traceColors = adapter . generateLineSegmentColorMap ( dataSet , pointColorer ) ; <nl> + var traceOpacities = adapter . generateLineSegmentOpacityArray ( dataSet , selectedSet ) ; <nl> + var traceWidths = adapter . generateLineSegmentWidthArray ( dataSet , selectedSet ) ; <nl> this . scatterPlot . setPointColors ( pointColors ) ; <nl> this . scatterPlot . setPointScaleFactors ( pointScaleFactors ) ; <nl> this . scatterPlot . setLabels ( labels ) ; <nl> this . scatterPlot . setTraceColors ( traceColors ) ; <nl> - this . scatterPlot . render ( ) ; <nl> + this . scatterPlot . setTraceOpacities ( traceOpacities ) ; <nl> + this . scatterPlot . setTraceWidths ( traceWidths ) ; <nl> } ; <nl> Projector . prototype . getScatterContainer = function ( ) { <nl> return this . dom . select ( ' # scatter ' ) ; <nl> } ; <nl> Projector . prototype . createVisualizers = function ( inLabels3DMode ) { <nl> var scatterPlot = this . scatterPlot ; <nl> - var selectionContext = this ; <nl> scatterPlot . removeAllVisualizers ( ) ; <nl> if ( inLabels3DMode ) { <nl> scatterPlot . addVisualizer ( new scatterPlotVisualizer3DLabels_1 . ScatterPlotVisualizer3DLabels ( ) ) ; <nl> } <nl> else { <nl> scatterPlot . addVisualizer ( new scatterPlotVisualizerSprites_1 . ScatterPlotVisualizerSprites ( ) ) ; <nl> - scatterPlot . addVisualizer ( new scatterPlotVisualizerTraces_1 . ScatterPlotVisualizerTraces ( selectionContext ) ) ; <nl> scatterPlot . addVisualizer ( new scatterPlotVisualizerCanvasLabels_1 . ScatterPlotVisualizerCanvasLabels ( this . getScatterContainer ( ) ) ) ; <nl> } <nl> + scatterPlot . addVisualizer ( new scatterPlotVisualizerTraces_1 . ScatterPlotVisualizerTraces ( ) ) ; <nl> } ; <nl> Projector . prototype . onSelectionChanged = function ( selectedPointIndices , neighborsOfFirstPoint ) { <nl> this . selectedPointIndices = selectedPointIndices ; <nl> < h2 > Settings < / h2 > <nl> var totalNumPoints = this . selectedPointIndices . length + neighborsOfFirstPoint . length ; <nl> this . statusBar . text ( " Selected " + totalNumPoints + " points " ) <nl> . style ( ' display ' , totalNumPoints > 0 ? null : ' none ' ) ; <nl> - this . inspectorPanel . 
updateInspectorPane ( selectedPointIndices , neighborsOfFirstPoint ) ; <nl> - this . updateScatterPlot ( ) ; <nl> + this . updateScatterPlotAttributes ( ) ; <nl> + this . scatterPlot . render ( ) ; <nl> } ; <nl> Projector . prototype . setProjection = function ( projection , dimensionality , pointAccessors ) { <nl> this . selectedProjection = projection ; <nl> + this . selectedProjectionPointAccessors = pointAccessors ; <nl> this . scatterPlot . setDimensions ( dimensionality ) ; <nl> - this . scatterPlot . showTickLabels ( false ) ; <nl> - this . scatterPlot . setPointAccessors ( pointAccessors ) ; <nl> - if ( this . dataSet . hasMeaningfulVisualization ( projection ) ) { <nl> - this . scatterPlot . update ( ) ; <nl> + if ( this . dataSet . projectionCanBeRendered ( projection ) ) { <nl> + this . updateScatterPlotAttributes ( ) ; <nl> + this . notifyProjectionsUpdated ( ) ; <nl> } <nl> - this . scatterPlot . recreateScene ( ) ; <nl> this . scatterPlot . setCameraParametersForNextCameraCreation ( null , false ) ; <nl> + this . notifyProjectionChanged ( this . dataSet ) ; <nl> } ; <nl> Projector . prototype . notifyProjectionsUpdated = function ( ) { <nl> - this . scatterPlot . update ( ) ; <nl> + this . updateScatterPlotPositions ( ) ; <nl> + this . scatterPlot . render ( ) ; <nl> } ; <nl> / * * <nl> * Gets the current view of the embedding and saves it as a State object . <nl> < h2 > Settings < / h2 > <nl> state . projections . push ( projections ) ; <nl> } <nl> state . selectedProjection = this . selectedProjection ; <nl> + state . dataSetDimensions = this . dataSet . dim ; <nl> state . tSNEIteration = this . dataSet . tSNEIteration ; <nl> state . selectedPoints = this . selectedPointIndices ; <nl> + state . filteredPoints = this . dataSetFilterIndices ; <nl> state . cameraDef = this . scatterPlot . getCameraDef ( ) ; <nl> state . selectedColorOptionName = this . dataPanel . selectedColorOptionName ; <nl> state . selectedLabelOption = this . selectedLabelOption ; <nl> < h2 > Settings < / h2 > <nl> } ; <nl> / * * Loads a State object into the world . * / <nl> Projector . prototype . loadState = function ( state ) { <nl> + { <nl> + this . projectionsPanel . disablePolymerChangesTriggerReprojection ( ) ; <nl> + this . resetFilterDataset ( ) ; <nl> + if ( state . filteredPoints ! = null ) { <nl> + this . filterDataset ( state . filteredPoints ) ; <nl> + } <nl> + this . projectionsPanel . enablePolymerChangesTriggerReprojection ( ) ; <nl> + } <nl> for ( var i = 0 ; i < state . projections . length ; i + + ) { <nl> var point = this . dataSet . points [ i ] ; <nl> var projection = state . projections [ i ] ; <nl> < h2 > Settings < / h2 > <nl> this . dataSet . hasTSNERun = ( state . selectedProjection = = = ' tsne ' ) ; <nl> this . dataSet . tSNEIteration = state . tSNEIteration ; <nl> this . projectionsPanel . restoreUIFromBookmark ( state ) ; <nl> + this . inspectorPanel . restoreUIFromBookmark ( state ) ; <nl> this . dataPanel . selectedColorOptionName = state . selectedColorOptionName ; <nl> this . selectedLabelOption = state . selectedLabelOption ; <nl> this . scatterPlot . setCameraParametersForNextCameraCreation ( state . cameraDef , false ) ; <nl> < h2 > Settings < / h2 > <nl> exports . Projector = Projector ; <nl> document . registerElement ( Projector . prototype . is , Projector ) ; <nl> <nl> - } , { " . / data " : 6 , " . / data - provider - demo " : 2 , " . / data - provider - proto " : 3 , " . / data - provider - server " : 4 , " . / logging " : 12 , " . 
/ projectorScatterPlotAdapter " : 13 , " . / scatterPlot " : 15 , " . / scatterPlotVisualizer3DLabels " : 17 , " . / scatterPlotVisualizerCanvasLabels " : 19 , " . / scatterPlotVisualizerSprites " : 20 , " . / scatterPlotVisualizerTraces " : 21 , " . / vz - projector - util " : 33 } ] , 35 : [ function ( require , module , exports ) { <nl> + } , { " . / data " : 6 , " . / data - provider - demo " : 2 , " . / data - provider - proto " : 3 , " . / data - provider - server " : 4 , " . / logging " : 11 , " . / projectorScatterPlotAdapter " : 13 , " . / scatterPlot " : 15 , " . / scatterPlotVisualizer3DLabels " : 17 , " . / scatterPlotVisualizerCanvasLabels " : 18 , " . / scatterPlotVisualizerSprites " : 19 , " . / scatterPlotVisualizerTraces " : 20 , " . / util " : 22 , " . / vz - projector - util " : 31 } ] , 33 : [ function ( require , module , exports ) { <nl> arguments [ 4 ] [ 7 ] [ 0 ] . apply ( exports , arguments ) <nl> - } , { " dup " : 7 } ] } , { } , [ 35 , 1 , 2 , 3 , 4 , 5 , 6 , 7 , 8 , 9 , 10 , 11 , 12 , 13 , 14 , 15 , 16 , 17 , 18 , 19 , 20 , 21 , 22 , 23 , 24 , 25 , 26 , 27 , 28 , 29 , 30 , 31 , 32 , 33 , 34 ] ) ; <nl> + } , { " dup " : 7 } ] } , { } , [ 33 , 1 , 2 , 3 , 4 , 5 , 6 , 7 , 8 , 9 , 10 , 11 , 12 , 13 , 14 , 15 , 16 , 17 , 18 , 19 , 20 , 21 , 22 , 23 , 24 , 25 , 26 , 27 , 28 , 29 , 30 , 31 , 32 ] ) ; <nl> < / script > <nl> < / dom - module > <nl> < / body > < / html > <nl> \ No newline at end of file <nl> | Fix external compile and release new version of TB . | tensorflow/tensorflow | 2ee6a412733fd94e6f11f35ad816d26b6078b0e6 | 2016-11-14T20:30:02Z |
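The projector commit above replaces direct `projector` references with a shared `projectorEventContext` that fans selection, hover, and projection-change events out to registered listeners. A minimal sketch of that listener-registry pattern in C++ (the real code is TypeScript; the class and method names below are illustrative stand-ins):

```cpp
#include <cstdio>
#include <functional>
#include <utility>
#include <vector>

class ProjectorEventContext {
 public:
  using SelectionListener = std::function<void(const std::vector<int>&)>;

  void RegisterSelectionChangedListener(SelectionListener listener) {
    selection_listeners_.push_back(std::move(listener));
  }

  // Every registered panel receives the same index list, so the inspector
  // panel, scatter plot, and bookmark panel stay in sync from one call site.
  void NotifySelectionChanged(const std::vector<int>& indices) {
    for (const auto& listener : selection_listeners_) listener(indices);
  }

 private:
  std::vector<SelectionListener> selection_listeners_;
};

int main() {
  ProjectorEventContext ctx;
  ctx.RegisterSelectionChangedListener([](const std::vector<int>& indices) {
    std::printf("inspector: %zu points selected\n", indices.size());
  });
  ctx.NotifySelectionChanged({3, 1, 4});  // prints "inspector: 3 points selected"
}
```

Routing every panel through one notify call is what lets the diff drop per-panel plumbing such as the direct `updateInspectorPane` call.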
mmm a / src / compiler / js - native - context - specialization . cc <nl> ppp b / src / compiler / js - native - context - specialization . cc <nl> bool JSNativeContextSpecialization : : ComputePropertyAccessInfo ( <nl> Handle < Map > map , Handle < Name > name , PropertyAccessMode access_mode , <nl> PropertyAccessInfo * access_info ) { <nl> MaybeHandle < JSObject > holder ; <nl> - Handle < Map > receiver_map = map ; <nl> - Type * receiver_type = Type : : Class ( receiver_map , graph ( ) - > zone ( ) ) ; <nl> + Type * receiver_type = Type : : Class ( map , graph ( ) - > zone ( ) ) ; <nl> while ( CanInlinePropertyAccess ( map ) ) { <nl> / / Check for special JSObject field accessors . <nl> int offset ; <nl> bool JSNativeContextSpecialization : : ComputePropertyAccessInfo ( <nl> Handle < DescriptorArray > descriptors ( map - > instance_descriptors ( ) , isolate ( ) ) ; <nl> int const number = descriptors - > SearchWithCache ( * name , * map ) ; <nl> if ( number ! = DescriptorArray : : kNotFound ) { <nl> - if ( access_mode = = kStore & & ! map . is_identical_to ( receiver_map ) ) { <nl> - return false ; <nl> - } <nl> PropertyDetails const details = descriptors - > GetDetails ( number ) ; <nl> if ( details . type ( ) = = DATA_CONSTANT ) { <nl> * access_info = PropertyAccessInfo : : DataConstant ( <nl> bool JSNativeContextSpecialization : : ComputePropertyAccessInfo ( <nl> return true ; <nl> } else if ( details . type ( ) = = DATA ) { <nl> / / Don ' t bother optimizing stores to read - only properties . <nl> - if ( access_mode = = kStore & & details . IsReadOnly ( ) ) { <nl> + if ( access_mode = = kStore ) { <nl> break ; <nl> } <nl> int index = descriptors - > GetFieldIndex ( number ) ; <nl> Representation field_representation = details . representation ( ) ; <nl> FieldIndex field_index = FieldIndex : : ForPropertyIndex ( <nl> * map , index , field_representation . IsDouble ( ) ) ; <nl> - Type * field_type = Type : : Tagged ( ) ; <nl> + Type * field_type = Type : : Any ( ) ; <nl> if ( field_representation . IsSmi ( ) ) { <nl> field_type = Type : : Intersect ( Type : : SignedSmall ( ) , <nl> Type : : TaggedSigned ( ) , graph ( ) - > zone ( ) ) ; <nl> bool JSNativeContextSpecialization : : ComputePropertyAccessInfos ( <nl> PropertyAccessMode access_mode , <nl> ZoneVector < PropertyAccessInfo > * access_infos ) { <nl> for ( Handle < Map > map : maps ) { <nl> - if ( Map : : TryUpdate ( map ) . ToHandle ( & map ) ) { <nl> - PropertyAccessInfo access_info ; <nl> - if ( ! ComputePropertyAccessInfo ( map , name , access_mode , & access_info ) ) { <nl> - return false ; <nl> - } <nl> - access_infos - > push_back ( access_info ) ; <nl> + PropertyAccessInfo access_info ; <nl> + if ( ! ComputePropertyAccessInfo ( map , name , access_mode , & access_info ) ) { <nl> + return false ; <nl> } <nl> + access_infos - > push_back ( access_info ) ; <nl> } <nl> return true ; <nl> } <nl> Reduction JSNativeContextSpecialization : : ReduceJSLoadNamed ( Node * node ) { <nl> if ( ! ComputePropertyAccessInfos ( receiver_maps , name , kLoad , & access_infos ) ) { <nl> return NoChange ( ) ; <nl> } <nl> - <nl> - / / Nothing to do if we have no non - deprecated maps . <nl> - if ( access_infos . empty ( ) ) return NoChange ( ) ; <nl> + DCHECK ( ! access_infos . empty ( ) ) ; <nl> <nl> / / The final states for every polymorphic branch . We join them with <nl> / / Merge + Phi + EffectPhi at the bottom . 
<nl> Reduction JSNativeContextSpecialization : : ReduceJSStoreNamed ( Node * node ) { <nl> / / Determine actual holder and perform prototype chain checks . <nl> Handle < JSObject > holder ; <nl> if ( access_info . holder ( ) . ToHandle ( & holder ) ) { <nl> + this_receiver = jsgraph ( ) - > Constant ( holder ) ; <nl> for ( auto i = access_info . receiver_type ( ) - > Classes ( ) ; ! i . Done ( ) ; <nl> i . Advance ( ) ) { <nl> Handle < Map > map = i . Current ( ) ; <nl> | Revert of [ turbofan ] Fix and enable property stores . ( patchset id : 20001 of https : / / codereview . chromium . org / 1424523002 / ) | v8/v8 | 50e5a7275f2bc93a8ae731acda0f7ac6e5e97c3c | 2015-10-23T11:16:02Z |
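The revert above restores the all-or-nothing contract in `ComputePropertyAccessInfos`: rather than silently skipping receiver maps that fail `Map::TryUpdate`, one unsupported map now abandons the optimization for the whole access site, which is what lets the caller `DCHECK(!access_infos.empty())`. A simplified sketch of that contract (the types here are illustrative stand-ins, not the real v8 classes):

```cpp
#include <iostream>
#include <optional>
#include <vector>

struct Map {
  bool can_inline;  // stand-in for v8's CanInlinePropertyAccess(map)
};
struct PropertyAccessInfo {};

// Hypothetical per-map analysis; nullopt means "cannot inline this map".
std::optional<PropertyAccessInfo> ComputeAccessInfo(const Map& map) {
  if (!map.can_inline) return std::nullopt;
  return PropertyAccessInfo{};
}

// All-or-nothing: any map that cannot be analyzed disables the whole
// polymorphic access site instead of being skipped.
bool ComputeAccessInfos(const std::vector<Map>& maps,
                        std::vector<PropertyAccessInfo>* infos) {
  for (const Map& map : maps) {
    auto info = ComputeAccessInfo(map);
    if (!info) return false;
    infos->push_back(*info);
  }
  return true;  // the caller may then assert !infos->empty()
}

int main() {
  std::vector<PropertyAccessInfo> infos;
  std::cout << ComputeAccessInfos({{true}, {false}}, &infos) << "\n";  // 0
}
```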
mmm a / hphp / compiler / analysis / emitter . cpp <nl> ppp b / hphp / compiler / analysis / emitter . cpp <nl> static void emitContinuationMethod ( UnitEmitter & ue , FuncEmitter * fe , <nl> not_reached ( ) ; <nl> } <nl> <nl> - ue . emitOp ( OpContStopped ) ; <nl> ue . emitOp ( OpNull ) ; <nl> ue . emitOp ( OpRetC ) ; <nl> break ; <nl> mmm a / hphp / doc / bytecode . specification <nl> ppp b / hphp / doc / bytecode . specification <nl> ContCurrent [ ] - > [ C ] <nl> Get continuation value . $ this must be a Continuation object . Pushes the most <nl> recently yielded value from the continuation onto the stack . <nl> <nl> - ContStopped [ ] - > [ ] <nl> - <nl> - Mark continuation as stopped . $ this must be a Continuation object . <nl> - <nl> 15 . Async functions <nl> mmmmmmmmmmmmmmmmmm - <nl> <nl> mmm a / hphp / hhbbc / interp . cpp <nl> ppp b / hphp / hhbbc / interp . cpp <nl> void in ( ISS & env , const bc : : ContCheck & ) { } <nl> void in ( ISS & env , const bc : : ContValid & ) { push ( env , TBool ) ; } <nl> void in ( ISS & env , const bc : : ContKey & ) { push ( env , TInitCell ) ; } <nl> void in ( ISS & env , const bc : : ContCurrent & ) { push ( env , TInitCell ) ; } <nl> - void in ( ISS & env , const bc : : ContStopped & ) { } <nl> <nl> void in ( ISS & env , const bc : : Await & ) { <nl> auto const t = popC ( env ) ; <nl> mmm a / hphp / runtime / ext / ext_continuation . cpp <nl> ppp b / hphp / runtime / ext / ext_continuation . cpp <nl> c_Continuation : : c_Continuation ( Class * cb ) <nl> , m_key ( make_tv < KindOfInt64 > ( - 1LL ) ) <nl> , m_value ( make_tv < KindOfNull > ( ) ) <nl> { <nl> - o_subclassData . u16 = 0 ; <nl> } <nl> <nl> c_Continuation : : ~ c_Continuation ( ) { <nl> - tvRefcountedDecRef ( m_key ) ; <nl> - tvRefcountedDecRef ( m_value ) ; <nl> - <nl> - if ( LIKELY ( done ( ) ) ) { <nl> + if ( LIKELY ( getState ( ) = = Done ) ) { <nl> return ; <nl> } <nl> <nl> + tvRefcountedDecRef ( m_key ) ; <nl> + tvRefcountedDecRef ( m_value ) ; <nl> + <nl> / / Free locals , but don ' t trigger the EventHook for FunctionExit <nl> / / since the continuation function has already been exited . We <nl> / / don ' t want redundant calls . <nl> c_Continuation : : ~ c_Continuation ( ) { <nl> void c_Continuation : : t___construct ( ) { } <nl> <nl> void c_Continuation : : suspend ( Offset offset , const Cell & value ) { <nl> + assert ( getState ( ) = = Running ) ; <nl> resumable ( ) - > setOffset ( offset ) ; <nl> cellSet ( make_tv < KindOfInt64 > ( + + m_index ) , m_key ) ; <nl> cellSet ( value , m_value ) ; <nl> + setState ( Started ) ; <nl> } <nl> <nl> void c_Continuation : : suspend ( Offset offset , const Cell & key , <nl> const Cell & value ) { <nl> + assert ( getState ( ) = = Running ) ; <nl> resumable ( ) - > setOffset ( offset ) ; <nl> cellSet ( key , m_key ) ; <nl> cellSet ( value , m_value ) ; <nl> void c_Continuation : : suspend ( Offset offset , const Cell & key , <nl> int64_t new_index = m_key . m_data . num ; <nl> m_index = new_index > m_index ? 
new_index : m_index ; <nl> } <nl> + setState ( Started ) ; <nl> } <nl> <nl> - Variant c_Continuation : : t_current ( ) { <nl> - const_assert ( false ) ; <nl> - return tvAsCVarRef ( & m_value ) ; <nl> - } <nl> - <nl> - Variant c_Continuation : : t_key ( ) { <nl> - startedCheck ( ) ; <nl> - return tvAsCVarRef ( & m_key ) ; <nl> - } <nl> - <nl> - void c_Continuation : : t_next ( ) { <nl> - const_assert ( false ) ; <nl> - } <nl> - <nl> - const StaticString <nl> - s_next ( " next " ) , <nl> - s__closure_ ( " { closure } " ) , <nl> - s_this ( " this " ) ; <nl> + / / Functions with native implementation . <nl> + void c_Continuation : : t_next ( ) { const_assert ( false ) ; } <nl> + void c_Continuation : : t_send ( const Variant & v ) { const_assert ( false ) ; } <nl> + void c_Continuation : : t_raise ( const Variant & v ) { const_assert ( false ) ; } <nl> + bool c_Continuation : : t_valid ( ) { const_assert ( false ) ; } <nl> + Variant c_Continuation : : t_current ( ) { const_assert ( false ) ; } <nl> + Variant c_Continuation : : t_key ( ) { const_assert ( false ) ; } <nl> <nl> + const StaticString s_next ( " next " ) ; <nl> void c_Continuation : : t_rewind ( ) { <nl> this - > o_invoke_few_args ( s_next , 0 ) ; <nl> } <nl> <nl> - bool c_Continuation : : t_valid ( ) { <nl> - const_assert ( false ) ; <nl> - return ! done ( ) ; <nl> - } <nl> - <nl> - void c_Continuation : : t_send ( const Variant & v ) { <nl> - const_assert ( false ) ; <nl> - } <nl> - <nl> - void c_Continuation : : t_raise ( const Variant & v ) { <nl> - const_assert ( false ) ; <nl> - } <nl> - <nl> + const StaticString s__closure_ ( " { closure } " ) ; <nl> String c_Continuation : : t_getorigfuncname ( ) { <nl> const Func * origFunc = actRec ( ) - > func ( ) ; <nl> auto const origName = origFunc - > isClosureBody ( ) ? s__closure_ . get ( ) <nl> c_Continuation * c_Continuation : : Clone ( ObjectData * obj ) { <nl> <nl> c_Continuation * cont = Create ( fp , thiz - > resumable ( ) - > offset ( ) ) ; <nl> cont - > copyContinuationVars ( fp ) ; <nl> - cont - > o_subclassData . u16 = thiz - > o_subclassData . u16 ; <nl> + cont - > setState ( thiz - > getState ( ) ) ; <nl> cont - > m_index = thiz - > m_index ; <nl> cellSet ( thiz - > m_key , cont - > m_key ) ; <nl> cellSet ( thiz - > m_value , cont - > m_value ) ; <nl> mmm a / hphp / runtime / ext / ext_continuation . h <nl> ppp b / hphp / runtime / ext / ext_continuation . h <nl> struct c_Continuation : ExtObjectDataFlags < ObjectData : : HasClone > { <nl> static constexpr ptrdiff_t offsetOff ( ) { <nl> return resumableOff ( ) + Resumable : : offsetOff ( ) ; <nl> } <nl> - static constexpr ptrdiff_t startedOff ( ) { <nl> - return offsetof ( c_Continuation , o_subclassData . u8 [ 0 ] ) ; <nl> - } <nl> static constexpr ptrdiff_t stateOff ( ) { <nl> - return offsetof ( c_Continuation , o_subclassData . u8 [ 1 ] ) ; <nl> + return offsetof ( c_Continuation , o_subclassData . u8 [ 0 ] ) ; <nl> } <nl> <nl> - bool started ( ) const { return o_subclassData . u8 [ 0 ] ; } <nl> - void start ( ) { o_subclassData . u8 [ 0 ] = true ; } <nl> - <nl> - enum ContState : uint8_t { <nl> - Running = 1 , <nl> - Done = 2 <nl> + enum GeneratorState : uint8_t { <nl> + Created = 0 , / / generator was created but never iterated <nl> + Started = 1 , / / generator was iterated but not currently running <nl> + Running = 2 , / / generator is currently being iterated <nl> + Done = 3 / / generator has finished its execution <nl> } ; <nl> <nl> - bool done ( ) const { return o_subclassData . 
u8 [ 1 ] & ContState : : Done ; } <nl> - void setDone ( ) { o_subclassData . u8 [ 1 ] = ContState : : Done ; } <nl> - <nl> - bool running ( ) const { return o_subclassData . u8 [ 1 ] & ContState : : Running ; } <nl> - void setRunning ( ) { o_subclassData . u8 [ 1 ] = ContState : : Running ; } <nl> - void setStopped ( ) { o_subclassData . u8 [ 1 ] & = ~ ContState : : Running ; } <nl> + GeneratorState getState ( ) const { <nl> + return static_cast < GeneratorState > ( o_subclassData . u8 [ 0 ] ) ; <nl> + } <nl> + void setState ( GeneratorState state ) { o_subclassData . u8 [ 0 ] = state ; } <nl> <nl> void t___construct ( ) ; <nl> void suspend ( Offset offset , const Cell & value ) ; <nl> struct c_Continuation : ExtObjectDataFlags < ObjectData : : HasClone > { <nl> auto const cont = new ( obj ) c_Continuation ( ) ; <nl> cont - > incRefCount ( ) ; <nl> cont - > setNoDestruct ( ) ; <nl> + cont - > setState ( Created ) ; <nl> return cont ; <nl> } <nl> <nl> struct c_Continuation : ExtObjectDataFlags < ObjectData : : HasClone > { <nl> return base + 2 ; <nl> } <nl> <nl> - inline void preNext ( ) { <nl> - if ( done ( ) ) { <nl> - throw_exception ( Object ( SystemLib : : AllocExceptionObject ( <nl> - " Continuation is already finished " ) ) ) ; <nl> - } <nl> - if ( running ( ) ) { <nl> - throw_exception ( Object ( SystemLib : : AllocExceptionObject ( <nl> - " Continuation is already running " ) ) ) ; <nl> + inline void startedCheck ( ) { <nl> + if ( getState ( ) = = Created ) { <nl> + throw_exception ( Object ( <nl> + SystemLib : : AllocExceptionObject ( " Need to call next ( ) first " ) ) ) ; <nl> } <nl> - setRunning ( ) ; <nl> - start ( ) ; <nl> } <nl> <nl> - inline void startedCheck ( ) { <nl> - if ( ! started ( ) ) { <nl> - throw_exception ( <nl> - Object ( SystemLib : : AllocExceptionObject ( " Need to call next ( ) first " ) ) ) ; <nl> + inline void preNext ( bool checkStarted ) { <nl> + if ( checkStarted ) { <nl> + startedCheck ( ) ; <nl> + } <nl> + if ( getState ( ) = = Running ) { <nl> + throw_exception ( Object ( <nl> + SystemLib : : AllocExceptionObject ( " Generator is already running " ) ) ) ; <nl> } <nl> + if ( getState ( ) = = Done ) { <nl> + throw_exception ( Object ( <nl> + SystemLib : : AllocExceptionObject ( " Generator is already finished " ) ) ) ; <nl> + } <nl> + assert ( getState ( ) = = Created | | getState ( ) = = Started ) ; <nl> + setState ( Running ) ; <nl> + } <nl> + <nl> + inline void finish ( ) { <nl> + assert ( getState ( ) = = Running ) ; <nl> + cellSet ( make_tv < KindOfNull > ( ) , m_key ) ; <nl> + cellSet ( make_tv < KindOfNull > ( ) , m_value ) ; <nl> + setState ( Done ) ; <nl> } <nl> <nl> private : <nl> mmm a / hphp / runtime / vm / bytecode . cpp <nl> ppp b / hphp / runtime / vm / bytecode . cpp <nl> OPTBLD_INLINE void ExecutionContext : : ret ( IOP_ARGS ) { <nl> cellCopy ( retval , waitHandle - > getResult ( ) ) ; <nl> } else if ( m_fp - > func ( ) - > isGenerator ( ) ) { <nl> / / Mark the generator as finished and store the return value . <nl> - auto cont = frame_continuation ( m_fp ) ; <nl> - cont - > setDone ( ) ; <nl> - tvSetIgnoreRef ( retval , cont - > m_value ) ; <nl> + assert ( IS_NULL_TYPE ( retval . m_type ) ) ; <nl> + frame_continuation ( m_fp ) - > finish ( ) ; <nl> } else { <nl> not_reached ( ) ; <nl> } <nl> OPTBLD_INLINE void ExecutionContext : : contEnterImpl ( IOP_ARGS ) { <nl> / / Do linkage of the continuation ' s AR . 
<nl> assert ( m_fp - > hasThis ( ) ) ; <nl> c_Continuation * cont = this_continuation ( m_fp ) ; <nl> + assert ( cont - > getState ( ) = = c_Continuation : : Running ) ; <nl> ActRec * contAR = cont - > actRec ( ) ; <nl> contAR - > setReturn ( m_fp , pc , tx - > uniqueStubs . genRetHelper ) ; <nl> <nl> OPTBLD_INLINE void ExecutionContext : : iopYieldK ( IOP_ARGS ) { <nl> <nl> OPTBLD_INLINE void ExecutionContext : : iopContCheck ( IOP_ARGS ) { <nl> NEXT ( ) ; <nl> - DECODE_IVA ( check_started ) ; <nl> - c_Continuation * cont = this_continuation ( m_fp ) ; <nl> - if ( check_started ) { <nl> - cont - > startedCheck ( ) ; <nl> - } <nl> - cont - > preNext ( ) ; <nl> + DECODE_IVA ( checkStarted ) ; <nl> + this_continuation ( m_fp ) - > preNext ( checkStarted ) ; <nl> } <nl> <nl> OPTBLD_INLINE void ExecutionContext : : iopContValid ( IOP_ARGS ) { <nl> NEXT ( ) ; <nl> TypedValue * tv = m_stack . allocTV ( ) ; <nl> tvWriteUninit ( tv ) ; <nl> - tvAsVariant ( tv ) = ! this_continuation ( m_fp ) - > done ( ) ; <nl> + tvAsVariant ( tv ) = this_continuation ( m_fp ) - > getState ( ) ! = c_Continuation : : Done ; <nl> } <nl> <nl> OPTBLD_INLINE void ExecutionContext : : iopContKey ( IOP_ARGS ) { <nl> OPTBLD_INLINE void ExecutionContext : : iopContCurrent ( IOP_ARGS ) { <nl> cellDup ( cont - > m_value , * m_stack . allocC ( ) ) ; <nl> } <nl> <nl> - OPTBLD_INLINE void ExecutionContext : : iopContStopped ( IOP_ARGS ) { <nl> - NEXT ( ) ; <nl> - this_continuation ( m_fp ) - > setStopped ( ) ; <nl> - } <nl> - <nl> OPTBLD_INLINE void ExecutionContext : : asyncSuspendE ( IOP_ARGS , int32_t iters ) { <nl> assert ( ! m_fp - > resumed ( ) ) ; <nl> assert ( m_fp - > func ( ) - > isAsync ( ) ) ; <nl> mmm a / hphp / runtime / vm / hhbc . h <nl> ppp b / hphp / runtime / vm / hhbc . h <nl> enum class BareThisOp : uint8_t { <nl> O ( ContValid , NA , NOV , ONE ( CV ) , NF ) \ <nl> O ( ContKey , NA , NOV , ONE ( CV ) , NF ) \ <nl> O ( ContCurrent , NA , NOV , ONE ( CV ) , NF ) \ <nl> - O ( ContStopped , NA , NOV , NOV , NF ) \ <nl> O ( Await , ONE ( IVA ) , ONE ( CV ) , ONE ( CV ) , NF ) \ <nl> O ( Strlen , NA , ONE ( CV ) , ONE ( CV ) , NF ) \ <nl> O ( IncStat , TWO ( IVA , IVA ) , NOV , NOV , NF ) \ <nl> mmm a / hphp / runtime / vm / jit / code - gen - arm . cpp <nl> ppp b / hphp / runtime / vm / jit / code - gen - arm . cpp <nl> PUNT_OPCODE ( AKExists ) <nl> PUNT_OPCODE ( ContEnter ) <nl> PUNT_OPCODE ( ContPreNext ) <nl> PUNT_OPCODE ( ContStartedCheck ) <nl> - PUNT_OPCODE ( ContSetRunning ) <nl> PUNT_OPCODE ( ContValid ) <nl> PUNT_OPCODE ( ContArIncKey ) <nl> PUNT_OPCODE ( ContArUpdateIdx ) <nl> mmm a / hphp / runtime / vm / jit / code - gen - x64 . cpp <nl> ppp b / hphp / runtime / vm / jit / code - gen - x64 . cpp <nl> void CodeGenerator : : cgContEnter ( IRInstruction * inst ) { <nl> } <nl> <nl> void CodeGenerator : : cgContPreNext ( IRInstruction * inst ) { <nl> - auto contReg = srcLoc ( 0 ) . reg ( ) ; <nl> + auto contReg = srcLoc ( 0 ) . reg ( ) ; <nl> + auto checkStarted = inst - > src ( 1 ) - > boolVal ( ) ; <nl> + auto stateOff = c_Continuation : : stateOff ( ) ; <nl> <nl> - const Offset startedOffset = c_Continuation : : startedOff ( ) ; <nl> - const Offset stateOffset = c_Continuation : : stateOff ( ) ; <nl> - / / Check done and running at the same time <nl> - m_as . 
testb ( 0x3 , contReg [ stateOffset ] ) ; <nl> - emitFwdJcc ( CC_NZ , inst - > taken ( ) ) ; <nl> + static_assert ( c_Continuation : : Created = = 0 , " used below " ) ; <nl> + static_assert ( c_Continuation : : Started = = 1 , " used below " ) ; <nl> + <nl> + / / Take exit if state ! = 1 ( checkStarted ) or state > 1 ( ! checkStarted ) . <nl> + m_as . cmpb ( 1 , contReg [ stateOff ] ) ; <nl> + emitFwdJcc ( checkStarted ? CC_NE : CC_A , inst - > taken ( ) ) ; <nl> <nl> - static_assert ( startedOffset + 1 = = stateOffset , <nl> - " started should immediately precede state " ) ; <nl> - m_as . storew ( 0x101 , contReg [ startedOffset ] ) ; <nl> + / / Set generator state as Running . <nl> + m_as . storeb ( c_Continuation : : Running , contReg [ stateOff ] ) ; <nl> } <nl> <nl> void CodeGenerator : : cgContStartedCheck ( IRInstruction * inst ) { <nl> - auto contReg = srcLoc ( 0 ) . reg ( ) ; <nl> - auto startedOffset = c_Continuation : : startedOff ( ) ; <nl> - <nl> - m_as . testb ( 0x1 , contReg [ startedOffset ] ) ; <nl> - emitFwdJcc ( CC_Z , inst - > taken ( ) ) ; <nl> - } <nl> + auto contReg = srcLoc ( 0 ) . reg ( ) ; <nl> + auto stateOff = c_Continuation : : stateOff ( ) ; <nl> <nl> - void CodeGenerator : : cgContSetRunning ( IRInstruction * inst ) { <nl> - auto contReg = srcLoc ( 0 ) . reg ( ) ; <nl> - bool running = inst - > src ( 1 ) - > boolVal ( ) ; <nl> + static_assert ( c_Continuation : : Created = = 0 , " used below " ) ; <nl> <nl> - const Offset stateOffset = c_Continuation : : stateOff ( ) ; <nl> - if ( running ) { <nl> - m_as . storeb ( 0x1 , contReg [ stateOffset ] ) ; <nl> - } else { <nl> - m_as . andb ( 0x2 , contReg [ stateOffset ] ) ; <nl> - } <nl> + / / Take exit if state = = 0 . <nl> + m_as . testb ( int8_t ( 0xff ) , contReg [ stateOff ] ) ; <nl> + emitFwdJcc ( CC_Z , inst - > taken ( ) ) ; <nl> } <nl> <nl> void CodeGenerator : : cgContValid ( IRInstruction * inst ) { <nl> - auto contReg = srcLoc ( 0 ) . reg ( ) ; <nl> - auto destReg = dstLoc ( 0 ) . reg ( ) ; <nl> + auto contReg = srcLoc ( 0 ) . reg ( ) ; <nl> + auto dstReg = dstLoc ( 0 ) . reg ( ) ; <nl> + auto stateOff = c_Continuation : : stateOff ( ) ; <nl> <nl> - m_as . loadzbl ( contReg [ c_Continuation : : stateOff ( ) ] , r32 ( destReg ) ) ; <nl> - m_as . shrl ( 0x1 , r32 ( destReg ) ) ; <nl> - m_as . xorb ( 0x1 , rbyte ( destReg ) ) ; <nl> + / / Return 1 if generator state is not Done . <nl> + m_as . cmpb ( c_Continuation : : Done , contReg [ stateOff ] ) ; <nl> + m_as . setne ( rbyte ( dstReg ) ) ; <nl> + m_as . movzbl ( rbyte ( dstReg ) , r32 ( dstReg ) ) ; <nl> } <nl> <nl> void CodeGenerator : : cgContArIncKey ( IRInstruction * inst ) { <nl> mmm a / hphp / runtime / vm / jit / hhbc - translator . cpp <nl> ppp b / hphp / runtime / vm / jit / hhbc - translator . cpp <nl> void HhbcTranslator : : emitResumedReturnControl ( Block * catchBlock ) { <nl> } <nl> <nl> void HhbcTranslator : : emitYieldImpl ( Offset resumeOffset ) { <nl> - / / set m_value = popC ( ) ; <nl> + / / Set resume offset . <nl> + gen ( StContArRaw , RawMemData { RawMemData : : ContOffset } , m_irb - > fp ( ) , <nl> + cns ( resumeOffset ) ) ; <nl> + <nl> + / / Set yielded value . 
<nl> auto const oldValue = gen ( LdContArValue , Type : : Cell , m_irb - > fp ( ) ) ; <nl> gen ( StContArValue , m_irb - > fp ( ) , popC ( DataTypeGeneric ) ) ; / / teleporting value <nl> gen ( DecRef , oldValue ) ; <nl> <nl> - / / set m_offset = offset ; <nl> - gen ( StContArRaw , RawMemData { RawMemData : : ContOffset } , m_irb - > fp ( ) , <nl> - cns ( resumeOffset ) ) ; <nl> + / / Set state from Running to Started . <nl> + gen ( StContArRaw , RawMemData { RawMemData : : ContState } , m_irb - > fp ( ) , <nl> + cns ( c_Continuation : : Started ) ) ; <nl> } <nl> <nl> void HhbcTranslator : : emitYield ( Offset resumeOffset ) { <nl> void HhbcTranslator : : emitYieldK ( Offset resumeOffset ) { <nl> void HhbcTranslator : : emitContCheck ( bool checkStarted ) { <nl> assert ( curClass ( ) ) ; <nl> SSATmp * cont = gen ( LdThis , m_irb - > fp ( ) ) ; <nl> - if ( checkStarted ) { <nl> - gen ( ContStartedCheck , makeExitSlow ( ) , cont ) ; <nl> - } <nl> - gen ( ContPreNext , makeExitSlow ( ) , cont ) ; <nl> + gen ( ContPreNext , makeExitSlow ( ) , cont , cns ( checkStarted ) ) ; <nl> } <nl> <nl> void HhbcTranslator : : emitContValid ( ) { <nl> void HhbcTranslator : : emitContCurrent ( ) { <nl> pushIncRef ( value ) ; <nl> } <nl> <nl> - void HhbcTranslator : : emitContStopped ( ) { <nl> - assert ( curClass ( ) ) ; <nl> - SSATmp * cont = gen ( LdThis , m_irb - > fp ( ) ) ; <nl> - <nl> - gen ( ContSetRunning , cont , cns ( false ) ) ; <nl> - } <nl> - <nl> void HhbcTranslator : : emitAwaitE ( SSATmp * child , Block * catchBlock , <nl> Offset resumeOffset , int numIters ) { <nl> assert ( curFunc ( ) - > isAsync ( ) ) ; <nl> void HhbcTranslator : : emitRet ( Type type , bool freeInline ) { <nl> / / Pop the return value . Since it will be teleported to its place in memory , <nl> / / we don ' t care about the type . <nl> auto catchBlock = makeCatch ( ) ; <nl> - SSATmp * retVal = pop ( type , DataTypeGeneric ) ; <nl> + SSATmp * retVal = pop ( type , func - > isGenerator ( ) ? DataTypeSpecific <nl> + : DataTypeGeneric ) ; <nl> <nl> / / Free $ this . <nl> if ( func - > mayHaveThis ( ) ) { <nl> void HhbcTranslator : : emitRet ( Type type , bool freeInline ) { <nl> / / Sync SP . <nl> sp = spillStack ( ) ; <nl> } else if ( func - > isGenerator ( ) ) { <nl> - / / Mark generator as finished . <nl> - gen ( StContArRaw , RawMemData { RawMemData : : ContState } , m_irb - > fp ( ) , <nl> - cns ( c_Continuation : : Done ) ) ; <nl> + assert ( retVal - > type ( ) < = Type : : Null ) ; <nl> + <nl> + / / Clear generator ' s key and value . <nl> + auto const oldKey = gen ( LdContArKey , Type : : Cell , m_irb - > fp ( ) ) ; <nl> + gen ( StContArKey , m_irb - > fp ( ) , cns ( Type : : InitNull ) ) ; <nl> + gen ( DecRef , oldKey ) ; <nl> <nl> - / / Store the return value . <nl> auto const oldValue = gen ( LdContArValue , Type : : Cell , m_irb - > fp ( ) ) ; <nl> - gen ( StContArValue , m_irb - > fp ( ) , retVal ) ; <nl> + gen ( StContArValue , m_irb - > fp ( ) , cns ( Type : : InitNull ) ) ; <nl> gen ( DecRef , oldValue ) ; <nl> <nl> + / / Mark generator as finished . <nl> + gen ( StContArRaw , RawMemData { RawMemData : : ContState } , m_irb - > fp ( ) , <nl> + cns ( c_Continuation : : Done ) ) ; <nl> + <nl> / / Sync SP . <nl> sp = spillStack ( ) ; <nl> } else { <nl> mmm a / hphp / runtime / vm / jit / hhbc - translator . h <nl> ppp b / hphp / runtime / vm / jit / hhbc - translator . 
h <nl> struct HhbcTranslator { <nl> void emitContValid ( ) ; <nl> void emitContKey ( ) ; <nl> void emitContCurrent ( ) ; <nl> - void emitContStopped ( ) ; <nl> <nl> / / async functions <nl> void emitAwaitE ( SSATmp * child , Block * catchBlock , Offset resumeOffset , <nl> mmm a / hphp / runtime / vm / jit / ir . h <nl> ppp b / hphp / runtime / vm / jit / ir . h <nl> O ( Shuffle , ND , SUnk , NF ) \ <nl> O ( CreateCont , D ( Obj ) , S ( FramePtr ) C ( Int ) , E | N | PRc ) \ <nl> O ( ContEnter , ND , S ( FramePtr ) \ <nl> S ( TCA ) C ( Int ) S ( FramePtr ) , E ) \ <nl> - O ( ContPreNext , ND , S ( Obj ) , B | E ) \ <nl> + O ( ContPreNext , ND , S ( Obj ) C ( Bool ) , B | E ) \ <nl> O ( ContStartedCheck , ND , S ( Obj ) , B | E ) \ <nl> - O ( ContSetRunning , ND , S ( Obj ) C ( Bool ) , E ) \ <nl> O ( ContValid , D ( Bool ) , S ( Obj ) , E ) \ <nl> O ( ContArIncKey , ND , S ( FramePtr ) , E ) \ <nl> O ( ContArUpdateIdx , ND , S ( FramePtr ) S ( Int ) , E ) \ <nl> mmm a / hphp / runtime / vm / jit / translator - instrs . h <nl> ppp b / hphp / runtime / vm / jit / translator - instrs . h <nl> <nl> CASE ( Concat ) \ <nl> CASE ( ContCurrent ) \ <nl> CASE ( ContKey ) \ <nl> - CASE ( ContStopped ) \ <nl> CASE ( ContValid ) \ <nl> CASE ( Dup ) \ <nl> CASE ( EmptyG ) \ <nl> mmm a / hphp / runtime / vm / jit / translator . cpp <nl> ppp b / hphp / runtime / vm / jit / translator . cpp <nl> static const struct { <nl> { OpContValid , { None , Stack1 , OutBoolean , 1 } } , <nl> { OpContKey , { None , Stack1 , OutUnknown , 1 } } , <nl> { OpContCurrent , { None , Stack1 , OutUnknown , 1 } } , <nl> - { OpContStopped , { None , None , OutNone , 0 } } , <nl> <nl> / * * * 15 . Async functions instructions * * * / <nl> <nl> mmm a / hphp / runtime / vm / unwind . cpp <nl> ppp b / hphp / runtime / vm / unwind . cpp <nl> void tearDownFrame ( ActRec * & fp , Stack & stack , PC & pc ) { <nl> } else if ( fp - > func ( ) - > isAsync ( ) ) { <nl> / / Do nothing . AsyncFunctionWaitHandle will handle the exception . <nl> } else if ( fp - > func ( ) - > isGenerator ( ) ) { <nl> - / / Mark the generator as finished and clear its m_value . <nl> - auto cont = frame_continuation ( fp ) ; <nl> - cont - > setDone ( ) ; <nl> - cellSet ( make_tv < KindOfNull > ( ) , cont - > m_value ) ; <nl> + / / Mark the generator as finished . <nl> + frame_continuation ( fp ) - > finish ( ) ; <nl> } else { <nl> not_reached ( ) ; <nl> } <nl> mmm a / hphp / test / quick / generator_vars . php . expect <nl> ppp b / hphp / test / quick / generator_vars . php . expect <nl> array ( 5 ) { <nl> int ( 5 ) <nl> } <nl> int ( 4 ) <nl> - string ( 32 ) " Continuation is already finished " <nl> + string ( 29 ) " Generator is already finished " <nl> array ( 12 ) { <nl> [ " b " ] = > <nl> int ( 2 ) <nl> mmm a / hphp / test / slow / yield / 2176 . php . expect <nl> ppp b / hphp / test / slow / yield / 2176 . php . expect <nl> <nl> string ( 33 ) " Got yieldedException , re - raising . " <nl> - string ( 32 ) " Continuation is already finished " <nl> + string ( 29 ) " Generator is already finished " <nl> mmm a / hphp / test / slow / yield / 2183 . php . expect <nl> ppp b / hphp / test / slow / yield / 2183 . php . 
expect <nl> in __destruct ( ) : object ( Evil ) # 2 ( 0 ) { <nl> current : object ( Evil ) # 3 ( 0 ) { <nl> } <nl> iter from destructor <nl> - Caught : Continuation is already running <nl> + Caught : Generator is already running <nl> iter 5 <nl> gen 5 <nl> in __destruct ( ) : object ( Evil ) # 3 ( 0 ) { <nl> } <nl> current : NULL <nl> iter from destructor <nl> - Caught : Continuation is already finished <nl> + Caught : Generator is already running <nl> Finished ! <nl> Returned from main safely <nl> | Clean up internal generator state machine . | facebook/hhvm | 1d417e6085c3606ea2fd9440c00dbc4668c06675 | 2014-04-28T16:29:13Z |
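The HHVM commit collapses the old separate `started`/`running`/`done` flag bits into one explicit four-state machine. A compact model of that machine, reusing the state encoding and exception messages from the diff (the surrounding class is a simplified stand-in for `c_Continuation`):

```cpp
#include <cstdint>
#include <stdexcept>

class Generator {
 public:
  // Same encoding as the diff: Created = 0 and Started = 1 are relied on
  // by the JIT, which compares the raw state byte against 1.
  enum State : uint8_t { Created = 0, Started = 1, Running = 2, Done = 3 };

  // Guard before resuming: rejects re-entrant and exhausted generators,
  // and (for key()/current()) requires that next() ran at least once.
  void preNext(bool checkStarted) {
    if (checkStarted && state_ == Created)
      throw std::runtime_error("Need to call next() first");
    if (state_ == Running)
      throw std::runtime_error("Generator is already running");
    if (state_ == Done)
      throw std::runtime_error("Generator is already finished");
    state_ = Running;  // Created or Started -> Running
  }

  void suspend() { state_ = Started; }  // yield: resumable again
  void finish() { state_ = Done; }      // return or unwind: terminal

  State state() const { return state_; }

 private:
  State state_ = Created;
};
```

The resulting diagram is Created → Running ⇄ Started, with Running → Done as the only exit, which is why the destructor in the diff can skip cleanup exactly when the state is already Done.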
mmm a / test / functional / test_framework / test_node . py <nl> ppp b / test / functional / test_framework / test_node . py <nl> <nl> from . authproxy import JSONRPCException <nl> from . util import ( <nl> append_config , <nl> - assert_equal , <nl> delete_cookie_file , <nl> get_rpc_proxy , <nl> rpc_url , <nl> def __init__ ( self , i , datadir , rpchost , timewait , binary , stderr , mocktime , cove <nl> <nl> self . p2ps = [ ] <nl> <nl> + def _node_msg ( self , msg : str ) - > str : <nl> + " " " Return a modified msg that identifies this node by its index as a debugging aid . " " " <nl> + return " [ node % d ] % s " % ( self . index , msg ) <nl> + <nl> + def _raise_assertion_error ( self , msg : str ) : <nl> + " " " Raise an AssertionError with msg modified to identify this node . " " " <nl> + raise AssertionError ( self . _node_msg ( msg ) ) <nl> + <nl> def __del__ ( self ) : <nl> # Ensure that we don ' t leave any bitcoind processes lying around after <nl> # the test ends <nl> def __del__ ( self ) : <nl> # Should only happen on test failure <nl> # Avoid using logger , as that may have already been shutdown when <nl> # this destructor is called . <nl> - print ( " Cleaning up leftover process " ) <nl> + print ( self . _node_msg ( " Cleaning up leftover process " ) ) <nl> self . process . kill ( ) <nl> <nl> def __getattr__ ( self , name ) : <nl> def __getattr__ ( self , name ) : <nl> if self . use_cli : <nl> return getattr ( self . cli , name ) <nl> else : <nl> - assert self . rpc_connected and self . rpc is not None , " Error : no RPC connection " <nl> + assert self . rpc_connected and self . rpc is not None , self . _node_msg ( " Error : no RPC connection " ) <nl> return getattr ( self . rpc , name ) <nl> <nl> def start ( self , extra_args = None , stderr = None , * args , * * kwargs ) : <nl> def wait_for_rpc_connection ( self ) : <nl> poll_per_s = 4 <nl> for _ in range ( poll_per_s * self . rpc_timeout ) : <nl> if self . process . poll ( ) is not None : <nl> - raise FailedToStartError ( ' bitcoind exited with status { } during initialization ' . format ( self . process . returncode ) ) <nl> + raise FailedToStartError ( self . _node_msg ( <nl> + ' bitcoind exited with status { } during initialization ' . format ( self . process . returncode ) ) ) <nl> try : <nl> self . rpc = get_rpc_proxy ( rpc_url ( self . datadir , self . index , self . rpchost ) , self . index , timeout = self . rpc_timeout , coveragedir = self . coverage_dir ) <nl> self . rpc . getblockcount ( ) <nl> def wait_for_rpc_connection ( self ) : <nl> if " No RPC credentials " not in str ( e ) : <nl> raise <nl> time . sleep ( 1 . 0 / poll_per_s ) <nl> - raise AssertionError ( " Unable to connect to bitcoind " ) <nl> + self . _raise_assertion_error ( " Unable to connect to bitcoind " ) <nl> <nl> def get_wallet_rpc ( self , wallet_name ) : <nl> if self . use_cli : <nl> return self . cli ( " - rpcwallet = { } " . format ( wallet_name ) ) <nl> else : <nl> - assert self . rpc_connected <nl> - assert self . rpc <nl> + assert self . rpc_connected and self . rpc , self . _node_msg ( " RPC not connected " ) <nl> wallet_path = " wallet / % s " % wallet_name <nl> return self . rpc / wallet_path <nl> <nl> def is_node_stopped ( self ) : <nl> return False <nl> <nl> # process has stopped . Assert that it didn ' t return an error code . <nl> - assert_equal ( return_code , 0 ) <nl> + assert return_code = = 0 , self . _node_msg ( <nl> + " Node returned non - zero exit code ( % d ) when stopping " % return_code ) <nl> self . 
running = False <nl> self . process = None <nl> self . rpc_connected = False <nl> def assert_start_raises_init_error ( self , extra_args = None , expected_msg = None , mat <nl> stderr = log_stderr . read ( ) . decode ( ' utf - 8 ' ) . strip ( ) <nl> if match = = ErrorMatch . PARTIAL_REGEX : <nl> if re . search ( expected_msg , stderr , flags = re . MULTILINE ) is None : <nl> - raise AssertionError ( ' Expected message " { } " does not partially match stderr : \ n " { } " ' . format ( expected_msg , stderr ) ) <nl> + self . _raise_assertion_error ( <nl> + ' Expected message " { } " does not partially match stderr : \ n " { } " ' . format ( expected_msg , stderr ) ) <nl> elif match = = ErrorMatch . FULL_REGEX : <nl> if re . fullmatch ( expected_msg , stderr ) is None : <nl> - raise AssertionError ( ' Expected message " { } " does not fully match stderr : \ n " { } " ' . format ( expected_msg , stderr ) ) <nl> + self . _raise_assertion_error ( <nl> + ' Expected message " { } " does not fully match stderr : \ n " { } " ' . format ( expected_msg , stderr ) ) <nl> elif match = = ErrorMatch . FULL_TEXT : <nl> if expected_msg ! = stderr : <nl> - raise AssertionError ( ' Expected message " { } " does not fully match stderr : \ n " { } " ' . format ( expected_msg , stderr ) ) <nl> + self . _raise_assertion_error ( <nl> + ' Expected message " { } " does not fully match stderr : \ n " { } " ' . format ( expected_msg , stderr ) ) <nl> else : <nl> if expected_msg is None : <nl> assert_msg = " bitcoind should have exited with an error " <nl> else : <nl> assert_msg = " bitcoind should have exited with expected error " + expected_msg <nl> - raise AssertionError ( assert_msg ) <nl> + self . _raise_assertion_error ( assert_msg ) <nl> <nl> def node_encrypt_wallet ( self , passphrase ) : <nl> " " " " Encrypts the wallet . <nl> def p2p ( self ) : <nl> <nl> Convenience property - most tests only use a single p2p connection to each <nl> node , so this saves having to write node . p2ps [ 0 ] many times . " " " <nl> - assert self . p2ps , " No p2p connection " <nl> + assert self . p2ps , self . _node_msg ( " No p2p connection " ) <nl> return self . p2ps [ 0 ] <nl> <nl> def disconnect_p2ps ( self ) : <nl> | Merge : [ qa ] Attach node index to test_node AssertionError and print messages | bitcoin/bitcoin | 896a9d026ccf75cb4a5c7809a92aa19b6f13fb87 | 2018-04-24T09:08:39Z |
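The helper pattern this commit introduces — routing every assertion and error message through a helper that prefixes the node's index — can be sketched standalone as follows (a minimal sketch: _node_msg and _raise_assertion_error mirror the diff above, while the TestNode shell and the index value are simplified, hypothetical stand-ins for the real test framework class):

class TestNode:
    def __init__(self, index):
        self.index = index

    def _node_msg(self, msg: str) -> str:
        # Tag the message with this node's index as a debugging aid.
        return "[node %d] %s" % (self.index, msg)

    def _raise_assertion_error(self, msg: str):
        raise AssertionError(self._node_msg(msg))

node = TestNode(2)
try:
    node._raise_assertion_error("Unable to connect to bitcoind")
except AssertionError as err:
    print(err)  # [node 2] Unable to connect to bitcoind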
mmm a / tensorflow / c / eager / c_api . cc <nl> ppp b / tensorflow / c / eager / c_api . cc <nl> void SetOpAttrValueScalar ( TFE_Context * ctx , TFE_Op * op , <nl> TFE_OpSetAttrFunction ( op , attr_name , func_op ) ; <nl> TFE_DeleteOp ( func_op ) ; <nl> } break ; <nl> - case tensorflow : : AttrValue : : kList : <nl> - TF_FALLTHROUGH_INTENDED ; <nl> + case tensorflow : : AttrValue : : kList : { <nl> + / / String <nl> + if ( const int s_size = default_value . list ( ) . s_size ( ) ) { <nl> + absl : : InlinedVector < const void * , 4 > values_vector ; <nl> + absl : : InlinedVector < size_t , 4 > lengths_vector ; <nl> + for ( int i = 0 ; i < s_size ; + + i ) { <nl> + const string & v = default_value . list ( ) . s ( i ) ; <nl> + values_vector . push_back ( v . data ( ) ) ; <nl> + lengths_vector . push_back ( v . size ( ) ) ; <nl> + } <nl> + TFE_OpSetAttrStringList ( op , attr_name , values_vector . data ( ) , <nl> + lengths_vector . data ( ) , s_size ) ; <nl> + } <nl> + <nl> + / / Int <nl> + if ( const int i_size = default_value . list ( ) . i_size ( ) ) { <nl> + absl : : InlinedVector < int64_t , 4 > i_vector ; <nl> + for ( int i = 0 ; i < i_size ; + + i ) { <nl> + i_vector . push_back ( default_value . list ( ) . i ( i ) ) ; <nl> + } <nl> + TFE_OpSetAttrIntList ( op , attr_name , i_vector . data ( ) , i_size ) ; <nl> + } <nl> + / / Float <nl> + if ( const int f_size = default_value . list ( ) . f_size ( ) ) { <nl> + absl : : InlinedVector < float , 4 > f_vector ; <nl> + for ( int i = 0 ; i < f_size ; + + i ) { <nl> + f_vector . push_back ( default_value . list ( ) . f ( i ) ) ; <nl> + } <nl> + TFE_OpSetAttrFloatList ( op , attr_name , f_vector . data ( ) , f_size ) ; <nl> + } <nl> + / / Bool <nl> + if ( const int b_size = default_value . list ( ) . b_size ( ) ) { <nl> + absl : : InlinedVector < unsigned char , 4 > b_vector ; <nl> + for ( int i = 0 ; i < b_size ; i + + ) { <nl> + b_vector . push_back ( default_value . list ( ) . b ( i ) ) ; <nl> + } <nl> + TFE_OpSetAttrBoolList ( op , attr_name , b_vector . data ( ) , b_size ) ; <nl> + } <nl> + / / Type <nl> + if ( const int type_size = default_value . list ( ) . type_size ( ) ) { <nl> + absl : : InlinedVector < unsigned int , 4 > type_vector ; <nl> + for ( int i = 0 ; i < type_size ; + + i ) { <nl> + type_vector . push_back ( default_value . list ( ) . type ( i ) ) ; <nl> + } <nl> + TFE_OpSetAttrTypeList ( <nl> + op , attr_name , <nl> + reinterpret_cast < const TF_DataType * > ( type_vector . data ( ) ) , <nl> + type_size ) ; <nl> + } <nl> + <nl> + / / Rest are not supported . <nl> + if ( default_value . list ( ) . shape_size ( ) > 0 | | <nl> + default_value . list ( ) . func_size ( ) > 0 | | <nl> + default_value . list ( ) . tensor_size ( ) > 0 ) { <nl> + TF_SetStatus ( <nl> + status , TF_UNIMPLEMENTED , <nl> + tensorflow : : strings : : StrCat ( " Unable to get setfor default value : " , <nl> + default_value . DebugString ( ) ) <nl> + . data ( ) ) ; <nl> + } <nl> + } break ; <nl> case tensorflow : : AttrValue : : kTensor : <nl> TF_FALLTHROUGH_INTENDED ; <nl> case tensorflow : : AttrValue : : kPlaceholder : <nl> mmm a / tensorflow / c / eager / c_api_test . cc <nl> ppp b / tensorflow / c / eager / c_api_test . cc <nl> limitations under the License . <nl> # include < string > <nl> <nl> / / clang - format off <nl> + # include " tensorflow / core / framework / attr_value . pb . h " <nl> # include " tensorflow / core / platform / platform . 
h " <nl> / / clang - format on <nl> <nl> TEST ( CAPI , StringAttributes ) { <nl> TF_DeleteStatus ( status ) ; <nl> } <nl> <nl> + / / Same test as above , expect use SetOpAttrValueScalar to set attrs . <nl> + TEST ( CAPI , TestTFE_SetOpAttrs ) { <nl> + / / Test that TFE_OpSetAttrString doesn ' t hold on to the value after it <nl> + / / returns . <nl> + TF_Status * status = TF_NewStatus ( ) ; <nl> + TFE_ContextOptions * opts = TFE_NewContextOptions ( ) ; <nl> + TFE_Context * ctx = TFE_NewContext ( opts , status ) ; <nl> + ASSERT_EQ ( TF_OK , TF_GetCode ( status ) ) < < TF_Message ( status ) ; <nl> + TFE_DeleteContextOptions ( opts ) ; <nl> + <nl> + std : : vector < int64_t > dims ( 4 , 1 ) ; <nl> + TFE_Op * op = TFE_NewOp ( ctx , " AvgPool " , status ) ; <nl> + ASSERT_EQ ( TF_OK , TF_GetCode ( status ) ) < < TF_Message ( status ) ; <nl> + <nl> + TF_Tensor * tensor = <nl> + TF_AllocateTensor ( TF_FLOAT , dims . data ( ) , dims . size ( ) , sizeof ( float ) ) ; <nl> + float tensor_data [ ] = { 1 } ; <nl> + memcpy ( TF_TensorData ( tensor ) , tensor_data , TF_TensorByteSize ( tensor ) ) ; <nl> + TFE_TensorHandle * tensor_handle = TFE_NewTensorHandle ( tensor , status ) ; <nl> + ASSERT_EQ ( TF_OK , TF_GetCode ( status ) ) < < TF_Message ( status ) ; <nl> + TFE_OpAddInput ( op , tensor_handle , status ) ; <nl> + TF_DeleteTensor ( tensor ) ; <nl> + TFE_DeleteTensorHandle ( tensor_handle ) ; <nl> + <nl> + tensorflow : : AttrValue i_list_values ; <nl> + for ( int i = 0 ; i < 4 ; + + i ) { <nl> + i_list_values . mutable_list ( ) - > add_i ( 1 ) ; <nl> + } <nl> + SetOpAttrValueScalar ( ctx , op , i_list_values , " ksize " , status ) ; <nl> + SetOpAttrValueScalar ( ctx , op , i_list_values , " strides " , status ) ; <nl> + <nl> + tensorflow : : AttrValue padding_value ; <nl> + * padding_value . mutable_s ( ) = " VALID " ; <nl> + tensorflow : : SetOpAttrValueScalar ( ctx , op , padding_value , " padding " , status ) ; <nl> + <nl> + tensorflow : : AttrValue data_format_value ; <nl> + * data_format_value . mutable_s ( ) = " NHWC " ; <nl> + tensorflow : : SetOpAttrValueScalar ( ctx , op , data_format_value , " data_format " , <nl> + status ) ; <nl> + <nl> + TFE_OpSetAttrType ( op , " T " , TF_FLOAT ) ; <nl> + <nl> + ASSERT_EQ ( TF_OK , TF_GetCode ( status ) ) < < TF_Message ( status ) ; <nl> + <nl> + TFE_TensorHandle * retvals [ 1 ] ; <nl> + int num_retvals = 1 ; <nl> + TFE_Execute ( op , & retvals [ 0 ] , & num_retvals , status ) ; <nl> + ASSERT_EQ ( TF_OK , TF_GetCode ( status ) ) < < TF_Message ( status ) ; <nl> + ASSERT_EQ ( 1 , num_retvals ) ; <nl> + <nl> + tensor = TFE_TensorHandleResolve ( retvals [ 0 ] , status ) ; <nl> + ASSERT_EQ ( TF_OK , TF_GetCode ( status ) ) < < TF_Message ( status ) ; <nl> + EXPECT_EQ ( 4 , TF_TensorByteSize ( tensor ) ) ; <nl> + TF_DeleteTensor ( tensor ) ; <nl> + TFE_DeleteTensorHandle ( retvals [ 0 ] ) ; <nl> + <nl> + TFE_DeleteOp ( op ) ; <nl> + <nl> + TFE_DeleteContext ( ctx ) ; <nl> + TF_DeleteStatus ( status ) ; <nl> + } <nl> + <nl> TEST ( CAPI , TestTFE_TensorHandleCopySharingUnderlyingTensorHandle ) { <nl> std : : unique_ptr < TF_Status , decltype ( & TF_DeleteStatus ) > status ( <nl> TF_NewStatus ( ) , TF_DeleteStatus ) ; <nl> mmm a / tensorflow / c / eager / gradients . cc <nl> ppp b / tensorflow / c / eager / gradients . cc <nl> namespace internal { <nl> Status Reset ( AbstractOperation * op_ , const char * op , <nl> const char * raw_device_name , ForwardOperation * forward_op_ ) { <nl> forward_op_ - > op_name = op ; <nl> + forward_op_ - > attrs . 
Reset ( op ) ; <nl> return op_ - > Reset ( op , raw_device_name ) ; <nl> } <nl> Status AddInput ( AbstractOperation * op_ , AbstractTensorHandle * input , <nl> Status Execute ( AbstractOperation * op_ , AbstractContext * ctx , <nl> / / TODO ( srbs ) : Manage refcount of ForwardOperation ' s inputs / outputs . <nl> forward_op_ - > outputs . push_back ( retvals [ i ] ) ; <nl> } <nl> + / / TODO ( b / 166669239 ) : This is needed to support AttrBuilder : : Get for string <nl> + / / attributes . Number type attrs and DataType attrs work fine without this . <nl> + / / Consider getting rid of this and making the behavior between number types <nl> + / / and string consistent . <nl> + forward_op_ - > attrs . BuildNodeDef ( ) ; <nl> std : : vector < TapeTensor > tape_tensors ; <nl> for ( auto t : retvals ) { <nl> tape_tensors . push_back ( TapeTensor ( t , ctx ) ) ; <nl> mmm a / tensorflow / c / eager / gradients_test . cc <nl> ppp b / tensorflow / c / eager / gradients_test . cc <nl> TEST_P ( CppGradients , TestIdentityNGrad ) { <nl> result_tensor = nullptr ; <nl> } <nl> <nl> + TEST_P ( CppGradients , TestSetAttrString ) { <nl> + std : : unique_ptr < TF_Status , decltype ( & TF_DeleteStatus ) > status ( <nl> + TF_NewStatus ( ) , TF_DeleteStatus ) ; <nl> + AbstractContextPtr ctx ; <nl> + { <nl> + AbstractContext * ctx_raw = nullptr ; <nl> + Status s = <nl> + BuildImmediateExecutionContext ( std : : get < 1 > ( GetParam ( ) ) , & ctx_raw ) ; <nl> + ASSERT_EQ ( errors : : OK , s . code ( ) ) < < s . error_message ( ) ; <nl> + ctx . reset ( ctx_raw ) ; <nl> + } <nl> + <nl> + AbstractTensorHandlePtr t ; <nl> + { <nl> + AbstractTensorHandle * x_raw = nullptr ; <nl> + Status s = TestScalarTensorHandle ( ctx . get ( ) , 1 . 0f , & x_raw ) ; <nl> + ASSERT_EQ ( errors : : OK , s . code ( ) ) < < s . error_message ( ) ; <nl> + t . reset ( x_raw ) ; <nl> + } <nl> + <nl> + AbstractOperationPtr check_numerics_op ( ctx - > CreateOperation ( ) ) ; <nl> + ForwardOperation forward_op ; <nl> + forward_op . ctx = ctx . get ( ) ; <nl> + Status s = Reset ( check_numerics_op . get ( ) , " CheckNumerics " , <nl> + / * raw_device_name = * / nullptr , & forward_op ) ; <nl> + ASSERT_EQ ( errors : : OK , s . code ( ) ) < < s . error_message ( ) ; <nl> + if ( isa < TracingOperation > ( check_numerics_op . get ( ) ) ) { <nl> + s = dyn_cast < TracingOperation > ( check_numerics_op . get ( ) ) <nl> + - > SetOpName ( " check_numerics " ) ; <nl> + ASSERT_EQ ( errors : : OK , s . code ( ) ) < < s . error_message ( ) ; <nl> + } <nl> + s = AddInput ( check_numerics_op . get ( ) , t . get ( ) , & forward_op ) ; <nl> + ASSERT_EQ ( errors : : OK , s . code ( ) ) < < s . error_message ( ) ; <nl> + string message = " This is the way ! " ; <nl> + s = SetAttrString ( check_numerics_op . get ( ) , " message " , message . data ( ) , <nl> + message . length ( ) , & forward_op ) ; <nl> + ASSERT_EQ ( errors : : OK , s . code ( ) ) < < s . error_message ( ) ; <nl> + int num_retvals = 1 ; <nl> + std : : vector < AbstractTensorHandle * > outputs ( 1 ) ; <nl> + GradientRegistry registry ; <nl> + std : : unique_ptr < Tape > tape ( new Tape ( / * persistent = * / false ) ) ; <nl> + s = Execute ( check_numerics_op . get ( ) , ctx . get ( ) , absl : : MakeSpan ( outputs ) , <nl> + & num_retvals , & forward_op , tape . get ( ) , registry ) ; <nl> + ASSERT_EQ ( errors : : OK , s . code ( ) ) < < s . error_message ( ) ; <nl> + <nl> + string read_message ; <nl> + s = forward_op . attrs . 
Get ( " message " , & read_message ) ; <nl> + ASSERT_EQ ( errors : : OK , s . code ( ) ) < < s . error_message ( ) ; <nl> + ASSERT_EQ ( read_message , message ) ; <nl> + } <nl> + <nl> / / TODO ( b / 164171226 ) : Enable this test with tfrt after AddInputList is <nl> / / supported . It is needed for IdentityN . <nl> # ifdef PLATFORM_GOOGLE <nl> mmm a / tensorflow / c / eager / mnist_gradients_testutil . cc <nl> ppp b / tensorflow / c / eager / mnist_gradients_testutil . cc <nl> Status MNISTForwardModel ( AbstractContext * ctx , <nl> * hidden_layer = tf . nn . relu ( mm_out_1 ) <nl> * scores = tf . matmul ( hidden_layer , W2 ) <nl> * softmax = <nl> - * tf . nn . sparse_softmax_cross_entropy_with_logits ( scores , y_labels ) return <nl> - * scores , softmax <nl> + * tf . nn . sparse_softmax_cross_entropy_with_logits ( scores , y_labels ) <nl> + * return scores , softmax <nl> * <nl> * Use this convention for inputs : <nl> * <nl> mmm a / tensorflow / c / eager / parallel_device / BUILD <nl> ppp b / tensorflow / c / eager / parallel_device / BUILD <nl> cc_library ( <nl> hdrs = [ " parallel_device_testlib . h " ] , <nl> deps = [ <nl> " : parallel_device " , <nl> - " : parallel_device_ops " , <nl> " / / tensorflow / c : c_api " , <nl> " / / tensorflow / c : c_api_experimental " , <nl> " / / tensorflow / c / eager : c_api " , <nl> tf_cc_test ( <nl> srcs = [ " parallel_device_test . cc " ] , <nl> deps = [ <nl> " : parallel_device " , <nl> - " : parallel_device_ops " , <nl> " : parallel_device_testlib " , <nl> " / / tensorflow / c : c_api " , <nl> " / / tensorflow / c : c_api_experimental " , <nl> tf_cc_test ( <nl> args = [ " - - heap_check = local " ] , <nl> deps = [ <nl> " : parallel_device " , <nl> - " : parallel_device_ops " , <nl> " : parallel_device_testlib " , <nl> " / / tensorflow / c : c_api " , <nl> " / / tensorflow / c : c_api_experimental " , <nl> tf_cc_test ( <nl> " / / tensorflow / core / distributed_runtime / rpc : grpc_server_lib " , <nl> ] , <nl> ) <nl> - <nl> - # Note : ParallelDevice - specific ops are experimental and not currently linked in <nl> - # to TensorFlow by default , just used in a few tests . <nl> - filegroup ( <nl> - name = " parallel_device_ops_srcs " , <nl> - srcs = [ " parallel_device_ops . cc " ] , <nl> - visibility = [ " / / tensorflow / python / distribute / parallel_device : __pkg__ " ] , <nl> - ) <nl> - <nl> - cc_library ( <nl> - name = " parallel_device_ops " , <nl> - srcs = [ " : parallel_device_ops_srcs " ] , <nl> - visibility = [ " / / tensorflow : internal " ] , <nl> - deps = [ " / / tensorflow / core : framework " ] , <nl> - alwayslink = 1 , <nl> - ) <nl> mmm a / tensorflow / c / eager / parallel_device / parallel_device . cc <nl> ppp b / tensorflow / c / eager / parallel_device / parallel_device . cc <nl> absl : : optional < std : : vector < MaybeParallelTensorOwned > > ExecuteWithSpecialOps ( <nl> } <nl> result . emplace ( std : : move ( outputs ) ) ; <nl> return result ; <nl> - } else if ( operation_name = = std : : string ( " DeviceID " ) ) { <nl> - std : : vector < MaybeParallelTensorOwned > result_content ; <nl> - result_content . reserve ( 1 ) ; <nl> - result_content . push_back ( parallel_device . DeviceIDs ( context , status ) ) ; <nl> - if ( TF_GetCode ( status ) ! = TF_OK ) return result ; <nl> - result . 
emplace ( std : : move ( result_content ) ) ; <nl> - return result ; <nl> } <nl> std : : vector < ParallelTensor * > parallel_inputs ; <nl> std : : vector < std : : unique_ptr < ParallelTensor > > implicitly_broadcast_tensors ; <nl> mmm a / tensorflow / c / eager / parallel_device / parallel_device_testlib . cc <nl> ppp b / tensorflow / c / eager / parallel_device / parallel_device_testlib . cc <nl> void BasicTestsForTwoDevices ( TFE_Context * context , const char * first_device , <nl> TFE_TensorHandleBackingDeviceName ( components [ 1 ] . get ( ) , status . get ( ) ) ; <nl> ASSERT_EQ ( underlying_devices [ 1 ] , second_device ) ; <nl> } <nl> - / / Compute the device ID twice and verify the result <nl> - for ( int i = 0 ; i < 2 ; + + i ) { <nl> - std : : unique_ptr < TFE_Op , decltype ( & TFE_DeleteOp ) > op ( <nl> - TFE_NewOp ( context , " DeviceID " , status . get ( ) ) , TFE_DeleteOp ) ; <nl> - ASSERT_TRUE ( TF_GetCode ( status . get ( ) ) = = TF_OK ) < < TF_Message ( status . get ( ) ) ; <nl> - TFE_OpSetDevice ( op . get ( ) , device_name , status . get ( ) ) ; <nl> - ASSERT_TRUE ( TF_GetCode ( status . get ( ) ) = = TF_OK ) < < TF_Message ( status . get ( ) ) ; <nl> - <nl> - TFE_TensorHandle * result_handle ; <nl> - int num_retvals = 1 ; <nl> - TFE_Execute ( op . get ( ) , & result_handle , & num_retvals , status . get ( ) ) ; <nl> - ASSERT_TRUE ( TF_GetCode ( status . get ( ) ) = = TF_OK ) < < TF_Message ( status . get ( ) ) ; <nl> - std : : array < TensorHandlePtr , 2 > components ; <nl> - ExtractPerDeviceValues ( context , result_handle , & components , status . get ( ) ) ; <nl> - TFE_DeleteTensorHandle ( result_handle ) ; <nl> - ASSERT_TRUE ( TF_GetCode ( status . get ( ) ) = = TF_OK ) < < TF_Message ( status . get ( ) ) ; <nl> - <nl> - ExpectScalarEq < int32_t > ( components [ 0 ] . get ( ) , 0 ) ; <nl> - ExpectScalarEq < int32_t > ( components [ 1 ] . get ( ) , 1 ) ; <nl> - std : : string first_device = <nl> - TFE_TensorHandleBackingDeviceName ( components [ 0 ] . get ( ) , status . get ( ) ) ; <nl> - ASSERT_EQ ( underlying_devices [ 0 ] , first_device ) ; <nl> - std : : string second_device = <nl> - TFE_TensorHandleBackingDeviceName ( components [ 1 ] . get ( ) , status . get ( ) ) ; <nl> - ASSERT_EQ ( underlying_devices [ 1 ] , second_device ) ; <nl> - } <nl> } <nl> mmm a / tensorflow / compiler / jit / BUILD <nl> ppp b / tensorflow / compiler / jit / BUILD <nl> tf_cc_test ( <nl> " : xla_cpu_jit " , <nl> " / / tensorflow / cc : cc_ops " , <nl> " / / tensorflow / cc : function_ops " , <nl> + " / / tensorflow / cc : functional_ops " , <nl> " / / tensorflow / cc : ops " , <nl> " / / tensorflow / cc : scope " , <nl> " / / tensorflow / compiler / tf2xla : test_util " , <nl> mmm a / tensorflow / compiler / jit / compilability_check_util . cc <nl> ppp b / tensorflow / compiler / jit / compilability_check_util . cc <nl> RecursiveCompilabilityChecker : : OperationFilter CreateOperationFilter ( <nl> } <nl> } <nl> <nl> + / / Returns ` true ` iff node has a given ` attr ` set to ` true ` . Returns ` false ` <nl> + / / both for the missing attr , and the attr set to ` false ` . <nl> + static bool HasBoolAttr ( const NodeDef & node , const char * attr ) { <nl> + const auto & it = node . attr ( ) . find ( attr ) ; <nl> + return it ! = node . attr ( ) . end ( ) & & it - > second . b ( ) ; <nl> + } <nl> + <nl> bool CanCreateXlaKernel ( const NodeDef & node_def ) { <nl> - / / If kXlaMustCompileAttr is set on the node_def , use its value . <nl> - const auto & it = node_def . attr ( ) . 
find ( kXlaMustCompileAttr ) ; <nl> - return it ! = node_def . attr ( ) . end ( ) & & it - > second . b ( ) ; <nl> + return HasBoolAttr ( node_def , kXlaMustCompileAttr ) ; <nl> } <nl> <nl> Status GetBodyAndConstantsAndResources ( FunctionLibraryRuntime * flr , <nl> Status GetBodyAndConstantsAndResources ( FunctionLibraryRuntime * flr , <nl> return Status : : OK ( ) ; <nl> } <nl> <nl> + static auto const ops_triggering_xla_compilation = <nl> + new absl : : flat_hash_set < std : : string > { " XlaBroadcastHelper " , <nl> + " XlaConv " , <nl> + " XlaDequantize " , <nl> + " XlaDot " , <nl> + " XlaDynamicSlice " , <nl> + " XlaDynamicUpdateSlice " , <nl> + " XlaEinsum " , <nl> + " XlaGather " , <nl> + " XlaIf " , <nl> + " XlaKeyValueSort " , <nl> + " XlaPad " , <nl> + " XlaRecv " , <nl> + " XlaReduce " , <nl> + " XlaReduceWindow " , <nl> + " XlaReplicaId " , <nl> + " XlaScatter " , <nl> + " XlaSelectAndScatter " , <nl> + " XlaSelfAdjointEig " , <nl> + " XlaSend " , <nl> + " XlaSharding " , <nl> + " XlaSort " , <nl> + " XlaSpmdFullToShardShape " , <nl> + " XlaSpmdShardToFullShape " , <nl> + " XlaSvd " , <nl> + " XlaWhile " } ; <nl> + <nl> + static bool NodeCanTriggerXlaCompilation ( const NodeDef & node ) { <nl> + return node . attr ( ) . find ( kXlaClusterIdAttr ) ! = node . attr ( ) . end ( ) | | <nl> + HasBoolAttr ( node , kXlaMustCompileAttr ) | | <nl> + HasBoolAttr ( node , kXlaCompileAttr ) | | <nl> + HasBoolAttr ( node , kXlaScopeAttr ) | | <nl> + HasBoolAttr ( node , kXlaInternalScopeAttr ) | | <nl> + ops_triggering_xla_compilation - > count ( node . op ( ) ) ; <nl> + } <nl> + <nl> + bool CanTriggerXlaCompilation ( const GraphDef & graph ) { <nl> + for ( const FunctionDef & function : graph . library ( ) . function ( ) ) { <nl> + for ( const NodeDef & node : function . node_def ( ) ) { <nl> + if ( NodeCanTriggerXlaCompilation ( node ) ) { <nl> + return true ; <nl> + } <nl> + } <nl> + } <nl> + <nl> + for ( const NodeDef & node : graph . node ( ) ) { <nl> + if ( NodeCanTriggerXlaCompilation ( node ) ) { <nl> + return true ; <nl> + } <nl> + } <nl> + <nl> + return false ; <nl> + } <nl> + <nl> } / / namespace tensorflow <nl> mmm a / tensorflow / compiler / jit / compilability_check_util . h <nl> ppp b / tensorflow / compiler / jit / compilability_check_util . h <nl> Status GetBodyAndConstantsAndResources ( FunctionLibraryRuntime * flr , <nl> / / set . <nl> bool CanCreateXlaKernel ( const NodeDef & node_def ) ; <nl> <nl> + / / Check whether graph can trigger XLA compilation . <nl> + bool CanTriggerXlaCompilation ( const GraphDef & graph ) ; <nl> + <nl> } / / namespace tensorflow <nl> <nl> # endif / / TENSORFLOW_COMPILER_JIT_COMPILABILITY_CHECK_UTIL_H_ <nl> mmm a / tensorflow / compiler / jit / compilability_check_util_test . cc <nl> ppp b / tensorflow / compiler / jit / compilability_check_util_test . cc <nl> limitations under the License . <nl> # include " absl / memory / memory . h " <nl> # include " tensorflow / cc / framework / scope . h " <nl> # include " tensorflow / cc / ops / function_ops . h " <nl> + # include " tensorflow / cc / ops / functional_ops . h " <nl> # include " tensorflow / cc / ops / standard_ops . h " <nl> # include " tensorflow / compiler / tf2xla / xla_op_kernel . h " <nl> # include " tensorflow / compiler / tf2xla / xla_op_registry . 
h " <nl> TEST_F ( CompilabilityCheckUtilTest , CheckFunctionalIfNode ) { <nl> " unsupported op " ) ) ; <nl> } <nl> <nl> + TEST_F ( CompilabilityCheckUtilTest , TestCanNotTriggerXlaCompilation ) { <nl> + GraphDefBuilder b ( GraphDefBuilder : : kFailImmediately ) ; <nl> + Scope root = Scope : : NewRootScope ( ) . ExitOnError ( ) ; <nl> + FunctionDefLibrary library ; <nl> + <nl> + FunctionDef identity_func = FunctionDefHelper : : Create ( <nl> + " IdentityFunc " , <nl> + / * in_def = * / { " x : float " } , <nl> + / * out_def = * / { " res : float " } , <nl> + / * attr_def = * / { } , <nl> + / * node_def = * / { { { " t0 " } , " Identity " , { " x " } , { { " T " , DT_FLOAT } } } } , <nl> + / * ret_def * / { { " res " , " t0 : output " } } ) ; <nl> + <nl> + * library . add_function ( ) = identity_func ; <nl> + <nl> + Output in = ops : : Placeholder ( root , DT_FLOAT ) ; <nl> + NameAttrList b_name_attr ; <nl> + b_name_attr . set_name ( " IdentityFunc " ) ; <nl> + ops : : PartitionedCall call ( root . WithOpName ( " call " ) , { in } , { DT_FLOAT } , <nl> + b_name_attr ) ; <nl> + <nl> + GraphDef graph_def ; <nl> + TF_ASSERT_OK ( root . graph ( ) - > AddFunctionLibrary ( library ) ) ; <nl> + TF_ASSERT_OK ( root . ToGraphDef ( & graph_def ) ) ; <nl> + <nl> + EXPECT_FALSE ( CanTriggerXlaCompilation ( graph_def ) ) ; <nl> + } <nl> + <nl> + TEST_F ( CompilabilityCheckUtilTest , TestXlaOpsCanTriggerXlaCompilation ) { <nl> + GraphDefBuilder b ( GraphDefBuilder : : kFailImmediately ) ; <nl> + Scope root = Scope : : NewRootScope ( ) . ExitOnError ( ) ; <nl> + FunctionDefLibrary library ; <nl> + <nl> + FunctionDef sort_func = FunctionDefHelper : : Create ( <nl> + " SortFunc " , <nl> + / * in_def = * / { " x : float " } , <nl> + / * out_def = * / { " res : float " } , <nl> + / * attr_def = * / { } , <nl> + / * node_def = * / { { { " t0 " } , " XlaSort " , { " x " } , { { " T " , DT_FLOAT } } } } , <nl> + / * ret_def * / { { " res " , " t0 : output " } } ) ; <nl> + <nl> + * library . add_function ( ) = sort_func ; <nl> + <nl> + Output in = ops : : Placeholder ( root , DT_FLOAT ) ; <nl> + NameAttrList b_name_attr ; <nl> + b_name_attr . set_name ( " SortFunc " ) ; <nl> + ops : : PartitionedCall call ( root . WithOpName ( " call " ) , { in } , { DT_FLOAT } , <nl> + b_name_attr ) ; <nl> + <nl> + GraphDef graph_def ; <nl> + TF_ASSERT_OK ( root . graph ( ) - > AddFunctionLibrary ( library ) ) ; <nl> + TF_ASSERT_OK ( root . ToGraphDef ( & graph_def ) ) ; <nl> + <nl> + EXPECT_TRUE ( CanTriggerXlaCompilation ( graph_def ) ) ; <nl> + } <nl> + <nl> + TEST_F ( CompilabilityCheckUtilTest , TestCanTriggerXlaCompilation ) { <nl> + GraphDefBuilder b ( GraphDefBuilder : : kFailImmediately ) ; <nl> + Scope root = Scope : : NewRootScope ( ) . ExitOnError ( ) ; <nl> + FunctionDefLibrary library ; <nl> + <nl> + AttrValue true_attribute ; <nl> + true_attribute . set_b ( true ) ; <nl> + <nl> + FunctionDef identity_func = FunctionDefHelper : : Create ( <nl> + " IdentityFunc " , <nl> + / * in_def = * / { " x : float " } , <nl> + / * out_def = * / { " res : float " } , <nl> + / * attr_def = * / { } , <nl> + / * node_def = * / { { { " t0 " } , " Identity " , { " x " } , { { " T " , DT_FLOAT } } } } , <nl> + / * ret_def * / { { " res " , " t0 : output " } } ) ; <nl> + <nl> + ( * identity_func . 
mutable_attr ( ) ) [ kXlaMustCompileAttr ] = true_attribute ; <nl> + <nl> + FunctionDef call_identity = FunctionDefHelper : : Create ( <nl> + " CallIdentity " , <nl> + / * in_def = * / { " x : float " } , <nl> + / * out_def = * / { " z : float " } , / * attr_def = * / { } , <nl> + / * node_def = * / <nl> + { { { " func_call " } , <nl> + " PartitionedCall " , <nl> + { " x " } , <nl> + { { " Tin " , DataTypeSlice ( { DT_FLOAT } ) } , <nl> + { " Tout " , DataTypeSlice ( { DT_FLOAT } ) } , <nl> + { " f " , <nl> + FunctionDefHelper : : FunctionRef ( " IdentityRef " , { { " T " , DT_FLOAT } } ) } , <nl> + { kXlaMustCompileAttr , true } } } } , <nl> + / * ret_def = * / { { " z " , " func_call : output : 0 " } } ) ; <nl> + <nl> + * library . add_function ( ) = identity_func ; <nl> + * library . add_function ( ) = call_identity ; <nl> + <nl> + Output in = ops : : Placeholder ( root , DT_FLOAT ) ; <nl> + NameAttrList b_name_attr ; <nl> + b_name_attr . set_name ( " CallIdentity " ) ; <nl> + ops : : PartitionedCall call ( root . WithOpName ( " call " ) , { in } , { DT_FLOAT } , <nl> + b_name_attr ) ; <nl> + <nl> + GraphDef graph_def ; <nl> + TF_ASSERT_OK ( root . graph ( ) - > AddFunctionLibrary ( library ) ) ; <nl> + TF_ASSERT_OK ( root . ToGraphDef ( & graph_def ) ) ; <nl> + <nl> + EXPECT_TRUE ( CanTriggerXlaCompilation ( graph_def ) ) ; <nl> + } <nl> + <nl> } / / namespace <nl> } / / namespace tensorflow <nl> mmm a / tensorflow / compiler / jit / defs . cc <nl> ppp b / tensorflow / compiler / jit / defs . cc <nl> const char * const kXlaScopeAttr = " _XlaScope " ; <nl> / / only when auto_jit is ON . <nl> const char * const kXlaInternalScopeAttr = " _XlaInternalScope " ; <nl> <nl> + const char * const kXlaClusterIdAttr = " _xla_compile_id " ; <nl> + <nl> } / / namespace tensorflow <nl> mmm a / tensorflow / compiler / jit / defs . h <nl> ppp b / tensorflow / compiler / jit / defs . h <nl> extern const char * const kXlaCompileAttr ; / / " _XlaCompile " <nl> extern const char * const kXlaScopeAttr ; / / " _XlaScope " <nl> extern const char * const kXlaInternalScopeAttr ; / / " _XlaInternalScope " <nl> <nl> + / / The id of the compiled cluster . <nl> + extern const char * const kXlaClusterIdAttr ; / / " _xla_compile_id " <nl> + <nl> } / / namespace tensorflow <nl> <nl> # endif / / TENSORFLOW_COMPILER_JIT_DEFS_H_ <nl> mmm a / tensorflow / compiler / jit / encapsulate_xla_computations_pass . cc <nl> ppp b / tensorflow / compiler / jit / encapsulate_xla_computations_pass . cc <nl> limitations under the License . <nl> # include " absl / memory / memory . h " <nl> # include " absl / strings / ascii . h " <nl> # include " absl / strings / str_cat . h " <nl> + # include " tensorflow / compiler / jit / defs . h " <nl> # include " tensorflow / compiler / jit / encapsulate_subgraphs_pass . h " <nl> # include " tensorflow / compiler / xla / status_macros . h " <nl> # include " tensorflow / core / framework / node_def . pb . h " <nl> limitations under the License . <nl> <nl> namespace tensorflow { <nl> <nl> - const char * const EncapsulateXlaComputationsPass : : kXlaClusterAttr = <nl> - " _xla_compile_id " ; <nl> - <nl> namespace { <nl> <nl> const char * const kXlaClusterOutput = " XlaClusterOutput " ; <nl> bool IsCpuGpuCompile ( const Graph * graph ) { <nl> for ( Node * n : graph - > nodes ( ) ) { <nl> string name ; <nl> / / Only consider nodes being compiled . <nl> - if ( ! GetNodeAttr ( n - > attrs ( ) , <nl> - EncapsulateXlaComputationsPass : : kXlaClusterAttr , & name ) <nl> - . 
ok ( ) ) <nl> - continue ; <nl> + if ( ! GetNodeAttr ( n - > attrs ( ) , kXlaClusterIdAttr , & name ) . ok ( ) ) continue ; <nl> / / Early return for any node with a device that is not a CPU or GPU . <nl> DeviceNameUtils : : ParsedName parsed ; <nl> if ( DeviceNameUtils : : ParseFullName ( n - > requested_device ( ) , & parsed ) ) { <nl> Status RewriteSubgraph ( const std : : vector < OutputTensor > & arg_source_tensors , <nl> retvals [ i ] - > AddAttr ( " index " , i ) ; <nl> } <nl> <nl> - AddNodeAttr ( EncapsulateXlaComputationsPass : : kXlaClusterAttr , call_def - > name ( ) , <nl> - call_def ) ; <nl> + AddNodeAttr ( kXlaClusterIdAttr , call_def - > name ( ) , call_def ) ; <nl> AddNodeAttr ( " _variable_start_index " , variable_start_index , call_def ) ; <nl> <nl> / / Uniquify the function name . <nl> Status RewriteSubgraph ( const std : : vector < OutputTensor > & arg_source_tensors , <nl> / / O ( n ) pass over the edges . <nl> for ( const Edge * e : ( * graph ) - > edges ( ) ) { <nl> if ( ! e - > IsControlEdge ( ) & & <nl> - e - > src ( ) - > attrs ( ) . Find ( kXlaClusterAttr ) ! = nullptr & & <nl> - e - > dst ( ) - > attrs ( ) . Find ( kXlaClusterAttr ) = = nullptr & & <nl> + e - > src ( ) - > attrs ( ) . Find ( kXlaClusterIdAttr ) ! = nullptr & & <nl> + e - > dst ( ) - > attrs ( ) . Find ( kXlaClusterIdAttr ) = = nullptr & & <nl> e - > dst ( ) - > type_string ( ) ! = kXlaClusterOutput ) { <nl> return errors : : InvalidArgument ( <nl> " Undeclared output of XLA computation . Some common causes of this " <nl> Status RewriteSubgraph ( const std : : vector < OutputTensor > & arg_source_tensors , <nl> <nl> auto output = absl : : make_unique < Graph > ( ( * graph ) - > op_registry ( ) ) ; <nl> TF_RETURN_WITH_CONTEXT_IF_ERROR ( <nl> - EncapsulateSubgraphsInFunctions ( kXlaClusterAttr , * * graph , RewriteSubgraph , <nl> - / * reuse_existing_functions = * / true , <nl> - & output , flib_def ) , <nl> + EncapsulateSubgraphsInFunctions ( <nl> + kXlaClusterIdAttr , * * graph , RewriteSubgraph , <nl> + / * reuse_existing_functions = * / true , & output , flib_def ) , <nl> " EncapsulateXlaComputationsPass failed " ) ; <nl> graph - > swap ( output ) ; <nl> return Status : : OK ( ) ; <nl> Status RewriteSubgraph ( const std : : vector < OutputTensor > & arg_source_tensors , <nl> / / while iterating . <nl> std : : vector < Node * > launch_nodes ; <nl> for ( Node * n : graph - > nodes ( ) ) { <nl> - const string & name = GetNodeAttrString ( n - > attrs ( ) , kXlaClusterAttr ) ; <nl> + const string & name = GetNodeAttrString ( n - > attrs ( ) , kXlaClusterIdAttr ) ; <nl> if ( ! name . empty ( ) ) { <nl> launch_nodes . push_back ( n ) ; <nl> } <nl> mmm a / tensorflow / compiler / jit / encapsulate_xla_computations_pass . h <nl> ppp b / tensorflow / compiler / jit / encapsulate_xla_computations_pass . h <nl> namespace tensorflow { <nl> / / XlaLaunch operators . <nl> class EncapsulateXlaComputationsPass : public GraphOptimizationPass { <nl> public : <nl> - static const char * const kXlaClusterAttr ; / / _xla_compile_id <nl> - <nl> Status Run ( const GraphOptimizationPassOptions & options ) override ; <nl> <nl> / / The following methods are public only for unit tests . <nl> mmm a / tensorflow / compiler / jit / encapsulate_xla_computations_pass_test . cc <nl> ppp b / tensorflow / compiler / jit / encapsulate_xla_computations_pass_test . cc <nl> limitations under the License . <nl> # include " tensorflow / cc / ops / function_ops . h " <nl> # include " tensorflow / cc / ops / resource_variable_ops . 
h " <nl> # include " tensorflow / cc / ops / standard_ops . h " <nl> + # include " tensorflow / compiler / jit / defs . h " <nl> # include " tensorflow / compiler / jit / encapsulate_subgraphs_pass . h " <nl> # include " tensorflow / compiler / tf2xla / cc / ops / xla_jit_ops . h " <nl> # include " tensorflow / compiler / tf2xla / test_util . h " <nl> static std : : unique_ptr < Graph > MakeOuterGraph ( <nl> auto w = ops : : Placeholder ( scope . WithOpName ( " W " ) , DT_RESOURCE ) ; <nl> <nl> NodeDef def ; <nl> - TF_CHECK_OK ( <nl> - NodeDefBuilder ( " launch0 " , function , & flib_def ) <nl> - . Input ( a . node ( ) - > name ( ) , 0 , DT_INT32 ) <nl> - . Input ( b . node ( ) - > name ( ) , 0 , DT_FLOAT ) <nl> - . Input ( c . node ( ) - > name ( ) , 0 , DT_INT32 ) <nl> - . Input ( d . node ( ) - > name ( ) , 0 , DT_FLOAT ) <nl> - . Input ( u . node ( ) - > name ( ) , 0 , DT_RESOURCE ) <nl> - . Input ( v . node ( ) - > name ( ) , 0 , DT_RESOURCE ) <nl> - . Input ( w . node ( ) - > name ( ) , 0 , DT_RESOURCE ) <nl> - . Device ( " / gpu : 0 " ) <nl> - . Attr ( EncapsulateXlaComputationsPass : : kXlaClusterAttr , " launch0 " ) <nl> - . Attr ( " _variable_start_index " , 4 ) <nl> - . Finalize ( & def ) ) ; <nl> + TF_CHECK_OK ( NodeDefBuilder ( " launch0 " , function , & flib_def ) <nl> + . Input ( a . node ( ) - > name ( ) , 0 , DT_INT32 ) <nl> + . Input ( b . node ( ) - > name ( ) , 0 , DT_FLOAT ) <nl> + . Input ( c . node ( ) - > name ( ) , 0 , DT_INT32 ) <nl> + . Input ( d . node ( ) - > name ( ) , 0 , DT_FLOAT ) <nl> + . Input ( u . node ( ) - > name ( ) , 0 , DT_RESOURCE ) <nl> + . Input ( v . node ( ) - > name ( ) , 0 , DT_RESOURCE ) <nl> + . Input ( w . node ( ) - > name ( ) , 0 , DT_RESOURCE ) <nl> + . Device ( " / gpu : 0 " ) <nl> + . Attr ( kXlaClusterIdAttr , " launch0 " ) <nl> + . Attr ( " _variable_start_index " , 4 ) <nl> + . Finalize ( & def ) ) ; <nl> <nl> Status status ; <nl> Node * launch = scope . graph ( ) - > AddNode ( def , & status ) ; <nl> static std : : unique_ptr < Graph > MakeBodyGraph ( ) { <nl> auto arg6 = ops : : _Arg ( scope . WithOpName ( " w_0_arg " ) , DT_RESOURCE , 6 ) ; <nl> <nl> auto add_attrs = [ ] ( Node * node ) { <nl> - node - > AddAttr ( EncapsulateXlaComputationsPass : : kXlaClusterAttr , " launch0 " ) ; <nl> + node - > AddAttr ( kXlaClusterIdAttr , " launch0 " ) ; <nl> node - > set_requested_device ( " / gpu : 0 " ) ; <nl> } ; <nl> <nl> TEST ( EncapsulateXlaComputations , DeterministicEncapsulate ) { <nl> : ops : : Add ( scope . WithOpName ( " E " ) , a1 , a0 ) ; <nl> <nl> auto add_attrs = [ ] ( Node * node ) { <nl> - node - > AddAttr ( EncapsulateXlaComputationsPass : : kXlaClusterAttr , <nl> - " launch0 " ) ; <nl> + node - > AddAttr ( kXlaClusterIdAttr , " launch0 " ) ; <nl> } ; <nl> add_attrs ( e . node ( ) ) ; <nl> <nl> TEST ( EncapsulateXlaComputations , Encapsulate ) { <nl> auto w = ops : : Placeholder ( scope . WithOpName ( " W " ) , DT_RESOURCE ) ; <nl> <nl> auto add_attrs = [ ] ( Node * node ) { <nl> - node - > AddAttr ( EncapsulateXlaComputationsPass : : kXlaClusterAttr , " launch0 " ) ; <nl> + node - > AddAttr ( kXlaClusterIdAttr , " launch0 " ) ; <nl> node - > set_requested_device ( " / gpu : 0 " ) ; <nl> } ; <nl> <nl> mmm a / tensorflow / compiler / jit / mark_for_compilation_pass . cc <nl> ppp b / tensorflow / compiler / jit / mark_for_compilation_pass . 
cc <nl> absl : : flat_hash_map < string , std : : vector < string > > * GetAllowlistTable ( ) { <nl> " ConcatOffset " , " Const " , " MirrorPad " , " Pack " , " Pad " , " PadV2 " , " Reverse " , <nl> " ReverseV2 " , " ReverseSequence " , " Slice " , " Split " , " SplitV " , <nl> " StridedSlice " , " StridedSliceGrad " , " ResourceStridedSliceAssign " , <nl> - " Tile " , " Transpose " , " InvertPermutation " , " Unpack " , " DeviceIndex " } } } ; <nl> + " Tile " , " Transpose " , " InvertPermutation " , " Unpack " , " DeviceIndex " , <nl> + " TensorStridedSliceUpdate " , <nl> + } } } ; <nl> / / clang - format on <nl> return result ; <nl> } <nl> mmm a / tensorflow / compiler / mlir / hlo / BUILD <nl> ppp b / tensorflow / compiler / mlir / hlo / BUILD <nl> cc_library ( <nl> " @ llvm - project / / llvm : Support " , <nl> " @ llvm - project / / mlir : IR " , <nl> " @ llvm - project / / mlir : Pass " , <nl> + " @ llvm - project / / mlir : SCFDialect " , <nl> " @ llvm - project / / mlir : StandardOps " , <nl> " @ llvm - project / / mlir : Support " , <nl> " @ llvm - project / / mlir : Transforms " , <nl> cc_library ( <nl> alwayslink = 1 , <nl> ) <nl> <nl> + cc_library ( <nl> + name = " mhlo_control_flow_to_scf " , <nl> + srcs = [ " lib / Dialect / mhlo / transforms / mhlo_control_flow_to_scf . cc " ] , <nl> + hdrs = [ " include / mlir - hlo / Dialect / mhlo / transforms / passes . h " ] , <nl> + deps = [ <nl> + " : hlo " , <nl> + " @ llvm - project / / llvm : Support " , <nl> + " @ llvm - project / / mlir : IR " , <nl> + " @ llvm - project / / mlir : Pass " , <nl> + " @ llvm - project / / mlir : SCFDialect " , <nl> + " @ llvm - project / / mlir : StandardOps " , <nl> + " @ llvm - project / / mlir : Support " , <nl> + " @ llvm - project / / mlir : Transforms " , <nl> + ] , <nl> + ) <nl> + <nl> cc_library ( <nl> name = " map_lmhlo_to_scalar_op " , <nl> hdrs = [ " include / mlir - hlo / Dialect / mhlo / transforms / map_lmhlo_to_scalar_op . h " ] , <nl> cc_library ( <nl> " : lhlo_legalize_to_affine " , <nl> " : lhlo_legalize_to_gpu " , <nl> " : lhlo_legalize_to_parallel_loops " , <nl> + " : mhlo_control_flow_to_scf " , <nl> " : mhlo_fusion " , <nl> " : mhlo_to_mhlo_lowering_patterns " , <nl> " : sink_constants_to_control_flow " , <nl> mmm a / tensorflow / compiler / mlir / hlo / include / mlir - hlo / Dialect / mhlo / transforms / map_hlo_to_lhlo_op . h <nl> ppp b / tensorflow / compiler / mlir / hlo / include / mlir - hlo / Dialect / mhlo / transforms / map_hlo_to_lhlo_op . h <nl> MAP_HLO_TO_LHLO ( CosOp ) ; <nl> MAP_HLO_TO_LHLO ( DivOp ) ; <nl> MAP_HLO_TO_LHLO ( DotOp ) ; <nl> MAP_HLO_TO_LHLO ( ExpOp ) ; <nl> + MAP_HLO_TO_LHLO ( FloorOp ) ; <nl> MAP_HLO_TO_LHLO ( GatherOp ) ; <nl> MAP_HLO_TO_LHLO ( ImagOp ) ; <nl> MAP_HLO_TO_LHLO ( IotaOp ) ; <nl> mmm a / tensorflow / compiler / mlir / hlo / include / mlir - hlo / Dialect / mhlo / transforms / map_lmhlo_to_scalar_op . h <nl> ppp b / tensorflow / compiler / mlir / hlo / include / mlir - hlo / Dialect / mhlo / transforms / map_lmhlo_to_scalar_op . 
h <nl> inline Value MapLhloOpToStdScalarOp < lmhlo : : SinOp > ( Location loc , <nl> loc , result_types , args , b ) ; <nl> } <nl> <nl> + template < > <nl> + inline Value MapLhloOpToStdScalarOp < lmhlo : : FloorOp > ( Location loc , <nl> + ArrayRef < Type > result_types , <nl> + ArrayRef < Value > args , <nl> + OpBuilder * b ) { <nl> + return MapLhloOpToStdScalarOpImpl < FloatType , : : mlir : : FloorFOp > { } ( <nl> + loc , result_types , args , b ) ; <nl> + } <nl> + <nl> / / / Implements the conversion of HLO op to scalar op ( to use within region of a <nl> / / / linalg . generic op ) for compare - select style operations like min / max . <nl> template < typename . . . Args > <nl> mmm a / tensorflow / compiler / mlir / hlo / include / mlir - hlo / Dialect / mhlo / transforms / mhlo_passes . td <nl> ppp b / tensorflow / compiler / mlir / hlo / include / mlir - hlo / Dialect / mhlo / transforms / mhlo_passes . td <nl> def LegalizeControlFlowPass : Pass < " mhlo - legalize - control - flow " , " FuncOp " > { <nl> let constructor = " createLegalizeControlFlowPass ( ) " ; <nl> } <nl> <nl> + def LegalizeControlFlowToScfPass : Pass < " mhlo - control - flow - to - scf " , " FuncOp " > { <nl> + let summary = " Legalize from MHLO control flow to SCF control flow . " ; <nl> + let constructor = " createControlFlowToScfPass ( ) " ; <nl> + } <nl> + <nl> def LegalizeGatherToTorchIndexSelectPass : Pass < " mhlo - legalize - gather - to - torch - index - select " , " FuncOp " > { <nl> let summary = " Legalizes gathers to a torch index select . " ; <nl> let constructor = " createLegalizeGatherToTorchIndexSelectPass ( ) " ; <nl> mmm a / tensorflow / compiler / mlir / hlo / include / mlir - hlo / Dialect / mhlo / transforms / passes . h <nl> ppp b / tensorflow / compiler / mlir / hlo / include / mlir - hlo / Dialect / mhlo / transforms / passes . h <nl> namespace mhlo { <nl> / / / Lowers HLO control flow ops to the Standard dialect . <nl> std : : unique_ptr < OperationPass < FuncOp > > createLegalizeControlFlowPass ( ) ; <nl> <nl> + / / / Lowers MHLO control flow ops to the SCF dialect . <nl> + std : : unique_ptr < OperationPass < FuncOp > > createControlFlowToScfPass ( ) ; <nl> + <nl> / / / Lowers from HLO dialect to Standard dialect . <nl> std : : unique_ptr < OperationPass < FuncOp > > createLegalizeToStdPass ( ) ; <nl> <nl> mmm a / tensorflow / compiler / mlir / hlo / lib / Dialect / mhlo / transforms / CMakeLists . txt <nl> ppp b / tensorflow / compiler / mlir / hlo / lib / Dialect / mhlo / transforms / CMakeLists . txt <nl> add_mlir_library ( MhloToLhloConversion <nl> add_mlir_library ( MhloToStandard <nl> legalize_control_flow . cc <nl> legalize_to_standard . cc <nl> + mhlo_control_flow_to_scf . cc <nl> <nl> DEPENDS <nl> MLIRhlo_opsIncGen <nl> mmm a / tensorflow / compiler / mlir / hlo / lib / Dialect / mhlo / transforms / hlo_legalize_to_lhlo . cc <nl> ppp b / tensorflow / compiler / mlir / hlo / lib / Dialect / mhlo / transforms / hlo_legalize_to_lhlo . cc <nl> void populateHLOToLHLOConversionPattern ( <nl> HloToLhloOpConverter < mhlo : : DivOp > , <nl> HloToLhloOpConverter < mhlo : : DotOp > , <nl> HloToLhloOpConverter < mhlo : : ExpOp > , <nl> + HloToLhloOpConverter < mhlo : : FloorOp > , <nl> HloToLhloOpConverter < mhlo : : GatherOp > , <nl> HloToLhloOpConverter < mhlo : : ImagOp > , <nl> HloToLhloOpConverter < mhlo : : IotaOp > , <nl> mmm a / tensorflow / compiler / mlir / hlo / lib / Dialect / mhlo / transforms / legalize_to_linalg . 
cc <nl> ppp b / tensorflow / compiler / mlir / hlo / lib / Dialect / mhlo / transforms / legalize_to_linalg . cc <nl> void populateLHLOToLinalgConversionPattern ( MLIRContext * context , <nl> PointwiseToLinalgConverter < lmhlo : : CosOp > , <nl> PointwiseToLinalgConverter < lmhlo : : DivOp > , <nl> PointwiseToLinalgConverter < lmhlo : : ExpOp > , <nl> + PointwiseToLinalgConverter < lmhlo : : FloorOp > , <nl> PointwiseToLinalgConverter < lmhlo : : ImagOp > , <nl> PointwiseToLinalgConverter < lmhlo : : LogOp > , <nl> PointwiseToLinalgConverter < lmhlo : : MaxOp > , <nl> void populateHLOToLinalgConversionPattern ( MLIRContext * context , <nl> PointwiseToLinalgConverter < mhlo : : CosOp , false > , <nl> PointwiseToLinalgConverter < mhlo : : DivOp , false > , <nl> PointwiseToLinalgConverter < mhlo : : ExpOp , false > , <nl> + PointwiseToLinalgConverter < mhlo : : FloorOp , false > , <nl> PointwiseToLinalgConverter < mhlo : : ImagOp , false > , <nl> PointwiseToLinalgConverter < mhlo : : LogOp , false > , <nl> PointwiseToLinalgConverter < mhlo : : MaxOp , false > , <nl> new file mode 100644 <nl> index 0000000000000 . . aba7b0784132a <nl> mmm / dev / null <nl> ppp b / tensorflow / compiler / mlir / hlo / lib / Dialect / mhlo / transforms / mhlo_control_flow_to_scf . cc <nl> <nl> + / * Copyright 2020 The TensorFlow Authors . All Rights Reserved . <nl> + <nl> + Licensed under the Apache License , Version 2 . 0 ( the " License " ) ; <nl> + you may not use this file except in compliance with the License . <nl> + You may obtain a copy of the License at <nl> + <nl> + http : / / www . apache . org / licenses / LICENSE - 2 . 0 <nl> + <nl> + Unless required by applicable law or agreed to in writing , software <nl> + distributed under the License is distributed on an " AS IS " BASIS , <nl> + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND , either express or implied . <nl> + See the License for the specific language governing permissions and <nl> + limitations under the License . <nl> + = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = * / <nl> + <nl> + # include " llvm / Support / Casting . h " <nl> + # include " mlir - hlo / Dialect / mhlo / IR / hlo_ops . h " <nl> + # include " mlir - hlo / Dialect / mhlo / transforms / passes . h " <nl> + # include " mlir / Dialect / SCF / SCF . h " <nl> + # include " mlir / Dialect / StandardOps / IR / Ops . h " <nl> + # include " mlir / IR / Matchers . h " <nl> + # include " mlir / IR / Operation . h " <nl> + # include " mlir / IR / StandardTypes . h " <nl> + # include " mlir / IR / Value . h " <nl> + # include " mlir / Pass / Pass . h " <nl> + # include " mlir / Support / LLVM . h " <nl> + <nl> + # define DEBUG_TYPE " mhlo - control - flow - to - scf " <nl> + <nl> + namespace mlir { <nl> + namespace mhlo { <nl> + <nl> + namespace { <nl> + <nl> + / / / Convert MHLO While to SCF . <nl> + void MatchAndRewrite ( WhileOp whileOp ) ; <nl> + <nl> + / / / Pass that converts MHLO control flow to SCF . <nl> + class ControlFlowToScfPass <nl> + : public mlir : : PassWrapper < ControlFlowToScfPass , FunctionPass > { <nl> + void getDependentDialects ( DialectRegistry & registry ) const override { <nl> + registry . insert < scf : : SCFDialect > ( ) ; <nl> + } <nl> + void runOnFunction ( ) override { <nl> + getFunction ( ) . 
walk ( [ & ] ( WhileOp whileOp ) { MatchAndRewrite ( whileOp ) ; } ) ; <nl> + } <nl> + } ; <nl> + <nl> + / / TODO ( jpienaar ) : Look into reformulating as a pattern . <nl> + void MatchAndRewrite ( WhileOp whileOp ) { <nl> + / / Handle pattern : <nl> + / / x = start <nl> + / / step = . . . <nl> + / / limit = . . . <nl> + / / while ( x < limit ) { . . . x + = step ; } <nl> + <nl> + / / Only handling multi value while loops at the moment . <nl> + auto tupleOp = whileOp . getOperand ( ) . getDefiningOp < TupleOp > ( ) ; <nl> + if ( ! tupleOp ) return ; <nl> + auto bodyReturn = whileOp . body ( ) <nl> + . front ( ) <nl> + . getTerminator ( ) <nl> + - > getOperand ( 0 ) <nl> + . getDefiningOp < mhlo : : TupleOp > ( ) ; <nl> + / / Note : due to the shape restrictions on While , if the operand to While is a <nl> + / / tuple , then so is the return type of the body . But the verifier isn ' t <nl> + / / checking that at the moment , so just bail out here if this doesn ' t hold . <nl> + if ( ! bodyReturn ) return ; <nl> + <nl> + Value result = whileOp . cond ( ) . front ( ) . getTerminator ( ) - > getOperand ( 0 ) ; <nl> + / / TODO ( jpienaar ) : Expand to handle more than simple case with LT compare and <nl> + / / constant step . <nl> + auto cmp = result . getDefiningOp < mhlo : : CompareOp > ( ) ; <nl> + if ( ! cmp | | cmp . comparison_direction ( ) ! = " LT " ) return ; <nl> + <nl> + const int kConstant = - 1 ; <nl> + auto getValueAndIndex = [ & ] ( Value val ) - > std : : pair < Value , int > { <nl> + if ( matchPattern ( val , m_Constant ( ) ) ) return { val , kConstant } ; <nl> + / / If it is defined by a tuple , then the tuple has to have been fed in and <nl> + / / the external value is captured . <nl> + if ( auto gte = val . getDefiningOp < GetTupleElementOp > ( ) ) { <nl> + if ( ! gte . getOperand ( ) . isa < mlir : : BlockArgument > ( ) ) return { nullptr , 0 } ; <nl> + int index = gte . index ( ) . getSExtValue ( ) ; <nl> + return { tupleOp . getOperand ( index ) , index } ; <nl> + } <nl> + return { nullptr , 0 } ; <nl> + } ; <nl> + <nl> + using ValueIndex = std : : pair < Value , int > ; <nl> + ValueIndex loopIndVar = getValueAndIndex ( cmp . lhs ( ) ) ; <nl> + ValueIndex max = getValueAndIndex ( cmp . rhs ( ) ) ; <nl> + if ( ! loopIndVar . first | | ! max . first ) return ; <nl> + auto add = <nl> + bodyReturn . getOperand ( loopIndVar . second ) . getDefiningOp < mhlo : : AddOp > ( ) ; <nl> + if ( ! add ) return ; <nl> + ValueIndex step = getValueAndIndex ( add . rhs ( ) ) ; <nl> + if ( step . second ! = kConstant | | ! step . first ) return ; <nl> + <nl> + / / Only handle case where tuple isn ' t propagated as is for now . <nl> + / / TODO ( jpienaar ) : Remove this when a tuple is also created inside the loop <nl> + / / to propagate . <nl> + for ( auto * use : whileOp . body ( ) . front ( ) . getArgument ( 0 ) . getUsers ( ) ) <nl> + if ( ! isa < GetTupleElementOp > ( use ) ) return ; <nl> + <nl> + LLVM_DEBUG ( llvm : : dbgs ( ) < < " Found for ( " < < whileOp . getLoc ( ) < < " ) : \ n " ; <nl> + llvm : : dbgs ( ) < < " loopIndVar = " < < loopIndVar . second < < " max = " <nl> + < < max . second < < " step = " < < step . second < < " \ n " ; <nl> + llvm : : dbgs ( ) < < " loopIndVar = " < < loopIndVar . first < < " max = " <nl> + < < max . first < < " step = " < < step . first < < " \ n " ; ) ; <nl> + OpBuilder b ( whileOp ) ; <nl> + / / Inputs to new for loop . <nl> + llvm : : SmallVector < Value , 4 > input ; <nl> + input . reserve ( tupleOp . 
getNumOperands ( ) ) ; <nl> + for ( auto r : tupleOp . getOperands ( ) . take_front ( loopIndVar . second ) ) <nl> + input . push_back ( r ) ; <nl> + for ( auto r : tupleOp . getOperands ( ) . drop_front ( loopIndVar . second + 1 ) ) <nl> + input . push_back ( r ) ; <nl> + <nl> + auto tensorIndexType = RankedTensorType : : get ( { } , b . getIndexType ( ) ) ; <nl> + auto getAsIndex = [ & ] ( Value val ) { <nl> + auto loc = whileOp . getLoc ( ) ; <nl> + return b . create < ExtractElementOp > ( <nl> + loc , b . create < IndexCastOp > ( loc , tensorIndexType , val ) , ValueRange ( ) ) ; <nl> + } ; <nl> + <nl> + / / SCF for uses index type , so converted these . <nl> + auto forloopIndVar = getAsIndex ( loopIndVar . first ) ; <nl> + auto forMax = getAsIndex ( max . first ) ; <nl> + auto forStep = getAsIndex ( step . first ) ; <nl> + auto forOp = b . create < mlir : : scf : : ForOp > ( whileOp . getLoc ( ) , forloopIndVar , <nl> + forMax , forStep , input ) ; <nl> + / / Transfer the body without the block arguments . <nl> + forOp . getLoopBody ( ) . front ( ) . getOperations ( ) . splice ( <nl> + forOp . getLoopBody ( ) . front ( ) . getOperations ( ) . end ( ) , <nl> + whileOp . body ( ) . front ( ) . getOperations ( ) ) ; <nl> + <nl> + b . setInsertionPointToStart ( & forOp . getLoopBody ( ) . front ( ) ) ; <nl> + auto loopIndVarElType = <nl> + loopIndVar . first . getType ( ) . cast < ShapedType > ( ) . getElementType ( ) ; <nl> + Value indVar = b . create < SplatOp > ( <nl> + whileOp . getLoc ( ) , RankedTensorType : : get ( { } , loopIndVarElType ) , <nl> + b . create < IndexCastOp > ( whileOp . getLoc ( ) , loopIndVarElType , <nl> + forOp . getInductionVar ( ) ) ) ; <nl> + / / Update all block argument users to the SCF For args . <nl> + for ( auto * use : <nl> + llvm : : make_early_inc_range ( whileOp . body ( ) . getArgument ( 0 ) . getUsers ( ) ) ) { <nl> + / / TODO ( jpienaar ) : Expand here too when we allow using the tuple in the <nl> + / / loop . <nl> + auto gte = cast < GetTupleElementOp > ( use ) ; <nl> + / / If the loop induction var , then refer to the loop induction variable as <nl> + / / this operand is not updated . <nl> + if ( gte . index ( ) = = loopIndVar . second ) { <nl> + use - > getResult ( 0 ) . replaceAllUsesWith ( indVar ) ; <nl> + use - > erase ( ) ; <nl> + continue ; <nl> + } <nl> + int index = gte . index ( ) . getSExtValue ( ) ; <nl> + / / If after the loop induction variable , then decrement as we don ' t include <nl> + / / the loop induction variable in the for iter operands . <nl> + if ( index > loopIndVar . second ) - - index ; <nl> + use - > getResult ( 0 ) . replaceAllUsesWith ( forOp . getIterOperands ( ) [ index ] ) ; <nl> + use - > erase ( ) ; <nl> + } <nl> + <nl> + / / Create new yield op without induction var update . <nl> + SmallVector < Value , 4 > newYieldOps ; <nl> + newYieldOps . reserve ( bodyReturn . getNumOperands ( ) - 1 ) ; <nl> + for ( auto r : bodyReturn . getOperands ( ) . take_front ( loopIndVar . second ) ) <nl> + newYieldOps . push_back ( r ) ; <nl> + for ( auto r : bodyReturn . getOperands ( ) . drop_front ( loopIndVar . second + 1 ) ) <nl> + newYieldOps . push_back ( r ) ; <nl> + / / Delete return & tuple op . <nl> + forOp . getLoopBody ( ) . front ( ) . back ( ) . erase ( ) ; <nl> + forOp . getLoopBody ( ) . front ( ) . back ( ) . erase ( ) ; <nl> + b . setInsertionPointToEnd ( & forOp . getLoopBody ( ) . front ( ) ) ; <nl> + b . create < scf : : YieldOp > ( whileOp . 
getLoc ( ) , newYieldOps ) ; <nl> + <nl> + / / Recombine output tuple with max value of induction variable . <nl> + llvm : : SmallVector < Value , 4 > loopOut ; <nl> + loopOut . reserve ( forOp . getNumResults ( ) + 1 ) ; <nl> + for ( auto r : forOp . getResults ( ) . take_front ( loopIndVar . second ) ) <nl> + loopOut . push_back ( r ) ; <nl> + loopOut . push_back ( max . first ) ; <nl> + for ( auto r : forOp . getResults ( ) . drop_front ( loopIndVar . second ) ) <nl> + loopOut . push_back ( r ) ; <nl> + b . setInsertionPoint ( whileOp ) ; <nl> + auto newRes = b . create < mhlo : : TupleOp > ( whileOp . getLoc ( ) , loopOut ) ; <nl> + whileOp . replaceAllUsesWith ( newRes . getOperation ( ) ) ; <nl> + whileOp . erase ( ) ; <nl> + } <nl> + <nl> + } / / anonymous namespace <nl> + <nl> + std : : unique_ptr < OperationPass < FuncOp > > createControlFlowToScfPass ( ) { <nl> + return std : : make_unique < ControlFlowToScfPass > ( ) ; <nl> + } <nl> + <nl> + } / / namespace mhlo <nl> + } / / namespace mlir <nl> mmm a / tensorflow / compiler / mlir / hlo / tests / hlo - legalize - to - lhlo . mlir <nl> ppp b / tensorflow / compiler / mlir / hlo / tests / hlo - legalize - to - lhlo . mlir <nl> func @ cos ( % operand : memref < 2x2xf32 > , % result : memref < 2x2xf32 > ) { <nl> <nl> / / mmm - - <nl> <nl> + / / BOTH - LABEL : func @ floor <nl> + func @ floor ( % operand : memref < 2x2xf32 > , % result : memref < 2x2xf32 > ) { <nl> + % tensor_operand = tensor_load % operand : memref < 2x2xf32 > <nl> + % tensor_result = " mhlo . floor " ( % tensor_operand ) <nl> + : ( tensor < 2x2xf32 > ) - > tensor < 2x2xf32 > <nl> + / / BOTH : " lmhlo . floor " ( % { { . * } } , % { { . * } } ) <nl> + tensor_store % tensor_result , % result : memref < 2x2xf32 > <nl> + return <nl> + } <nl> + <nl> + / / mmm - - <nl> + <nl> / / BOTH - LABEL : func @ neg <nl> func @ neg ( % operand : memref < 2x2xf32 > , % result : memref < 2x2xf32 > ) { <nl> % tensor_operand = tensor_load % operand : memref < 2x2xf32 > <nl> new file mode 100644 <nl> index 0000000000000 . . 9c887a73a0f50 <nl> mmm / dev / null <nl> ppp b / tensorflow / compiler / mlir / hlo / tests / legalize_to_scf . mlir <nl> <nl> + / / RUN : mlir - hlo - opt - - mhlo - control - flow - to - scf % s | FileCheck % s <nl> + <nl> + func @ lt_loop ( % arg0 : tensor < 4xf32 > , % arg1 : tensor < f32 > , % arg2 : tensor < f32 > , % arg3 : tensor < 4xf32 > , % arg4 : tensor < f32 > , % arg5 : tensor < f32 > , % arg6 : tensor < f32 > , % arg7 : tensor < f32 > , % arg8 : tensor < i32 > ) - > ( tuple < tensor < i32 > , tensor < i32 > , tensor < i32 > > ) { <nl> + % cst = constant dense < - 1 > : tensor < i32 > <nl> + % cst_0 = constant dense < 1 > : tensor < i32 > <nl> + % cst_1 = constant dense < 0 > : tensor < i32 > <nl> + % cst_2 = constant dense < 1000 > : tensor < i32 > <nl> + % 0 = " mhlo . tuple " ( % cst_1 , % cst , % cst_2 ) : ( tensor < i32 > , tensor < i32 > , tensor < i32 > ) - > tuple < tensor < i32 > , tensor < i32 > , tensor < i32 > > <nl> + % 1 = " mhlo . while " ( % 0 ) ( { <nl> + ^ bb0 ( % arg9 : tuple < tensor < i32 > , tensor < i32 > , tensor < i32 > > ) : / / no predecessors <nl> + % 2 = " mhlo . get_tuple_element " ( % arg9 ) { index = 0 : i32 } : ( tuple < tensor < i32 > , tensor < i32 > , tensor < i32 > > ) - > tensor < i32 > <nl> + % 3 = " mhlo . get_tuple_element " ( % arg9 ) { index = 2 : i32 } : ( tuple < tensor < i32 > , tensor < i32 > , tensor < i32 > > ) - > tensor < i32 > <nl> + % 4 = " mhlo . 
compare " ( % 2 , % 3 ) { comparison_direction = " LT " } : ( tensor < i32 > , tensor < i32 > ) - > tensor < i1 > <nl> + " mhlo . return " ( % 4 ) : ( tensor < i1 > ) - > ( ) <nl> + } , { <nl> + ^ bb0 ( % arg9 : tuple < tensor < i32 > , tensor < i32 > , tensor < i32 > > ) : / / no predecessors <nl> + % 2 = " mhlo . get_tuple_element " ( % arg9 ) { index = 0 : i32 } : ( tuple < tensor < i32 > , tensor < i32 > , tensor < i32 > > ) - > tensor < i32 > <nl> + % 3 = mhlo . add % 2 , % cst_0 : tensor < i32 > <nl> + % 4 = " mhlo . get_tuple_element " ( % arg9 ) { index = 1 : i32 } : ( tuple < tensor < i32 > , tensor < i32 > , tensor < i32 > > ) - > tensor < i32 > <nl> + % 5 = " mhlo . get_tuple_element " ( % arg9 ) { index = 2 : i32 } : ( tuple < tensor < i32 > , tensor < i32 > , tensor < i32 > > ) - > tensor < i32 > <nl> + % 6 = " mhlo . tuple " ( % 3 , % 4 , % 5 ) : ( tensor < i32 > , tensor < i32 > , tensor < i32 > ) - > tuple < tensor < i32 > , tensor < i32 > , tensor < i32 > > <nl> + " mhlo . return " ( % 6 ) : ( tuple < tensor < i32 > , tensor < i32 > , tensor < i32 > > ) - > ( ) <nl> + } ) : ( tuple < tensor < i32 > , tensor < i32 > , tensor < i32 > > ) - > tuple < tensor < i32 > , tensor < i32 > , tensor < i32 > > <nl> + return % 1 : tuple < tensor < i32 > , tensor < i32 > , tensor < i32 > > <nl> + } <nl> + <nl> + / / CHECK - LABEL : func @ lt_loop ( <nl> + / / CHECK : % [ [ VAL_9 : . * ] ] = constant dense < - 1 > : tensor < i32 > <nl> + / / CHECK : % [ [ VAL_10 : . * ] ] = constant dense < 1 > : tensor < i32 > <nl> + / / CHECK : % [ [ VAL_11 : . * ] ] = constant dense < 0 > : tensor < i32 > <nl> + / / CHECK : % [ [ VAL_12 : . * ] ] = constant dense < 1000 > : tensor < i32 > <nl> + / / CHECK : % [ [ VAL_14 : . * ] ] = index_cast % [ [ VAL_11 ] ] : tensor < i32 > to tensor < index > <nl> + / / CHECK : % [ [ VAL_15 : . * ] ] = extract_element % [ [ VAL_14 ] ] [ ] : tensor < index > <nl> + / / CHECK : % [ [ VAL_16 : . * ] ] = index_cast % [ [ VAL_12 ] ] : tensor < i32 > to tensor < index > <nl> + / / CHECK : % [ [ VAL_17 : . * ] ] = extract_element % [ [ VAL_16 ] ] [ ] : tensor < index > <nl> + / / CHECK : % [ [ VAL_18 : . * ] ] = index_cast % [ [ VAL_10 ] ] : tensor < i32 > to tensor < index > <nl> + / / CHECK : % [ [ VAL_19 : . * ] ] = extract_element % [ [ VAL_18 ] ] [ ] : tensor < index > <nl> + / / CHECK : scf . for % [ [ VAL_21 : . * ] ] = % [ [ VAL_15 ] ] to % [ [ VAL_17 ] ] step % [ [ VAL_19 ] ] iter_args ( % [ [ VAL_22 : . * ] ] = % [ [ VAL_9 ] ] , % [ [ VAL_23 : . * ] ] = % [ [ VAL_12 ] ] ) <nl> mmm a / tensorflow / compiler / mlir / hlo / tests / lhlo - legalize - to - linalg . mlir <nl> ppp b / tensorflow / compiler / mlir / hlo / tests / lhlo - legalize - to - linalg . mlir <nl> func @ sin ( % input : memref < 2x2xf32 > , <nl> <nl> / / mmm - - <nl> <nl> + / / CHECK - LABEL : func @ floor <nl> + func @ floor ( % input : memref < 2x2xf32 > , % result : memref < 2x2xf32 > ) { <nl> + " lmhlo . floor " ( % input , % result ) : ( memref < 2x2xf32 > , memref < 2x2xf32 > ) - > ( ) <nl> + return <nl> + } <nl> + / / CHECK : linalg . generic <nl> + / / CHECK - NEXT : ^ bb0 ( % [ [ OPERAND_IN : . * ] ] : f32 , % [ [ RESULT_OUT : . * ] ] ) : <nl> + / / CHECK - NEXT : % [ [ RESULT : . * ] ] = floorf % [ [ OPERAND_IN ] ] : f32 <nl> + / / CHECK - NEXT : linalg . yield % [ [ RESULT ] ] : f32 <nl> + <nl> + / / mmm - - <nl> + <nl> / / CHECK - LABEL : func @ negf <nl> func @ negf ( % input : memref < 2x2xf32 > , % result : memref < 2x2xf32 > ) { <nl> " lmhlo . 
negate " ( % input , % result ) : ( memref < 2x2xf32 > , memref < 2x2xf32 > ) - > ( ) <nl> mmm a / tensorflow / compiler / mlir / lite / tests / prepare - tf . mlir <nl> ppp b / tensorflow / compiler / mlir / lite / tests / prepare - tf . mlir <nl> func @ fusedBatchNorm ( tensor < 8x8x8x8xf32 > , tensor < 8xf32 > , tensor < 8xf32 > , tensor < 8 <nl> / / offset - mean * scale * rsqrt ( variance + epsilon ) <nl> / / CHECK : % [ [ ADD2 : . * ] ] = " tf . Add " ( % [ [ MUL2 ] ] , % [ [ SUB ] ] ) <nl> <nl> - / / CHECK : % [ [ BATCHNORM1_a : [ ^ , ] + ] ] , { { . * } } = " tf . FusedBatchNorm " ( % [ [ ADD2 ] ] , % [ [ ARG1 ] ] , % [ [ ARG2 ] ] , % [ [ ARG3 ] ] , % [ [ ARG4 ] ] ) <nl> - / / CHECK : " tf . FusedBatchNorm " ( % [ [ BATCHNORM1_a ] ] , % [ [ ARG1 ] ] , % [ [ ARG2 ] ] , % [ [ ARG3 ] ] , % [ [ ARG4 ] ] ) <nl> + / / CHECK : % [ [ BATCHNORM1_a : [ ^ , ] + ] ] , { { . * } } = " tf . FusedBatchNormV3 " ( % [ [ ADD2 ] ] , % [ [ ARG1 ] ] , % [ [ ARG2 ] ] , % [ [ ARG3 ] ] , % [ [ ARG4 ] ] ) <nl> + / / CHECK : " tf . FusedBatchNormV3 " ( % [ [ BATCHNORM1_a ] ] , % [ [ ARG1 ] ] , % [ [ ARG2 ] ] , % [ [ ARG3 ] ] , % [ [ ARG4 ] ] ) <nl> } <nl> <nl> func @ fusedBatchNormV3 ( tensor < 8x8x8x8xf32 > , tensor < 8xf32 > , tensor < 8xf32 > , tensor < 8xf32 > , tensor < 8xf32 > ) - > ( tensor < 8x8x8x8xf32 > , tensor < 8xf32 > ) { <nl> mmm a / tensorflow / compiler / mlir / lite / transforms / prepare_patterns . td <nl> ppp b / tensorflow / compiler / mlir / lite / transforms / prepare_patterns . td <nl> def : Pat < <nl> ( TF_MulOp $ t , ( TF_MulOp : $ mul ( TF_RsqrtOp ( TF_AddOp $ v , ( TF_ConstOp $ variance_epsilon ) ) ) , $ gamma ) ) , <nl> ( TF_SubOp $ beta , ( TF_MulOp $ m , $ mul ) ) ) > ; <nl> <nl> - / / Converts tf . FusedBatchNorm & tf . FusedBatchNormV3 into a sequence of more primitive arithmetic <nl> + / / Converts tf . FusedBatchNormV3 into a sequence of more primitive arithmetic <nl> / / operations . Specifically , performs the following calculation : <nl> / / <nl> / / ( x - mean ) * scale / sqrt ( variance + epsilon ) + offset <nl> def : Pat < <nl> / / ( x - mean ) * scale / sqrt ( variance + epsilon ) + offset , <nl> / / is then to compute <nl> / / ( x * multiplier ) + ( offset - mean * multiplier ) . <nl> - def : Pattern < <nl> - ( TF_FusedBatchNormOp : $ root <nl> - $ x , $ scale , $ offset , $ mean , $ variance , <nl> - F32Attr : $ epsilon , $ exponential_avg_factor , <nl> - $ data_format , FalseBoolAttr : $ is_training ) , <nl> - [ ( TF_AddOp <nl> - ( TF_MulOp <nl> - $ x , <nl> - ( TF_MulOp : $ multiplier <nl> - $ scale , <nl> - ( TF_RsqrtOp <nl> - ( TF_AddOp $ variance , <nl> - ( TF_ConstOp $ epsilon ) ) ) ) ) , <nl> - ( TF_SubOp $ offset , ( TF_MulOp $ mean , $ multiplier ) ) ) , <nl> - / / We already guaranteed that the last four results has no use so it does <nl> - / / not matter what value we provide here for replacement . <nl> - / * batch_mean = * / ( replaceWithValue $ x ) , <nl> - / * batch_variance = * / ( replaceWithValue $ x ) , <nl> - / * reserve_space_1 = * / ( replaceWithValue $ x ) , <nl> - / * reserve_space_2 = * / ( replaceWithValue $ x ) ] , <nl> - [ ( HasNoUseOf : $ root__1 ) , ( HasNoUseOf : $ root__2 ) , <nl> - ( HasNoUseOf : $ root__3 ) , ( HasNoUseOf : $ root__4 ) ] > ; <nl> <nl> def : Pattern < <nl> ( TF_FusedBatchNormV3Op : $ root <nl> mmm a / tensorflow / compiler / mlir / lite / transforms / prepare_tf . cc <nl> ppp b / tensorflow / compiler / mlir / lite / transforms / prepare_tf . 
cc <nl> struct ConvertTFBroadcastTo : public RewritePattern { <nl> } <nl> } ; <nl> <nl> + struct ConvertFusedBatchNorm : public OpRewritePattern < TF : : FusedBatchNormOp > { <nl> + explicit ConvertFusedBatchNorm ( MLIRContext * context ) <nl> + : OpRewritePattern < TF : : FusedBatchNormOp > ( context ) { } <nl> + <nl> + LogicalResult matchAndRewrite ( TF : : FusedBatchNormOp tf_fused_batch_norm_op , <nl> + PatternRewriter & rewriter ) const override { <nl> + auto new_result_types = <nl> + llvm : : to_vector < 6 > ( tf_fused_batch_norm_op . getResultTypes ( ) ) ; <nl> + / / reserve_space_3 <nl> + new_result_types . push_back ( <nl> + UnrankedTensorType : : get ( FloatType : : getF32 ( rewriter . getContext ( ) ) ) ) ; <nl> + <nl> + OperationState new_state ( tf_fused_batch_norm_op . getLoc ( ) , <nl> + TF : : FusedBatchNormV3Op : : getOperationName ( ) , <nl> + tf_fused_batch_norm_op . getOperands ( ) , <nl> + new_result_types , <nl> + tf_fused_batch_norm_op . getAttrs ( ) ) ; <nl> + Operation * tf_fused_batch_norm_op_v3 = rewriter . createOperation ( new_state ) ; <nl> + <nl> + rewriter . replaceOp ( tf_fused_batch_norm_op , <nl> + tf_fused_batch_norm_op_v3 - > getResults ( ) . drop_back ( ) ) ; <nl> + return success ( ) ; <nl> + } <nl> + } ; <nl> + <nl> # include " tensorflow / compiler / mlir / lite / transforms / generated_prepare_tf . inc " <nl> <nl> / / Returns success if all the operations in the ` op ` ' s regions including ` op ` <nl> void PrepareTFPass : : runOnFunction ( ) { <nl> / / replaced with a single Conv op with dilation parameter . <nl> patterns . insert < ConvertTFDilatedConvOp < TF : : Conv2DOp > , <nl> ConvertTFDilatedConvOp < TF : : DepthwiseConv2dNativeOp > > ( ctx ) ; <nl> + <nl> + patterns . insert < ConvertFusedBatchNorm > ( ctx ) ; <nl> TFL : : populateWithGenerated ( ctx , & patterns ) ; <nl> / / TODO ( karimnosseir ) : Split to separate pass probably after <nl> / / deciding on long term plan for this optimization . <nl> mmm a / tensorflow / compiler / mlir / tensorflow / ir / tf_generated_ops . td <nl> ppp b / tensorflow / compiler / mlir / tensorflow / ir / tf_generated_ops . td <nl> an output element , this operation computes \ \ ( y = | x | \ \ ) . <nl> def TF_AcosOp : TF_Op < " Acos " , [ NoSideEffect , SameOperandsAndResultType ] > { <nl> let summary = " Computes acos of x element - wise . " ; <nl> <nl> + let description = [ { <nl> + Provided an input tensor , the ` tf . math . acos ` operation returns the inverse cosine of each element of the tensor . If ` y = tf . math . cos ( x ) ` then , ` x = tf . math . acos ( y ) ` . <nl> + <nl> + Input range is ` [ - 1 , 1 ] ` and the output has a range of ` [ 0 , pi ] ` . <nl> + } ] ; <nl> + <nl> let arguments = ( ins <nl> TensorOf < [ BF16 , F16 , F32 , F64 , I16 , I32 , I64 , I8 , TF_Complex128 , TF_Complex64 ] > : $ x <nl> ) ; <nl> def TF_AddOp : TF_Op < " Add " , [ NoSideEffect , ResultsBroadcastableShape , TF_LayoutA <nl> let description = [ { <nl> * NOTE * : ` Add ` supports broadcasting . ` AddN ` does not . More about broadcasting <nl> [ here ] ( http : / / docs . scipy . org / doc / numpy / user / basics . broadcasting . html ) <nl> + <nl> + Given two input tensors , the ` tf . add ` operation computes the sum for every element in the tensor . <nl> + <nl> + Both input and output have a range ` ( - inf , inf ) ` . <nl> } ] ; <nl> <nl> let arguments = ( ins <nl> Inputs must be of same size and shape . 
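// A minimal sketch, for illustration only (not part of the commit): the
// ConvertFusedBatchNorm pattern above uses a general MLIR idiom for upgrading
// an op to a newer variant with extra results - rebuild it via OperationState
// under the new op name, append the extra result type, and forward only the
// original results. "UpgradeOpPattern", "SourceOp" and "TargetOp" below are
// hypothetical names; the sketch assumes the target op takes the same
// operands and attributes plus one trailing unranked-f32 reserve-space result.
#include "llvm/ADT/SmallVector.h"
#include "mlir/IR/Builders.h"
#include "mlir/IR/PatternMatch.h"
#include "mlir/IR/StandardTypes.h"  // UnrankedTensorType, FloatType

template <typename SourceOp, typename TargetOp>
struct UpgradeOpPattern : public mlir::OpRewritePattern<SourceOp> {
  using mlir::OpRewritePattern<SourceOp>::OpRewritePattern;

  mlir::LogicalResult matchAndRewrite(
      SourceOp op, mlir::PatternRewriter &rewriter) const override {
    auto result_types =
        llvm::to_vector<6>(op.getOperation()->getResultTypes());
    // The newer variant returns one extra (here unused) reserve-space tensor.
    result_types.push_back(mlir::UnrankedTensorType::get(
        mlir::FloatType::getF32(rewriter.getContext())));
    mlir::OperationState state(op.getLoc(), TargetOp::getOperationName(),
                               op.getOperation()->getOperands(), result_types,
                               op.getOperation()->getAttrs());
    mlir::Operation *upgraded = rewriter.createOperation(state);
    // Replace the old op with all but the appended result.
    rewriter.replaceOp(op, upgraded->getResults().drop_back());
    return mlir::success();
  }
};
// Usage would mirror what the pass registers via ConvertFusedBatchNorm, e.g.
// patterns.insert<UpgradeOpPattern<TF::FusedBatchNormOp,
//                                  TF::FusedBatchNormV3Op>>(ctx);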
<nl> let hasFolder = 1 ; <nl> } <nl> <nl> - def TF_AddV2Op : TF_Op < " AddV2 " , [ Commutative , NoSideEffect , ResultsBroadcastableShape , TF_CwiseBinary , TF_LayoutAgnostic , TF_SameOperandsAndResultElementTypeResolveRef ] > , <nl> - WithBroadcastableBinOpBuilder { <nl> - let summary = " Returns x + y element - wise . " ; <nl> - <nl> - let description = [ { <nl> - * NOTE * : ` Add ` supports broadcasting . ` AddN ` does not . More about broadcasting <nl> - [ here ] ( http : / / docs . scipy . org / doc / numpy / user / basics . broadcasting . html ) <nl> - } ] ; <nl> - <nl> - let arguments = ( ins <nl> - TensorOf < [ BF16 , F16 , F32 , F64 , I16 , I32 , I64 , I8 , TF_Complex128 , TF_Complex64 , TF_Uint8 ] > : $ x , <nl> - TensorOf < [ BF16 , F16 , F32 , F64 , I16 , I32 , I64 , I8 , TF_Complex128 , TF_Complex64 , TF_Uint8 ] > : $ y <nl> - ) ; <nl> - <nl> - let results = ( outs <nl> - TensorOf < [ BF16 , F16 , F32 , F64 , I16 , I32 , I64 , I8 , TF_Complex128 , TF_Complex64 , TF_Uint8 ] > : $ z <nl> - ) ; <nl> - <nl> - TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr < 0 > ; <nl> - <nl> - let hasCanonicalizer = 1 ; <nl> - <nl> - let hasFolder = 1 ; <nl> - } <nl> - <nl> def TF_AdjustContrastv2Op : TF_Op < " AdjustContrastv2 " , [ NoSideEffect ] > { <nl> let summary = " Adjust the contrast of one or more images . " ; <nl> <nl> def TF_DivOp : TF_Op < " Div " , [ NoSideEffect , ResultsBroadcastableShape , TF_SameOpe <nl> let hasFolder = 1 ; <nl> } <nl> <nl> - def TF_DivNoNanOp : TF_Op < " DivNoNan " , [ NoSideEffect , ResultsBroadcastableShape , TF_SameOperandsAndResultElementTypeResolveRef ] > , <nl> - WithBroadcastableBinOpBuilder { <nl> - let summary = " Returns 0 if the denominator is zero . " ; <nl> - <nl> - let description = [ { <nl> - * NOTE * : ` DivNoNan ` supports broadcasting . More about broadcasting <nl> - [ here ] ( http : / / docs . scipy . org / doc / numpy / user / basics . broadcasting . html ) <nl> - } ] ; <nl> - <nl> - let arguments = ( ins <nl> - TensorOf < [ F16 , F32 , F64 , TF_Complex128 , TF_Complex64 ] > : $ x , <nl> - TensorOf < [ F16 , F32 , F64 , TF_Complex128 , TF_Complex64 ] > : $ y <nl> - ) ; <nl> - <nl> - let results = ( outs <nl> - TensorOf < [ F16 , F32 , F64 , TF_Complex128 , TF_Complex64 ] > : $ z <nl> - ) ; <nl> - <nl> - TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr < 0 > ; <nl> - } <nl> - <nl> def TF_DynamicStitchOp : TF_Op < " DynamicStitch " , [ NoSideEffect , SameVariadicOperandSize ] > { <nl> let summary = [ { <nl> Interleave the values from the ` data ` tensors into a single tensor . <nl> retained with length 1 . 
<nl> } ] ; <nl> <nl> let arguments = ( ins <nl> - TensorOf < [ BF16 , F16 , F32 , F64 , I16 , I32 , I64 , I8 , TF_Complex128 , TF_Complex64 , TF_Qint32 , TF_Qint8 , TF_Quint8 , TF_Uint16 , TF_Uint32 , TF_Uint64 , TF_Uint8 ] > : $ input , <nl> + TensorOf < [ BF16 , F16 , F32 , F64 , I16 , I32 , I64 , I8 , TF_Qint16 , TF_Qint32 , TF_Qint8 , TF_Quint16 , TF_Quint8 , TF_Uint16 , TF_Uint32 , TF_Uint64 , TF_Uint8 ] > : $ input , <nl> TF_I32OrI64Tensor : $ reduction_indices , <nl> <nl> DefaultValuedAttr < BoolAttr , " false " > : $ keep_dims <nl> ) ; <nl> <nl> let results = ( outs <nl> - TensorOf < [ BF16 , F16 , F32 , F64 , I16 , I32 , I64 , I8 , TF_Complex128 , TF_Complex64 , TF_Qint32 , TF_Qint8 , TF_Quint8 , TF_Uint16 , TF_Uint32 , TF_Uint64 , TF_Uint8 ] > : $ output <nl> + TensorOf < [ BF16 , F16 , F32 , F64 , I16 , I32 , I64 , I8 , TF_Qint16 , TF_Qint32 , TF_Qint8 , TF_Quint16 , TF_Quint8 , TF_Uint16 , TF_Uint32 , TF_Uint64 , TF_Uint8 ] > : $ output <nl> ) ; <nl> <nl> TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr < 0 > ; <nl> def TF_MaxPoolGradOp : TF_Op < " MaxPoolGrad " , [ NoSideEffect ] > { <nl> } ] ; <nl> } <nl> <nl> - def TF_MaximumOp : TF_Op < " Maximum " , [ NoSideEffect , ResultsBroadcastableShape , TF_SameOperandsAndResultElementTypeResolveRef ] > , <nl> - WithBroadcastableBinOpBuilder { <nl> - let summary = " Returns the max of x and y ( i . e . x > y ? x : y ) element - wise . " ; <nl> - <nl> - let description = [ { <nl> - * NOTE * : ` Maximum ` supports broadcasting . More about broadcasting <nl> - [ here ] ( http : / / docs . scipy . org / doc / numpy / user / basics . broadcasting . html ) <nl> - } ] ; <nl> - <nl> - let arguments = ( ins <nl> - TensorOf < [ BF16 , F16 , F32 , F64 , I16 , I32 , I64 , TF_Uint8 ] > : $ x , <nl> - TensorOf < [ BF16 , F16 , F32 , F64 , I16 , I32 , I64 , TF_Uint8 ] > : $ y <nl> - ) ; <nl> - <nl> - let results = ( outs <nl> - TensorOf < [ BF16 , F16 , F32 , F64 , I16 , I32 , I64 , TF_Uint8 ] > : $ z <nl> - ) ; <nl> - <nl> - TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr < 0 > ; <nl> - } <nl> - <nl> def TF_MeanOp : TF_Op < " Mean " , [ NoSideEffect , TF_FoldOperandsTransposeInterface ] > { <nl> let summary = " Computes the mean of elements across dimensions of a tensor . " ; <nl> <nl> retained with length 1 . <nl> } ] ; <nl> <nl> let arguments = ( ins <nl> - TensorOf < [ BF16 , F16 , F32 , F64 , I16 , I32 , I64 , I8 , TF_Complex128 , TF_Complex64 , TF_Qint32 , TF_Qint8 , TF_Quint8 , TF_Uint16 , TF_Uint32 , TF_Uint64 , TF_Uint8 ] > : $ input , <nl> + TensorOf < [ BF16 , F16 , F32 , F64 , I16 , I32 , I64 , I8 , TF_Qint16 , TF_Qint32 , TF_Qint8 , TF_Quint16 , TF_Quint8 , TF_Uint16 , TF_Uint32 , TF_Uint64 , TF_Uint8 ] > : $ input , <nl> TF_I32OrI64Tensor : $ reduction_indices , <nl> <nl> DefaultValuedAttr < BoolAttr , " false " > : $ keep_dims <nl> ) ; <nl> <nl> let results = ( outs <nl> - TensorOf < [ BF16 , F16 , F32 , F64 , I16 , I32 , I64 , I8 , TF_Complex128 , TF_Complex64 , TF_Qint32 , TF_Qint8 , TF_Quint8 , TF_Uint16 , TF_Uint32 , TF_Uint64 , TF_Uint8 ] > : $ output <nl> + TensorOf < [ BF16 , F16 , F32 , F64 , I16 , I32 , I64 , I8 , TF_Qint16 , TF_Qint32 , TF_Qint8 , TF_Quint16 , TF_Quint8 , TF_Uint16 , TF_Uint32 , TF_Uint64 , TF_Uint8 ] > : $ output <nl> ) ; <nl> <nl> TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr < 0 > ; <nl> tf . real ( input ) = = > [ - 2 . 25 , 3 . 
25 ] <nl> TF_DerivedResultTypeAttr Tout = TF_DerivedResultTypeAttr < 0 > ; <nl> } <nl> <nl> - def TF_RealDivOp : TF_Op < " RealDiv " , [ NoSideEffect , ResultsBroadcastableShape , TF_CwiseBinary ] > , <nl> - WithBroadcastableBinOpBuilder { <nl> - let summary = " Returns x / y element - wise for real types . " ; <nl> - <nl> - let description = [ { <nl> - If ` x ` and ` y ` are reals , this will return the floating - point division . <nl> - <nl> - * NOTE * : ` Div ` supports broadcasting . More about broadcasting <nl> - [ here ] ( http : / / docs . scipy . org / doc / numpy / user / basics . broadcasting . html ) <nl> - } ] ; <nl> - <nl> - let arguments = ( ins <nl> - TensorOf < [ BF16 , F16 , F32 , F64 , I16 , I32 , I64 , I8 , TF_Complex128 , TF_Complex64 , TF_Uint16 , TF_Uint8 ] > : $ x , <nl> - TensorOf < [ BF16 , F16 , F32 , F64 , I16 , I32 , I64 , I8 , TF_Complex128 , TF_Complex64 , TF_Uint16 , TF_Uint8 ] > : $ y <nl> - ) ; <nl> - <nl> - let results = ( outs <nl> - TensorOf < [ BF16 , F16 , F32 , F64 , I16 , I32 , I64 , I8 , TF_Complex128 , TF_Complex64 , TF_Uint16 , TF_Uint8 ] > : $ z <nl> - ) ; <nl> - <nl> - TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr < 0 > ; <nl> - <nl> - let hasCanonicalizer = 1 ; <nl> - <nl> - let hasFolder = 1 ; <nl> - } <nl> - <nl> def TF_ReciprocalOp : TF_Op < " Reciprocal " , [ NoSideEffect , SameOperandsAndResultType ] > { <nl> let summary = " Computes the reciprocal of x element - wise . " ; <nl> <nl> On GPU , if an out of bound index is found , the index is ignored . <nl> ] ; <nl> } <nl> <nl> + def TF_TensorStridedSliceUpdateOp : TF_Op < " TensorStridedSliceUpdate " , [ NoSideEffect ] > { <nl> + let summary = " Assign ` value ` to the sliced l - value reference of ` input ` . " ; <nl> + <nl> + let description = [ { <nl> + The values of ` value ` are assigned to the positions in the tensor ` input ` that <nl> + are selected by the slice parameters . The slice parameters ` begin ` ` end ` <nl> + ` strides ` etc . work exactly as in ` StridedSlice ` . <nl> + <nl> + NOTE this op currently does not support broadcasting and so ` value ` ' s shape <nl> + must be exactly the shape produced by the slice of ` input ` . <nl> + } ] ; <nl> + <nl> + let arguments = ( ins <nl> + TF_Tensor : $ input , <nl> + TF_I32OrI64Tensor : $ begin , <nl> + TF_I32OrI64Tensor : $ end , <nl> + TF_I32OrI64Tensor : $ strides , <nl> + TF_Tensor : $ value , <nl> + <nl> + DefaultValuedAttr < I64Attr , " 0 " > : $ begin_mask , <nl> + DefaultValuedAttr < I64Attr , " 0 " > : $ end_mask , <nl> + DefaultValuedAttr < I64Attr , " 0 " > : $ ellipsis_mask , <nl> + DefaultValuedAttr < I64Attr , " 0 " > : $ new_axis_mask , <nl> + DefaultValuedAttr < I64Attr , " 0 " > : $ shrink_axis_mask <nl> + ) ; <nl> + <nl> + let results = ( outs <nl> + TF_Tensor : $ output <nl> + ) ; <nl> + <nl> + TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr < 0 > ; <nl> + TF_DerivedOperandTypeAttr Index = TF_DerivedOperandTypeAttr < 1 > ; <nl> + } <nl> + <nl> def TF_TileOp : TF_Op < " Tile " , [ NoSideEffect ] > { <nl> let summary = " Constructs a tensor by tiling a given tensor . " ; <nl> <nl> create these operators . <nl> DefaultValuedAttr < I64ArrayAttr , " { 1 , 1 , 1 , 1 } " > : $ dilations , <nl> DefaultValuedAttr < BoolAttr , " true " > : $ use_cudnn_on_gpu , <nl> DefaultValuedAttr < StrArrayAttr , " { } " > : $ fused_ops , <nl> - DefaultValuedAttr < F32Attr , " 0 . 0001f " > : $ epsilon <nl> + DefaultValuedAttr < F32Attr , " 0 . 0001f " > : $ epsilon , <nl> + DefaultValuedAttr < F32Attr , " 0 . 
2f " > : $ leakyrelu_alpha <nl> ) ; <nl> <nl> let results = ( outs <nl> mmm a / tensorflow / compiler / mlir / tensorflow / ir / tf_ops . td <nl> ppp b / tensorflow / compiler / mlir / tensorflow / ir / tf_ops . td <nl> must be a Tensor or a list / tuple of Tensors . <nl> TF_DerivedResultTypeListAttr Tout = TF_DerivedResultTypeListAttr < 0 > ; <nl> } <nl> <nl> + def TF_AddV2Op : TF_Op < " AddV2 " , [ Commutative , NoSideEffect , ResultsBroadcastableShape , TF_CwiseBinary , TF_LayoutAgnostic , TF_SameOperandsAndResultElementTypeResolveRef ] > , <nl> + WithBroadcastableBinOpBuilder { <nl> + let summary = " Returns x + y element - wise . " ; <nl> + <nl> + let description = [ { <nl> + * NOTE * : ` Add ` supports broadcasting . ` AddN ` does not . More about broadcasting <nl> + [ here ] ( http : / / docs . scipy . org / doc / numpy / user / basics . broadcasting . html ) <nl> + } ] ; <nl> + <nl> + let arguments = ( ins <nl> + TensorOf < [ TF_Float , TF_SInt , TF_Complex , TF_Uint8 , TF_FloatRef , TF_SIntRef , TF_ComplexRef , TF_Uint8Ref ] > : $ x , <nl> + TensorOf < [ TF_Float , TF_SInt , TF_Complex , TF_Uint8 , TF_FloatRef , TF_SIntRef , TF_ComplexRef , TF_Uint8Ref ] > : $ y <nl> + ) ; <nl> + <nl> + let results = ( outs <nl> + TensorOf < [ TF_Float , TF_SInt , TF_Complex , TF_Uint8 , TF_FloatRef , TF_SIntRef , TF_ComplexRef , TF_Uint8Ref ] > : $ z <nl> + ) ; <nl> + <nl> + TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr < 0 > ; <nl> + <nl> + let hasCanonicalizer = 1 ; <nl> + <nl> + let hasFolder = 1 ; <nl> + } <nl> + <nl> + def TF_DivNoNanOp : TF_Op < " DivNoNan " , [ NoSideEffect , ResultsBroadcastableShape , TF_SameOperandsAndResultElementTypeResolveRef ] > , <nl> + WithBroadcastableBinOpBuilder { <nl> + let summary = " Returns 0 if the denominator is zero . " ; <nl> + <nl> + let description = [ { <nl> + * NOTE * : ` DivNoNan ` supports broadcasting . More about broadcasting <nl> + [ here ] ( http : / / docs . scipy . org / doc / numpy / user / basics . broadcasting . html ) <nl> + } ] ; <nl> + <nl> + let arguments = ( ins <nl> + TensorOf < [ F16 , F32 , F64 , TF_Complex , TF_F16Ref , TF_F32Ref , TF_F64Ref , TF_ComplexRef ] > : $ x , <nl> + TensorOf < [ F16 , F32 , F64 , TF_Complex , TF_F16Ref , TF_F32Ref , TF_F64Ref , TF_ComplexRef ] > : $ y <nl> + ) ; <nl> + <nl> + let results = ( outs <nl> + TensorOf < [ F16 , F32 , F64 , TF_Complex , TF_F16Ref , TF_F32Ref , TF_F64Ref , TF_ComplexRef ] > : $ z <nl> + ) ; <nl> + <nl> + TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr < 0 > ; <nl> + } <nl> + <nl> + def TF_MaximumOp : TF_Op < " Maximum " , [ NoSideEffect , ResultsBroadcastableShape , TF_SameOperandsAndResultElementTypeResolveRef ] > , <nl> + WithBroadcastableBinOpBuilder { <nl> + let summary = " Returns the max of x and y ( i . e . x > y ? x : y ) element - wise . " ; <nl> + <nl> + let description = [ { <nl> + * NOTE * : ` Maximum ` supports broadcasting . More about broadcasting <nl> + [ here ] ( http : / / docs . scipy . org / doc / numpy / user / basics . broadcasting . 
html ) <nl> + } ] ; <nl> + <nl> + let arguments = ( ins <nl> + TensorOf < [ TF_Float , I16 , I32 , I64 , TF_Uint8 , TF_FloatRef , TF_Int16Ref , TF_Int32Ref , TF_Int64Ref , TF_Uint8Ref ] > : $ x , <nl> + TensorOf < [ TF_Float , I16 , I32 , I64 , TF_Uint8 , TF_FloatRef , TF_Int16Ref , TF_Int32Ref , TF_Int64Ref , TF_Uint8Ref ] > : $ y <nl> + ) ; <nl> + <nl> + let results = ( outs <nl> + TensorOf < [ TF_Float , I16 , I32 , I64 , TF_Uint8 , TF_FloatRef , TF_Int16Ref , TF_Int32Ref , TF_Int64Ref , TF_Uint8Ref ] > : $ z <nl> + ) ; <nl> + <nl> + TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr < 0 > ; <nl> + } <nl> + <nl> + def TF_RealDivOp : TF_Op < " RealDiv " , [ NoSideEffect , ResultsBroadcastableShape , TF_CwiseBinary ] > , <nl> + WithBroadcastableBinOpBuilder { <nl> + let summary = " Returns x / y element - wise for real types . " ; <nl> + <nl> + let description = [ { <nl> + If ` x ` and ` y ` are reals , this will return the floating - point division . <nl> + <nl> + * NOTE * : ` Div ` supports broadcasting . More about broadcasting <nl> + [ here ] ( http : / / docs . scipy . org / doc / numpy / user / basics . broadcasting . html ) <nl> + } ] ; <nl> + <nl> + let arguments = ( ins <nl> + TensorOf < [ TF_Float , TF_SInt , TF_Complex , TF_Uint16 , TF_Uint8 , TF_FloatRef , TF_SIntRef , TF_ComplexRef , TF_Uint16Ref , TF_Uint8Ref ] > : $ x , <nl> + TensorOf < [ TF_Float , TF_SInt , TF_Complex , TF_Uint16 , TF_Uint8 , TF_FloatRef , TF_SIntRef , TF_ComplexRef , TF_Uint16Ref , TF_Uint8Ref ] > : $ y <nl> + ) ; <nl> + <nl> + let results = ( outs <nl> + TensorOf < [ TF_Float , TF_SInt , TF_Complex , TF_Uint16 , TF_Uint8 , TF_FloatRef , TF_SIntRef , TF_ComplexRef , TF_Uint16Ref , TF_Uint8Ref ] > : $ z <nl> + ) ; <nl> + <nl> + TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr < 0 > ; <nl> + <nl> + let hasCanonicalizer = 1 ; <nl> + <nl> + let hasFolder = 1 ; <nl> + } <nl> + <nl> # endif / / TF_OPS <nl> mmm a / tensorflow / compiler / mlir / tensorflow / tests / constant - fold . mlir <nl> ppp b / tensorflow / compiler / mlir / tensorflow / tests / constant - fold . mlir <nl> func @ DontFoldTile ( ) - > ( tensor < 8x10000xi32 > ) { <nl> return % 3 : tensor < 8x10000xi32 > <nl> } <nl> / / LINT . ThenChange ( . . / transforms / constant_fold . cc : folding - policy ) <nl> + <nl> + func @ fold_conv ( ) - > tensor < 1x520x520x1xf32 > { <nl> + % 0 = " tf . Const " ( ) { value = dense < 0 . 111111112 > : tensor < 3x3x1x1xf32 > } : ( ) - > tensor < 3x3x1x1xf32 > <nl> + % 1 = " tf . Const " ( ) { value = dense < 1 . 000000e + 00 > : tensor < 1x520x520x1xf32 > } : ( ) - > tensor < 1x520x520x1xf32 > <nl> + % 2 = " tf . DepthwiseConv2dNative " ( % 1 , % 0 ) { data_format = " NHWC " , device = " " , dilations = [ 1 , 1 , 1 , 1 ] , explicit_paddings = [ ] , padding = " SAME " , strides = [ 1 , 1 , 1 , 1 ] } : ( tensor < 1x520x520x1xf32 > , tensor < 3x3x1x1xf32 > ) - > tensor < 1x520x520x1xf32 > <nl> + return % 2 : tensor < 1x520x520x1xf32 > <nl> + <nl> + / / CHECK : tf . Const <nl> + / / CHECK - NOT : tf . DepthwiseConv2dNative <nl> + } <nl> mmm a / tensorflow / compiler / mlir / tensorflow / tests / tf - ops . mlir <nl> ppp b / tensorflow / compiler / mlir / tensorflow / tests / tf - ops . mlir <nl> func @ testTileInvalidOutputShape ( % arg0 : tensor < 2x3xf32 > ) { <nl> % 0 = " tf . 
Tile " ( % arg0 , % cst ) : ( tensor < 2x3xf32 > , tensor < 2xi32 > ) - > tensor < 4x6xf32 > <nl> return <nl> } <nl> + <nl> + / / mmm - - <nl> + <nl> + / / Test reference variable support for some ops ( no errors expected ) <nl> + <nl> + / / CHECK - LABEL : @ testMaximumWithRef <nl> + func @ testMaximumWithRef ( % arg0 : tensor < ! tf . f32ref > , % arg1 : tensor < f32 > ) - > tensor < f32 > { <nl> + / / CHECK : tf . Maximum <nl> + % 0 = " tf . Maximum " ( % arg0 , % arg1 ) : ( tensor < ! tf . f32ref > , tensor < f32 > ) - > tensor < f32 > <nl> + return % 0 : tensor < f32 > <nl> + } <nl> + <nl> + / / CHECK - LABEL : @ testAddV2WithRef <nl> + func @ testAddV2WithRef ( % arg0 : tensor < ! tf . int16ref > , % arg1 : tensor < i16 > ) - > tensor < i16 > { <nl> + / / CHECK : tf . AddV2 <nl> + % 0 = " tf . AddV2 " ( % arg0 , % arg1 ) : ( tensor < ! tf . int16ref > , tensor < i16 > ) - > tensor < i16 > <nl> + return % 0 : tensor < i16 > <nl> + } <nl> + <nl> + / / CHECK - LABEL : @ testRealDivWithRef <nl> + func @ testRealDivWithRef ( % arg0 : tensor < f64 > , % arg1 : tensor < ! tf . f64ref > ) - > tensor < f64 > { <nl> + / / CHECK : tf . RealDivOp <nl> + % 0 = " tf . RealDivOp " ( % arg0 , % arg1 ) : ( tensor < f64 > , tensor < ! tf . f64ref > ) - > tensor < f64 > <nl> + return % 0 : tensor < f64 > <nl> + } <nl> + <nl> + / / CHECK - LABEL : @ testDivNoNanWithRef <nl> + func @ testDivNoNanWithRef ( % arg0 : tensor < f32 > , % arg1 : tensor < ! tf . f32ref > ) - > tensor < f32 > { <nl> + / / CHECK : tf . DivNoNanOp <nl> + % 0 = " tf . DivNoNanOp " ( % arg0 , % arg1 ) : ( tensor < f32 > , tensor < ! tf . f32ref > ) - > tensor < f32 > <nl> + return % 0 : tensor < f32 > <nl> + } <nl> mmm a / tensorflow / compiler / mlir / tensorflow / tests / tpu_extract_head_tail_outside_compilation . mlir <nl> ppp b / tensorflow / compiler / mlir / tensorflow / tests / tpu_extract_head_tail_outside_compilation . mlir <nl> module attributes { tf . versions = { producer = 888 : i32 } , tf . devices = [ " / job : wor <nl> func @ tail_single_outside_compiled_op ( ) { <nl> / / CHECK : % [ [ CLUSTER_OUT : . * ] ] = " tf_device . cluster " <nl> / / CHECK - NEXT : % [ [ A_OUT : . * ] ] = " tf . A " <nl> - / / CHECK - NEXT : " tf . C " <nl> + / / CHECK - NEXT : " tf . NoOp " <nl> / / CHECK - NEXT : tf_device . return % [ [ A_OUT ] ] <nl> / / CHECK - NEXT : { <nl> / / CHECK - DAG : num_cores_per_replica = 1 <nl> module attributes { tf . versions = { producer = 888 : i32 } , tf . devices = [ " / job : wor <nl> " tf_device . cluster " ( ) ( { <nl> % a = " tf . A " ( ) : ( ) - > tensor < i32 > <nl> " tf . B " ( % a ) { _xla_outside_compilation = " cluster1 " } : ( tensor < i32 > ) - > ( ) <nl> - " tf . C " ( ) : ( ) - > ( ) <nl> + " tf . NoOp " ( ) : ( ) - > ( ) <nl> tf_device . return <nl> } ) { num_cores_per_replica = 1 , step_marker_location = " " , padding_map = [ ] , topology = " " , device_assignment = [ ] } : ( ) - > ( ) <nl> return <nl> module attributes { tf . versions = { producer = 888 : i32 } , tf . devices = [ " / job : wor <nl> func @ tail_single_outside_compiled_op_user ( ) - > tensor < i32 > { <nl> / / CHECK : % [ [ CLUSTER_OUT : . * ] ] = " tf_device . cluster " <nl> / / CHECK - NEXT : % [ [ A_OUT : . * ] ] = " tf . A " <nl> - / / CHECK - NEXT : " tf . C " <nl> + / / CHECK - NEXT : " tf . NoOp " <nl> / / CHECK - NEXT : tf_device . return % [ [ A_OUT ] ] <nl> / / CHECK - NEXT : { <nl> / / CHECK - DAG : num_cores_per_replica = 1 <nl> module attributes { tf . versions = { producer = 888 : i32 } , tf . 
devices = [ " / job : wor <nl> % cluster = " tf_device . cluster " ( ) ( { <nl> % a = " tf . A " ( ) : ( ) - > tensor < i32 > <nl> % b = " tf . B " ( % a ) { _xla_outside_compilation = " cluster1 " } : ( tensor < i32 > ) - > tensor < i32 > <nl> - " tf . C " ( ) : ( ) - > ( ) <nl> + " tf . NoOp " ( ) : ( ) - > ( ) <nl> tf_device . return % b : tensor < i32 > <nl> } ) { num_cores_per_replica = 1 , step_marker_location = " " , padding_map = [ ] , topology = " " , device_assignment = [ ] } : ( ) - > tensor < i32 > <nl> / / CHECK : return % [ [ LAUNCH_OUT ] ] <nl> module attributes { tf . versions = { producer = 888 : i32 } , tf . devices = [ " / job : wor <nl> % b = " tf . B " ( ) : ( ) - > tensor < i32 > <nl> / / CHECK : % [ [ CLUSTER_OUT : . * ] ] : 2 = " tf_device . cluster " <nl> / / CHECK - NEXT : % [ [ C_OUT : . * ] ] = " tf . C " <nl> - / / CHECK - NEXT : % [ [ E_OUT : . * ] ] = " tf . E " <nl> + / / CHECK - NEXT : % [ [ E_OUT : . * ] ] = " tf . Const " <nl> / / CHECK - NEXT : tf_device . return % [ [ C_OUT ] ] , % [ [ E_OUT ] ] <nl> / / CHECK - NEXT : { <nl> / / CHECK - DAG : num_cores_per_replica = 1 <nl> module attributes { tf . versions = { producer = 888 : i32 } , tf . devices = [ " / job : wor <nl> % cluster : 5 = " tf_device . cluster " ( ) ( { <nl> % c = " tf . C " ( ) : ( ) - > tensor < i32 > <nl> % d = " tf . D " ( % c , % a ) { _xla_outside_compilation = " cluster1 " } : ( tensor < i32 > , tensor < i32 > ) - > tensor < i32 > <nl> - % e = " tf . E " ( ) : ( ) - > tensor < i32 > <nl> + % e = " tf . Const " ( ) { value = dense < 0 > : tensor < i32 > } : ( ) - > tensor < i32 > <nl> tf_device . return % a , % b , % c , % d , % e : tensor < i32 > , tensor < i32 > , tensor < i32 > , tensor < i32 > , tensor < i32 > <nl> } ) { num_cores_per_replica = 1 , step_marker_location = " " , padding_map = [ ] , topology = " " , device_assignment = [ ] } : ( ) - > ( tensor < i32 > , tensor < i32 > , tensor < i32 > , tensor < i32 > , tensor < i32 > ) <nl> / / CHECK : return % [ [ A_OUT ] ] , % [ [ B_OUT ] ] , % [ [ CLUSTER_OUT ] ] # 0 , % [ [ LAUNCH_OUT ] ] , % [ [ CLUSTER_OUT ] ] # 1 <nl> module attributes { tf . versions = { producer = 888 : i32 } , tf . devices = [ " / job : wor <nl> func @ head_tail_no_extraction_middle_outside_compiled_ops ( % arg0 : tensor < i32 > ) { <nl> / / CHECK - NOT : " tf_device . launch " <nl> / / CHECK : " tf_device . cluster " <nl> - / / CHECK - NEXT : " tf . A " <nl> + / / CHECK - NEXT : " tf . Identity " <nl> / / CHECK - NEXT : " tf . B " <nl> - / / CHECK - NEXT : " tf . C " <nl> + / / CHECK - NEXT : " tf . Identity " <nl> / / CHECK - NEXT : tf_device . return <nl> " tf_device . cluster " ( ) ( { <nl> - % a = " tf . A " ( % arg0 ) : ( tensor < i32 > ) - > tensor < i32 > <nl> + % a = " tf . Identity " ( % arg0 ) : ( tensor < i32 > ) - > tensor < i32 > <nl> % b = " tf . B " ( % a ) { _xla_outside_compilation = " cluster1 " } : ( tensor < i32 > ) - > tensor < i32 > <nl> - " tf . C " ( % b ) : ( tensor < i32 > ) - > ( ) <nl> + % c = " tf . Identity " ( % b ) : ( tensor < i32 > ) - > tensor < i32 > <nl> tf_device . return <nl> } ) { num_cores_per_replica = 1 , step_marker_location = " " , padding_map = [ ] , topology = " " , device_assignment = [ ] } : ( ) - > ( ) <nl> return <nl> module attributes { tf . versions = { producer = 888 : i32 } , tf . devices = [ " / job : wor <nl> / / CHECK : % [ [ CLUSTER_OUT : . * ] ] = " tf_device . cluster " <nl> / / CHECK - NEXT : % [ [ B_OUT : . * ] ] = " tf . B " <nl> / / CHECK - NEXT : % [ [ C_OUT : . * ] ] = " tf . 
C " ( % [ [ RI ] ] , % [ [ B_OUT ] ] ) <nl> - / / CHECK - NEXT : " tf . E " ( % [ [ C_OUT ] ] , % [ [ HEAD_LAUNCH_OUT ] ] ) <nl> + / / CHECK - NEXT : " tf . IdentityN " ( % [ [ C_OUT ] ] , % [ [ HEAD_LAUNCH_OUT ] ] ) <nl> / / CHECK - NEXT : tf_device . return % [ [ C_OUT ] ] <nl> / / CHECK - NEXT : { <nl> / / CHECK - DAG : num_cores_per_replica = 1 <nl> module attributes { tf . versions = { producer = 888 : i32 } , tf . devices = [ " / job : wor <nl> % b = " tf . B " ( ) : ( ) - > tensor < i32 > <nl> % c = " tf . C " ( % ri , % b ) { _xla_outside_compilation = " cluster1 " } : ( tensor < i32 > , tensor < i32 > ) - > tensor < i32 > <nl> % d = " tf . D " ( % a , % c , % ri ) { _xla_outside_compilation = " cluster1 " } : ( tensor < i32 > , tensor < i32 > , tensor < i32 > ) - > tensor < i32 > <nl> - % e = " tf . E " ( % c , % a ) : ( tensor < i32 > , tensor < i32 > ) - > tensor < i32 > <nl> + % e : 2 = " tf . IdentityN " ( % c , % a ) : ( tensor < i32 > , tensor < i32 > ) - > ( tensor < i32 > , tensor < i32 > ) <nl> tf_device . return <nl> } ) { num_cores_per_replica = 1 , step_marker_location = " " , padding_map = [ ] , topology = " " , device_assignment = [ ] } : ( ) - > ( ) <nl> tf_device . return <nl> } <nl> return <nl> } <nl> + <nl> + / / CHECK - LABEL : func @ side_effect_middle <nl> + func @ side_effect_middle ( ) { <nl> + / / CHECK : " tf_device . cluster " <nl> + / / CHECK - NEXT : " tf . A " <nl> + / / CHECK - NEXT : " tf . B " <nl> + / / CHECK - NEXT : " tf . C " <nl> + / / CHECK - NEXT : tf_device . return <nl> + " tf_device . cluster " ( ) ( { <nl> + " tf . A " ( ) : ( ) - > ( ) <nl> + " tf . B " ( ) { _xla_outside_compilation = " cluster1 " } : ( ) - > ( ) <nl> + " tf . C " ( ) : ( ) - > ( ) <nl> + tf_device . return <nl> + } ) { num_cores_per_replica = 1 , step_marker_location = " " , padding_map = [ ] , topology = " " , device_assignment = [ ] } : ( ) - > ( ) <nl> + return <nl> + } <nl> + <nl> + / / CHECK - LABEL : func @ side_effect_head_no_operand <nl> + func @ side_effect_head_no_operand ( ) { <nl> + / / CHECK : % [ [ HEAD_LAUNCH_OUT : . * ] ] = " tf_device . launch " ( ) <nl> + / / CHECK - NEXT : " tf . B " <nl> + / / CHECK - NEXT : % [ [ C_OUT : . * ] ] = " tf . C " <nl> + / / CHECK - NEXT : tf_device . return % [ [ C_OUT ] ] <nl> + / / CHECK - NEXT : device = " / job : worker / replica : 0 / task : 0 / device : CPU : 0 " <nl> + <nl> + / / CHECK : " tf_device . cluster " <nl> + / / CHECK - NEXT : " tf . Const " <nl> + / / CHECK - NEXT : " tf . D " ( % [ [ HEAD_LAUNCH_OUT ] ] ) <nl> + / / CHECK - NEXT : tf_device . return <nl> + <nl> + " tf_device . cluster " ( ) ( { <nl> + % cst = " tf . Const " ( ) { value = dense < 0 > : tensor < i32 > } : ( ) - > tensor < i32 > <nl> + " tf . B " ( ) { _xla_outside_compilation = " cluster1 " } : ( ) - > ( ) <nl> + % c = " tf . C " ( ) { _xla_outside_compilation = " cluster1 " } : ( ) - > tensor < i32 > <nl> + " tf . D " ( % c ) : ( tensor < i32 > ) - > ( ) <nl> + tf_device . return <nl> + } ) { num_cores_per_replica = 1 , step_marker_location = " " , padding_map = [ ] , topology = " " , device_assignment = [ ] } : ( ) - > ( ) <nl> + return <nl> + } <nl> + <nl> + / / CHECK - LABEL : func @ side_effect_tail_no_operand <nl> + func @ side_effect_tail_no_operand ( ) { <nl> + / / CHECK : % [ [ CLUSTER_OUT : . * ] ] = " tf_device . cluster " <nl> + / / CHECK - NEXT : % [ [ A_OUT : . * ] ] = " tf . A " <nl> + / / CHECK - NEXT : " tf . Const " <nl> + / / CHECK - NEXT : tf_device . return % [ [ A_OUT ] ] <nl> + <nl> + / / CHECK : " tf_device . 
launch " ( ) <nl> + / / CHECK - NEXT : " tf . B " ( % [ [ CLUSTER_OUT ] ] ) <nl> + / / CHECK - NEXT : " tf . C " <nl> + / / CHECK - NEXT : tf_device . return <nl> + / / CHECK - NEXT : device = " / job : worker / replica : 0 / task : 0 / device : CPU : 0 " <nl> + " tf_device . cluster " ( ) ( { <nl> + % a = " tf . A " ( ) : ( ) - > tensor < i32 > <nl> + " tf . B " ( % a ) { _xla_outside_compilation = " cluster1 " } : ( tensor < i32 > ) - > ( ) <nl> + " tf . C " ( ) { _xla_outside_compilation = " cluster1 " } : ( ) - > ( ) <nl> + % cst = " tf . Const " ( ) { value = dense < 0 > : tensor < i32 > } : ( ) - > tensor < i32 > <nl> + tf_device . return <nl> + } ) { num_cores_per_replica = 1 , step_marker_location = " " , padding_map = [ ] , topology = " " , device_assignment = [ ] } : ( ) - > ( ) <nl> + return <nl> + } <nl> } <nl> mmm a / tensorflow / compiler / mlir / tensorflow / transforms / tpu_extract_head_tail_outside_compilation . cc <nl> ppp b / tensorflow / compiler / mlir / tensorflow / transforms / tpu_extract_head_tail_outside_compilation . cc <nl> limitations under the License . <nl> # include " mlir / IR / Attributes . h " / / from @ llvm - project <nl> # include " mlir / IR / Block . h " / / from @ llvm - project <nl> # include " mlir / IR / Builders . h " / / from @ llvm - project <nl> + # include " mlir / IR / Function . h " / / from @ llvm - project <nl> # include " mlir / IR / Operation . h " / / from @ llvm - project <nl> # include " mlir / IR / Value . h " / / from @ llvm - project <nl> # include " mlir / IR / Visitors . h " / / from @ llvm - project <nl> limitations under the License . <nl> # include " mlir / Pass / PassRegistry . h " / / from @ llvm - project <nl> # include " mlir / Support / LogicalResult . h " / / from @ llvm - project <nl> # include " mlir / Transforms / RegionUtils . h " / / from @ llvm - project <nl> + # include " tensorflow / compiler / mlir / tensorflow / analysis / side_effect_analysis . h " <nl> # include " tensorflow / compiler / mlir / tensorflow / ir / tf_device . h " <nl> # include " tensorflow / compiler / mlir / tensorflow / ir / tf_structs . h " <nl> # include " tensorflow / compiler / mlir / tensorflow / transforms / passes . h " <nl> tf_device : : LaunchOp CreateLaunchForBlock ( OpBuilder * builder , Operation * op , <nl> / / computation or other ops that can be extracted , and have no operands from <nl> / / other ops in the TPU computation that cannot be extracted . <nl> llvm : : SmallVector < Operation * , 4 > FindOutsideCompiledOpsAtHead ( <nl> + const TF : : SideEffectAnalysis & side_effect_analysis , <nl> tf_device : : ClusterOp cluster ) { <nl> + const auto & analysis = side_effect_analysis . GetAnalysisForFunc ( <nl> + cluster . getParentOfType < FuncOp > ( ) ) ; <nl> Region * cluster_region = & cluster . body ( ) ; <nl> llvm : : SmallSetVector < Operation * , 4 > head_outside_compiled_ops ; <nl> <nl> llvm : : SmallVector < Operation * , 4 > FindOutsideCompiledOpsAtHead ( <nl> if ( ! HasOutsideCompilationAttribute ( & cluster_op ) ) continue ; <nl> / / An outside compiled op can be extracted if its operands are not from <nl> / / other ops in the cluster that cannot be extracted . <nl> + <nl> + / / Check if the side effecting op right before this side effecting op , if <nl> + / / it is side effecting , can be head extracted . Because of op ordering due <nl> + / / to side effects , if this is not true , this op cannot be head extracted . <nl> + auto predecessors = analysis . DirectControlPredecessors ( & cluster_op ) ; <nl> + if ( ! 
predecessors . empty ( ) & & <nl> + ! head_outside_compiled_ops . contains ( predecessors . back ( ) ) ) <nl> + continue ; <nl> + <nl> auto walk_result = cluster_op . walk ( [ & ] ( Operation * op ) { <nl> for ( Value operand : op - > getOperands ( ) ) { <nl> Operation * operand_op = GetOpOfValue ( operand ) ; <nl> void CreateHeadComputation ( OpBuilder * builder , tf_device : : ClusterOp cluster , <nl> / / Extracts and move outside compiled ops that have no dependencies in the <nl> / / cluster to before the cluster . <nl> mlir : : LogicalResult LiftHeadOutsideCompiledOps ( <nl> - OpBuilder * builder , const mlir : : TF : : RuntimeDevices & devices , <nl> - tf_device : : ClusterOp cluster , std : : string * host_device , <nl> - bool * cluster_updated ) { <nl> + OpBuilder * builder , const TF : : SideEffectAnalysis & side_effect_analysis , <nl> + const mlir : : TF : : RuntimeDevices & devices , tf_device : : ClusterOp cluster , <nl> + std : : string * host_device , bool * cluster_updated ) { <nl> llvm : : SmallVector < Operation * , 4 > head_outside_compiled_ops = <nl> - FindOutsideCompiledOpsAtHead ( cluster ) ; <nl> + FindOutsideCompiledOpsAtHead ( side_effect_analysis , cluster ) ; <nl> if ( head_outside_compiled_ops . empty ( ) ) return success ( ) ; <nl> if ( failed ( tensorflow : : GetHostDeviceOutsideComputation ( devices , cluster , <nl> host_device ) ) ) <nl> mlir : : LogicalResult LiftHeadOutsideCompiledOps ( <nl> / / TPU computation or other ops that can be extracted , and have no results used <nl> / / by other ops in the TPU computation that cannot be extracted . <nl> void FindOutsideCompiledOpsAtTailAndClusterResults ( <nl> + const TF : : SideEffectAnalysis & side_effect_analysis , <nl> tf_device : : ClusterOp cluster , <nl> llvm : : SmallVectorImpl < Operation * > * tail_outside_compiled_ops , <nl> llvm : : SmallVectorImpl < Value > * cluster_results ) { <nl> + const auto & analysis = side_effect_analysis . GetAnalysisForFunc ( <nl> + cluster . getParentOfType < FuncOp > ( ) ) ; <nl> Region * cluster_region = & cluster . body ( ) ; <nl> llvm : : SmallSetVector < Operation * , 4 > tail_outside_compiled_ops_set ; <nl> Operation * terminator = cluster . GetBody ( ) . getTerminator ( ) ; <nl> void FindOutsideCompiledOpsAtTailAndClusterResults ( <nl> for ( Operation & cluster_op : cluster_ops ) { <nl> if ( ! HasOutsideCompilationAttribute ( & cluster_op ) ) continue ; <nl> <nl> + / / Check if the side effecting op right after this side effecting op , if <nl> + / / it is side effecting , can be tail extracted . Because of op ordering due <nl> + / / to side effects , if this is not true , this op cannot be tail extracted . <nl> + auto successors = analysis . DirectControlSuccessors ( <nl> + & cluster_op , [ & terminator ] ( Operation * op ) { return op ! = terminator ; } ) ; <nl> + if ( ! successors . empty ( ) & & <nl> + ! tail_outside_compiled_ops_set . contains ( successors . front ( ) ) ) <nl> + continue ; <nl> + <nl> llvm : : SmallVector < int , 4 > results_to_forward ; <nl> bool can_be_extracted = <nl> llvm : : all_of ( cluster_op . getUsers ( ) , [ & ] ( Operation * op ) { <nl> tf_device : : ClusterOp UpdateClusterResults ( <nl> / / Extracts and move outside compiled ops that do not create dependencies in the <nl> / / cluster to after the cluster . 
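// A minimal sketch, for illustration only (not part of the commit): the check
// added above encodes the hoisting rule for side-effecting ops - an
// outside-compiled op may move to the head launch only if the side-effecting
// op ordered directly before it is also being hoisted, so the relative order
// of side effects is preserved. The standalone helper below is hypothetical;
// "analysis" stands in for the per-function TF::SideEffectAnalysis info used
// by the pass.
#include "llvm/ADT/SetVector.h"
#include "mlir/IR/Operation.h"

template <typename AnalysisInfo>
bool CanHoistToHead(
    const AnalysisInfo &analysis,
    const llvm::SmallSetVector<mlir::Operation *, 4> &already_hoisted,
    mlir::Operation *op) {
  auto predecessors = analysis.DirectControlPredecessors(op);
  // No side-effecting predecessor: ordering imposes no constraint on this op.
  if (predecessors.empty()) return true;
  // Otherwise the nearest predecessor must already be scheduled for hoisting.
  return already_hoisted.contains(predecessors.back());
}
// The tail direction is symmetric: the nearest DirectControlSuccessors entry
// must already be in the tail set, as the tail-extraction code below checks.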
<nl> mlir : : LogicalResult LiftTailOutsideCompiledOps ( <nl> - OpBuilder * builder , const mlir : : TF : : RuntimeDevices & devices , <nl> - std : : string host_device , tf_device : : ClusterOp * cluster , <nl> - bool * cluster_updated ) { <nl> + OpBuilder * builder , const TF : : SideEffectAnalysis & side_effect_analysis , <nl> + const mlir : : TF : : RuntimeDevices & devices , std : : string host_device , <nl> + tf_device : : ClusterOp * cluster , bool * cluster_updated ) { <nl> llvm : : SmallVector < Operation * , 4 > tail_outside_compiled_ops ; <nl> llvm : : SmallVector < Value , 4 > cluster_results ; <nl> - FindOutsideCompiledOpsAtTailAndClusterResults ( <nl> - * cluster , & tail_outside_compiled_ops , & cluster_results ) ; <nl> + FindOutsideCompiledOpsAtTailAndClusterResults ( side_effect_analysis , * cluster , <nl> + & tail_outside_compiled_ops , <nl> + & cluster_results ) ; <nl> if ( tail_outside_compiled_ops . empty ( ) ) return success ( ) ; <nl> <nl> if ( host_device . empty ( ) ) <nl> struct TPUExtractHeadTailOutsideCompilation <nl> } ; <nl> <nl> void TPUExtractHeadTailOutsideCompilation : : runOnOperation ( ) { <nl> + auto & side_effect_analysis = getAnalysis < TF : : SideEffectAnalysis > ( ) ; <nl> / / Get runtime devices information from the closest parent module . <nl> auto module = getOperation ( ) ; <nl> mlir : : TF : : RuntimeDevices devices ; <nl> void TPUExtractHeadTailOutsideCompilation : : runOnOperation ( ) { <nl> for ( tf_device : : ClusterOp cluster : clusters ) { <nl> std : : string host_device ; <nl> bool cluster_updated = false ; <nl> - if ( failed ( LiftHeadOutsideCompiledOps ( & builder , devices , cluster , <nl> - & host_device , & cluster_updated ) ) | | <nl> - failed ( LiftTailOutsideCompiledOps ( & builder , devices , host_device , <nl> - & cluster , & cluster_updated ) ) ) <nl> + if ( failed ( LiftHeadOutsideCompiledOps ( & builder , side_effect_analysis , <nl> + devices , cluster , & host_device , <nl> + & cluster_updated ) ) | | <nl> + failed ( LiftTailOutsideCompiledOps ( & builder , side_effect_analysis , <nl> + devices , host_device , & cluster , <nl> + & cluster_updated ) ) ) <nl> return signalPassFailure ( ) ; <nl> if ( cluster_updated ) RemoveClusterAliasedOutputs ( & builder , cluster ) ; <nl> } <nl> mmm a / tensorflow / compiler / tf2xla / kernels / strided_slice_op . cc <nl> ppp b / tensorflow / compiler / tf2xla / kernels / strided_slice_op . cc <nl> class StridedSliceAssignOp : public XlaOpKernel { <nl> <nl> TensorShape lhs_shape ; <nl> xla : : XlaOp lhs ; <nl> - OP_REQUIRES_OK ( ctx , ctx - > ReadVariableInput ( 0 , dtype_ , & lhs_shape , & lhs ) ) ; <nl> + if ( ctx - > input_type ( 0 ) = = DT_RESOURCE ) { <nl> + OP_REQUIRES_OK ( ctx , ctx - > ReadVariableInput ( 0 , dtype_ , & lhs_shape , & lhs ) ) ; <nl> + } else { <nl> + lhs_shape = ctx - > InputShape ( 0 ) ; <nl> + lhs = ctx - > Input ( 0 ) ; <nl> + } <nl> <nl> const TensorShape rhs_shape = ctx - > InputShape ( 4 ) ; <nl> <nl> class StridedSliceAssignOp : public XlaOpKernel { <nl> <nl> lhs = xla : : DynamicUpdateSlice ( lhs , rhs , slice_begin ) ; <nl> <nl> - OP_REQUIRES_OK ( ctx , ctx - > AssignVariable ( 0 , dtype_ , lhs ) ) ; <nl> + if ( ctx - > input_type ( 0 ) = = DT_RESOURCE ) { <nl> + OP_REQUIRES_OK ( ctx , ctx - > AssignVariable ( 0 , dtype_ , lhs ) ) ; <nl> + } else { <nl> + ctx - > SetOutput ( 0 , lhs ) ; <nl> + } <nl> } <nl> <nl> private : <nl> REGISTER_XLA_OP ( Name ( " ResourceStridedSliceAssign " ) <nl> . 
CompileTimeConstantInput ( " strides " ) , <nl> StridedSliceAssignOp ) ; <nl> <nl> + REGISTER_XLA_OP ( Name ( " TensorStridedSliceUpdate " ) <nl> + . CompileTimeConstantInput ( " begin " ) <nl> + . CompileTimeConstantInput ( " end " ) <nl> + . CompileTimeConstantInput ( " strides " ) , <nl> + StridedSliceAssignOp ) ; <nl> + <nl> } / / namespace <nl> } / / namespace tensorflow <nl> mmm a / tensorflow / compiler / xla / python / jax_jit . cc <nl> ppp b / tensorflow / compiler / xla / python / jax_jit . cc <nl> class CompiledFunction { <nl> / / notification for the executable and others will wait until notified . <nl> / / It ' s safe because the first thread will be holding the GIL while <nl> / / initializing the ` Notification ` . <nl> - absl : : optional < absl : : Notification > first_compilation_complete_ = <nl> - absl : : nullopt ; <nl> + / / <nl> + / / absl : : optional < absl : : Notification > is not supported <nl> + bool first_compilation_started_ = false ; <nl> + absl : : Notification first_compilation_complete_ ; <nl> absl : : optional < std : : exception > first_compilation_error_ = absl : : nullopt ; <nl> } ; <nl> <nl> Status ConvertArgsToBuffers ( bool jax_enable_x64 , xla : : PyClient & pyclient , <nl> <nl> ArgSignature sig ; <nl> sig . dtype = buffer - > shape ( ) . element_type ( ) ; <nl> + sig . weak_type = false ; <nl> sig . shape . assign ( buffer - > shape ( ) . dimensions ( ) . begin ( ) , <nl> buffer - > shape ( ) . dimensions ( ) . end ( ) ) ; <nl> arguments . signature . dynamic_args_signatures . push_back ( sig ) ; <nl> py : : object CompiledFunction : : Call ( py : : args args , py : : kwargs kwargs ) { <nl> if ( ! default_device_ ) { <nl> / / TODO ( jblespiau ) : This code will deadlock if a jitted function <nl> / / recursively calls itself . <nl> - if ( first_compilation_complete_ ) { <nl> - if ( ! first_compilation_complete_ - > HasBeenNotified ( ) ) { <nl> + if ( first_compilation_started_ ) { <nl> + if ( ! first_compilation_complete_ . HasBeenNotified ( ) ) { <nl> py : : gil_scoped_release gil_release ; <nl> - first_compilation_complete_ - > WaitForNotification ( ) ; <nl> + first_compilation_complete_ . WaitForNotification ( ) ; <nl> if ( first_compilation_error_ ) { <nl> throw first_compilation_error_ . value ( ) ; <nl> } <nl> } <nl> } else { <nl> - first_compilation_complete_ . emplace ( ) ; <nl> + first_compilation_started_ = true ; <nl> try { <nl> cache_miss_result = cache_miss_fun_ ( * args , * * kwargs ) ; <nl> } catch ( const std : : exception & e ) { <nl> first_compilation_error_ = e ; <nl> - first_compilation_complete_ - > Notify ( ) ; <nl> + first_compilation_complete_ . Notify ( ) ; <nl> throw ; <nl> } <nl> auto executable = py : : cast < std : : shared_ptr < xla : : PyExecutable > > ( <nl> py : : object CompiledFunction : : Call ( py : : args args , py : : kwargs kwargs ) { <nl> <nl> pyclient_ = executable - > client ( ) ; <nl> default_device_ = executable - > LocalDevices ( ) [ 0 ] . contents ; <nl> - first_compilation_complete_ - > Notify ( ) ; <nl> + first_compilation_complete_ . Notify ( ) ; <nl> } <nl> } <nl> <nl> mmm a / tensorflow / compiler / xla / python / xla . cc <nl> ppp b / tensorflow / compiler / xla / python / xla . 
cc <nl> class TraceMeWrapper : public tensorflow : : profiler : : TraceMeWrapper { <nl> void BuildProfilerSubmodule ( py : : module * m ) { <nl> py : : module profiler = <nl> m - > def_submodule ( " profiler " , " TensorFlow profiler integration " ) ; <nl> - py : : class_ < tensorflow : : ProfilerServer , <nl> - std : : unique_ptr < tensorflow : : ProfilerServer > > <nl> + py : : class_ < tensorflow : : profiler : : ProfilerServer , <nl> + std : : unique_ptr < tensorflow : : profiler : : ProfilerServer > > <nl> profiler_server_class ( profiler , " ProfilerServer " ) ; <nl> profiler . def ( <nl> " start_server " , <nl> - [ ] ( int port ) - > std : : unique_ptr < tensorflow : : ProfilerServer > { <nl> - auto server = absl : : make_unique < tensorflow : : ProfilerServer > ( ) ; <nl> + [ ] ( int port ) - > std : : unique_ptr < tensorflow : : profiler : : ProfilerServer > { <nl> + auto server = absl : : make_unique < tensorflow : : profiler : : ProfilerServer > ( ) ; <nl> server - > StartProfilerServer ( port ) ; <nl> return server ; <nl> } , <nl> mmm a / tensorflow / compiler / xla / service / BUILD <nl> ppp b / tensorflow / compiler / xla / service / BUILD <nl> cc_library ( <nl> " : hlo_dce " , <nl> " : hlo_pass " , <nl> " : hlo_pass_pipeline " , <nl> + " : hlo_verifier " , <nl> " : tuple_simplifier " , <nl> " / / tensorflow / compiler / xla : literal " , <nl> " / / tensorflow / compiler / xla : shape_util " , <nl> mmm a / tensorflow / compiler / xla / service / cholesky_expander . cc <nl> ppp b / tensorflow / compiler / xla / service / cholesky_expander . cc <nl> limitations under the License . <nl> <nl> namespace xla { <nl> <nl> - namespace { <nl> - <nl> / / The Cholesky – Banachiewicz algorithm . See <nl> / / https : / / en . wikipedia . org / wiki / Cholesky_decomposition # The_Cholesky – Banachiewicz_and_Cholesky – Crout_algorithms <nl> / / for a description . <nl> namespace { <nl> / / l = temp / l [ . . . , j , j ) * mask + l <nl> / / return l <nl> / / Returns a ( result , error ) pair . <nl> - std : : pair < XlaOp , XlaOp > CholeskyUnblocked ( <nl> + StatusOr < std : : pair < XlaOp , XlaOp > > CholeskyExpander : : CholeskyUnblocked ( <nl> XlaOp a , PrecisionConfig : : Precision precision ) { <nl> XlaBuilder * builder = a . builder ( ) ; <nl> - auto result = [ & ] ( ) - > StatusOr < std : : pair < XlaOp , XlaOp > > { <nl> - TF_ASSIGN_OR_RETURN ( Shape a_shape , builder - > GetShape ( a ) ) ; <nl> - const int n_dims = a_shape . rank ( ) ; <nl> - const int64 n = ShapeUtil : : GetDimension ( a_shape , - 1 ) ; <nl> - auto major_dims = AsInt64Slice ( a_shape . dimensions ( ) ) <nl> - . subspan ( <nl> - / * pos = * / 0 , <nl> - / * len = * / n_dims - 2 ) ; <nl> - <nl> - auto matrix_dims = AsInt64Slice ( a_shape . dimensions ( ) ) <nl> - . subspan ( <nl> - / * pos = * / 0 , <nl> - / * len = * / n_dims ) ; <nl> - <nl> - XlaOp l = ZerosLike ( a ) ; <nl> - <nl> - / / Construct the for loop body to iterate over rows . <nl> - auto body_fn = <nl> - [ & ] ( XlaOp i , absl : : Span < const XlaOp > loop_vars , <nl> - XlaBuilder * body_builder ) - > StatusOr < std : : vector < XlaOp > > { <nl> - std : : vector < int64 > row_shape_dims ( major_dims . begin ( ) , major_dims . end ( ) ) ; <nl> - std : : vector < int64 > col_shape_dims ( major_dims . begin ( ) , major_dims . 
end ( ) ) ; <nl> - auto body_a = loop_vars [ 0 ] ; <nl> - auto body_l = loop_vars [ 1 ] ; <nl> - auto seen_error = loop_vars [ 2 ] ; <nl> - auto iota_row = Iota ( body_builder , ShapeUtil : : MakeShape ( S32 , matrix_dims ) , <nl> - n_dims - 1 ) ; <nl> - auto iota_col = Iota ( body_builder , ShapeUtil : : MakeShape ( S32 , matrix_dims ) , <nl> - n_dims - 2 ) ; <nl> - <nl> - auto mask_pred = Ge ( iota_col , iota_row ) ; <nl> - mask_pred = And ( mask_pred , Eq ( iota_row , i ) ) ; <nl> - auto mask_zeros = <nl> - Zeros ( body_builder , <nl> - ShapeUtil : : MakeShape ( a_shape . element_type ( ) , matrix_dims ) ) ; <nl> - / / L * L . T , This matrix has of a lot of multiplying with zero <nl> - / / ( namely , L [ : , j : ] = 0 ) and redundant computation , but it is faster <nl> - / / than slice . <nl> - auto l_square = BatchDot ( body_l , false , body_l , true , precision ) ; <nl> - <nl> - / / A - L * L . T <nl> - l_square = body_a - l_square ; <nl> - auto l_ii = DynamicSliceInMinorDims ( l_square , { i , i } , { 1 , 1 } ) ; <nl> - l_ii = Sqrt ( l_ii ) ; <nl> - / / L = ( A - L * L . T ) / l_ii * mask + L <nl> - body_l = Select ( mask_pred , l_square / l_ii , mask_zeros ) + body_l ; <nl> - <nl> - seen_error = <nl> - Or ( seen_error , Any ( Or ( Le ( l_ii , ZerosLike ( l_ii ) ) , IsNan ( l_ii ) ) ) ) ; <nl> - <nl> - return std : : vector < XlaOp > { body_a , body_l , seen_error } ; <nl> - } ; <nl> - <nl> - TF_ASSIGN_OR_RETURN ( <nl> - auto cholesky_while , <nl> - ForEachIndex ( n , S32 , body_fn , { a , l , ConstantR0 < bool > ( builder , false ) } , <nl> - " unblocked " , builder ) ) ; <nl> - <nl> - return std : : make_pair ( cholesky_while [ 1 ] , cholesky_while [ 2 ] ) ; <nl> - } ( ) ; <nl> - if ( ! result . ok ( ) ) { <nl> - XlaOp error = builder - > ReportError ( result . status ( ) ) ; <nl> - return { error , error } ; <nl> - } <nl> - return result . ValueOrDie ( ) ; <nl> + TF_ASSIGN_OR_RETURN ( Shape a_shape , builder - > GetShape ( a ) ) ; <nl> + const int n_dims = a_shape . rank ( ) ; <nl> + const int64 n = ShapeUtil : : GetDimension ( a_shape , - 1 ) ; <nl> + auto major_dims = AsInt64Slice ( a_shape . dimensions ( ) ) <nl> + . subspan ( <nl> + / * pos = * / 0 , <nl> + / * len = * / n_dims - 2 ) ; <nl> + <nl> + auto matrix_dims = AsInt64Slice ( a_shape . dimensions ( ) ) <nl> + . subspan ( <nl> + / * pos = * / 0 , <nl> + / * len = * / n_dims ) ; <nl> + <nl> + XlaOp l = ZerosLike ( a ) ; <nl> + <nl> + / / Construct the for loop body to iterate over rows . <nl> + auto body_fn = [ & ] ( XlaOp i , absl : : Span < const XlaOp > loop_vars , <nl> + XlaBuilder * body_builder ) - > StatusOr < std : : vector < XlaOp > > { <nl> + std : : vector < int64 > row_shape_dims ( major_dims . begin ( ) , major_dims . end ( ) ) ; <nl> + std : : vector < int64 > col_shape_dims ( major_dims . begin ( ) , major_dims . end ( ) ) ; <nl> + auto body_a = loop_vars [ 0 ] ; <nl> + auto body_l = loop_vars [ 1 ] ; <nl> + auto seen_error = loop_vars [ 2 ] ; <nl> + auto iota_row = <nl> + Iota ( body_builder , ShapeUtil : : MakeShape ( S32 , matrix_dims ) , n_dims - 1 ) ; <nl> + auto iota_col = <nl> + Iota ( body_builder , ShapeUtil : : MakeShape ( S32 , matrix_dims ) , n_dims - 2 ) ; <nl> + <nl> + auto mask_pred = Ge ( iota_col , iota_row ) ; <nl> + mask_pred = And ( mask_pred , Eq ( iota_row , i ) ) ; <nl> + auto mask_zeros = <nl> + Zeros ( body_builder , <nl> + ShapeUtil : : MakeShape ( a_shape . element_type ( ) , matrix_dims ) ) ; <nl> + / / L * L . 
T . This matrix involves a lot of multiplication by zero <nl> + / / ( namely , L [ : , j : ] = 0 ) and redundant computation , but it is faster <nl> + / / than slicing . <nl> + auto l_square = BatchDot ( body_l , false , body_l , true , precision ) ; <nl> + <nl> + / / A - L * L . T <nl> + l_square = body_a - l_square ; <nl> + auto l_ii = DynamicSliceInMinorDims ( l_square , { i , i } , { 1 , 1 } ) ; <nl> + l_ii = Sqrt ( l_ii ) ; <nl> + / / L = ( A - L * L . T ) / l_ii * mask + L <nl> + body_l = Select ( mask_pred , l_square / l_ii , mask_zeros ) + body_l ; <nl> + <nl> + seen_error = <nl> + Or ( seen_error , Any ( Or ( Le ( l_ii , ZerosLike ( l_ii ) ) , IsNan ( l_ii ) ) ) ) ; <nl> + <nl> + return std : : vector < XlaOp > { body_a , body_l , seen_error } ; <nl> + } ; <nl> + <nl> + TF_ASSIGN_OR_RETURN ( <nl> + auto cholesky_while , <nl> + ForEachIndex ( n , S32 , body_fn , { a , l , ConstantR0 < bool > ( builder , false ) } , <nl> + " unblocked " , builder ) ) ; <nl> + <nl> + return std : : make_pair ( cholesky_while [ 1 ] , cholesky_while [ 2 ] ) ; <nl> } <nl> <nl> - XlaOp BuildCholesky ( XlaOp a , int64 block_size , <nl> - PrecisionConfig : : Precision precision ) { <nl> + XlaOp CholeskyExpander : : BuildCholesky ( XlaOp a , int64 block_size , <nl> + PrecisionConfig : : Precision precision ) { <nl> XlaBuilder * builder = a . builder ( ) ; <nl> return builder - > ReportErrorOrReturn ( [ & ] ( ) - > StatusOr < XlaOp > { <nl> TF_ASSIGN_OR_RETURN ( Shape a_shape , builder - > GetShape ( a ) ) ; <nl> XlaOp BuildCholesky ( XlaOp a , int64 block_size , <nl> " block_size argument to Cholesky must be > = 1 ; got % d " , block_size ) ; <nl> } <nl> <nl> + if ( n = = 1 ) { <nl> + return Sqrt ( a ) ; <nl> + } <nl> + <nl> / / Blocked left - looking Cholesky factorization . <nl> / / Algorithm 1 from <nl> / / Haidar , Azzam , et al . " High - performance Cholesky factorization for <nl> XlaOp BuildCholesky ( XlaOp a , int64 block_size , <nl> XlaOp seen_error = ConstantR0 < bool > ( builder , false ) ; <nl> for ( int64 i = 0 ; i < n ; i + = block_size ) { <nl> int64 k = std : : min ( block_size , n - i ) ; <nl> + auto panel = SliceInMinorDims ( a , { i , i } , { n , i + k } ) ; <nl> if ( i > 0 ) { <nl> / / TODO ( phawkins ) : consider implementing SYRK for the diagonal part of <nl> / / the panel . 
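For orientation, a minimal plain-C++ sketch of the scalar Cholesky-Banachiewicz recurrence that CholeskyUnblocked vectorizes with masked XLA ops. This is a hedged illustration, not TensorFlow code: it assumes row-major dense storage, and an empty result stands in for the seen_error flag used above.

#include <cmath>
#include <cstddef>
#include <vector>

// Computes the lower-triangular L with A = L * L^T for a symmetric positive
// definite n x n matrix `a` stored row-major. Returns an empty vector when a
// non-positive or NaN pivot appears (the analogue of seen_error above).
std::vector<double> CholeskyBanachiewicz(const std::vector<double>& a,
                                         std::size_t n) {
  std::vector<double> l(n * n, 0.0);
  for (std::size_t i = 0; i < n; ++i) {
    for (std::size_t j = 0; j <= i; ++j) {
      double sum = 0.0;
      for (std::size_t k = 0; k < j; ++k) sum += l[i * n + k] * l[j * n + k];
      if (i == j) {
        const double d = a[i * n + i] - sum;
        if (!(d > 0.0)) return {};  // non-SPD input or NaN pivot
        l[i * n + i] = std::sqrt(d);
      } else {
        l[i * n + j] = (a[i * n + j] - sum) / l[j * n + j];
      }
    }
  }
  return l;
}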
<nl> XlaOp BuildCholesky ( XlaOp a , int64 block_size , <nl> auto lhs = SliceInMinorDims ( l , { i , 0 } , { n , i } ) ; <nl> auto rhs = SliceInMinorDims ( l , { i , 0 } , { i + k , i } ) ; <nl> auto delta = BatchDot ( lhs , false , rhs , true , precision ) ; <nl> - auto before = SliceInMinorDims ( a , { i , i } , { n , i + k } ) ; <nl> - a = UpdateSliceInMinorDims ( a , before - delta , { i , i } ) ; <nl> + panel = panel - delta ; <nl> } <nl> <nl> / / l [ i : i + k , i : i + k ] = cholesky_unblocked ( a [ i : i + k , i : i + k ] ) <nl> - auto x = SliceInMinorDims ( a , { i , i } , { i + k , i + k } ) ; <nl> + auto x = SliceInMinorDims ( panel , { 0 , 0 } , { k , k } ) ; <nl> XlaOp factorized ; <nl> XlaOp factorized_error ; <nl> - std : : tie ( factorized , factorized_error ) = CholeskyUnblocked ( x , precision ) ; <nl> + TF_ASSIGN_OR_RETURN ( auto tile_output , CholeskyUnblocked ( x , precision ) ) ; <nl> + std : : tie ( factorized , factorized_error ) = tile_output ; <nl> seen_error = Or ( seen_error , factorized_error ) ; <nl> l = UpdateSliceInMinorDims ( l , factorized , { i , i } ) ; <nl> <nl> if ( i + k < n ) { <nl> / / l [ i + k : , i : i + k ] = <nl> / / trsm_right_transpose ( l [ i : i + k , i : i + k ] , a [ i + k : , i : i + k ] ) <nl> - auto panel = SliceInMinorDims ( a , { i + k , i } , { n , i + k } ) ; <nl> - auto update = <nl> - TriangularSolve ( factorized , panel , <nl> - / * left_side = * / false , <nl> - / * lower = * / true , <nl> - / * unit_diagonal = * / false , <nl> - / * transpose_a = * / TriangularSolveOptions : : TRANSPOSE ) ; <nl> + auto update = TriangularSolve ( <nl> + factorized , SliceInMinorDims ( panel , { k , 0 } , { n - i , k } ) , <nl> + / * left_side = * / false , <nl> + / * lower = * / true , <nl> + / * unit_diagonal = * / false , <nl> + / * transpose_a = * / TriangularSolveOptions : : TRANSPOSE ) ; <nl> l = UpdateSliceInMinorDims ( l , update , { i + k , i } ) ; <nl> } <nl> } <nl> XlaOp BuildCholesky ( XlaOp a , int64 block_size , <nl> } ) ; <nl> } <nl> <nl> - } / / namespace <nl> - <nl> bool CholeskyExpander : : InstructionMatchesPattern ( HloInstruction * instruction ) { <nl> return instruction - > opcode ( ) = = HloOpcode : : kCholesky ; <nl> } <nl> mmm a / tensorflow / compiler / xla / service / cholesky_expander . h <nl> ppp b / tensorflow / compiler / xla / service / cholesky_expander . h <nl> limitations under the License . <nl> # define TENSORFLOW_COMPILER_XLA_SERVICE_CHOLESKY_EXPANDER_H_ <nl> <nl> # include " absl / container / flat_hash_map . h " <nl> + # include " tensorflow / compiler / xla / client / xla_builder . h " <nl> # include " tensorflow / compiler / xla / service / op_expander_pass . h " <nl> <nl> namespace xla { <nl> class CholeskyExpander : public OpExpanderPass { <nl> StatusOr < HloInstruction * > ExpandInstruction ( <nl> HloInstruction * instruction ) override ; <nl> <nl> + virtual StatusOr < std : : pair < XlaOp , XlaOp > > CholeskyUnblocked ( <nl> + XlaOp a , PrecisionConfig : : Precision precision ) ; <nl> + <nl> private : <nl> + XlaOp BuildCholesky ( XlaOp a , int64 block_size , <nl> + PrecisionConfig : : Precision precision ) ; <nl> + <nl> / / Mapping from op signatures to existing computations . <nl> absl : : flat_hash_map < string , HloComputation * > computation_cache_ ; <nl> } ; <nl> mmm a / tensorflow / compiler / xla / service / conditional_code_motion . cc <nl> ppp b / tensorflow / compiler / xla / service / conditional_code_motion . cc <nl> limitations under the License . 
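Before the pass itself, a hedged illustration of the get-tuple-element index fixup that MoveInstructionIn gains below: once the element at `use_index` is folded into the conditional's branches, every get-tuple-element user with a larger index must slide down by one. `ShiftTupleIndices` is a hypothetical standalone helper written for illustration, not part of this change.

#include <vector>

// Mirrors the user->set_tuple_index(user->tuple_index() - 1) loop below:
// indices above the removed tuple element shift down by one; -1 means no
// element was removed, so the indices pass through unchanged.
std::vector<int> ShiftTupleIndices(std::vector<int> gte_indices,
                                   int use_index) {
  if (use_index == -1) return gte_indices;
  for (int& index : gte_indices) {
    if (index > use_index) --index;
  }
  return gte_indices;
}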
<nl> # include " tensorflow / compiler / xla / service / hlo_instructions . h " <nl> # include " tensorflow / compiler / xla / service / hlo_opcode . h " <nl> # include " tensorflow / compiler / xla / service / hlo_pass_pipeline . h " <nl> + # include " tensorflow / compiler / xla / service / hlo_verifier . h " <nl> # include " tensorflow / compiler / xla / service / tuple_simplifier . h " <nl> # include " tensorflow / compiler / xla / shape_util . h " <nl> # include " tensorflow / compiler / xla / status_macros . h " <nl> int64 ReusesCarriedBy ( HloInstruction * op , HloInstruction * user ) { <nl> case HloOpcode : : kConstant : <nl> case HloOpcode : : kGetTupleElement : <nl> return 0 ; <nl> + case HloOpcode : : kConditional : <nl> + return 10 ; <nl> default : <nl> / / Assume fusion will not happen anyway if user count > 1 ) <nl> if ( op - > user_count ( ) > 1 ) { <nl> StatusOr < bool > ConditionalCodeMotion : : MoveInstructionIn ( <nl> / / to replace the conditional directly in the new computation . <nl> b_opd_use . mutable_operands ( ) . push_back ( conditional ) ; <nl> } <nl> + <nl> HloInstruction * new_root = <nl> computation - > AddInstruction ( HloInstruction : : CreateTuple ( operands ) ) ; <nl> VLOG ( 2 ) < < " setting new root : " < < new_root - > ToString ( ) < < " \ n " ; <nl> StatusOr < bool > ConditionalCodeMotion : : MoveInstructionIn ( <nl> } <nl> VLOG ( 2 ) < < " new branch computation : " < < computation - > ToString ( ) < < " \ n " ; <nl> } <nl> + / / Update get tuple element index of the conditional . <nl> + if ( use_index ! = - 1 ) { <nl> + for ( auto * user : conditional - > users ( ) ) { <nl> + if ( user - > opcode ( ) = = HloOpcode : : kGetTupleElement & & <nl> + user - > tuple_index ( ) > use_index ) { <nl> + user - > set_tuple_index ( user - > tuple_index ( ) - 1 ) ; <nl> + } <nl> + } <nl> + } <nl> hoisted_instructions [ conditional ] = b_old_root ; <nl> int64 cp_start = 0 ; <nl> if ( use_index > = 0 ) { <nl> class GroupConnectedBoundaries { <nl> : conditional_ ( conditional ) , <nl> conditional_parent_ ( conditional - > parent ( ) ) , <nl> is_layout_sensitive_ ( is_layout_sensitive ) { } <nl> - / / Returns true if ` instruction ` is worth hoisting out . <nl> + / / Returns true if ` instruction ` is worth hoisting . <nl> bool WorthHoisting ( HloInstruction * instruction ) { <nl> / / This is needed for the " moving - in " transformation , to prevent the root <nl> / / of the parent computation ( which contains the conditional ) to be moved <nl> class GroupConnectedBoundaries { <nl> case HloOpcode : : kAllReduce : <nl> case HloOpcode : : kAdd : <nl> case HloOpcode : : kPower : <nl> + case HloOpcode : : kCopy : <nl> case HloOpcode : : kConstant : <nl> case HloOpcode : : kSubtract : <nl> case HloOpcode : : kMultiply : <nl> StatusOr < bool > ConditionalCodeMotion : : Run ( HloModule * module ) { <nl> subpipeline . AddPass < HloDCE > ( ) ; <nl> subpipeline . AddPass < TupleSimplifier > ( ) ; <nl> subpipeline . AddPass < HloDCE > ( ) ; <nl> + subpipeline . AddPass < HloVerifier > ( false , true ) ; <nl> TF_ASSIGN_OR_RETURN ( bool cleanup_changed , subpipeline . Run ( module ) ) ; <nl> changed | = cleanup_changed ; <nl> } <nl> mmm a / tensorflow / compiler / xla / service / conditional_code_motion_test . cc <nl> ppp b / tensorflow / compiler / xla / service / conditional_code_motion_test . 
cc <nl> ENTRY main { <nl> EXPECT_THAT ( root , AllOf ( op : : GetTupleElement ( op : : Conditional ( ) ) ) ) ; <nl> } <nl> <nl> + TEST_F ( ConditionalCodeMotionTest , MoveCopyInBranch ) { <nl> + absl : : string_view hlo_string = <nl> + R " ( <nl> + HloModule RemoveIdenticalInstruction <nl> + <nl> + branch1 { <nl> + arg_tuple . 1 = ( s32 [ ] , f32 [ 10 , 3 ] { 0 , 1 } ) parameter ( 0 ) <nl> + constant . 1 = s32 [ ] constant ( 4 ) <nl> + get - tuple - element . 1 = s32 [ ] get - tuple - element ( arg_tuple . 1 ) , index = 0 <nl> + add . 1 = s32 [ ] add ( get - tuple - element . 1 , constant . 1 ) <nl> + get - tuple - element . 2 = f32 [ 10 , 3 ] { 0 , 1 } get - tuple - element ( arg_tuple . 1 ) , index = 1 <nl> + slice . 1 = f32 [ 4 , 3 ] { 0 , 1 } slice ( get - tuple - element . 2 ) , <nl> + slice = { [ 0 : 4 : 1 ] , [ 0 : 3 : 1 ] } <nl> + constant . 2 = f32 [ ] constant ( 0 . 0 ) <nl> + ROOT tuple . 1 = ( f32 [ 4 , 3 ] { 0 , 1 } , s32 [ ] , f32 [ ] ) tuple ( slice . 1 , add . 1 , constant . 2 ) <nl> + } <nl> + <nl> + branch2 { <nl> + arg_tuple . 2 = ( s32 [ ] , f32 [ 4 , 3 ] { 1 , 0 } ) parameter ( 0 ) <nl> + get - tuple - element . 3 = s32 [ ] get - tuple - element ( arg_tuple . 2 ) , index = 0 <nl> + copy . 1 = s32 [ ] copy ( get - tuple - element . 3 ) <nl> + get - tuple - element . 4 = f32 [ 4 , 3 ] { 1 , 0 } get - tuple - element ( arg_tuple . 2 ) , index = 1 <nl> + copy . 2 = f32 [ 4 , 3 ] { 0 , 1 } copy ( get - tuple - element . 4 ) <nl> + constant . 2 = f32 [ ] constant ( 0 . 0 ) <nl> + ROOT tuple . 2 = ( f32 [ 4 , 3 ] { 0 , 1 } , s32 [ ] , f32 [ ] ) tuple ( copy . 2 , copy . 1 , constant . 2 ) <nl> + } <nl> + <nl> + ENTRY main { <nl> + pred . 1 = pred [ ] parameter ( 0 ) <nl> + tuple . 3 = ( s32 [ ] , f32 [ 10 , 3 ] { 0 , 1 } ) parameter ( 1 ) <nl> + tuple . 4 = ( s32 [ ] , f32 [ 4 , 3 ] { 1 , 0 } ) parameter ( 2 ) <nl> + conditional = ( f32 [ 4 , 3 ] { 0 , 1 } , s32 [ ] , f32 [ ] ) <nl> + conditional ( pred . 1 , tuple . 3 , tuple . 4 ) , true_computation = branch1 , <nl> + false_computation = branch2 <nl> + get - zero - index = f32 [ 4 , 3 ] { 0 , 1 } get - tuple - element ( conditional ) , index = 0 <nl> + get - first - index = s32 [ ] get - tuple - element ( conditional ) , index = 1 <nl> + get - second - index = f32 [ ] get - tuple - element ( conditional ) , index = 2 <nl> + copy . 3 = f32 [ 4 , 3 ] { 1 , 0 } copy ( get - zero - index ) <nl> + ROOT tuple . 5 = ( f32 [ 4 , 3 ] { 0 , 1 } , s32 [ ] , f32 [ ] ) tuple ( copy . 3 , get - first - index , <nl> + get - second - index ) <nl> + } <nl> + ) " ; <nl> + auto module = ParseAndReturnVerifiedModule ( hlo_string ) . ValueOrDie ( ) ; <nl> + ConditionalCodeMotion pass ( true , true ) ; <nl> + ASSERT_TRUE ( pass . Run ( & * module ) . ValueOrDie ( ) ) ; <nl> + VLOG ( 1 ) < < module - > ToString ( ) ; <nl> + <nl> + const HloInstruction * conditional = <nl> + FindInstruction ( module . 
get ( ) , " conditional " ) ; <nl> + const HloComputation * on_true = conditional - > branch_computation ( 0 ) ; <nl> + ASSERT_EQ ( on_true - > instruction_count ( ) , 9 ) ; <nl> + const HloComputation * on_false = conditional - > branch_computation ( 1 ) ; <nl> + ASSERT_EQ ( on_false - > instruction_count ( ) , 8 ) ; <nl> + HloInstruction * root = module - > entry_computation ( ) - > root_instruction ( ) ; <nl> + EXPECT_THAT ( root , <nl> + AllOf ( op : : Tuple ( op : : GetTupleElement ( op : : Conditional ( ) , 2 ) , <nl> + op : : GetTupleElement ( op : : Conditional ( ) , 0 ) , <nl> + op : : GetTupleElement ( op : : Conditional ( ) , 1 ) ) ) ) ; <nl> + } <nl> + <nl> } / / namespace conditional_opt <nl> <nl> } / / namespace xla <nl> mmm a / tensorflow / compiler / xla / service / hlo_sharding_util . cc <nl> ppp b / tensorflow / compiler / xla / service / hlo_sharding_util . cc <nl> HloSharding ScatterIndexSharding ( const HloSharding & data_sharding , <nl> if ( index_tile_assignment_dims . size ( ) < hlo - > operand ( 1 ) - > shape ( ) . rank ( ) ) { <nl> index_tile_assignment_dims . push_back ( 1 ) ; <nl> } <nl> + if ( data_sharding . ReplicateOnLastTileDim ( ) ) { <nl> + index_tile_assignment_dims . push_back ( <nl> + data_sharding . tile_assignment ( ) . dimensions ( ) . back ( ) ) ; <nl> + } <nl> Array < int64 > new_tile_assignment = data_sharding . tile_assignment ( ) ; <nl> if ( new_tile_assignment . num_elements ( ) ! = <nl> Product ( index_tile_assignment_dims ) ) { <nl> return HloSharding : : Replicate ( ) ; <nl> } <nl> new_tile_assignment . Reshape ( index_tile_assignment_dims ) ; <nl> - return HloSharding : : Tile ( new_tile_assignment ) ; <nl> + return data_sharding . ReplicateOnLastTileDim ( ) <nl> + ? HloSharding : : PartialTile ( new_tile_assignment ) <nl> + : HloSharding : : Tile ( new_tile_assignment ) ; <nl> } <nl> <nl> HloSharding ScatterDataSharding ( const HloSharding & index_sharding , <nl> HloSharding ScatterDataSharding ( const HloSharding & index_sharding , <nl> index_dim + + ; <nl> } <nl> } <nl> + if ( index_sharding . ReplicateOnLastTileDim ( ) ) { <nl> + data_tile_assignment_dims . push_back ( <nl> + index_sharding . tile_assignment ( ) . dimensions ( ) . back ( ) ) ; <nl> + } <nl> Array < int64 > new_tile_assignment = index_sharding . tile_assignment ( ) ; <nl> if ( new_tile_assignment . num_elements ( ) ! = <nl> Product ( data_tile_assignment_dims ) ) { <nl> return HloSharding : : Replicate ( ) ; <nl> } <nl> new_tile_assignment . Reshape ( data_tile_assignment_dims ) ; <nl> - return HloSharding : : Tile ( new_tile_assignment ) ; <nl> + return index_sharding . ReplicateOnLastTileDim ( ) <nl> + ? HloSharding : : PartialTile ( new_tile_assignment ) <nl> + : HloSharding : : Tile ( new_tile_assignment ) ; <nl> } <nl> <nl> HloSharding ScatterEffectiveIndexSharding ( const HloSharding & index_sharding , <nl> mmm a / tensorflow / compiler / xla / service / sharding_propagation_test . cc <nl> ppp b / tensorflow / compiler / xla / service / sharding_propagation_test . 
cc <nl> ENTRY entry { <nl> op : : Sharding ( " { devices = [ 1 , 2 ] 0 , 1 } " ) ) ; <nl> } <nl> <nl> + TEST_F ( ShardingPropagationTest , DataOperandToScatter_PartialReplicate ) { <nl> + const char * const hlo_string = R " ( <nl> + HloModule module <nl> + <nl> + add ( lhs : f32 [ ] , rhs : f32 [ ] ) - > f32 [ ] { <nl> + lhs = f32 [ ] parameter ( 0 ) <nl> + rhs = f32 [ ] parameter ( 1 ) <nl> + ROOT sum = f32 [ ] add ( lhs , rhs ) <nl> + } <nl> + <nl> + ENTRY entry { <nl> + % input = f32 [ 2 , 9 ] parameter ( 0 ) , <nl> + sharding = { devices = [ 1 , 2 , 2 ] 0 , 1 , 2 , 3 last_tile_dim_replicate } <nl> + % indices = s32 [ 3 ] parameter ( 1 ) , sharding = { replicated } <nl> + % updates = f32 [ 3 , 9 ] parameter ( 2 ) , sharding = { replicated } <nl> + % scatter = f32 [ 2 , 9 ] scatter ( % input , % indices , % updates ) , <nl> + to_apply = add , <nl> + update_window_dims = { 1 } , <nl> + inserted_window_dims = { 0 } , <nl> + scatter_dims_to_operand_dims = { 0 } , <nl> + index_vector_dim = 1 <nl> + ROOT % copy = f32 [ 2 , 9 ] copy ( % scatter ) <nl> + } ) " ; <nl> + TF_ASSERT_OK_AND_ASSIGN ( auto module , <nl> + ParseAndReturnVerifiedModule ( hlo_string ) ) ; <nl> + TF_ASSERT_OK_AND_ASSIGN ( <nl> + bool changed , ShardingPropagation ( / * is_spmd = * / true ) . Run ( module . get ( ) ) ) ; <nl> + EXPECT_TRUE ( changed ) ; <nl> + EXPECT_THAT ( FindInstruction ( module . get ( ) , " scatter " ) , <nl> + op : : Sharding ( " { devices = [ 1 , 2 , 2 ] 0 , 1 , 2 , 3 last_tile_dim_replicate } " ) ) ; <nl> + } <nl> + <nl> TEST_F ( ShardingPropagationTest , UpdateOperandToScatter ) { <nl> const char * const hlo_string = R " ( <nl> HloModule module <nl> ENTRY entry { <nl> op : : Sharding ( " { devices = [ 1 , 2 ] 0 , 1 } " ) ) ; <nl> } <nl> <nl> + TEST_F ( ShardingPropagationTest , UpdateOperandToScatter_PartialReplicate ) { <nl> + const char * const hlo_string = R " ( <nl> + HloModule module <nl> + <nl> + add ( lhs : f32 [ ] , rhs : f32 [ ] ) - > f32 [ ] { <nl> + lhs = f32 [ ] parameter ( 0 ) <nl> + rhs = f32 [ ] parameter ( 1 ) <nl> + ROOT sum = f32 [ ] add ( lhs , rhs ) <nl> + } <nl> + <nl> + ENTRY entry { <nl> + % input = f32 [ 2 , 9 ] parameter ( 0 ) , sharding = { replicated } <nl> + % indices = s32 [ 3 ] parameter ( 1 ) , sharding = { replicated } <nl> + % updates = f32 [ 3 , 9 ] parameter ( 2 ) , <nl> + sharding = { devices = [ 1 , 2 , 2 ] 0 , 1 , 2 , 3 last_tile_dim_replicate } <nl> + % scatter = f32 [ 2 , 9 ] scatter ( % input , % indices , % updates ) , <nl> + to_apply = add , <nl> + update_window_dims = { 1 } , <nl> + inserted_window_dims = { 0 } , <nl> + scatter_dims_to_operand_dims = { 0 } , <nl> + index_vector_dim = 1 <nl> + ROOT % copy = f32 [ 2 , 9 ] copy ( % scatter ) <nl> + } ) " ; <nl> + TF_ASSERT_OK_AND_ASSIGN ( auto module , <nl> + ParseAndReturnVerifiedModule ( hlo_string ) ) ; <nl> + TF_ASSERT_OK_AND_ASSIGN ( <nl> + bool changed , ShardingPropagation ( / * is_spmd = * / true ) . Run ( module . get ( ) ) ) ; <nl> + EXPECT_TRUE ( changed ) ; <nl> + EXPECT_THAT ( FindInstruction ( module . 
get ( ) , " scatter " ) , <nl> + op : : Sharding ( " { devices = [ 1 , 2 , 2 ] 0 , 1 , 2 , 3 last_tile_dim_replicate } " ) ) ; <nl> + } <nl> + <nl> + TEST_F ( ShardingPropagationTest , ScatterToDataOperand_PartialReplicate ) { <nl> + const char * const hlo_string = R " ( <nl> + HloModule module <nl> + <nl> + add ( lhs : f32 [ ] , rhs : f32 [ ] ) - > f32 [ ] { <nl> + lhs = f32 [ ] parameter ( 0 ) <nl> + rhs = f32 [ ] parameter ( 1 ) <nl> + ROOT sum = f32 [ ] add ( lhs , rhs ) <nl> + } <nl> + <nl> + ENTRY entry { <nl> + % p0 = f32 [ 2 , 9 ] parameter ( 0 ) <nl> + % input = f32 [ 2 , 9 ] copy ( % p0 ) <nl> + % indices = s32 [ 3 ] parameter ( 1 ) , sharding = { replicated } <nl> + % updates = f32 [ 3 , 9 ] parameter ( 2 ) , sharding = { replicated } <nl> + ROOT % scatter = f32 [ 2 , 9 ] scatter ( % input , % indices , % updates ) , <nl> + to_apply = add , <nl> + update_window_dims = { 1 } , <nl> + inserted_window_dims = { 0 } , <nl> + scatter_dims_to_operand_dims = { 0 } , <nl> + index_vector_dim = 1 , <nl> + sharding = { devices = [ 1 , 2 , 2 ] 0 , 1 , 2 , 3 last_tile_dim_replicate } <nl> + } ) " ; <nl> + TF_ASSERT_OK_AND_ASSIGN ( auto module , <nl> + ParseAndReturnVerifiedModule ( hlo_string ) ) ; <nl> + TF_ASSERT_OK_AND_ASSIGN ( bool changed , <nl> + ShardingPropagation ( ) . Run ( module . get ( ) ) ) ; <nl> + EXPECT_TRUE ( changed ) ; <nl> + EXPECT_THAT ( FindInstruction ( module . get ( ) , " input " ) , <nl> + op : : Sharding ( " { devices = [ 1 , 2 , 2 ] 0 , 1 , 2 , 3 last_tile_dim_replicate } " ) ) ; <nl> + } <nl> + <nl> TEST_F ( ShardingPropagationTest , ScatterToDataOperand ) { <nl> const char * const hlo_string = R " ( <nl> HloModule module <nl> ENTRY entry { <nl> op : : Sharding ( " { devices = [ 1 , 2 ] 0 , 1 } " ) ) ; <nl> } <nl> <nl> + TEST_F ( ShardingPropagationTest , ScatterToUpdateOperand_PartialReplicate ) { <nl> + const char * const hlo_string = R " ( <nl> + HloModule module <nl> + <nl> + add ( lhs : f32 [ ] , rhs : f32 [ ] ) - > f32 [ ] { <nl> + lhs = f32 [ ] parameter ( 0 ) <nl> + rhs = f32 [ ] parameter ( 1 ) <nl> + ROOT sum = f32 [ ] add ( lhs , rhs ) <nl> + } <nl> + <nl> + ENTRY entry { <nl> + % input = f32 [ 2 , 9 ] parameter ( 0 ) <nl> + % indices = s32 [ 3 ] parameter ( 1 ) , sharding = { replicated } <nl> + % p2 = f32 [ 3 , 9 ] parameter ( 2 ) <nl> + % updates = f32 [ 3 , 9 ] copy ( % p2 ) <nl> + ROOT % scatter = f32 [ 2 , 9 ] scatter ( % input , % indices , % updates ) , <nl> + to_apply = add , <nl> + update_window_dims = { 1 } , <nl> + inserted_window_dims = { 0 } , <nl> + scatter_dims_to_operand_dims = { 0 } , <nl> + index_vector_dim = 1 , <nl> + sharding = { devices = [ 1 , 2 , 2 ] 0 , 1 , 2 , 3 last_tile_dim_replicate } <nl> + } ) " ; <nl> + TF_ASSERT_OK_AND_ASSIGN ( auto module , <nl> + ParseAndReturnVerifiedModule ( hlo_string ) ) ; <nl> + TF_ASSERT_OK_AND_ASSIGN ( <nl> + bool changed , ShardingPropagation ( / * is_spmd = * / true ) . Run ( module . get ( ) ) ) ; <nl> + EXPECT_TRUE ( changed ) ; <nl> + EXPECT_THAT ( FindInstruction ( module . 
get ( ) , " updates " ) , <nl> + op : : Sharding ( " { devices = [ 1 , 2 , 2 ] 0 , 1 , 2 , 3 last_tile_dim_replicate } " ) ) ; <nl> + } <nl> + <nl> TEST_F ( ShardingPropagationTest , ScatterToUpdateOperand ) { <nl> const char * const hlo_string = R " ( <nl> HloModule module <nl> ENTRY entry { <nl> op : : Sharding ( " { devices = [ 2 ] 0 , 1 } " ) ) ; <nl> } <nl> <nl> + TEST_F ( ShardingPropagationTest , ScatterUpdateToIndex_PartialReplicate ) { <nl> + const char * const hlo_string = R " ( <nl> + HloModule module <nl> + <nl> + add ( lhs : f32 [ ] , rhs : f32 [ ] ) - > f32 [ ] { <nl> + lhs = f32 [ ] parameter ( 0 ) <nl> + rhs = f32 [ ] parameter ( 1 ) <nl> + ROOT sum = f32 [ ] add ( lhs , rhs ) <nl> + } <nl> + <nl> + ENTRY entry { <nl> + % input = f32 [ 2 , 9 ] parameter ( 0 ) , sharding = { replicated } <nl> + % p1 = s32 [ 3 ] parameter ( 1 ) , sharding = { replicated } <nl> + % indices = s32 [ 3 ] copy ( % p1 ) <nl> + % updates = f32 [ 3 , 9 ] parameter ( 2 ) , <nl> + sharding = { devices = [ 2 , 1 , 2 ] 0 , 1 , 2 , 3 last_tile_dim_replicate } <nl> + ROOT % scatter = f32 [ 2 , 9 ] scatter ( % input , % indices , % updates ) , <nl> + to_apply = add , <nl> + update_window_dims = { 1 } , <nl> + inserted_window_dims = { 0 } , <nl> + scatter_dims_to_operand_dims = { 0 } , <nl> + index_vector_dim = 1 , sharding = { replicated } <nl> + } ) " ; <nl> + TF_ASSERT_OK_AND_ASSIGN ( auto module , <nl> + ParseAndReturnVerifiedModule ( hlo_string ) ) ; <nl> + TF_ASSERT_OK_AND_ASSIGN ( bool changed , <nl> + ShardingPropagation ( ) . Run ( module . get ( ) ) ) ; <nl> + EXPECT_TRUE ( changed ) ; <nl> + EXPECT_THAT ( FindInstruction ( module . get ( ) , " indices " ) , <nl> + op : : Sharding ( " { devices = [ 2 , 2 ] 0 , 1 , 2 , 3 last_tile_dim_replicate } " ) ) ; <nl> + } <nl> + <nl> TEST_F ( ShardingPropagationTest , ScatterIndexToUpdate ) { <nl> const char * const hlo_string = R " ( <nl> HloModule module <nl> ENTRY entry { <nl> op : : Sharding ( " { devices = [ 2 , 1 ] 0 , 1 } " ) ) ; <nl> } <nl> <nl> + TEST_F ( ShardingPropagationTest , ScatterIndexToUpdate_PartialReplicate ) { <nl> + const char * const hlo_string = R " ( <nl> + HloModule module <nl> + <nl> + add ( lhs : f32 [ ] , rhs : f32 [ ] ) - > f32 [ ] { <nl> + lhs = f32 [ ] parameter ( 0 ) <nl> + rhs = f32 [ ] parameter ( 1 ) <nl> + ROOT sum = f32 [ ] add ( lhs , rhs ) <nl> + } <nl> + <nl> + ENTRY entry { <nl> + % input = f32 [ 2 , 9 ] parameter ( 0 ) , sharding = { replicated } <nl> + % indices = s32 [ 3 ] parameter ( 1 ) , <nl> + sharding = { devices = [ 2 , 2 ] 0 , 1 , 2 , 3 last_tile_dim_replicate } <nl> + % p2 = f32 [ 3 , 9 ] parameter ( 2 ) , sharding = { replicated } <nl> + % updates = f32 [ 3 , 9 ] copy ( % p2 ) <nl> + ROOT % scatter = f32 [ 2 , 9 ] scatter ( % input , % indices , % updates ) , <nl> + to_apply = add , <nl> + update_window_dims = { 1 } , <nl> + inserted_window_dims = { 0 } , <nl> + scatter_dims_to_operand_dims = { 0 } , <nl> + index_vector_dim = 1 , sharding = { replicated } <nl> + } ) " ; <nl> + TF_ASSERT_OK_AND_ASSIGN ( auto module , <nl> + ParseAndReturnVerifiedModule ( hlo_string ) ) ; <nl> + TF_ASSERT_OK_AND_ASSIGN ( bool changed , <nl> + ShardingPropagation ( ) . Run ( module . get ( ) ) ) ; <nl> + EXPECT_TRUE ( changed ) ; <nl> + EXPECT_THAT ( FindInstruction ( module . 
get ( ) , " updates " ) , <nl> + op : : Sharding ( " { devices = [ 2 , 1 , 2 ] 0 , 1 , 2 , 3 last_tile_dim_replicate } " ) ) ; <nl> + } <nl> + <nl> TEST_F ( ShardingPropagationTest , PartialShardingOnElementwise ) { <nl> const char * const hlo_string = R " ( <nl> HloModule module <nl> mmm a / tensorflow / compiler / xla / service / spmd / spmd_partitioner . cc <nl> ppp b / tensorflow / compiler / xla / service / spmd / spmd_partitioner . cc <nl> Status SpmdPartitioningVisitor : : HandleScatter ( HloInstruction * hlo ) { <nl> update_dim_to_index_dim ) ; <nl> CHECK ( new_updates_sharding . has_value ( ) ) ; <nl> updates = updates . Reshard ( * new_updates_sharding ) ; <nl> + / / Update collective_ops_creator and partition_id for partial replicate . <nl> + auto collective_ops_creator = collective_ops_creator_ ; <nl> + auto partition_id = partition_id_ ; <nl> + if ( indices . sharding ( ) . ReplicateOnLastTileDim ( ) ) { <nl> + auto sharding_grouped = GroupShardingOnDims ( <nl> + indices . sharding ( ) , <nl> + { indices . sharding ( ) . tile_assignment ( ) . num_dimensions ( ) - 1 } ) ; <nl> + auto per_group_partitioner_state = CreatePerGroupPartitioningState ( <nl> + indices . state ( ) , sharding_grouped . device_groups , & b_ ) ; <nl> + collective_ops_creator = <nl> + per_group_partitioner_state . collective_ops_creator ; <nl> + partition_id = per_group_partitioner_state . partition_id ; <nl> + } <nl> / / To avoid accumulating the initial operand multiple times during <nl> / / all - reduce , we use identity operands for all non - zero partitions . <nl> auto not_partition_zero = b_ . AddInstruction ( HloInstruction : : CreateConvert ( <nl> - ShapeUtil : : MakeScalarShape ( PRED ) , partition_id_ ) ) ; <nl> + ShapeUtil : : MakeScalarShape ( PRED ) , partition_id ) ) ; <nl> not_partition_zero = b_ . AddInstruction ( HloInstruction : : CreateBroadcast ( <nl> ShapeUtil : : ChangeElementType ( identity - > shape ( ) , PRED ) , <nl> not_partition_zero , { } ) ) ; <nl> Status SpmdPartitioningVisitor : : HandleScatter ( HloInstruction * hlo ) { <nl> auto pscatter = b_ . AddInstruction ( scatter - > CloneWithNewOperands ( <nl> scatter - > shape ( ) , { select_operand , indices . hlo ( ) , updates . hlo ( ) } ) ) ; <nl> auto all_reduce = <nl> - collective_ops_creator_ . create_cross_partition_all_reduce ( <nl> + collective_ops_creator . create_cross_partition_all_reduce ( <nl> & b_ , pscatter , scatter - > to_apply ( ) , { } , NewChannel ( ) ) ; <nl> all_reduce - > set_sharding ( HloSharding : : Replicate ( ) ) ; <nl> SetPartitionedHlo ( hlo , [ & ] ( ) { <nl> mmm a / tensorflow / compiler / xla / service / spmd / spmd_partitioner_test . cc <nl> ppp b / tensorflow / compiler / xla / service / spmd / spmd_partitioner_test . 
cc <nl> ENTRY entry { <nl> op : : Shape ( " f32 [ 2 , 5 ] " ) ) ) ; <nl> } <nl> <nl> + TEST_F ( SpmdPartitioningTest , PassthroughScatter_PartialReplicate ) { <nl> + const char * const hlo_string = R " ( <nl> + HloModule module <nl> + <nl> + add ( lhs : f32 [ ] , rhs : f32 [ ] ) - > f32 [ ] { <nl> + lhs = f32 [ ] parameter ( 0 ) <nl> + rhs = f32 [ ] parameter ( 1 ) <nl> + ROOT sum = f32 [ ] add ( lhs , rhs ) <nl> + } <nl> + <nl> + ENTRY entry { <nl> + % input = f32 [ 2 , 9 ] parameter ( 0 ) , <nl> + sharding = { devices = [ 1 , 2 , 2 ] 0 , 1 , 2 , 3 last_tile_dim_replicate } <nl> + % indices = s32 [ 3 ] parameter ( 1 ) , sharding = { replicated } <nl> + % updates = f32 [ 3 , 9 ] parameter ( 2 ) , <nl> + sharding = { devices = [ 1 , 2 , 2 ] 0 , 1 , 2 , 3 last_tile_dim_replicate } <nl> + ROOT % scatter = f32 [ 2 , 9 ] scatter ( % input , % indices , % updates ) , <nl> + to_apply = add , <nl> + update_window_dims = { 1 } , <nl> + inserted_window_dims = { 0 } , <nl> + scatter_dims_to_operand_dims = { 0 } , <nl> + index_vector_dim = 1 , <nl> + sharding = { devices = [ 1 , 2 , 2 ] 0 , 1 , 2 , 3 last_tile_dim_replicate } <nl> + } ) " ; <nl> + TF_ASSERT_OK_AND_ASSIGN ( auto module , <nl> + PartitionComputation ( hlo_string , / * num_devices = * / 4 ) ) ; <nl> + VLOG ( 1 ) < < module - > ToString ( ) ; <nl> + HloInstruction * root = module - > entry_computation ( ) - > root_instruction ( ) ; <nl> + EXPECT_THAT ( root , AllOf ( op : : Scatter ( op : : Parameter ( 0 ) , op : : Parameter ( 1 ) , <nl> + op : : Parameter ( 2 ) ) , <nl> + op : : Shape ( " f32 [ 2 , 5 ] " ) ) ) ; <nl> + } <nl> + <nl> TEST_F ( SpmdPartitioningTest , IndexPassthroughScatter ) { <nl> const char * const hlo_string = R " ( <nl> HloModule module <nl> ENTRY entry { <nl> op : : Shape ( " f32 [ 2 , 9 , 8 ] " ) ) ) ; <nl> } <nl> <nl> + TEST_F ( SpmdPartitioningTest , IndexPassthroughScatter_PartialReplicate ) { <nl> + const char * const hlo_string = R " ( <nl> + HloModule module <nl> + <nl> + add ( lhs : f32 [ ] , rhs : f32 [ ] ) - > f32 [ ] { <nl> + lhs = f32 [ ] parameter ( 0 ) <nl> + rhs = f32 [ ] parameter ( 1 ) <nl> + ROOT sum = f32 [ ] add ( lhs , rhs ) <nl> + } <nl> + <nl> + ENTRY entry { <nl> + % input = f32 [ 2 , 9 , 8 ] parameter ( 0 ) , sharding = { replicated } <nl> + % indices = s32 [ 4 , 2 , 4 ] parameter ( 1 ) , <nl> + sharding = { devices = [ 2 , 1 , 2 , 2 ] 0 , 1 , 2 , 3 , 4 , 5 , 6 , 7 last_tile_dim_replicate } <nl> + % updates = f32 [ 4 , 4 , 8 ] parameter ( 2 ) , <nl> + sharding = { devices = [ 2 , 2 , 1 , 2 ] 0 , 1 , 2 , 3 , 4 , 5 , 6 , 7 last_tile_dim_replicate } <nl> + ROOT % scatter = f32 [ 2 , 9 , 8 ] scatter ( % input , % indices , % updates ) , <nl> + to_apply = add , <nl> + update_window_dims = { 2 } , <nl> + inserted_window_dims = { 0 , 1 } , <nl> + scatter_dims_to_operand_dims = { 0 , 1 } , <nl> + index_vector_dim = 1 , sharding = { replicated } <nl> + } ) " ; <nl> + TF_ASSERT_OK_AND_ASSIGN ( auto module , <nl> + PartitionComputation ( hlo_string , / * num_devices = * / 8 ) ) ; <nl> + VLOG ( 1 ) < < module - > ToString ( ) ; <nl> + HloInstruction * root = module - > entry_computation ( ) - > root_instruction ( ) ; <nl> + EXPECT_THAT ( <nl> + root , <nl> + AllOf ( op : : AllReduce ( op : : Scatter ( <nl> + op : : Select ( op : : Broadcast ( op : : Convert ( op : : Reshape ( ) ) ) , <nl> + op : : Broadcast ( op : : Constant ( ) ) , op : : Parameter ( 0 ) ) , <nl> + op : : Parameter ( 1 ) , op : : Parameter ( 2 ) ) ) , <nl> + op : : Shape ( " f32 [ 2 , 9 , 8 ] " ) ) ) ; <nl> + } <nl> + <nl> TEST_F ( 
SpmdPartitioningTest , IndexPassthroughScatter_Min ) { <nl> const char * const hlo_string = R " ( <nl> HloModule module <nl> ENTRY entry { <nl> op : : Shape ( " f32 [ 9 , 9 ] " ) ) ) ; <nl> } <nl> <nl> + TEST_F ( SpmdPartitioningTest , <nl> + ScatterPartitionedOnTrivialSliceDims_PartialReplicate ) { <nl> + const char * const hlo_string = R " ( <nl> + HloModule module <nl> + <nl> + add ( lhs : f32 [ ] , rhs : f32 [ ] ) - > f32 [ ] { <nl> + lhs = f32 [ ] parameter ( 0 ) <nl> + rhs = f32 [ ] parameter ( 1 ) <nl> + ROOT sum = f32 [ ] add ( lhs , rhs ) <nl> + } <nl> + <nl> + ENTRY entry { <nl> + % input = f32 [ 17 , 9 ] parameter ( 0 ) , <nl> + sharding = { devices = [ 2 , 1 , 2 ] 0 , 1 , 2 , 3 last_tile_dim_replicate } <nl> + % indices = s32 [ 2 , 3 ] parameter ( 1 ) , sharding = { replicated } <nl> + % updates = f32 [ 2 , 3 , 9 ] parameter ( 2 ) , sharding = { replicated } <nl> + ROOT % scatter = f32 [ 17 , 9 ] scatter ( % input , % indices , % updates ) , <nl> + to_apply = add , <nl> + update_window_dims = { 2 } , <nl> + inserted_window_dims = { 0 } , <nl> + scatter_dims_to_operand_dims = { 0 } , <nl> + index_vector_dim = 2 , <nl> + sharding = { devices = [ 2 , 1 , 2 ] 0 , 1 , 2 , 3 last_tile_dim_replicate } <nl> + } ) " ; <nl> + TF_ASSERT_OK_AND_ASSIGN ( auto module , <nl> + PartitionComputation ( hlo_string , / * num_devices = * / 4 ) ) ; <nl> + VLOG ( 1 ) < < module - > ToString ( ) ; <nl> + auto offset = <nl> + op : : Reshape ( op : : DynamicSlice ( op : : Constant ( ) , op : : PartitionId ( ) ) ) ; <nl> + auto indices = op : : Subtract ( <nl> + op : : Parameter ( 1 ) , AllOf ( op : : Broadcast ( offset ) , op : : Shape ( " s32 [ 2 , 3 ] " ) ) ) ; <nl> + HloInstruction * root = module - > entry_computation ( ) - > root_instruction ( ) ; <nl> + EXPECT_THAT ( root , <nl> + AllOf ( op : : Scatter ( op : : Parameter ( 0 ) , indices , op : : Parameter ( 2 ) ) , <nl> + op : : Shape ( " f32 [ 9 , 9 ] " ) ) ) ; <nl> + } <nl> + <nl> TEST_F ( SpmdPartitioningTest , TiledReversePassthrough ) { <nl> const char * const hlo_string = R " ( <nl> HloModule module <nl> mmm a / tensorflow / compiler / xla / service / spmd / spmd_partitioner_util . h <nl> ppp b / tensorflow / compiler / xla / service / spmd / spmd_partitioner_util . h <nl> absl : : optional < HloInstruction * > PadFromPartialReplicateShape ( <nl> / / dimensions by dynamic slice . <nl> / / For example , if partial_sharding is <nl> / / { devices = [ 1 , 2 , 2 ] 0 , 1 , 2 , 3 last_tile_dim_replicate } <nl> - / / Target tile dims is { 2 , 2 } , the returned compatible sharding will be <nl> - / / sharding = { devices = [ 1 , 2 , 2 ] 0 , 2 , 1 , 3 last_tile_dim_replicate } . <nl> + / / If the target sharding is { devices = [ 2 , 2 ] 0 , 1 , 2 , 3 } , the returned compatible sharding <nl> + / / will be sharding = { devices = [ 2 , 2 ] 0 , 2 , 1 , 3 } . <nl> / / If partial replicate sharding is not partial replicate or can ' t reshard to <nl> / / target_tile_dims by dynamic slice , return absl : : nullopt . <nl> / / If target_sharding is already compatible , returns it . <nl> mmm a / tensorflow / compiler / xla / shape_tree . h <nl> ppp b / tensorflow / compiler / xla / shape_tree . h <nl> void ShapeTree < T > : : CopySubtreeFrom ( const ShapeTree < T > & other , <nl> const ShapeIndex & target_base_index ) { <nl> CHECK ( ShapeUtil : : Compatible ( <nl> ShapeUtil : : GetSubshape ( shape ( ) , target_base_index ) , <nl> - ShapeUtil : : GetSubshape ( other . shape ( ) , source_base_index ) ) ) ; <nl> + ShapeUtil : : GetSubshape ( other . 
shape ( ) , source_base_index ) ) ) <nl> + < < ShapeUtil : : GetSubshape ( shape ( ) , target_base_index ) < < " vs " <nl> + < < ShapeUtil : : GetSubshape ( other . shape ( ) , source_base_index ) ; <nl> ForEachMutableElement ( [ this , & other , & source_base_index , & target_base_index ] ( <nl> const ShapeIndex & index , T * data ) { <nl> / / Copy the data element only if index is in the <nl> mmm a / tensorflow / core / BUILD <nl> ppp b / tensorflow / core / BUILD <nl> filegroup ( <nl> visibility = [ " / / visibility : public " ] , <nl> ) <nl> <nl> - filegroup ( <nl> + alias ( <nl> name = " lmdb_testdata " , <nl> - testonly = 1 , <nl> - srcs = [ <nl> - # A simple key - value store : <nl> - # 0 : ' b ' <nl> - # 1 : ' b ' <nl> - # . . . <nl> - # 9 : ' b ' <nl> - # Which is then overwritten with : <nl> - # 0 : ' a ' <nl> - # 1 : ' b ' <nl> - # . . . <nl> - # 9 : ' j ' <nl> - " lib / lmdb / testdata / data . mdb " , <nl> - # LMDB , being a memory - mapped database , uses a different file format on <nl> - # big - endian systems . <nl> - " lib / lmdb / testdata / data_bigendian . mdb " , <nl> - ] , <nl> - visibility = [ " / / visibility : public " ] , <nl> + actual = " / / tensorflow / core / lib / lmdb : lmdb_testdata " , <nl> ) <nl> <nl> alias ( <nl> mmm a / tensorflow / core / api_def / base_api / api_def_Acos . pbtxt <nl> ppp b / tensorflow / core / api_def / base_api / api_def_Acos . pbtxt <nl> op { <nl> summary : " Computes acos of x element - wise . " <nl> description : < < END <nl> <nl> - Provided an input tensor , the ` tf . math . acos ` operation returns the inverse cosine of each element of the tensor . If ` y = tf . math . cos ( x ) ` then , ` x = tf . math . acos ( y ) ` . <nl> - <nl> + Provided an input tensor , the ` tf . math . acos ` operation returns the inverse cosine of each element of the tensor . If ` y = tf . math . cos ( x ) ` then , ` x = tf . math . acos ( y ) ` . <nl> + <nl> Input range is ` [ - 1 , 1 ] ` and the output has a range of ` [ 0 , pi ] ` . <nl> <nl> END <nl> mmm a / tensorflow / core / api_def / base_api / api_def_Add . pbtxt <nl> ppp b / tensorflow / core / api_def / base_api / api_def_Add . pbtxt <nl> op { <nl> description : < < END <nl> * NOTE * : ` Add ` supports broadcasting . ` AddN ` does not . More about broadcasting <nl> [ here ] ( http : / / docs . scipy . org / doc / numpy / user / basics . broadcasting . html ) <nl> - <nl> - Given two input tensors , the ` tf . add ` operation computes the sum for every element in the tensor . <nl> + <nl> + Given two input tensors , the ` tf . add ` operation computes the sum for every element in the tensor . <nl> <nl> Both input and output have a range ` ( - inf , inf ) ` . <nl> <nl> mmm a / tensorflow / core / data / service / BUILD <nl> ppp b / tensorflow / core / data / service / BUILD <nl> cc_library ( <nl> hdrs = [ " credentials_factory . h " ] , <nl> deps = [ <nl> " / / tensorflow / core : lib " , <nl> + " @ com_google_absl / / absl / memory " , <nl> " @ com_google_absl / / absl / strings " , <nl> tf_grpc_cc_dependency ( ) , <nl> ] , <nl> cc_library ( <nl> srcs = [ " local_credentials_factory . cc " ] , <nl> deps = [ <nl> " : credentials_factory " , <nl> + " @ com_google_absl / / absl / memory " , <nl> tf_grpc_cc_dependency ( ) , <nl> ] , <nl> alwayslink = 1 , <nl> mmm a / tensorflow / core / data / service / credentials_factory . cc <nl> ppp b / tensorflow / core / data / service / credentials_factory . cc <nl> limitations under the License . 
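As a hedged usage sketch of the registration API as revised in this hunk (ownership now transfers through std::unique_ptr, and credentials come back through reference out-params), a custom factory might look like the following. `ExampleCredentialsFactory` and its "grpc+example" protocol are illustrative only and simply reuse the insecure gRPC credentials.

#include <memory>
#include <string>

#include "absl/memory/memory.h"
#include "tensorflow/core/data/service/credentials_factory.h"

namespace tensorflow {
namespace data {

// Hypothetical factory used only to show the new signatures; a real one
// would build TLS credentials instead of the insecure stand-ins below.
class ExampleCredentialsFactory : public CredentialsFactory {
 public:
  std::string Protocol() override { return "grpc+example"; }

  Status CreateServerCredentials(
      std::shared_ptr<::grpc::ServerCredentials>& out) override {
    out = ::grpc::InsecureServerCredentials();  // stand-in for real creds
    return Status::OK();
  }

  Status CreateClientCredentials(
      std::shared_ptr<::grpc::ChannelCredentials>& out) override {
    out = ::grpc::InsecureChannelCredentials();  // stand-in for real creds
    return Status::OK();
  }
};

// Registration hands ownership to the registry, matching the new signature:
//   CredentialsFactory::Register(absl::make_unique<ExampleCredentialsFactory>());

}  // namespace data
}  // namespace tensorflow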
<nl> <nl> # include " tensorflow / core / data / service / credentials_factory . h " <nl> <nl> + # include " absl / memory / memory . h " <nl> # include " tensorflow / core / lib / core / errors . h " <nl> # include " tensorflow / core / platform / mutex . h " <nl> <nl> CredentialsFactories & credentials_factories ( ) { <nl> } <nl> } / / namespace <nl> <nl> - void CredentialsFactory : : Register ( CredentialsFactory * factory ) { <nl> + void CredentialsFactory : : Register ( std : : unique_ptr < CredentialsFactory > factory ) { <nl> mutex_lock l ( * get_lock ( ) ) ; <nl> - if ( ! credentials_factories ( ) . insert ( { factory - > Protocol ( ) , factory } ) . second ) { <nl> + if ( ! credentials_factories ( ) <nl> + . insert ( { factory - > Protocol ( ) , factory . release ( ) } ) <nl> + . second ) { <nl> LOG ( ERROR ) <nl> < < " Two credentials factories are being registered with protocol " <nl> < < factory - > Protocol ( ) < < " . Which one gets used is undefined . " ; <nl> void CredentialsFactory : : Register ( CredentialsFactory * factory ) { <nl> } <nl> <nl> Status CredentialsFactory : : Get ( absl : : string_view protocol , <nl> - CredentialsFactory * * out ) { <nl> + CredentialsFactory * & out ) { <nl> mutex_lock l ( * get_lock ( ) ) ; <nl> auto it = credentials_factories ( ) . find ( std : : string ( protocol ) ) ; <nl> if ( it ! = credentials_factories ( ) . end ( ) ) { <nl> - * out = it - > second ; <nl> + out = it - > second ; <nl> return Status : : OK ( ) ; <nl> } <nl> <nl> Status CredentialsFactory : : Get ( absl : : string_view protocol , <nl> <nl> Status CredentialsFactory : : CreateServerCredentials ( <nl> absl : : string_view protocol , <nl> - std : : shared_ptr < : : grpc : : ServerCredentials > * out ) { <nl> + std : : shared_ptr < : : grpc : : ServerCredentials > & out ) { <nl> CredentialsFactory * factory ; <nl> - TF_RETURN_IF_ERROR ( CredentialsFactory : : Get ( protocol , & factory ) ) ; <nl> + TF_RETURN_IF_ERROR ( CredentialsFactory : : Get ( protocol , factory ) ) ; <nl> TF_RETURN_IF_ERROR ( factory - > CreateServerCredentials ( out ) ) ; <nl> return Status : : OK ( ) ; <nl> } <nl> <nl> Status CredentialsFactory : : CreateClientCredentials ( <nl> absl : : string_view protocol , <nl> - std : : shared_ptr < : : grpc : : ChannelCredentials > * out ) { <nl> + std : : shared_ptr < : : grpc : : ChannelCredentials > & out ) { <nl> CredentialsFactory * factory ; <nl> - TF_RETURN_IF_ERROR ( CredentialsFactory : : Get ( protocol , & factory ) ) ; <nl> + TF_RETURN_IF_ERROR ( CredentialsFactory : : Get ( protocol , factory ) ) ; <nl> TF_RETURN_IF_ERROR ( factory - > CreateClientCredentials ( out ) ) ; <nl> return Status : : OK ( ) ; <nl> } <nl> class InsecureCredentialsFactory : public CredentialsFactory { <nl> std : : string Protocol ( ) override { return " grpc " ; } <nl> <nl> Status CreateServerCredentials ( <nl> - std : : shared_ptr < : : grpc : : ServerCredentials > * out ) override { <nl> - * out = : : grpc : : InsecureServerCredentials ( ) ; <nl> + std : : shared_ptr < : : grpc : : ServerCredentials > & out ) override { <nl> + out = : : grpc : : InsecureServerCredentials ( ) ; <nl> return Status : : OK ( ) ; <nl> } <nl> <nl> Status CreateClientCredentials ( <nl> - std : : shared_ptr < : : grpc : : ChannelCredentials > * out ) override { <nl> - * out = : : grpc : : InsecureChannelCredentials ( ) ; <nl> + std : : shared_ptr < : : grpc : : ChannelCredentials > & out ) override { <nl> + out = : : grpc : : InsecureChannelCredentials ( ) ; <nl> return Status : : OK ( ) ; <nl> 
} <nl> } ; <nl> class InsecureCredentialsFactory : public CredentialsFactory { <nl> class InsecureCredentialsRegistrar { <nl> public : <nl> InsecureCredentialsRegistrar ( ) { <nl> - auto factory = new InsecureCredentialsFactory ( ) ; <nl> - CredentialsFactory : : Register ( factory ) ; <nl> + CredentialsFactory : : Register ( <nl> + absl : : make_unique < InsecureCredentialsFactory > ( ) ) ; <nl> } <nl> } ; <nl> static InsecureCredentialsRegistrar registrar ; <nl> mmm a / tensorflow / core / data / service / credentials_factory . h <nl> ppp b / tensorflow / core / data / service / credentials_factory . h <nl> class CredentialsFactory { <nl> / / look up with ` GetCredentials ` to find the registered credentials factory . <nl> virtual std : : string Protocol ( ) = 0 ; <nl> <nl> - / / Stores server credentials to ` * out ` . <nl> + / / Stores server credentials to ` out ` . <nl> virtual Status CreateServerCredentials ( <nl> - std : : shared_ptr < : : grpc : : ServerCredentials > * out ) = 0 ; <nl> + std : : shared_ptr < : : grpc : : ServerCredentials > & out ) = 0 ; <nl> <nl> - / / Stores client credentials to ` * out ` . <nl> + / / Stores client credentials to ` out ` . <nl> virtual Status CreateClientCredentials ( <nl> - std : : shared_ptr < : : grpc : : ChannelCredentials > * out ) = 0 ; <nl> + std : : shared_ptr < : : grpc : : ChannelCredentials > & out ) = 0 ; <nl> <nl> / / Registers a credentials factory . <nl> - static void Register ( CredentialsFactory * factory ) ; <nl> + static void Register ( std : : unique_ptr < CredentialsFactory > factory ) ; <nl> <nl> / / Creates server credentials using the credentials factory registered as <nl> - / / ` protocol ` , and stores them to ` * out ` . <nl> + / / ` protocol ` , and stores them to ` out ` . <nl> static Status CreateServerCredentials ( <nl> absl : : string_view protocol , <nl> - std : : shared_ptr < : : grpc : : ServerCredentials > * out ) ; <nl> + std : : shared_ptr < : : grpc : : ServerCredentials > & out ) ; <nl> <nl> / / Creates client credentials using the credentials factory registered as <nl> - / / ` protocol ` , and stores them to ` * out ` . <nl> + / / ` protocol ` , and stores them to ` out ` . <nl> static Status CreateClientCredentials ( <nl> absl : : string_view protocol , <nl> - std : : shared_ptr < : : grpc : : ChannelCredentials > * out ) ; <nl> + std : : shared_ptr < : : grpc : : ChannelCredentials > & out ) ; <nl> <nl> private : <nl> - / / Gets the credentials factory registered via ` Register ` for the specified <nl> - / / protocol , and stores it to ` * out ` . <nl> - static Status Get ( const absl : : string_view protocol , CredentialsFactory * * out ) ; <nl> + / / Borrows a pointer to the credentials factory registered via ` Register ` <nl> + / / for the specified protocol , and stores it to ` out ` . <nl> + static Status Get ( const absl : : string_view protocol , CredentialsFactory * & out ) ; <nl> } ; <nl> <nl> } / / namespace data <nl> mmm a / tensorflow / core / data / service / credentials_factory_test . cc <nl> ppp b / tensorflow / core / data / service / credentials_factory_test . 
cc <nl> class TestCredentialsFactory : public CredentialsFactory { <nl> std : : string Protocol ( ) override { return " test " ; } <nl> <nl> Status CreateServerCredentials ( <nl> - std : : shared_ptr < grpc : : ServerCredentials > * out ) override { <nl> + std : : shared_ptr < grpc : : ServerCredentials > & out ) override { <nl> return errors : : Internal ( kFailedToCreateServerCredentials ) ; <nl> } <nl> <nl> Status CreateClientCredentials ( <nl> - std : : shared_ptr < grpc : : ChannelCredentials > * out ) override { <nl> + std : : shared_ptr < grpc : : ChannelCredentials > & out ) override { <nl> return errors : : Internal ( kFailedToCreateClientCredentials ) ; <nl> } <nl> } ; <nl> } / / namespace <nl> <nl> TEST ( CredentialsFactory , Register ) { <nl> - TestCredentialsFactory test_factory ; <nl> - CredentialsFactory : : Register ( & test_factory ) ; <nl> + auto test_factory = absl : : make_unique < TestCredentialsFactory > ( ) ; <nl> + std : : string protocol = test_factory - > Protocol ( ) ; <nl> + CredentialsFactory : : Register ( std : : move ( test_factory ) ) ; <nl> std : : shared_ptr < grpc : : ServerCredentials > server_credentials ; <nl> ASSERT_EQ ( errors : : Internal ( kFailedToCreateServerCredentials ) , <nl> - CredentialsFactory : : CreateServerCredentials ( test_factory . Protocol ( ) , <nl> - & server_credentials ) ) ; <nl> + CredentialsFactory : : CreateServerCredentials ( protocol , <nl> + server_credentials ) ) ; <nl> std : : shared_ptr < grpc : : ChannelCredentials > client_credentials ; <nl> ASSERT_EQ ( errors : : Internal ( kFailedToCreateClientCredentials ) , <nl> - CredentialsFactory : : CreateClientCredentials ( test_factory . Protocol ( ) , <nl> - & client_credentials ) ) ; <nl> + CredentialsFactory : : CreateClientCredentials ( protocol , <nl> + client_credentials ) ) ; <nl> } <nl> <nl> TEST ( CredentialsFactory , DefaultGrpcProtocol ) { <nl> std : : shared_ptr < grpc : : ServerCredentials > server_credentials ; <nl> TF_ASSERT_OK ( <nl> - CredentialsFactory : : CreateServerCredentials ( " grpc " , & server_credentials ) ) ; <nl> + CredentialsFactory : : CreateServerCredentials ( " grpc " , server_credentials ) ) ; <nl> std : : shared_ptr < grpc : : ChannelCredentials > client_credentials ; <nl> TF_ASSERT_OK ( <nl> - CredentialsFactory : : CreateClientCredentials ( " grpc " , & client_credentials ) ) ; <nl> + CredentialsFactory : : CreateClientCredentials ( " grpc " , client_credentials ) ) ; <nl> } <nl> <nl> TEST ( CredentialsFactory , MissingServerProtocol ) { <nl> std : : shared_ptr < grpc : : ServerCredentials > server_credentials ; <nl> Status s = CredentialsFactory : : CreateServerCredentials ( " unknown_protocol " , <nl> - & server_credentials ) ; <nl> + server_credentials ) ; <nl> ASSERT_EQ ( error : : Code : : NOT_FOUND , s . code ( ) ) ; <nl> ASSERT_TRUE ( <nl> absl : : StrContains ( s . ToString ( ) , <nl> TEST ( CredentialsFactory , MissingServerProtocol ) { <nl> TEST ( CredentialsFactory , MissingClientProtocol ) { <nl> std : : shared_ptr < grpc : : ChannelCredentials > client_credentials ; <nl> Status s = CredentialsFactory : : CreateClientCredentials ( " unknown_protocol " , <nl> - & client_credentials ) ; <nl> + client_credentials ) ; <nl> ASSERT_EQ ( error : : Code : : NOT_FOUND , s . code ( ) ) ; <nl> ASSERT_TRUE ( <nl> absl : : StrContains ( s . ToString ( ) , <nl> mmm a / tensorflow / core / data / service / data_service . cc <nl> ppp b / tensorflow / core / data / service / data_service . 
cc <nl> constexpr const char kParallelEpochs [ ] = " parallel_epochs " ; <nl> constexpr const char kOneEpoch [ ] = " one_epoch " ; <nl> } / / namespace <nl> <nl> - Status ParseProcessingMode ( const std : : string & s , ProcessingMode * mode ) { <nl> + Status ParseProcessingMode ( const std : : string & s , ProcessingMode & mode ) { <nl> if ( s = = kParallelEpochs ) { <nl> - * mode = ProcessingMode : : PARALLEL_EPOCHS ; <nl> + mode = ProcessingMode : : PARALLEL_EPOCHS ; <nl> } else if ( s = = kOneEpoch ) { <nl> - * mode = ProcessingMode : : ONE_EPOCH ; <nl> + mode = ProcessingMode : : ONE_EPOCH ; <nl> } else { <nl> return errors : : InvalidArgument ( " Unrecognized processing mode : " , s ) ; <nl> } <nl> Status DataServiceDispatcherClient : : GetDatasetDef ( int64 dataset_id , <nl> } <nl> <nl> Status DataServiceDispatcherClient : : RegisterDataset ( GraphDef dataset , <nl> - int64 * dataset_id ) { <nl> + int64 & dataset_id ) { <nl> TF_RETURN_IF_ERROR ( EnsureInitialized ( ) ) ; <nl> GetOrRegisterDatasetRequest req ; <nl> * req . mutable_dataset ( ) - > mutable_graph ( ) = dataset ; <nl> Status DataServiceDispatcherClient : : RegisterDataset ( GraphDef dataset , <nl> if ( ! status . ok ( ) ) { <nl> return grpc_util : : WrapError ( " Failed to register dataset " , status ) ; <nl> } <nl> - * dataset_id = resp . dataset_id ( ) ; <nl> + dataset_id = resp . dataset_id ( ) ; <nl> return Status : : OK ( ) ; <nl> } <nl> <nl> Status DataServiceDispatcherClient : : CreateJob ( int64 dataset_id , <nl> ProcessingMode processing_mode , <nl> - int64 * job_client_id ) { <nl> + int64 & job_client_id ) { <nl> TF_RETURN_IF_ERROR ( EnsureInitialized ( ) ) ; <nl> CreateJobRequest req ; <nl> req . set_dataset_id ( dataset_id ) ; <nl> Status DataServiceDispatcherClient : : CreateJob ( int64 dataset_id , <nl> absl : : StrCat ( " Failed to create job for dataset with id " , dataset_id ) , <nl> status ) ; <nl> } <nl> - * job_client_id = resp . job_client_id ( ) ; <nl> + job_client_id = resp . job_client_id ( ) ; <nl> return Status : : OK ( ) ; <nl> } <nl> <nl> Status DataServiceDispatcherClient : : GetOrCreateJob ( <nl> int64 dataset_id , ProcessingMode processing_mode , <nl> - const std : : string & job_name , int job_name_index , int64 * job_client_id ) { <nl> + const std : : string & job_name , int job_name_index , int64 & job_client_id ) { <nl> TF_RETURN_IF_ERROR ( EnsureInitialized ( ) ) ; <nl> GetOrCreateJobRequest req ; <nl> req . set_dataset_id ( dataset_id ) ; <nl> Status DataServiceDispatcherClient : : GetOrCreateJob ( <nl> dataset_id ) , <nl> status ) ; <nl> } <nl> - * job_client_id = resp . job_client_id ( ) ; <nl> + job_client_id = resp . job_client_id ( ) ; <nl> return Status : : OK ( ) ; <nl> } <nl> <nl> Status DataServiceDispatcherClient : : ReleaseJobClient ( int64 job_client_id ) { <nl> } <nl> <nl> Status DataServiceDispatcherClient : : GetTasks ( int64 job_client_id , <nl> - std : : vector < TaskInfo > * tasks , <nl> - bool * job_finished ) { <nl> + std : : vector < TaskInfo > & tasks , <nl> + bool & job_finished ) { <nl> TF_RETURN_IF_ERROR ( EnsureInitialized ( ) ) ; <nl> GetTasksRequest req ; <nl> req . set_job_client_id ( job_client_id ) ; <nl> Status DataServiceDispatcherClient : : GetTasks ( int64 job_client_id , <nl> if ( ! s . ok ( ) ) { <nl> return grpc_util : : WrapError ( " Failed to get tasks " , s ) ; <nl> } <nl> - tasks - > clear ( ) ; <nl> + tasks . clear ( ) ; <nl> for ( auto & task : resp . task_info ( ) ) { <nl> - tasks - > push_back ( task ) ; <nl> + tasks . 
push_back ( task ) ; <nl> } <nl> - * job_finished = resp . job_finished ( ) ; <nl> + job_finished = resp . job_finished ( ) ; <nl> return Status : : OK ( ) ; <nl> } <nl> <nl> Status DataServiceDispatcherClient : : GetWorkers ( <nl> - std : : vector < WorkerInfo > * workers ) { <nl> + std : : vector < WorkerInfo > & workers ) { <nl> TF_RETURN_IF_ERROR ( EnsureInitialized ( ) ) ; <nl> GetWorkersRequest req ; <nl> GetWorkersResponse resp ; <nl> Status DataServiceDispatcherClient : : GetWorkers ( <nl> if ( ! s . ok ( ) ) { <nl> return grpc_util : : WrapError ( " Failed to get workers " , s ) ; <nl> } <nl> - workers - > clear ( ) ; <nl> + workers . clear ( ) ; <nl> for ( auto & worker : resp . workers ( ) ) { <nl> - workers - > push_back ( worker ) ; <nl> + workers . push_back ( worker ) ; <nl> } <nl> return Status : : OK ( ) ; <nl> } <nl> Status DataServiceDispatcherClient : : EnsureInitialized ( ) { <nl> } <nl> std : : shared_ptr < grpc : : ChannelCredentials > credentials ; <nl> TF_RETURN_IF_ERROR ( <nl> - CredentialsFactory : : CreateClientCredentials ( protocol_ , & credentials ) ) ; <nl> + CredentialsFactory : : CreateClientCredentials ( protocol_ , credentials ) ) ; <nl> auto channel = grpc : : CreateChannel ( address_ , credentials ) ; <nl> stub_ = DispatcherService : : NewStub ( channel ) ; <nl> return Status : : OK ( ) ; <nl> } <nl> <nl> Status DataServiceWorkerClient : : GetElement ( int64 task_id , <nl> - CompressedElement * element , <nl> - bool * end_of_sequence ) { <nl> + CompressedElement & element , <nl> + bool & end_of_sequence ) { <nl> TF_RETURN_IF_ERROR ( EnsureInitialized ( ) ) ; <nl> GetElementRequest req ; <nl> req . set_task_id ( task_id ) ; <nl> Status DataServiceWorkerClient : : GetElement ( int64 task_id , <nl> if ( ! s . ok ( ) ) { <nl> return grpc_util : : WrapError ( " Failed to get element " , s ) ; <nl> } <nl> - * end_of_sequence = resp . end_of_sequence ( ) ; <nl> - if ( ! * end_of_sequence ) { <nl> - * element = std : : move ( * resp . mutable_compressed_element ( ) ) ; <nl> + end_of_sequence = resp . end_of_sequence ( ) ; <nl> + if ( ! end_of_sequence ) { <nl> + element = std : : move ( * resp . mutable_compressed_element ( ) ) ; <nl> } <nl> return Status : : OK ( ) ; <nl> } <nl> Status DataServiceWorkerClient : : EnsureInitialized ( ) { <nl> } <nl> std : : shared_ptr < grpc : : ChannelCredentials > credentials ; <nl> TF_RETURN_IF_ERROR ( <nl> - CredentialsFactory : : CreateClientCredentials ( protocol_ , & credentials ) ) ; <nl> + CredentialsFactory : : CreateClientCredentials ( protocol_ , credentials ) ) ; <nl> grpc : : ChannelArguments args ; <nl> args . 
SetMaxReceiveMessageSize ( - 1 ) ; <nl> auto channel = grpc : : CreateCustomChannel ( address_ , credentials , args ) ; <nl> Status DataServiceWorkerClient : : EnsureInitialized ( ) { <nl> <nl> Status CreateDataServiceDispatcherClient ( <nl> const std : : string & address , const std : : string & protocol , <nl> - std : : unique_ptr < DataServiceDispatcherClient > * out ) { <nl> + std : : unique_ptr < DataServiceDispatcherClient > & out ) { <nl> auto client = <nl> absl : : make_unique < DataServiceDispatcherClient > ( address , protocol ) ; <nl> TF_RETURN_IF_ERROR ( client - > Initialize ( ) ) ; <nl> - * out = std : : move ( client ) ; <nl> + out = std : : move ( client ) ; <nl> return Status : : OK ( ) ; <nl> } <nl> <nl> Status CreateDataServiceWorkerClient ( <nl> const std : : string & address , const std : : string & protocol , <nl> - std : : unique_ptr < DataServiceWorkerClient > * out ) { <nl> + std : : unique_ptr < DataServiceWorkerClient > & out ) { <nl> auto client = absl : : make_unique < DataServiceWorkerClient > ( address , protocol ) ; <nl> TF_RETURN_IF_ERROR ( client - > Initialize ( ) ) ; <nl> - * out = std : : move ( client ) ; <nl> + out = std : : move ( client ) ; <nl> return Status : : OK ( ) ; <nl> } <nl> } / / namespace data <nl> mmm a / tensorflow / core / data / service / data_service . h <nl> ppp b / tensorflow / core / data / service / data_service . h <nl> enum class ProcessingMode : int64 { <nl> } ; <nl> <nl> / / Parses a string representing a processing mode and stores the result in <nl> - / / * mode . Returns an InvalidArgument status if the string is not recognized . <nl> - Status ParseProcessingMode ( const std : : string & s , ProcessingMode * mode ) ; <nl> + / / ` mode ` . Returns an InvalidArgument status if the string is not recognized . <nl> + Status ParseProcessingMode ( const std : : string & s , ProcessingMode & mode ) ; <nl> <nl> / / Converts a processing mode to its corresponding string . <nl> std : : string ProcessingModeToString ( ProcessingMode mode ) ; <nl> class DataServiceDispatcherClient : public DataServiceClientBase { <nl> Status GetDatasetDef ( int64 dataset_id , DatasetDef & dataset_def ) ; <nl> <nl> / / Registers a dataset with the tf . data service , and stores the generated <nl> - / / dataset id in ` * dataset_id ` . <nl> - Status RegisterDataset ( GraphDef dataset , int64 * dataset_id ) ; <nl> + / / dataset id in ` dataset_id ` . <nl> + Status RegisterDataset ( GraphDef dataset , int64 & dataset_id ) ; <nl> <nl> / / Creates a new tf . data service job for the specified dataset . The id for the <nl> - / / created job will be stored in ` * job_client_id ` . <nl> + / / created job will be stored in ` job_client_id ` . <nl> Status CreateJob ( int64 dataset_id , ProcessingMode processing_mode , <nl> - int64 * job_client_id ) ; <nl> + int64 & job_client_id ) ; <nl> <nl> / / Gets the job id for the job represented by the tuple <nl> - / / ( job_name , job_name_index ) , and stores the id in * job_client_id . If the <nl> + / / ( job_name , job_name_index ) , and stores the id in ` job_client_id ` . If the <nl> / / job doesn ' t exist yet , it will be created . <nl> Status GetOrCreateJob ( int64 dataset_id , ProcessingMode processing_mode , <nl> const std : : string & job_name , int job_name_index , <nl> - int64 * job_client_id ) ; <nl> + int64 & job_client_id ) ; <nl> <nl> / / Releases a job client id , indicating that the id will no longer be used to <nl> / / read from the job . 
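To show the reference-style out-params above working end to end, a brief hedged sketch; `DriveJob` is a hypothetical helper and `graph_def` an assumed pre-built GraphDef, with error handling left to TF_RETURN_IF_ERROR.

// Registers a dataset, creates a parallel-epochs job, polls its tasks once,
// and releases the job client, using only the methods declared above.
Status DriveJob(DataServiceDispatcherClient& dispatcher, GraphDef graph_def) {
  int64 dataset_id = 0;
  TF_RETURN_IF_ERROR(
      dispatcher.RegisterDataset(std::move(graph_def), dataset_id));

  int64 job_client_id = 0;
  TF_RETURN_IF_ERROR(dispatcher.CreateJob(
      dataset_id, ProcessingMode::PARALLEL_EPOCHS, job_client_id));

  std::vector<TaskInfo> tasks;
  bool job_finished = false;
  TF_RETURN_IF_ERROR(dispatcher.GetTasks(job_client_id, tasks, job_finished));

  return dispatcher.ReleaseJobClient(job_client_id);
}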
<nl> Status ReleaseJobClient ( int64 job_client_id ) ; <nl> <nl> / / Queries the dispatcher for the tasks associated with the specified job . <nl> - / / The tasks will be stored in * tasks , and whether the job is finished will <nl> - / / be stored in ` * job_finished ` . <nl> - Status GetTasks ( int64 job_client_id , std : : vector < TaskInfo > * tasks , <nl> - bool * job_finished ) ; <nl> + / / The tasks will be stored in ` tasks ` , and whether the job is finished will <nl> + / / be stored in ` job_finished ` . <nl> + Status GetTasks ( int64 job_client_id , std : : vector < TaskInfo > & tasks , <nl> + bool & job_finished ) ; <nl> <nl> / / Queries the dispatcher for its registered workers . The worker info will be <nl> - / / stored in ` * workers ` . <nl> - Status GetWorkers ( std : : vector < WorkerInfo > * workers ) ; <nl> + / / stored in ` workers ` . <nl> + Status GetWorkers ( std : : vector < WorkerInfo > & workers ) ; <nl> <nl> protected : <nl> Status EnsureInitialized ( ) override ; <nl> class DataServiceWorkerClient : public DataServiceClientBase { <nl> : DataServiceClientBase ( address , protocol ) { } <nl> <nl> / / Fetches the next element for the specified task_id . The element ' s <nl> - / / compressed tensors will be stored in * element . If no element is available , <nl> - / / ` * end_of_sequence ` will be ` true ` , and ` element ` will be left unchanged . <nl> - Status GetElement ( int64 task_id , CompressedElement * element , <nl> - bool * end_of_sequence ) ; <nl> + / / compressed tensors will be stored in ` element ` . If no element is available , <nl> + / / ` end_of_sequence ` will be ` true ` , and ` element ` will be left unchanged . <nl> + Status GetElement ( int64 task_id , CompressedElement & element , <nl> + bool & end_of_sequence ) ; <nl> <nl> protected : <nl> Status EnsureInitialized ( ) override ; <nl> class DataServiceWorkerClient : public DataServiceClientBase { <nl> / / Creates and initializes a new tf . data service dispatcher client . <nl> Status CreateDataServiceDispatcherClient ( <nl> const std : : string & address , const std : : string & protocol , <nl> - std : : unique_ptr < DataServiceDispatcherClient > * out ) ; <nl> + std : : unique_ptr < DataServiceDispatcherClient > & out ) ; <nl> <nl> / / Creates and initializes a new tf . data service worker client . <nl> Status CreateDataServiceWorkerClient ( <nl> const std : : string & address , const std : : string & protocol , <nl> - std : : unique_ptr < DataServiceWorkerClient > * out ) ; <nl> + std : : unique_ptr < DataServiceWorkerClient > & out ) ; <nl> <nl> } / / namespace data <nl> } / / namespace tensorflow <nl> mmm a / tensorflow / core / data / service / data_service_test . cc <nl> ppp b / tensorflow / core / data / service / data_service_test . 
cc <nl> constexpr const char kProtocol [ ] = " grpc + local " ; <nl> <nl> TEST ( DataService , ParseParallelEpochsProcessingMode ) { <nl> ProcessingMode mode ; <nl> - TF_ASSERT_OK ( ParseProcessingMode ( " parallel_epochs " , & mode ) ) ; <nl> + TF_ASSERT_OK ( ParseProcessingMode ( " parallel_epochs " , mode ) ) ; <nl> EXPECT_EQ ( mode , ProcessingMode : : PARALLEL_EPOCHS ) ; <nl> } <nl> <nl> TEST ( DataService , ParseOneEpochProcessingMode ) { <nl> ProcessingMode mode ; <nl> - TF_ASSERT_OK ( ParseProcessingMode ( " one_epoch " , & mode ) ) ; <nl> + TF_ASSERT_OK ( ParseProcessingMode ( " one_epoch " , mode ) ) ; <nl> EXPECT_EQ ( mode , ProcessingMode : : ONE_EPOCH ) ; <nl> } <nl> <nl> TEST ( DataService , ParseInvalidProcessingMode ) { <nl> ProcessingMode mode ; <nl> - Status s = ParseProcessingMode ( " invalid " , & mode ) ; <nl> + Status s = ParseProcessingMode ( " invalid " , mode ) ; <nl> EXPECT_EQ ( s . code ( ) , error : : Code : : INVALID_ARGUMENT ) ; <nl> } <nl> <nl> TEST ( DataService , GetWorkers ) { <nl> DataServiceDispatcherClient dispatcher ( cluster . DispatcherAddress ( ) , <nl> kProtocol ) ; <nl> std : : vector < WorkerInfo > workers ; <nl> - TF_EXPECT_OK ( dispatcher . GetWorkers ( & workers ) ) ; <nl> + TF_EXPECT_OK ( dispatcher . GetWorkers ( workers ) ) ; <nl> EXPECT_EQ ( 1 , workers . size ( ) ) ; <nl> } <nl> <nl> mmm a / tensorflow / core / data / service / dispatcher_impl . cc <nl> ppp b / tensorflow / core / data / service / dispatcher_impl . cc <nl> std : : string DatasetKey ( int64 id , uint64 fingerprint ) { <nl> } <nl> <nl> Status CreateWorkerStub ( const std : : string & address , const std : : string & protocol , <nl> - std : : unique_ptr < WorkerService : : Stub > * stub ) { <nl> + std : : unique_ptr < WorkerService : : Stub > & stub ) { <nl> : : grpc : : ChannelArguments args ; <nl> args . SetMaxReceiveMessageSize ( - 1 ) ; <nl> std : : shared_ptr < : : grpc : : ChannelCredentials > credentials ; <nl> TF_RETURN_IF_ERROR ( <nl> - CredentialsFactory : : CreateClientCredentials ( protocol , & credentials ) ) ; <nl> + CredentialsFactory : : CreateClientCredentials ( protocol , credentials ) ) ; <nl> auto channel = : : grpc : : CreateCustomChannel ( address , credentials , args ) ; <nl> - * stub = WorkerService : : NewStub ( channel ) ; <nl> + stub = WorkerService : : NewStub ( channel ) ; <nl> return Status : : OK ( ) ; <nl> } <nl> } / / namespace <nl> Status DataServiceDispatcherImpl : : Start ( ) { <nl> Update update ; <nl> bool end_of_journal = false ; <nl> FileJournalReader reader ( Env : : Default ( ) , JournalDir ( config_ . work_dir ( ) ) ) ; <nl> - Status s = reader . Read ( & update , & end_of_journal ) ; <nl> + Status s = reader . Read ( update , end_of_journal ) ; <nl> if ( errors : : IsNotFound ( s ) ) { <nl> LOG ( INFO ) < < " No journal found . Starting dispatcher from new state . " ; <nl> } else if ( ! s . ok ( ) ) { <nl> Status DataServiceDispatcherImpl : : Start ( ) { <nl> } else { <nl> while ( ! end_of_journal ) { <nl> TF_RETURN_IF_ERROR ( ApplyWithoutJournaling ( update ) ) ; <nl> - TF_RETURN_IF_ERROR ( reader . Read ( & update , & end_of_journal ) ) ; <nl> + TF_RETURN_IF_ERROR ( reader . Read ( update , end_of_journal ) ) ; <nl> } <nl> } <nl> / / Initialize the journal writer in ` Start ` so that we fail fast in case it <nl> Status DataServiceDispatcherImpl : : RegisterWorker ( <nl> if ( it ! = tasks_by_job . 
end ( ) ) { <nl> task = it - > second ; <nl> } else { <nl> - TF_RETURN_IF_ERROR ( CreateTask ( job , worker_address , & task ) ) ; <nl> + TF_RETURN_IF_ERROR ( CreateTask ( job , worker_address , task ) ) ; <nl> } <nl> TaskDef * task_def = response - > add_tasks ( ) ; <nl> std : : shared_ptr < const Dataset > dataset ; <nl> - TF_RETURN_IF_ERROR ( state_ . DatasetFromId ( job - > dataset_id , & dataset ) ) ; <nl> + TF_RETURN_IF_ERROR ( state_ . DatasetFromId ( job - > dataset_id , dataset ) ) ; <nl> std : : string dataset_key = <nl> DatasetKey ( dataset - > dataset_id , dataset - > fingerprint ) ; <nl> if ( config_ . work_dir ( ) . empty ( ) ) { <nl> Status DataServiceDispatcherImpl : : WorkerUpdate ( <nl> for ( auto & update : request - > updates ( ) ) { <nl> int64 task_id = update . task_id ( ) ; <nl> std : : shared_ptr < const Task > task ; <nl> - TF_RETURN_IF_ERROR ( state_ . TaskFromId ( task_id , & task ) ) ; <nl> + TF_RETURN_IF_ERROR ( state_ . TaskFromId ( task_id , task ) ) ; <nl> if ( update . completed ( ) ) { <nl> if ( task - > finished ) { <nl> VLOG ( 1 ) < < " Received completion update for already - finished task " <nl> Status DataServiceDispatcherImpl : : GetDatasetDef ( <nl> const GetDatasetDefRequest * request , GetDatasetDefResponse * response ) { <nl> mutex_lock l ( mu_ ) ; <nl> std : : shared_ptr < const Dataset > dataset ; <nl> - TF_RETURN_IF_ERROR ( state_ . DatasetFromId ( request - > dataset_id ( ) , & dataset ) ) ; <nl> + TF_RETURN_IF_ERROR ( state_ . DatasetFromId ( request - > dataset_id ( ) , dataset ) ) ; <nl> std : : string key = DatasetKey ( dataset - > dataset_id , dataset - > fingerprint ) ; <nl> std : : shared_ptr < const DatasetDef > dataset_def ; <nl> TF_RETURN_IF_ERROR ( dataset_store_ - > Get ( key , dataset_def ) ) ; <nl> Status DataServiceDispatcherImpl : : GetOrRegisterDataset ( <nl> VLOG ( 4 ) < < " Registering dataset graph : " < < graph . DebugString ( ) ; <nl> # endif <nl> std : : shared_ptr < const Dataset > dataset ; <nl> - Status s = state_ . DatasetFromFingerprint ( fingerprint , & dataset ) ; <nl> + Status s = state_ . DatasetFromFingerprint ( fingerprint , dataset ) ; <nl> if ( s . ok ( ) ) { <nl> int64 id = dataset - > dataset_id ; <nl> VLOG ( 3 ) < < " Received duplicate RegisterDataset request with fingerprint " <nl> Status DataServiceDispatcherImpl : : GetOrRegisterDataset ( <nl> } <nl> <nl> int64 id ; <nl> - TF_RETURN_IF_ERROR ( RegisterDataset ( fingerprint , request - > dataset ( ) , & id ) ) ; <nl> + TF_RETURN_IF_ERROR ( RegisterDataset ( fingerprint , request - > dataset ( ) , id ) ) ; <nl> response - > set_dataset_id ( id ) ; <nl> VLOG ( 3 ) < < " Registered new dataset with id " < < id ; <nl> return Status : : OK ( ) ; <nl> Status DataServiceDispatcherImpl : : GetOrRegisterDataset ( <nl> <nl> Status DataServiceDispatcherImpl : : RegisterDataset ( uint64 fingerprint , <nl> const DatasetDef & dataset , <nl> - int64 * dataset_id ) <nl> + int64 & dataset_id ) <nl> EXCLUSIVE_LOCKS_REQUIRED ( mu_ ) { <nl> - * dataset_id = state_ . NextAvailableDatasetId ( ) ; <nl> + dataset_id = state_ . NextAvailableDatasetId ( ) ; <nl> Update update ; <nl> RegisterDatasetUpdate * register_dataset = update . 
mutable_register_dataset ( ) ; <nl> - register_dataset - > set_dataset_id ( * dataset_id ) ; <nl> + register_dataset - > set_dataset_id ( dataset_id ) ; <nl> register_dataset - > set_fingerprint ( fingerprint ) ; <nl> TF_RETURN_IF_ERROR ( <nl> - dataset_store_ - > Put ( DatasetKey ( * dataset_id , fingerprint ) , dataset ) ) ; <nl> + dataset_store_ - > Put ( DatasetKey ( dataset_id , fingerprint ) , dataset ) ) ; <nl> return Apply ( update ) ; <nl> } <nl> <nl> Status DataServiceDispatcherImpl : : CreateJob ( const CreateJobRequest * request , <nl> { <nl> mutex_lock l ( mu_ ) ; <nl> TF_RETURN_IF_ERROR ( CreateJob ( request - > dataset_id ( ) , processing_mode , <nl> - absl : : optional < NamedJobKey > ( ) , & job ) ) ; <nl> + absl : : optional < NamedJobKey > ( ) , job ) ) ; <nl> int64 job_client_id ; <nl> TF_RETURN_IF_ERROR ( AcquireJobClientId ( job , job_client_id ) ) ; <nl> response - > set_job_client_id ( job_client_id ) ; <nl> - TF_RETURN_IF_ERROR ( CreateTasksForJob ( job , & tasks ) ) ; <nl> + TF_RETURN_IF_ERROR ( CreateTasksForJob ( job , tasks ) ) ; <nl> } <nl> TF_RETURN_IF_ERROR ( AssignTasks ( tasks ) ) ; <nl> <nl> Status DataServiceDispatcherImpl : : GetOrCreateJob ( <nl> std : : vector < std : : shared_ptr < const Task > > tasks ; <nl> { <nl> mutex_lock l ( mu_ ) ; <nl> - Status s = state_ . NamedJobByKey ( key , & job ) ; <nl> + Status s = state_ . NamedJobByKey ( key , job ) ; <nl> if ( s . ok ( ) ) { <nl> TF_RETURN_IF_ERROR ( ValidateMatchingJob ( job , requested_processing_mode , <nl> request - > dataset_id ( ) ) ) ; <nl> Status DataServiceDispatcherImpl : : GetOrCreateJob ( <nl> return s ; <nl> } <nl> TF_RETURN_IF_ERROR ( <nl> - CreateJob ( request - > dataset_id ( ) , requested_processing_mode , key , & job ) ) ; <nl> + CreateJob ( request - > dataset_id ( ) , requested_processing_mode , key , job ) ) ; <nl> int64 job_client_id ; <nl> TF_RETURN_IF_ERROR ( AcquireJobClientId ( job , job_client_id ) ) ; <nl> response - > set_job_client_id ( job_client_id ) ; <nl> - TF_RETURN_IF_ERROR ( CreateTasksForJob ( job , & tasks ) ) ; <nl> + TF_RETURN_IF_ERROR ( CreateTasksForJob ( job , tasks ) ) ; <nl> } <nl> TF_RETURN_IF_ERROR ( AssignTasks ( tasks ) ) ; <nl> VLOG ( 3 ) < < " Created job " < < job - > job_id < < " for dataset " <nl> Status DataServiceDispatcherImpl : : ValidateMatchingJob ( <nl> <nl> Status DataServiceDispatcherImpl : : CreateJob ( <nl> int64 dataset_id , ProcessingMode processing_mode , <nl> - absl : : optional < NamedJobKey > named_job_key , std : : shared_ptr < const Job > * job ) <nl> + absl : : optional < NamedJobKey > named_job_key , std : : shared_ptr < const Job > & job ) <nl> EXCLUSIVE_LOCKS_REQUIRED ( mu_ ) { <nl> switch ( processing_mode ) { <nl> case ProcessingMode : : PARALLEL_EPOCHS : <nl> Status DataServiceDispatcherImpl : : AcquireJobClientId ( <nl> <nl> Status DataServiceDispatcherImpl : : CreateTasksForJob ( <nl> std : : shared_ptr < const Job > job , <nl> - std : : vector < std : : shared_ptr < const Task > > * tasks ) <nl> + std : : vector < std : : shared_ptr < const Task > > & tasks ) <nl> EXCLUSIVE_LOCKS_REQUIRED ( mu_ ) { <nl> std : : vector < std : : shared_ptr < const Worker > > workers = state_ . ListWorkers ( ) ; <nl> - tasks - > clear ( ) ; <nl> - tasks - > reserve ( workers . size ( ) ) ; <nl> + tasks . clear ( ) ; <nl> + tasks . reserve ( workers . 
size ( ) ) ; <nl> for ( const auto & worker : workers ) { <nl> std : : shared_ptr < const Task > task ; <nl> - TF_RETURN_IF_ERROR ( CreateTask ( job , worker - > address , & task ) ) ; <nl> - tasks - > push_back ( task ) ; <nl> + TF_RETURN_IF_ERROR ( CreateTask ( job , worker - > address , task ) ) ; <nl> + tasks . push_back ( task ) ; <nl> } <nl> return Status : : OK ( ) ; <nl> } <nl> <nl> Status DataServiceDispatcherImpl : : CreateTask ( std : : shared_ptr < const Job > job , <nl> const std : : string & worker_address , <nl> - std : : shared_ptr < const Task > * task ) <nl> + std : : shared_ptr < const Task > & task ) <nl> EXCLUSIVE_LOCKS_REQUIRED ( mu_ ) { <nl> int64 task_id = state_ . NextAvailableTaskId ( ) ; <nl> Update update ; <nl> Status DataServiceDispatcherImpl : : AssignTasks ( <nl> } <nl> <nl> Status DataServiceDispatcherImpl : : GetOrCreateWorkerStub ( <nl> - const std : : string & worker_address , WorkerService : : Stub * * out_stub ) <nl> + const std : : string & worker_address , WorkerService : : Stub * & out_stub ) <nl> LOCKS_EXCLUDED ( mu_ ) { <nl> { <nl> mutex_lock l ( mu_ ) ; <nl> auto it = worker_stubs_ . find ( worker_address ) ; <nl> if ( it ! = worker_stubs_ . end ( ) ) { <nl> - * out_stub = it - > second . get ( ) ; <nl> + out_stub = it - > second . get ( ) ; <nl> return Status : : OK ( ) ; <nl> } <nl> } <nl> std : : unique_ptr < WorkerService : : Stub > stub ; <nl> TF_RETURN_IF_ERROR ( <nl> - CreateWorkerStub ( worker_address , config_ . protocol ( ) , & stub ) ) ; <nl> + CreateWorkerStub ( worker_address , config_ . protocol ( ) , stub ) ) ; <nl> { <nl> mutex_lock l ( mu_ ) ; <nl> / / A concurrent call could have already created the stub . <nl> Status DataServiceDispatcherImpl : : GetOrCreateWorkerStub ( <nl> if ( worker = = nullptr ) { <nl> worker = std : : move ( stub ) ; <nl> } <nl> - * out_stub = worker . get ( ) ; <nl> + out_stub = worker . get ( ) ; <nl> } <nl> return Status : : OK ( ) ; <nl> } <nl> Status DataServiceDispatcherImpl : : AssignTask ( std : : shared_ptr < const Task > task ) <nl> { <nl> mutex_lock l ( mu_ ) ; <nl> std : : shared_ptr < const Dataset > dataset ; <nl> - TF_RETURN_IF_ERROR ( state_ . DatasetFromId ( task - > dataset_id , & dataset ) ) ; <nl> + TF_RETURN_IF_ERROR ( state_ . DatasetFromId ( task - > dataset_id , dataset ) ) ; <nl> std : : string dataset_key = <nl> DatasetKey ( dataset - > dataset_id , dataset - > fingerprint ) ; <nl> if ( config_ . work_dir ( ) . empty ( ) ) { <nl> Status DataServiceDispatcherImpl : : AssignTask ( std : : shared_ptr < const Task > task ) <nl> task_def - > set_task_id ( task - > task_id ) ; <nl> ProcessTaskResponse resp ; <nl> WorkerService : : Stub * stub ; <nl> - TF_RETURN_IF_ERROR ( GetOrCreateWorkerStub ( task - > worker_address , & stub ) ) ; <nl> + TF_RETURN_IF_ERROR ( GetOrCreateWorkerStub ( task - > worker_address , stub ) ) ; <nl> grpc : : Status s = stub - > ProcessTask ( & client_ctx , req , & resp ) ; <nl> if ( ! s . ok ( ) ) { <nl> return grpc_util : : WrapError ( <nl> Status DataServiceDispatcherImpl : : GetTasks ( const GetTasksRequest * request , <nl> std : : shared_ptr < const Job > job ; <nl> TF_RETURN_IF_ERROR ( state_ . JobForJobClientId ( request - > job_client_id ( ) , job ) ) ; <nl> std : : vector < std : : shared_ptr < const Task > > tasks ; <nl> - TF_RETURN_IF_ERROR ( state_ . TasksForJob ( job - > job_id , & tasks ) ) ; <nl> + TF_RETURN_IF_ERROR ( state_ . 
TasksForJob ( job - > job_id , tasks ) ) ; <nl> for ( const auto & task : tasks ) { <nl> TaskInfo * task_info = response - > mutable_task_info ( ) - > Add ( ) ; <nl> task_info - > set_worker_address ( task - > worker_address ) ; <nl> mmm a / tensorflow / core / data / service / dispatcher_impl . h <nl> ppp b / tensorflow / core / data / service / dispatcher_impl . h <nl> class DataServiceDispatcherImpl { <nl> <nl> private : <nl> / / Registers a dataset with the given fingerprint , storing the new dataset ' s <nl> - / / id in ` * dataset - id ` . <nl> + / / id in ` dataset_id ` . <nl> Status RegisterDataset ( uint64 fingerprint , const DatasetDef & dataset , <nl> - int64 * dataset_id ) EXCLUSIVE_LOCKS_REQUIRED ( mu_ ) ; <nl> + int64 & dataset_id ) EXCLUSIVE_LOCKS_REQUIRED ( mu_ ) ; <nl> / / Gets a worker ' s stub from ` worker_stubs_ ` , or if none exists , creates a <nl> - / / stub and stores it in ` worker_stubs_ ` . <nl> + / / stub and stores it in ` worker_stubs_ ` . A borrowed pointer to the stub is <nl> + / / stored in ` out_stub ` . <nl> Status GetOrCreateWorkerStub ( const std : : string & worker_address , <nl> - WorkerService : : Stub * * out_stub ) <nl> + WorkerService : : Stub * & out_stub ) <nl> LOCKS_EXCLUDED ( mu_ ) ; <nl> - / / Creates a job and stores it in ` * job ` . This method updates the <nl> + / / Creates a job and stores it in ` job ` . This method updates the <nl> / / dispatcher state with the new job , but does not assign tasks to workers . <nl> Status CreateJob ( int64 dataset_id , ProcessingMode processing_mode , <nl> absl : : optional < DispatcherState : : NamedJobKey > named_job_key , <nl> - std : : shared_ptr < const DispatcherState : : Job > * job ) <nl> + std : : shared_ptr < const DispatcherState : : Job > & job ) <nl> EXCLUSIVE_LOCKS_REQUIRED ( mu_ ) ; <nl> / / Acquires a job client id to read from the given job and sets <nl> / / ` job_client_id ` . <nl> class DataServiceDispatcherImpl { <nl> const std : : shared_ptr < const DispatcherState : : Job > & job , <nl> int64 & job_client_id ) EXCLUSIVE_LOCKS_REQUIRED ( mu_ ) ; <nl> / / Creates one task for each worker , for the given job . The created tasks are <nl> - / / stored in ` * tasks ` . This method only updates dispatcher metadata with the <nl> + / / stored in ` tasks ` . This method only updates dispatcher metadata with the <nl> / / new tasks , but doesn ' t assign the tasks to the workers . <nl> Status CreateTasksForJob ( <nl> std : : shared_ptr < const DispatcherState : : Job > job , <nl> - std : : vector < std : : shared_ptr < const DispatcherState : : Task > > * tasks ) <nl> + std : : vector < std : : shared_ptr < const DispatcherState : : Task > > & tasks ) <nl> EXCLUSIVE_LOCKS_REQUIRED ( mu_ ) ; <nl> <nl> - / / Creates a new task for a job , storing the created task in ` * task ` . <nl> + / / Creates a new task for a job , storing the created task in ` task ` . <nl> Status CreateTask ( std : : shared_ptr < const DispatcherState : : Job > job , <nl> const std : : string & worker_address , <nl> - std : : shared_ptr < const DispatcherState : : Task > * task ) ; <nl> + std : : shared_ptr < const DispatcherState : : Task > & task ) ; <nl> / / Assigns the list of tasks to the workers indicated by their <nl> / / ` worker_address ` fields . <nl> Status AssignTasks ( <nl> mmm a / tensorflow / core / data / service / dispatcher_state . cc <nl> ppp b / tensorflow / core / data / service / dispatcher_state . 
cc <nl> namespace data { <nl> <nl> DispatcherState : : DispatcherState ( ) { } <nl> <nl> - Status DispatcherState : : Apply ( Update update ) { <nl> + Status DispatcherState : : Apply ( const Update & update ) { <nl> switch ( update . update_type_case ( ) ) { <nl> case Update : : kRegisterDataset : <nl> RegisterDataset ( update . register_dataset ( ) ) ; <nl> int64 DispatcherState : : NextAvailableDatasetId ( ) const { <nl> } <nl> <nl> Status DispatcherState : : DatasetFromId ( <nl> - int64 id , std : : shared_ptr < const Dataset > * dataset ) const { <nl> + int64 id , std : : shared_ptr < const Dataset > & dataset ) const { <nl> auto it = datasets_by_id_ . find ( id ) ; <nl> if ( it = = datasets_by_id_ . end ( ) ) { <nl> return errors : : NotFound ( " Dataset id " , id , " not found " ) ; <nl> } <nl> - * dataset = it - > second ; <nl> + dataset = it - > second ; <nl> return Status : : OK ( ) ; <nl> } <nl> <nl> Status DispatcherState : : DatasetFromFingerprint ( <nl> - uint64 fingerprint , std : : shared_ptr < const Dataset > * dataset ) const { <nl> + uint64 fingerprint , std : : shared_ptr < const Dataset > & dataset ) const { <nl> auto it = datasets_by_fingerprint_ . find ( fingerprint ) ; <nl> if ( it = = datasets_by_fingerprint_ . end ( ) ) { <nl> return errors : : NotFound ( " Dataset fingerprint " , fingerprint , " not found " ) ; <nl> } <nl> - * dataset = it - > second ; <nl> + dataset = it - > second ; <nl> return Status : : OK ( ) ; <nl> } <nl> <nl> Status DispatcherState : : WorkerFromAddress ( <nl> - const std : : string & address , std : : shared_ptr < const Worker > * worker ) const { <nl> + const std : : string & address , std : : shared_ptr < const Worker > & worker ) const { <nl> auto it = workers_ . find ( address ) ; <nl> if ( it = = workers_ . end ( ) ) { <nl> return errors : : NotFound ( " Worker with address " , address , " not found . " ) ; <nl> } <nl> - * worker = it - > second ; <nl> + worker = it - > second ; <nl> return Status : : OK ( ) ; <nl> } <nl> <nl> DispatcherState : : ListJobs ( ) { <nl> } <nl> <nl> Status DispatcherState : : JobFromId ( int64 id , <nl> - std : : shared_ptr < const Job > * job ) const { <nl> + std : : shared_ptr < const Job > & job ) const { <nl> auto it = jobs_ . find ( id ) ; <nl> if ( it = = jobs_ . end ( ) ) { <nl> return errors : : NotFound ( " Job id " , id , " not found " ) ; <nl> } <nl> - * job = it - > second ; <nl> + job = it - > second ; <nl> return Status : : OK ( ) ; <nl> } <nl> <nl> Status DispatcherState : : NamedJobByKey ( NamedJobKey named_job_key , <nl> - std : : shared_ptr < const Job > * job ) const { <nl> + std : : shared_ptr < const Job > & job ) const { <nl> auto it = named_jobs_ . find ( named_job_key ) ; <nl> if ( it = = named_jobs_ . end ( ) ) { <nl> return errors : : NotFound ( " Named job key ( " , named_job_key . name , " , " , <nl> named_job_key . index , " ) not found " ) ; <nl> } <nl> - * job = it - > second ; <nl> + job = it - > second ; <nl> return Status : : OK ( ) ; <nl> } <nl> <nl> int64 DispatcherState : : NextAvailableJobClientId ( ) const { <nl> } <nl> <nl> Status DispatcherState : : TaskFromId ( int64 id , <nl> - std : : shared_ptr < const Task > * task ) const { <nl> + std : : shared_ptr < const Task > & task ) const { <nl> auto it = tasks_ . find ( id ) ; <nl> if ( it = = tasks_ . 
end ( ) ) { <nl> return errors : : NotFound ( " Task " , id , " not found " ) ; <nl> } <nl> - * task = it - > second ; <nl> + task = it - > second ; <nl> return Status : : OK ( ) ; <nl> } <nl> <nl> Status DispatcherState : : TasksForJob ( <nl> - int64 job_id , std : : vector < std : : shared_ptr < const Task > > * tasks ) const { <nl> + int64 job_id , std : : vector < std : : shared_ptr < const Task > > & tasks ) const { <nl> auto it = tasks_by_job_ . find ( job_id ) ; <nl> if ( it = = tasks_by_job_ . end ( ) ) { <nl> return errors : : NotFound ( " Job " , job_id , " not found " ) ; <nl> } <nl> - tasks - > clear ( ) ; <nl> - tasks - > reserve ( it - > second . size ( ) ) ; <nl> + tasks . clear ( ) ; <nl> + tasks . reserve ( it - > second . size ( ) ) ; <nl> for ( const auto & task : it - > second ) { <nl> - tasks - > push_back ( task ) ; <nl> + tasks . push_back ( task ) ; <nl> } <nl> return Status : : OK ( ) ; <nl> } <nl> mmm a / tensorflow / core / data / service / dispatcher_state . h <nl> ppp b / tensorflow / core / data / service / dispatcher_state . h <nl> class DispatcherState { <nl> DispatcherState & operator = ( const DispatcherState & ) = delete ; <nl> <nl> / / Applies the given update to the dispatcher ' s state . <nl> - Status Apply ( Update update ) ; <nl> + Status Apply ( const Update & update ) ; <nl> <nl> / / A dataset registered with the dispatcher . <nl> struct Dataset { <nl> class DispatcherState { <nl> / / Returns the next available dataset id . <nl> int64 NextAvailableDatasetId ( ) const ; <nl> / / Gets a dataset by id . Returns NOT_FOUND if there is no such dataset . <nl> - Status DatasetFromId ( int64 id , std : : shared_ptr < const Dataset > * dataset ) const ; <nl> + Status DatasetFromId ( int64 id , std : : shared_ptr < const Dataset > & dataset ) const ; <nl> / / Gets a dataset by fingerprint . Returns NOT_FOUND if there is no such <nl> / / dataset . <nl> Status DatasetFromFingerprint ( uint64 fingerprint , <nl> - std : : shared_ptr < const Dataset > * dataset ) const ; <nl> + std : : shared_ptr < const Dataset > & dataset ) const ; <nl> <nl> / / Gets a worker by address . Returns NOT_FOUND if there is no such worker . <nl> Status WorkerFromAddress ( const std : : string & address , <nl> - std : : shared_ptr < const Worker > * worker ) const ; <nl> + std : : shared_ptr < const Worker > & worker ) const ; <nl> / / Lists all workers registered with the dispatcher . <nl> std : : vector < std : : shared_ptr < const Worker > > ListWorkers ( ) const ; <nl> <nl> class DispatcherState { <nl> / / Returns a list of all jobs . <nl> std : : vector < std : : shared_ptr < const Job > > ListJobs ( ) ; <nl> / / Gets a job by id . Returns NOT_FOUND if there is no such job . <nl> - Status JobFromId ( int64 id , std : : shared_ptr < const Job > * job ) const ; <nl> + Status JobFromId ( int64 id , std : : shared_ptr < const Job > & job ) const ; <nl> / / Gets a named job by key . Returns NOT_FOUND if there is no such job . <nl> - Status NamedJobByKey ( NamedJobKey key , std : : shared_ptr < const Job > * job ) const ; <nl> + Status NamedJobByKey ( NamedJobKey key , std : : shared_ptr < const Job > & job ) const ; <nl> <nl> / / Returns the job associated with the given job client id . Returns NOT_FOUND <nl> / / if the job_client_id is unknown or has been released . <nl> class DispatcherState { <nl> / / Returns the next available task id . <nl> int64 NextAvailableTaskId ( ) const ; <nl> / / Gets a task by id . Returns NOT_FOUND if there is no such task . 
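//
// A sketch of the lookup pattern these accessors enable, assuming an existing
// `DispatcherState state` and a candidate `task_id`; the output argument is
// written only on success, so it is left untouched when NOT_FOUND is returned:
//
//   std::shared_ptr<const DispatcherState::Task> task;
//   Status s = state.TaskFromId(task_id, task);
//   if (errors::IsNotFound(s)) {
//     // No task with this id has been created yet; `task` is unchanged.
//   }
//   TF_RETURN_IF_ERROR(s);
//   std::shared_ptr<const DispatcherState::Job> job;
//   TF_RETURN_IF_ERROR(state.JobFromId(task->job_id, job));
//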
<nl> - Status TaskFromId ( int64 id , std : : shared_ptr < const Task > * task ) const ; <nl> - / / Stores a list of all tasks for the given job to ` * tasks ` . Returns NOT_FOUND <nl> + Status TaskFromId ( int64 id , std : : shared_ptr < const Task > & task ) const ; <nl> + / / Stores a list of all tasks for the given job to ` tasks ` . Returns NOT_FOUND <nl> / / if there is no such job . <nl> Status TasksForJob ( int64 job_id , <nl> - std : : vector < std : : shared_ptr < const Task > > * tasks ) const ; <nl> - / / Stores a list of all tasks for the given worker to ` * tasks ` . Returns <nl> + std : : vector < std : : shared_ptr < const Task > > & tasks ) const ; <nl> + / / Stores a list of all tasks for the given worker to ` tasks ` . Returns <nl> / / NOT_FOUND if there is no such worker . <nl> Status TasksForWorker ( const absl : : string_view worker_address , <nl> std : : vector < std : : shared_ptr < const Task > > & tasks ) const ; <nl> mmm a / tensorflow / core / data / service / dispatcher_state_test . cc <nl> ppp b / tensorflow / core / data / service / dispatcher_state_test . cc <nl> using Task = DispatcherState : : Task ; <nl> using : : testing : : IsEmpty ; <nl> using : : testing : : SizeIs ; <nl> <nl> - Status RegisterDataset ( int64 id , uint64 fingerprint , DispatcherState * state ) { <nl> + Status RegisterDataset ( int64 id , uint64 fingerprint , DispatcherState & state ) { <nl> Update update ; <nl> RegisterDatasetUpdate * register_dataset = update . mutable_register_dataset ( ) ; <nl> register_dataset - > set_dataset_id ( id ) ; <nl> register_dataset - > set_fingerprint ( fingerprint ) ; <nl> - TF_RETURN_IF_ERROR ( state - > Apply ( update ) ) ; <nl> + TF_RETURN_IF_ERROR ( state . Apply ( update ) ) ; <nl> return Status : : OK ( ) ; <nl> } <nl> <nl> - Status RegisterDataset ( int64 id , DispatcherState * state ) { <nl> + Status RegisterDataset ( int64 id , DispatcherState & state ) { <nl> return RegisterDataset ( id , / * fingerprint = * / 1 , state ) ; <nl> } <nl> <nl> - Status RegisterWorker ( std : : string worker_address , DispatcherState * state ) { <nl> + Status RegisterWorker ( std : : string worker_address , DispatcherState & state ) { <nl> Update update ; <nl> update . mutable_register_worker ( ) - > set_worker_address ( worker_address ) ; <nl> - TF_RETURN_IF_ERROR ( state - > Apply ( update ) ) ; <nl> + TF_RETURN_IF_ERROR ( state . Apply ( update ) ) ; <nl> return Status : : OK ( ) ; <nl> } <nl> <nl> Status CreateAnonymousJob ( int64 job_id , int64 dataset_id , <nl> - DispatcherState * state ) { <nl> + DispatcherState & state ) { <nl> Update update ; <nl> CreateJobUpdate * create_job = update . mutable_create_job ( ) ; <nl> create_job - > set_job_id ( job_id ) ; <nl> create_job - > set_dataset_id ( dataset_id ) ; <nl> create_job - > set_processing_mode ( ProcessingModeDef : : PARALLEL_EPOCHS ) ; <nl> - TF_RETURN_IF_ERROR ( state - > Apply ( update ) ) ; <nl> + TF_RETURN_IF_ERROR ( state . Apply ( update ) ) ; <nl> return Status : : OK ( ) ; <nl> } <nl> <nl> Status CreateNamedJob ( int64 job_id , int64 dataset_id , NamedJobKey named_job_key , <nl> - DispatcherState * state ) { <nl> + DispatcherState & state ) { <nl> Update update ; <nl> CreateJobUpdate * create_job = update . mutable_create_job ( ) ; <nl> create_job - > set_job_id ( job_id ) ; <nl> Status CreateNamedJob ( int64 job_id , int64 dataset_id , NamedJobKey named_job_key , <nl> NamedJobKeyDef * key = create_job - > mutable_named_job_key ( ) ; <nl> key - > set_name ( named_job_key . 
name ) ; <nl> key - > set_index ( named_job_key . index ) ; <nl> - TF_RETURN_IF_ERROR ( state - > Apply ( update ) ) ; <nl> + TF_RETURN_IF_ERROR ( state . Apply ( update ) ) ; <nl> return Status : : OK ( ) ; <nl> } <nl> <nl> Status AcquireJobClientId ( int64 job_id , int64 job_client_id , <nl> - DispatcherState * state ) { <nl> + DispatcherState & state ) { <nl> Update update ; <nl> AcquireJobClientUpdate * acquire_job_client = <nl> update . mutable_acquire_job_client ( ) ; <nl> acquire_job_client - > set_job_id ( job_id ) ; <nl> acquire_job_client - > set_job_client_id ( job_client_id ) ; <nl> - TF_RETURN_IF_ERROR ( state - > Apply ( update ) ) ; <nl> + TF_RETURN_IF_ERROR ( state . Apply ( update ) ) ; <nl> return Status : : OK ( ) ; <nl> } <nl> <nl> Status ReleaseJobClientId ( int64 job_client_id , int64 release_time , <nl> - DispatcherState * state ) { <nl> + DispatcherState & state ) { <nl> Update update ; <nl> ReleaseJobClientUpdate * release_job_client = <nl> update . mutable_release_job_client ( ) ; <nl> release_job_client - > set_job_client_id ( job_client_id ) ; <nl> release_job_client - > set_time_micros ( release_time ) ; <nl> - TF_RETURN_IF_ERROR ( state - > Apply ( update ) ) ; <nl> + TF_RETURN_IF_ERROR ( state . Apply ( update ) ) ; <nl> return Status : : OK ( ) ; <nl> } <nl> <nl> Status CreateTask ( int64 task_id , int64 job_id , int64 dataset_id , <nl> - const std : : string & worker_address , DispatcherState * state ) { <nl> + const std : : string & worker_address , DispatcherState & state ) { <nl> Update update ; <nl> CreateTaskUpdate * create_task = update . mutable_create_task ( ) ; <nl> create_task - > set_task_id ( task_id ) ; <nl> create_task - > set_job_id ( job_id ) ; <nl> create_task - > set_dataset_id ( dataset_id ) ; <nl> create_task - > set_worker_address ( worker_address ) ; <nl> - TF_RETURN_IF_ERROR ( state - > Apply ( update ) ) ; <nl> + TF_RETURN_IF_ERROR ( state . Apply ( update ) ) ; <nl> return Status : : OK ( ) ; <nl> } <nl> <nl> - Status FinishTask ( int64 task_id , DispatcherState * state ) { <nl> + Status FinishTask ( int64 task_id , DispatcherState & state ) { <nl> Update update ; <nl> FinishTaskUpdate * finish_task = update . mutable_finish_task ( ) ; <nl> finish_task - > set_task_id ( task_id ) ; <nl> - TF_RETURN_IF_ERROR ( state - > Apply ( update ) ) ; <nl> + TF_RETURN_IF_ERROR ( state . Apply ( update ) ) ; <nl> return Status : : OK ( ) ; <nl> } <nl> } / / namespace <nl> TEST ( DispatcherState , RegisterDataset ) { <nl> int64 id = 10 ; <nl> uint64 fingerprint = 20 ; <nl> DispatcherState state ; <nl> - TF_EXPECT_OK ( RegisterDataset ( id , fingerprint , & state ) ) ; <nl> + TF_EXPECT_OK ( RegisterDataset ( id , fingerprint , state ) ) ; <nl> EXPECT_EQ ( state . NextAvailableDatasetId ( ) , id + 1 ) ; <nl> <nl> { <nl> std : : shared_ptr < const Dataset > dataset ; <nl> - TF_EXPECT_OK ( state . DatasetFromFingerprint ( fingerprint , & dataset ) ) ; <nl> + TF_EXPECT_OK ( state . DatasetFromFingerprint ( fingerprint , dataset ) ) ; <nl> EXPECT_EQ ( dataset - > dataset_id , id ) ; <nl> } <nl> { <nl> std : : shared_ptr < const Dataset > dataset ; <nl> - TF_EXPECT_OK ( state . DatasetFromId ( id , & dataset ) ) ; <nl> + TF_EXPECT_OK ( state . DatasetFromId ( id , dataset ) ) ; <nl> EXPECT_EQ ( dataset - > fingerprint , fingerprint ) ; <nl> } <nl> } <nl> TEST ( DispatcherState , RegisterDataset ) { <nl> TEST ( DispatcherState , MissingDatasetId ) { <nl> DispatcherState state ; <nl> std : : shared_ptr < const Dataset > dataset ; <nl> - Status s = state . 
DatasetFromId ( 0 , & dataset ) ; <nl> + Status s = state . DatasetFromId ( 0 , dataset ) ; <nl> EXPECT_EQ ( s . code ( ) , error : : NOT_FOUND ) ; <nl> } <nl> <nl> TEST ( DispatcherState , MissingDatasetFingerprint ) { <nl> DispatcherState state ; <nl> std : : shared_ptr < const Dataset > dataset ; <nl> - Status s = state . DatasetFromFingerprint ( 0 , & dataset ) ; <nl> + Status s = state . DatasetFromFingerprint ( 0 , dataset ) ; <nl> EXPECT_EQ ( s . code ( ) , error : : NOT_FOUND ) ; <nl> } <nl> <nl> TEST ( DispatcherState , NextAvailableDatasetId ) { <nl> DispatcherState state ; <nl> int64 id = state . NextAvailableDatasetId ( ) ; <nl> uint64 fingerprint = 20 ; <nl> - TF_EXPECT_OK ( RegisterDataset ( id , fingerprint , & state ) ) ; <nl> + TF_EXPECT_OK ( RegisterDataset ( id , fingerprint , state ) ) ; <nl> EXPECT_NE ( state . NextAvailableDatasetId ( ) , id ) ; <nl> EXPECT_EQ ( state . NextAvailableDatasetId ( ) , state . NextAvailableDatasetId ( ) ) ; <nl> } <nl> TEST ( DispatcherState , NextAvailableDatasetId ) { <nl> TEST ( DispatcherState , RegisterWorker ) { <nl> DispatcherState state ; <nl> std : : string address = " test_worker_address " ; <nl> - TF_EXPECT_OK ( RegisterWorker ( address , & state ) ) ; <nl> + TF_EXPECT_OK ( RegisterWorker ( address , state ) ) ; <nl> std : : shared_ptr < const Worker > worker ; <nl> - TF_EXPECT_OK ( state . WorkerFromAddress ( address , & worker ) ) ; <nl> + TF_EXPECT_OK ( state . WorkerFromAddress ( address , worker ) ) ; <nl> EXPECT_EQ ( worker - > address , address ) ; <nl> } <nl> <nl> TEST ( DispatcherState , ListWorkers ) { <nl> std : : vector < std : : shared_ptr < const Worker > > workers = state . ListWorkers ( ) ; <nl> EXPECT_THAT ( workers , IsEmpty ( ) ) ; <nl> } <nl> - TF_EXPECT_OK ( RegisterWorker ( address_1 , & state ) ) ; <nl> + TF_EXPECT_OK ( RegisterWorker ( address_1 , state ) ) ; <nl> { <nl> std : : vector < std : : shared_ptr < const Worker > > workers = state . ListWorkers ( ) ; <nl> EXPECT_THAT ( workers , SizeIs ( 1 ) ) ; <nl> } <nl> - TF_EXPECT_OK ( RegisterWorker ( address_2 , & state ) ) ; <nl> + TF_EXPECT_OK ( RegisterWorker ( address_2 , state ) ) ; <nl> { <nl> std : : vector < std : : shared_ptr < const Worker > > workers = state . ListWorkers ( ) ; <nl> EXPECT_THAT ( workers , SizeIs ( 2 ) ) ; <nl> TEST ( DispatcherState , ListWorkers ) { <nl> TEST ( DispatcherState , MissingWorker ) { <nl> DispatcherState state ; <nl> std : : shared_ptr < const Worker > worker ; <nl> - Status s = state . WorkerFromAddress ( " test_worker_address " , & worker ) ; <nl> + Status s = state . WorkerFromAddress ( " test_worker_address " , worker ) ; <nl> EXPECT_EQ ( s . code ( ) , error : : NOT_FOUND ) ; <nl> } <nl> <nl> TEST ( DispatcherState , AnonymousJob ) { <nl> int64 job_id = 3 ; <nl> int64 dataset_id = 10 ; <nl> DispatcherState state ; <nl> - TF_EXPECT_OK ( RegisterDataset ( dataset_id , & state ) ) ; <nl> - TF_EXPECT_OK ( CreateAnonymousJob ( job_id , dataset_id , & state ) ) ; <nl> + TF_EXPECT_OK ( RegisterDataset ( dataset_id , state ) ) ; <nl> + TF_EXPECT_OK ( CreateAnonymousJob ( job_id , dataset_id , state ) ) ; <nl> std : : shared_ptr < const Job > job ; <nl> - TF_EXPECT_OK ( state . JobFromId ( job_id , & job ) ) ; <nl> + TF_EXPECT_OK ( state . JobFromId ( job_id , job ) ) ; <nl> EXPECT_EQ ( state . 
NextAvailableJobId ( ) , job_id + 1 ) ; <nl> EXPECT_EQ ( job - > dataset_id , dataset_id ) ; <nl> EXPECT_EQ ( job - > job_id , job_id ) ; <nl> std : : vector < std : : shared_ptr < const Task > > tasks ; <nl> - TF_EXPECT_OK ( state . TasksForJob ( job_id , & tasks ) ) ; <nl> + TF_EXPECT_OK ( state . TasksForJob ( job_id , tasks ) ) ; <nl> EXPECT_THAT ( tasks , IsEmpty ( ) ) ; <nl> EXPECT_FALSE ( job - > finished ) ; <nl> } <nl> TEST ( DispatcherState , NamedJob ) { <nl> int64 job_id = 3 ; <nl> int64 dataset_id = 10 ; <nl> DispatcherState state ; <nl> - TF_EXPECT_OK ( RegisterDataset ( dataset_id , & state ) ) ; <nl> + TF_EXPECT_OK ( RegisterDataset ( dataset_id , state ) ) ; <nl> NamedJobKey named_job_key ( " test " , 1 ) ; <nl> - TF_EXPECT_OK ( CreateNamedJob ( job_id , dataset_id , named_job_key , & state ) ) ; <nl> + TF_EXPECT_OK ( CreateNamedJob ( job_id , dataset_id , named_job_key , state ) ) ; <nl> std : : shared_ptr < const Job > job ; <nl> - TF_EXPECT_OK ( state . NamedJobByKey ( named_job_key , & job ) ) ; <nl> + TF_EXPECT_OK ( state . NamedJobByKey ( named_job_key , job ) ) ; <nl> EXPECT_EQ ( state . NextAvailableJobId ( ) , job_id + 1 ) ; <nl> EXPECT_EQ ( job - > dataset_id , dataset_id ) ; <nl> EXPECT_EQ ( job - > job_id , job_id ) ; <nl> TEST ( DispatcherState , CreateTask ) { <nl> int64 task_id = 8 ; <nl> std : : string worker_address = " test_worker_address " ; <nl> DispatcherState state ; <nl> - TF_EXPECT_OK ( RegisterDataset ( dataset_id , & state ) ) ; <nl> - TF_EXPECT_OK ( CreateAnonymousJob ( job_id , dataset_id , & state ) ) ; <nl> - TF_EXPECT_OK ( CreateTask ( task_id , job_id , dataset_id , worker_address , & state ) ) ; <nl> + TF_EXPECT_OK ( RegisterDataset ( dataset_id , state ) ) ; <nl> + TF_EXPECT_OK ( CreateAnonymousJob ( job_id , dataset_id , state ) ) ; <nl> + TF_EXPECT_OK ( CreateTask ( task_id , job_id , dataset_id , worker_address , state ) ) ; <nl> EXPECT_EQ ( state . NextAvailableTaskId ( ) , task_id + 1 ) ; <nl> { <nl> std : : shared_ptr < const Task > task ; <nl> - TF_EXPECT_OK ( state . TaskFromId ( task_id , & task ) ) ; <nl> + TF_EXPECT_OK ( state . TaskFromId ( task_id , task ) ) ; <nl> EXPECT_EQ ( task - > task_id , task_id ) ; <nl> EXPECT_EQ ( task - > job_id , job_id ) ; <nl> EXPECT_EQ ( task - > dataset_id , dataset_id ) ; <nl> TEST ( DispatcherState , CreateTask ) { <nl> } <nl> { <nl> std : : vector < std : : shared_ptr < const Task > > tasks ; <nl> - TF_EXPECT_OK ( state . TasksForJob ( job_id , & tasks ) ) ; <nl> + TF_EXPECT_OK ( state . 
TasksForJob ( job_id , tasks ) ) ; <nl> EXPECT_THAT ( tasks , SizeIs ( 1 ) ) ; <nl> } <nl> { <nl> TEST ( DispatcherState , CreateTasksForSameJob ) { <nl> int64 task_id_2 = 9 ; <nl> std : : string worker_address = " test_worker_address " ; <nl> DispatcherState state ; <nl> - TF_EXPECT_OK ( RegisterDataset ( dataset_id , & state ) ) ; <nl> - TF_EXPECT_OK ( CreateAnonymousJob ( job_id , dataset_id , & state ) ) ; <nl> + TF_EXPECT_OK ( RegisterDataset ( dataset_id , state ) ) ; <nl> + TF_EXPECT_OK ( CreateAnonymousJob ( job_id , dataset_id , state ) ) ; <nl> TF_EXPECT_OK ( <nl> - CreateTask ( task_id_1 , job_id , dataset_id , worker_address , & state ) ) ; <nl> + CreateTask ( task_id_1 , job_id , dataset_id , worker_address , state ) ) ; <nl> TF_EXPECT_OK ( <nl> - CreateTask ( task_id_2 , job_id , dataset_id , worker_address , & state ) ) ; <nl> + CreateTask ( task_id_2 , job_id , dataset_id , worker_address , state ) ) ; <nl> { <nl> std : : vector < std : : shared_ptr < const Task > > tasks ; <nl> - TF_EXPECT_OK ( state . TasksForJob ( job_id , & tasks ) ) ; <nl> + TF_EXPECT_OK ( state . TasksForJob ( job_id , tasks ) ) ; <nl> EXPECT_THAT ( tasks , SizeIs ( 2 ) ) ; <nl> } <nl> } <nl> TEST ( DispatcherState , CreateTasksForDifferentJobs ) { <nl> int64 task_id_2 = 9 ; <nl> std : : string worker_address = " test_worker_address " ; <nl> DispatcherState state ; <nl> - TF_EXPECT_OK ( RegisterDataset ( dataset_id , & state ) ) ; <nl> - TF_EXPECT_OK ( CreateAnonymousJob ( job_id_1 , dataset_id , & state ) ) ; <nl> - TF_EXPECT_OK ( CreateAnonymousJob ( job_id_2 , dataset_id , & state ) ) ; <nl> + TF_EXPECT_OK ( RegisterDataset ( dataset_id , state ) ) ; <nl> + TF_EXPECT_OK ( CreateAnonymousJob ( job_id_1 , dataset_id , state ) ) ; <nl> + TF_EXPECT_OK ( CreateAnonymousJob ( job_id_2 , dataset_id , state ) ) ; <nl> TF_EXPECT_OK ( <nl> - CreateTask ( task_id_1 , job_id_1 , dataset_id , worker_address , & state ) ) ; <nl> + CreateTask ( task_id_1 , job_id_1 , dataset_id , worker_address , state ) ) ; <nl> TF_EXPECT_OK ( <nl> - CreateTask ( task_id_2 , job_id_2 , dataset_id , worker_address , & state ) ) ; <nl> + CreateTask ( task_id_2 , job_id_2 , dataset_id , worker_address , state ) ) ; <nl> { <nl> std : : vector < std : : shared_ptr < const Task > > tasks ; <nl> - TF_EXPECT_OK ( state . TasksForJob ( job_id_1 , & tasks ) ) ; <nl> + TF_EXPECT_OK ( state . TasksForJob ( job_id_1 , tasks ) ) ; <nl> EXPECT_THAT ( tasks , SizeIs ( 1 ) ) ; <nl> } <nl> { <nl> std : : vector < std : : shared_ptr < const Task > > tasks ; <nl> - TF_EXPECT_OK ( state . TasksForJob ( job_id_2 , & tasks ) ) ; <nl> + TF_EXPECT_OK ( state . 
TasksForJob ( job_id_2 , tasks ) ) ; <nl> EXPECT_THAT ( tasks , SizeIs ( 1 ) ) ; <nl> } <nl> } <nl> TEST ( DispatcherState , CreateTasksForSameWorker ) { <nl> int64 task_id_2 = 9 ; <nl> std : : string worker_address = " test_worker_address " ; <nl> DispatcherState state ; <nl> - TF_EXPECT_OK ( RegisterDataset ( dataset_id , & state ) ) ; <nl> - TF_EXPECT_OK ( CreateAnonymousJob ( job_id , dataset_id , & state ) ) ; <nl> + TF_EXPECT_OK ( RegisterDataset ( dataset_id , state ) ) ; <nl> + TF_EXPECT_OK ( CreateAnonymousJob ( job_id , dataset_id , state ) ) ; <nl> TF_EXPECT_OK ( <nl> - CreateTask ( task_id_1 , job_id , dataset_id , worker_address , & state ) ) ; <nl> + CreateTask ( task_id_1 , job_id , dataset_id , worker_address , state ) ) ; <nl> TF_EXPECT_OK ( <nl> - CreateTask ( task_id_2 , job_id , dataset_id , worker_address , & state ) ) ; <nl> + CreateTask ( task_id_2 , job_id , dataset_id , worker_address , state ) ) ; <nl> { <nl> std : : vector < std : : shared_ptr < const Task > > tasks ; <nl> TF_EXPECT_OK ( state . TasksForWorker ( worker_address , tasks ) ) ; <nl> TEST ( DispatcherState , CreateTasksForDifferentWorkers ) { <nl> std : : string worker_address_1 = " test_worker_address_1 " ; <nl> std : : string worker_address_2 = " test_worker_address_2 " ; <nl> DispatcherState state ; <nl> - TF_EXPECT_OK ( RegisterDataset ( dataset_id , & state ) ) ; <nl> - TF_EXPECT_OK ( CreateAnonymousJob ( job_id , dataset_id , & state ) ) ; <nl> + TF_EXPECT_OK ( RegisterDataset ( dataset_id , state ) ) ; <nl> + TF_EXPECT_OK ( CreateAnonymousJob ( job_id , dataset_id , state ) ) ; <nl> TF_EXPECT_OK ( <nl> - CreateTask ( task_id_1 , job_id , dataset_id , worker_address_1 , & state ) ) ; <nl> + CreateTask ( task_id_1 , job_id , dataset_id , worker_address_1 , state ) ) ; <nl> TF_EXPECT_OK ( <nl> - CreateTask ( task_id_2 , job_id , dataset_id , worker_address_2 , & state ) ) ; <nl> + CreateTask ( task_id_2 , job_id , dataset_id , worker_address_2 , state ) ) ; <nl> { <nl> std : : vector < std : : shared_ptr < const Task > > tasks ; <nl> TF_EXPECT_OK ( state . TasksForWorker ( worker_address_1 , tasks ) ) ; <nl> TEST ( DispatcherState , CreateTasksForDifferentWorkers ) { <nl> TEST ( DispatcherState , GetTasksForWorkerEmpty ) { <nl> std : : string worker_address = " test_worker_address " ; <nl> DispatcherState state ; <nl> - TF_EXPECT_OK ( RegisterWorker ( worker_address , & state ) ) ; <nl> + TF_EXPECT_OK ( RegisterWorker ( worker_address , state ) ) ; <nl> { <nl> std : : vector < std : : shared_ptr < const Task > > tasks ; <nl> TF_EXPECT_OK ( state . TasksForWorker ( worker_address , tasks ) ) ; <nl> TEST ( DispatcherState , FinishTask ) { <nl> int64 task_id = 4 ; <nl> std : : string worker_address = " test_worker_address " ; <nl> DispatcherState state ; <nl> - TF_EXPECT_OK ( RegisterDataset ( dataset_id , & state ) ) ; <nl> - TF_EXPECT_OK ( CreateAnonymousJob ( job_id , dataset_id , & state ) ) ; <nl> - TF_EXPECT_OK ( CreateTask ( task_id , job_id , dataset_id , worker_address , & state ) ) ; <nl> - TF_EXPECT_OK ( FinishTask ( task_id , & state ) ) ; <nl> + TF_EXPECT_OK ( RegisterDataset ( dataset_id , state ) ) ; <nl> + TF_EXPECT_OK ( CreateAnonymousJob ( job_id , dataset_id , state ) ) ; <nl> + TF_EXPECT_OK ( CreateTask ( task_id , job_id , dataset_id , worker_address , state ) ) ; <nl> + TF_EXPECT_OK ( FinishTask ( task_id , state ) ) ; <nl> std : : shared_ptr < const Task > task ; <nl> - TF_EXPECT_OK ( state . TaskFromId ( task_id , & task ) ) ; <nl> + TF_EXPECT_OK ( state . 
TaskFromId ( task_id , task ) ) ; <nl> EXPECT_TRUE ( task - > finished ) ; <nl> std : : shared_ptr < const Job > job ; <nl> - TF_EXPECT_OK ( state . JobFromId ( job_id , & job ) ) ; <nl> + TF_EXPECT_OK ( state . JobFromId ( job_id , job ) ) ; <nl> EXPECT_TRUE ( job - > finished ) ; <nl> } <nl> <nl> TEST ( DispatcherState , FinishMultiTaskJob ) { <nl> int64 task_id_2 = 5 ; <nl> std : : string worker_address = " test_worker_address " ; <nl> DispatcherState state ; <nl> - TF_EXPECT_OK ( RegisterDataset ( dataset_id , & state ) ) ; <nl> - TF_EXPECT_OK ( CreateAnonymousJob ( job_id , dataset_id , & state ) ) ; <nl> + TF_EXPECT_OK ( RegisterDataset ( dataset_id , state ) ) ; <nl> + TF_EXPECT_OK ( CreateAnonymousJob ( job_id , dataset_id , state ) ) ; <nl> TF_EXPECT_OK ( <nl> - CreateTask ( task_id_1 , job_id , dataset_id , worker_address , & state ) ) ; <nl> + CreateTask ( task_id_1 , job_id , dataset_id , worker_address , state ) ) ; <nl> TF_EXPECT_OK ( <nl> - CreateTask ( task_id_2 , job_id , dataset_id , worker_address , & state ) ) ; <nl> + CreateTask ( task_id_2 , job_id , dataset_id , worker_address , state ) ) ; <nl> <nl> - TF_EXPECT_OK ( FinishTask ( task_id_1 , & state ) ) ; <nl> + TF_EXPECT_OK ( FinishTask ( task_id_1 , state ) ) ; <nl> { <nl> std : : shared_ptr < const Job > job ; <nl> - TF_EXPECT_OK ( state . JobFromId ( job_id , & job ) ) ; <nl> + TF_EXPECT_OK ( state . JobFromId ( job_id , job ) ) ; <nl> EXPECT_FALSE ( job - > finished ) ; <nl> } <nl> <nl> - TF_EXPECT_OK ( FinishTask ( task_id_2 , & state ) ) ; <nl> + TF_EXPECT_OK ( FinishTask ( task_id_2 , state ) ) ; <nl> { <nl> std : : shared_ptr < const Job > job ; <nl> - TF_EXPECT_OK ( state . JobFromId ( job_id , & job ) ) ; <nl> + TF_EXPECT_OK ( state . JobFromId ( job_id , job ) ) ; <nl> EXPECT_TRUE ( job - > finished ) ; <nl> } <nl> } <nl> TEST ( DispatcherState , AcquireJobClientId ) { <nl> int64 job_client_id_2 = 2 ; <nl> int64 dataset_id = 10 ; <nl> DispatcherState state ; <nl> - TF_EXPECT_OK ( RegisterDataset ( dataset_id , & state ) ) ; <nl> - TF_EXPECT_OK ( CreateAnonymousJob ( job_id , dataset_id , & state ) ) ; <nl> - TF_EXPECT_OK ( AcquireJobClientId ( job_id , job_client_id_1 , & state ) ) ; <nl> + TF_EXPECT_OK ( RegisterDataset ( dataset_id , state ) ) ; <nl> + TF_EXPECT_OK ( CreateAnonymousJob ( job_id , dataset_id , state ) ) ; <nl> + TF_EXPECT_OK ( AcquireJobClientId ( job_id , job_client_id_1 , state ) ) ; <nl> { <nl> std : : shared_ptr < const Job > job ; <nl> - TF_EXPECT_OK ( state . JobFromId ( job_id , & job ) ) ; <nl> + TF_EXPECT_OK ( state . 
JobFromId ( job_id , job ) ) ; <nl> EXPECT_EQ ( job - > num_clients , 1 ) ; <nl> - TF_EXPECT_OK ( AcquireJobClientId ( job_id , job_client_id_2 , & state ) ) ; <nl> + TF_EXPECT_OK ( AcquireJobClientId ( job_id , job_client_id_2 , state ) ) ; <nl> EXPECT_EQ ( job - > num_clients , 2 ) ; <nl> } <nl> { <nl> TEST ( DispatcherState , ReleaseJobClientId ) { <nl> int64 job_client_id = 6 ; <nl> int64 release_time = 100 ; <nl> DispatcherState state ; <nl> - TF_EXPECT_OK ( RegisterDataset ( dataset_id , & state ) ) ; <nl> - TF_EXPECT_OK ( CreateAnonymousJob ( job_id , dataset_id , & state ) ) ; <nl> - TF_EXPECT_OK ( AcquireJobClientId ( job_id , job_client_id , & state ) ) ; <nl> - TF_EXPECT_OK ( ReleaseJobClientId ( job_client_id , release_time , & state ) ) ; <nl> + TF_EXPECT_OK ( RegisterDataset ( dataset_id , state ) ) ; <nl> + TF_EXPECT_OK ( CreateAnonymousJob ( job_id , dataset_id , state ) ) ; <nl> + TF_EXPECT_OK ( AcquireJobClientId ( job_id , job_client_id , state ) ) ; <nl> + TF_EXPECT_OK ( ReleaseJobClientId ( job_client_id , release_time , state ) ) ; <nl> std : : shared_ptr < const Job > job ; <nl> - TF_EXPECT_OK ( state . JobFromId ( job_id , & job ) ) ; <nl> + TF_EXPECT_OK ( state . JobFromId ( job_id , job ) ) ; <nl> EXPECT_EQ ( job - > num_clients , 0 ) ; <nl> Status s = state . JobForJobClientId ( job_client_id , job ) ; <nl> EXPECT_EQ ( s . code ( ) , error : : NOT_FOUND ) ; <nl> mmm a / tensorflow / core / data / service / grpc_dispatcher_impl . cc <nl> ppp b / tensorflow / core / data / service / grpc_dispatcher_impl . cc <nl> using : : grpc : : ServerBuilder ; <nl> using : : grpc : : ServerContext ; <nl> <nl> GrpcDispatcherImpl : : GrpcDispatcherImpl ( <nl> - ServerBuilder * server_builder , const experimental : : DispatcherConfig & config ) <nl> + const experimental : : DispatcherConfig & config , ServerBuilder & server_builder ) <nl> : impl_ ( config ) { <nl> - server_builder - > RegisterService ( this ) ; <nl> + server_builder . RegisterService ( this ) ; <nl> VLOG ( 1 ) < < " Registered data service dispatcher " ; <nl> } <nl> <nl> mmm a / tensorflow / core / data / service / grpc_dispatcher_impl . h <nl> ppp b / tensorflow / core / data / service / grpc_dispatcher_impl . h <nl> namespace tensorflow { <nl> namespace data { <nl> <nl> / / This class is a wrapper that handles communication for gRPC . <nl> - / / <nl> - / / Example usage : <nl> - / / <nl> - / / : : grpc : : ServerBuilder builder ; <nl> - / / / / configure builder <nl> - / / GrpcDispatcherImpl data_service ( & builder ) ; <nl> - / / builder . BuildAndStart ( ) <nl> - / / <nl> class GrpcDispatcherImpl : public DispatcherService : : Service { <nl> public : <nl> - explicit GrpcDispatcherImpl ( : : grpc : : ServerBuilder * server_builder , <nl> - const experimental : : DispatcherConfig & config ) ; <nl> + / / Constructs a GrpcDispatcherImpl with the given config , and registers it <nl> + / / with ` server_builder ` . <nl> + explicit GrpcDispatcherImpl ( const experimental : : DispatcherConfig & config , <nl> + : : grpc : : ServerBuilder & server_builder ) ; <nl> ~ GrpcDispatcherImpl ( ) override { } <nl> <nl> Status Start ( ) ; <nl> mmm a / tensorflow / core / data / service / grpc_worker_impl . cc <nl> ppp b / tensorflow / core / data / service / grpc_worker_impl . 
cc <nl> namespace data { <nl> using : : grpc : : ServerBuilder ; <nl> using : : grpc : : ServerContext ; <nl> <nl> - GrpcWorkerImpl : : GrpcWorkerImpl ( ServerBuilder * server_builder , <nl> - const experimental : : WorkerConfig & config ) <nl> + GrpcWorkerImpl : : GrpcWorkerImpl ( const experimental : : WorkerConfig & config , <nl> + ServerBuilder & server_builder ) <nl> : impl_ ( config ) { <nl> - server_builder - > RegisterService ( this ) ; <nl> + server_builder . RegisterService ( this ) ; <nl> VLOG ( 1 ) < < " Registered data service worker " ; <nl> } <nl> <nl> mmm a / tensorflow / core / data / service / grpc_worker_impl . h <nl> ppp b / tensorflow / core / data / service / grpc_worker_impl . h <nl> namespace tensorflow { <nl> namespace data { <nl> <nl> / / This class is a wrapper that handles communication for gRPC . <nl> - / / <nl> - / / Example usage : <nl> - / / <nl> - / / : : grpc : : ServerBuilder builder ; <nl> - / / / / configure builder <nl> - / / GrpcWorkerImpl data_service ( & builder ) ; <nl> - / / builder . BuildAndStart ( ) <nl> - / / <nl> class GrpcWorkerImpl : public WorkerService : : Service { <nl> public : <nl> - explicit GrpcWorkerImpl ( : : grpc : : ServerBuilder * server_builder , <nl> - const experimental : : WorkerConfig & config ) ; <nl> + / / Constructs a GrpcWorkerImpl with the given config , and registers it with <nl> + / / ` server_builder ` . <nl> + explicit GrpcWorkerImpl ( const experimental : : WorkerConfig & config , <nl> + : : grpc : : ServerBuilder & server_builder ) ; <nl> ~ GrpcWorkerImpl ( ) override { } <nl> <nl> Status Start ( const std : : string & worker_address ) ; <nl> mmm a / tensorflow / core / data / service / journal . cc <nl> ppp b / tensorflow / core / data / service / journal . cc <nl> Status FileJournalReader : : EnsureInitialized ( ) { <nl> return UpdateFile ( DataServiceJournalFile ( journal_dir_ , 0 ) ) ; <nl> } <nl> <nl> - Status FileJournalReader : : Read ( Update * update , bool * end_of_journal ) { <nl> + Status FileJournalReader : : Read ( Update & update , bool & end_of_journal ) { <nl> TF_RETURN_IF_ERROR ( EnsureInitialized ( ) ) ; <nl> while ( true ) { <nl> tstring record ; <nl> Status FileJournalReader : : Read ( Update * update , bool * end_of_journal ) { <nl> if ( errors : : IsNotFound ( env_ - > FileExists ( next_journal_file ) ) ) { <nl> VLOG ( 3 ) < < " Next journal file " < < next_journal_file <nl> < < " does not exist . End of journal reached . " ; <nl> - * end_of_journal = true ; <nl> + end_of_journal = true ; <nl> return Status : : OK ( ) ; <nl> } <nl> TF_RETURN_IF_ERROR ( UpdateFile ( next_journal_file ) ) ; <nl> continue ; <nl> } <nl> TF_RETURN_IF_ERROR ( s ) ; <nl> - if ( ! update - > ParseFromString ( record ) ) { <nl> + if ( ! update . ParseFromString ( record ) ) { <nl> return errors : : DataLoss ( " Failed to parse journal record . " ) ; <nl> } <nl> if ( VLOG_IS_ON ( 4 ) ) { <nl> - VLOG ( 4 ) < < " Read journal entry : " < < update - > DebugString ( ) ; <nl> + VLOG ( 4 ) < < " Read journal entry : " < < update . DebugString ( ) ; <nl> } <nl> - * end_of_journal = false ; <nl> + end_of_journal = false ; <nl> return Status : : OK ( ) ; <nl> } <nl> } <nl> mmm a / tensorflow / core / data / service / journal . h <nl> ppp b / tensorflow / core / data / service / journal . h <nl> class FileJournalWriter : public JournalWriter { <nl> class JournalReader { <nl> public : <nl> virtual ~ JournalReader ( ) = default ; <nl> - / / Reads the next update from the journal . 
Sets ` * end_of_journal = true ` if <nl> + / / Reads the next update from the journal . Sets ` end_of_journal = true ` if <nl> / / there are no more updates left in the journal . <nl> - virtual Status Read ( Update * update , bool * end_of_journal ) = 0 ; <nl> + virtual Status Read ( Update & update , bool & end_of_journal ) = 0 ; <nl> } ; <nl> <nl> / / JournalReader is not thread - safe , requiring external synchronization when <nl> class FileJournalReader : public JournalReader { <nl> FileJournalReader ( const FileJournalReader & ) = delete ; <nl> FileJournalReader & operator = ( const FileJournalReader & ) = delete ; <nl> <nl> - Status Read ( Update * update , bool * end_of_journal ) override ; <nl> + Status Read ( Update & update , bool & end_of_journal ) override ; <nl> <nl> private : <nl> / / Initializes the reader if it is not yet initialized . <nl> mmm a / tensorflow / core / data / service / journal_test . cc <nl> ppp b / tensorflow / core / data / service / journal_test . cc <nl> namespace data { <nl> namespace { <nl> using : : testing : : HasSubstr ; <nl> <nl> - bool NewJournalDir ( std : : string * journal_dir ) { <nl> + bool NewJournalDir ( std : : string & journal_dir ) { <nl> std : : string filename = testing : : TmpDir ( ) ; <nl> if ( ! Env : : Default ( ) - > CreateUniqueFileName ( & filename , " journal_dir " ) ) { <nl> return false ; <nl> } <nl> - * journal_dir = filename ; <nl> + journal_dir = filename ; <nl> return true ; <nl> } <nl> <nl> Status CheckJournalContent ( StringPiece journal_dir , <nl> for ( const auto & update : expected ) { <nl> Update result ; <nl> bool end_of_journal = true ; <nl> - TF_RETURN_IF_ERROR ( reader . Read ( & result , & end_of_journal ) ) ; <nl> + TF_RETURN_IF_ERROR ( reader . Read ( result , end_of_journal ) ) ; <nl> EXPECT_FALSE ( end_of_journal ) ; <nl> / / We can ' t use the testing : : EqualsProto matcher because it is not available <nl> / / in OSS . <nl> Status CheckJournalContent ( StringPiece journal_dir , <nl> } <nl> Update result ; <nl> bool end_of_journal = false ; <nl> - TF_RETURN_IF_ERROR ( reader . Read ( & result , & end_of_journal ) ) ; <nl> + TF_RETURN_IF_ERROR ( reader . Read ( result , end_of_journal ) ) ; <nl> EXPECT_TRUE ( end_of_journal ) ; <nl> return Status : : OK ( ) ; <nl> } <nl> Status CheckJournalContent ( StringPiece journal_dir , <nl> <nl> TEST ( Journal , RoundTripMultiple ) { <nl> std : : string journal_dir ; <nl> - EXPECT_TRUE ( NewJournalDir ( & journal_dir ) ) ; <nl> + EXPECT_TRUE ( NewJournalDir ( journal_dir ) ) ; <nl> std : : vector < Update > updates = { MakeCreateJobUpdate ( ) , <nl> MakeRegisterDatasetUpdate ( ) , <nl> MakeFinishTaskUpdate ( ) } ; <nl> TEST ( Journal , RoundTripMultiple ) { <nl> <nl> TEST ( Journal , AppendExistingJournal ) { <nl> std : : string journal_dir ; <nl> - EXPECT_TRUE ( NewJournalDir ( & journal_dir ) ) ; <nl> + EXPECT_TRUE ( NewJournalDir ( journal_dir ) ) ; <nl> std : : vector < Update > updates = { MakeCreateJobUpdate ( ) , <nl> MakeRegisterDatasetUpdate ( ) , <nl> MakeFinishTaskUpdate ( ) } ; <nl> TEST ( Journal , AppendExistingJournal ) { <nl> <nl> TEST ( Journal , MissingFile ) { <nl> std : : string journal_dir ; <nl> - EXPECT_TRUE ( NewJournalDir ( & journal_dir ) ) ; <nl> + EXPECT_TRUE ( NewJournalDir ( journal_dir ) ) ; <nl> FileJournalReader reader ( Env : : Default ( ) , journal_dir ) ; <nl> Update result ; <nl> bool end_of_journal = true ; <nl> - Status s = reader . Read ( & result , & end_of_journal ) ; <nl> + Status s = reader . 
+  Status s = reader.Read(result, end_of_journal);
   EXPECT_TRUE(errors::IsNotFound(s));
 }

 TEST(Journal, NonRecordData) {
   std::string journal_dir;
-  EXPECT_TRUE(NewJournalDir(&journal_dir));
+  EXPECT_TRUE(NewJournalDir(journal_dir));

   TF_ASSERT_OK(Env::Default()->RecursivelyCreateDir(journal_dir));
   {
TEST(Journal, NonRecordData) {
   FileJournalReader reader(Env::Default(), journal_dir);
   Update result;
   bool end_of_journal = true;
-  Status s = reader.Read(&result, &end_of_journal);
+  Status s = reader.Read(result, end_of_journal);
   EXPECT_THAT(s.error_message(), HasSubstr("corrupted record"));
   EXPECT_EQ(s.code(), error::DATA_LOSS);
 }

 TEST(Journal, InvalidRecordData) {
   std::string journal_dir;
-  EXPECT_TRUE(NewJournalDir(&journal_dir));
+  EXPECT_TRUE(NewJournalDir(journal_dir));

   TF_ASSERT_OK(Env::Default()->RecursivelyCreateDir(journal_dir));
   {
TEST(Journal, InvalidRecordData) {
   FileJournalReader reader(Env::Default(), journal_dir);
   Update result;
   bool end_of_journal = true;
-  Status s = reader.Read(&result, &end_of_journal);
+  Status s = reader.Read(result, end_of_journal);
   EXPECT_THAT(s.error_message(), HasSubstr("Failed to parse journal record"));
   EXPECT_EQ(s.code(), error::DATA_LOSS);
 }
--- a/tensorflow/core/data/service/local_credentials_factory.cc
+++ b/tensorflow/core/data/service/local_credentials_factory.cc
See the License for the specific language governing permissions and
 limitations under the License.
 ==============================================================================*/

+#include "absl/memory/memory.h"
 #include "tensorflow/core/data/service/credentials_factory.h"

 namespace tensorflow {
class LocalCredentialsFactory : public CredentialsFactory {
   std::string Protocol() override { return "grpc+local"; }

   Status CreateServerCredentials(
-      std::shared_ptr<::grpc::ServerCredentials>* out) override {
-    *out = grpc::experimental::LocalServerCredentials(LOCAL_TCP);
+      std::shared_ptr<::grpc::ServerCredentials>& out) override {
+    out = grpc::experimental::LocalServerCredentials(LOCAL_TCP);
     return Status::OK();
   }

   Status CreateClientCredentials(
-      std::shared_ptr<::grpc::ChannelCredentials>* out) override {
-    *out = grpc::experimental::LocalCredentials(LOCAL_TCP);
+      std::shared_ptr<::grpc::ChannelCredentials>& out) override {
+    out = grpc::experimental::LocalCredentials(LOCAL_TCP);
     return Status::OK();
   }
 };
class LocalCredentialsFactory : public CredentialsFactory {
 class LocalCredentialsRegistrar {
  public:
   LocalCredentialsRegistrar() {
-    auto factory = new LocalCredentialsFactory();
-    CredentialsFactory::Register(factory);
+    CredentialsFactory::Register(absl::make_unique<LocalCredentialsFactory>());
   }
 };
 static LocalCredentialsRegistrar registrar;
--- a/tensorflow/core/data/service/server_lib.cc
+++ b/tensorflow/core/data/service/server_lib.cc
Status GrpcDataServerBase::Start() {
   ::grpc::ServerBuilder builder;
   std::shared_ptr<::grpc::ServerCredentials> credentials;
   TF_RETURN_IF_ERROR(
-      CredentialsFactory::CreateServerCredentials(protocol_, &credentials));
+      CredentialsFactory::CreateServerCredentials(protocol_, credentials));
   builder.AddListeningPort(strings::StrCat("0.0.0.0:", requested_port_),
                            credentials, &bound_port_);
   builder.SetMaxReceiveMessageSize(-1);

-  AddDataServiceToBuilder(&builder);
-  AddProfilerServiceToBuilder(&builder);
+  AddDataServiceToBuilder(builder);
+  AddProfilerServiceToBuilder(builder);
   server_ = builder.BuildAndStart();
   if (!server_) {
     return errors::Internal("Could not start gRPC server");
void GrpcDataServerBase::Join() { server_->Wait(); }
 int GrpcDataServerBase::BoundPort() { return bound_port(); }

 void GrpcDataServerBase::AddProfilerServiceToBuilder(
-    ::grpc::ServerBuilder* builder) {
-  profiler_service_ = CreateProfilerService();
-  builder->RegisterService(profiler_service_.get());
+    ::grpc::ServerBuilder& builder) {
+  profiler_service_ = profiler::CreateProfilerService();
+  builder.RegisterService(profiler_service_.get());
 }

 DispatchGrpcDataServer::DispatchGrpcDataServer(
DispatchGrpcDataServer::DispatchGrpcDataServer(
 DispatchGrpcDataServer::~DispatchGrpcDataServer() { delete service_; }

 void DispatchGrpcDataServer::AddDataServiceToBuilder(
-    ::grpc::ServerBuilder* builder) {
-  service_ = absl::make_unique<GrpcDispatcherImpl>(builder, config_).release();
+    ::grpc::ServerBuilder& builder) {
+  service_ = absl::make_unique<GrpcDispatcherImpl>(config_, builder).release();
 }

 Status DispatchGrpcDataServer::StartServiceInternal() {
WorkerGrpcDataServer::WorkerGrpcDataServer(
 WorkerGrpcDataServer::~WorkerGrpcDataServer() { delete service_; }

 void WorkerGrpcDataServer::AddDataServiceToBuilder(
-    ::grpc::ServerBuilder* builder) {
-  service_ = absl::make_unique<GrpcWorkerImpl>(builder, config_).release();
+    ::grpc::ServerBuilder& builder) {
+  service_ = absl::make_unique<GrpcWorkerImpl>(config_, builder).release();
 }

 Status WorkerGrpcDataServer::StartServiceInternal() {
Status WorkerGrpcDataServer::StartServiceInternal() {
 }

 Status NewDispatchServer(const experimental::DispatcherConfig& config,
-                         std::unique_ptr<DispatchGrpcDataServer>* out_server) {
-  *out_server = absl::make_unique<DispatchGrpcDataServer>(config);
+                         std::unique_ptr<DispatchGrpcDataServer>& out_server) {
+  out_server = absl::make_unique<DispatchGrpcDataServer>(config);
   return Status::OK();
 }

 Status NewWorkerServer(const experimental::WorkerConfig& config,
-                       std::unique_ptr<WorkerGrpcDataServer>* out_server) {
-  *out_server = absl::make_unique<WorkerGrpcDataServer>(config);
+                       std::unique_ptr<WorkerGrpcDataServer>& out_server) {
+  out_server = absl::make_unique<WorkerGrpcDataServer>(config);
   return Status::OK();
 }

--- a/tensorflow/core/data/service/server_lib.h
+++ b/tensorflow/core/data/service/server_lib.h
class GrpcDataServerBase {
   int BoundPort();

  protected:
-  virtual void AddDataServiceToBuilder(::grpc::ServerBuilder* builder) = 0;
-  void AddProfilerServiceToBuilder(::grpc::ServerBuilder* builder);
+  virtual void AddDataServiceToBuilder(::grpc::ServerBuilder& builder) = 0;
+  void AddProfilerServiceToBuilder(::grpc::ServerBuilder& builder);
   // Starts the service. This will be called after building the service, so
   // bound_port() will return the actual bound port.
   virtual Status StartServiceInternal() = 0;
class DispatchGrpcDataServer : public GrpcDataServerBase {
   Status NumWorkers(int* num_workers);

  protected:
-  void AddDataServiceToBuilder(::grpc::ServerBuilder* builder) override;
+  void AddDataServiceToBuilder(::grpc::ServerBuilder& builder) override;
   Status StartServiceInternal() override;

  private:
class WorkerGrpcDataServer : public GrpcDataServerBase {
   ~WorkerGrpcDataServer() override;

  protected:
-  void AddDataServiceToBuilder(::grpc::ServerBuilder* builder) override;
+  void AddDataServiceToBuilder(::grpc::ServerBuilder& builder) override;
   Status StartServiceInternal() override;

  private:
class WorkerGrpcDataServer : public GrpcDataServerBase {
   GrpcWorkerImpl* service_;
 };

-// Creates a dispatch tf.data server and stores it in `*out_server`.
+// Creates a dispatch tf.data server and stores it in `out_server`.
 Status NewDispatchServer(const experimental::DispatcherConfig& config,
-                         std::unique_ptr<DispatchGrpcDataServer>* out_server);
+                         std::unique_ptr<DispatchGrpcDataServer>& out_server);

-// Creates a worker tf.data server and stores it in `*out_server`.
+// Creates a worker tf.data server and stores it in `out_server`.
 Status NewWorkerServer(const experimental::WorkerConfig& config,
-                       std::unique_ptr<WorkerGrpcDataServer>* out_server);
+                       std::unique_ptr<WorkerGrpcDataServer>& out_server);

 }  // namespace data
 }  // namespace tensorflow
--- a/tensorflow/core/data/service/test_cluster.cc
+++ b/tensorflow/core/data/service/test_cluster.cc
Status TestCluster::Initialize() {
   experimental::DispatcherConfig config;
   config.set_port(0);
   config.set_protocol(kProtocol);
-  TF_RETURN_IF_ERROR(NewDispatchServer(config, &dispatcher_));
+  TF_RETURN_IF_ERROR(NewDispatchServer(config, dispatcher_));
   TF_RETURN_IF_ERROR(dispatcher_->Start());
   dispatcher_address_ = absl::StrCat("localhost:", dispatcher_->BoundPort());
   workers_.reserve(num_workers_);
Status TestCluster::AddWorker() {
   config.set_protocol(kProtocol);
   config.set_dispatcher_address(dispatcher_address_);
   config.set_worker_address("localhost:%port%");
-  TF_RETURN_IF_ERROR(NewWorkerServer(config, &worker));
+  TF_RETURN_IF_ERROR(NewWorkerServer(config, worker));
   TF_RETURN_IF_ERROR(worker->Start());
   worker_addresses_.push_back(absl::StrCat("localhost:", worker->BoundPort()));
   workers_.push_back(std::move(worker));
--- a/tensorflow/core/distributed_runtime/rpc/grpc_server_lib.cc
+++ b/tensorflow/core/distributed_runtime/rpc/grpc_server_lib.cc
Status GrpcServer::Init(const GrpcServerOptions& opts) {
           .release();
   eager_service_ = new eager::GrpcEagerServiceImpl(&worker_env_, &builder);

-  profiler_service_ = CreateProfilerService();
+  profiler_service_ = profiler::CreateProfilerService();
   builder.RegisterService(profiler_service_.get());

   // extra service:
--- a/tensorflow/core/grappler/optimizers/data/disable_intra_op_parallelism.cc
+++ b/tensorflow/core/grappler/optimizers/data/disable_intra_op_parallelism.cc
Status DisableIntraOpParallelism::OptimizeAndCollectStats(
   if (last_node->attr().find(attr) != last_node->attr().end()) {
     graph_utils::CopyAttribute(attr, *last_node, &insert_node);
   } else {
-    auto inferred_attr = strings::StrCat("T", attr);
-    if (last_node->attr().find(inferred_attr) != last_node->attr().end()) {
-      graph_utils::CopyAttribute(inferred_attr, *last_node, &insert_node);
-    } else {
-      return Status::OK();
-    }
+    return Status::OK();
   }
 }

--- a/tensorflow/core/kernels/conv_ops_fused_impl.h
+++ b/tensorflow/core/kernels/conv_ops_fused_impl.h
class LaunchFusedConv2DWithOutputKernel {
   template <typename OutputKernel>
   void operator()(const OutputKernel& output_kernel, OpKernelContext* ctx,
                   const Tensor& input, const Tensor& filter, Tensor* output) {
+    // Wrap output_kernel into type erased function to reduce the number of
+    // unique template instantiations for Eigen Tensor contraction expressions.
+    using OutputKernelFn =
+        std::function<void(const ContractionOutputMapper<T, Eigen::Index>&,
+                           const Eigen::TensorContractionParams&, Eigen::Index,
+                           Eigen::Index, Eigen::Index, Eigen::Index)>;
+
+    OutputKernelFn output_kernel_fn =
+        [&output_kernel](
+            const ContractionOutputMapper<T, Eigen::Index>& output_mapper,
+            const Eigen::TensorContractionParams& params, Eigen::Index i,
+            Eigen::Index j, Eigen::Index num_rows, Eigen::Index num_cols) {
+          output_kernel(output_mapper, params, i, j, num_rows, num_cols);
+        };
+
     if (filter.dim_size(0) == 1 && filter.dim_size(1) == 1 &&
         row_stride_ == 1 && col_stride_ == 1 && padding_ != EXPLICIT) {
       int conv_width = 1;  // Width for the convolution step.
class LaunchFusedConv2DWithOutputKernel {

       Eigen::array<Eigen::IndexPair<Eigen::DenseIndex>, 1> dim_pair;
       dim_pair[0] = Eigen::IndexPair<Eigen::DenseIndex>(1, 0);
-      functor::MatMulConvFunctor<CPUDevice, T, OutputKernel>()(
+      functor::MatMulConvFunctor<CPUDevice, T, OutputKernelFn>()(
           ctx->eigen_device<CPUDevice>(),
           output->shaped<T, 2>({conv_width, filter.dim_size(3)}),
           input.shaped<T, 2>({conv_width, filter.dim_size(2)}),
           filter.shaped<T, 2>({filter.dim_size(2), filter.dim_size(3)}),
-          dim_pair, output_kernel);
+          dim_pair, std::move(output_kernel_fn));

     } else if (filter.dim_size(0) == input.dim_size(1) &&
                filter.dim_size(1) == input.dim_size(2) && row_dilation_ == 1 &&
class LaunchFusedConv2DWithOutputKernel {

       Eigen::array<Eigen::IndexPair<Eigen::DenseIndex>, 1> dim_pair;
       dim_pair[0] = Eigen::IndexPair<Eigen::DenseIndex>(1, 0);
-      functor::MatMulConvFunctor<CPUDevice, T, OutputKernel>()(
+      functor::MatMulConvFunctor<CPUDevice, T, OutputKernelFn>()(
           ctx->eigen_device<CPUDevice>(),
           output->shaped<T, 2>({input.dim_size(0), filter.dim_size(3)}),
           input.shaped<T, 2>({input.dim_size(0), k}),
           filter.shaped<T, 2>({k, filter.dim_size(3)}), dim_pair,
-          output_kernel);
+          std::move(output_kernel_fn));

     } else {
       if (padding_ == EXPLICIT) {
-        functor::SpatialConvolution<CPUDevice, T, OutputKernel>()(
+        functor::SpatialConvolution<CPUDevice, T, OutputKernelFn>()(
             ctx->eigen_device<CPUDevice>(), output->tensor<T, 4>(),
             input.tensor<T, 4>(), filter.tensor<T, 4>(), row_stride_,
             col_stride_, row_dilation_, col_dilation_,
             static_cast<int>(explicit_paddings_[2]),
             static_cast<int>(explicit_paddings_[3]),
             static_cast<int>(explicit_paddings_[4]),
-            static_cast<int>(explicit_paddings_[5]), output_kernel);
+            static_cast<int>(explicit_paddings_[5]),
+            std::move(output_kernel_fn));
       } else {
-        functor::SpatialConvolution<CPUDevice, T, OutputKernel>()(
+        functor::SpatialConvolution<CPUDevice, T, OutputKernelFn>()(
             ctx->eigen_device<CPUDevice>(), output->tensor<T, 4>(),
             input.tensor<T, 4>(), filter.tensor<T, 4>(), row_stride_,
             col_stride_, row_dilation_, col_dilation_,
-            BrainPadding2EigenPadding(padding_), output_kernel);
+            BrainPadding2EigenPadding(padding_), std::move(output_kernel_fn));
       }
     }
   }
--- a/tensorflow/core/kernels/cwise_op_sigmoid.cc
+++ b/tensorflow/core/kernels/cwise_op_sigmoid.cc
limitations under the License.
 #include "tensorflow/core/kernels/cwise_ops_gradients.h"

 namespace tensorflow {
-REGISTER5(UnaryOp, CPU, "Sigmoid", functor::sigmoid, float, Eigen::half, double,
-          complex64, complex128);
+REGISTER6(UnaryOp, CPU, "Sigmoid", functor::sigmoid, bfloat16, float,
+          Eigen::half, double, complex64, complex128);
 #if GOOGLE_CUDA || TENSORFLOW_USE_ROCM
 REGISTER3(UnaryOp, GPU, "Sigmoid", functor::sigmoid, float, Eigen::half,
           double);
REGISTER3(UnaryOp, GPU, "Sigmoid", functor::sigmoid, float, Eigen::half,
 REGISTER(UnaryOp, SYCL, "Sigmoid", functor::sigmoid, float);
 #endif  // TENSORFLOW_USE_SYCL

-REGISTER5(SimpleBinaryOp, CPU, "SigmoidGrad", functor::sigmoid_grad, float,
-          Eigen::half, double, complex64, complex128);
+REGISTER6(SimpleBinaryOp, CPU, "SigmoidGrad", functor::sigmoid_grad, bfloat16,
+          float, Eigen::half, double, complex64, complex128);
 #if GOOGLE_CUDA || TENSORFLOW_USE_ROCM
 REGISTER3(SimpleBinaryOp, GPU, "SigmoidGrad", functor::sigmoid_grad, float,
           Eigen::half, double);
--- a/tensorflow/core/kernels/data/experimental/data_service_dataset_op.cc
+++ b/tensorflow/core/kernels/data/experimental/data_service_dataset_op.cc
class DataServiceDatasetOp::Dataset : public DatasetBase {
           [&]() {
             return dispatcher_->CreateJob(dataset()->dataset_id_,
                                           dataset()->processing_mode_,
-                                          &job_client_id_);
+                                          job_client_id_);
           },
           "create job", deadline_micros));
     } else {
class DataServiceDatasetOp::Dataset : public DatasetBase {
           [&]() {
             return dispatcher_->GetOrCreateJob(
                 dataset()->dataset_id_, dataset()->processing_mode_,
-                dataset()->job_name_, iterator_index_, &job_client_id_);
+                dataset()->job_name_, iterator_index_, job_client_id_);
           },
           "get or create job", deadline_micros));
     }
class DataServiceDatasetOp::Dataset : public DatasetBase {
       VLOG(3) << "Updating tasks";
       std::vector<TaskInfo> tasks;
       bool job_finished;
-      Status s = dispatcher_->GetTasks(job_client_id_, &tasks, &job_finished);
+      Status s = dispatcher_->GetTasks(job_client_id_, tasks, job_finished);
       if (!s.ok()) {
         LOG(WARNING) << "Failed to get task info for job client id "
                      << job_client_id_ << ": " << s;
class DataServiceDatasetOp::Dataset : public DatasetBase {
         TaskInfo& task_info = new_task_entry.second;
         std::unique_ptr<DataServiceWorkerClient> worker;
         Status s = CreateDataServiceWorkerClient(task_info.worker_address(),
-                                                 dataset()->protocol_, &worker);
+                                                 dataset()->protocol_, worker);
         if (!s.ok()) {
           status_ = s;
           get_next_cv_.notify_all();
class DataServiceDatasetOp::Dataset : public DatasetBase {
       CompressedElement compressed;
       bool end_of_sequence;
       for (int num_retries = 0;; ++num_retries) {
-        Status s = task->worker->GetElement(task->task_id, &compressed,
-                                            &end_of_sequence);
+        Status s = task->worker->GetElement(task->task_id, compressed,
+                                            end_of_sequence);
         if (s.ok()) {
           break;
         }
void DataServiceDatasetOp::MakeDataset(OpKernelContext* ctx,
       ctx, ParseScalarArgument(ctx, kProcessingMode, &processing_mode_str));
   ProcessingMode processing_mode;
   OP_REQUIRES_OK(ctx,
-                 ParseProcessingMode(processing_mode_str, &processing_mode));
+                 ParseProcessingMode(processing_mode_str, processing_mode));

   tstring address;
   OP_REQUIRES_OK(ctx, ParseScalarArgument(ctx, kAddress, &address));
--- a/tensorflow/core/kernels/data/experimental/data_service_ops.cc
+++ b/tensorflow/core/kernels/data/experimental/data_service_ops.cc
void RegisterDatasetOp::Compute(OpKernelContext* ctx) {
   int64 deadline_micros = EnvTime::NowMicros() + kRetryTimeoutMicros;
   OP_REQUIRES_OK(
       ctx, grpc_util::Retry(
-               [&]() { return client.RegisterDataset(graph_def, &dataset_id); },
+               [&]() { return client.RegisterDataset(graph_def, dataset_id); },
               /*description=*/"register dataset", deadline_micros));

   Tensor* output;
--- a/tensorflow/core/kernels/dynamic_partition_op_gpu.cu.cc
+++ b/tensorflow/core/kernels/dynamic_partition_op_gpu.cu.cc
limitations under the License.
 #include "tensorflow/core/util/gpu_kernel_helper.h"
 #include "tensorflow/core/util/transform_output_iterator.h"

+#if GOOGLE_CUDA
+#include "tensorflow/stream_executor/cuda/cuda_activation.h"
+using stream_executor::cuda::ScopedActivateExecutorContext;
+#elif TENSORFLOW_USE_ROCM
+#include "tensorflow/core/platform/rocm.h"
+using stream_executor::rocm::ScopedActivateExecutorContext;
+#endif  // GOOGLE_CUDA
+
 namespace tensorflow {

 typedef Eigen::GpuDevice GPUDevice;
class DynamicPartitionOpGPU : public AsyncOpKernel {
     TensorReference partition_ref(partition_count);
     auto wrapped_callback = [this, c, &data, &partitions, indices_out,
                              partition_ref, cpu_tensor, done]() {
+      auto stream = c->op_device_context()->stream();
+      ScopedActivateExecutorContext scoped_activation{stream->parent()};
+
       OpOutputList outputs;
       this->AllocateOutputs(c, &data, &partitions, &cpu_tensor, &outputs, done);
       if (!c->status().ok()) {
new file mode 100644
index 0000000000000..c863d4c4ab523
--- /dev/null
+++ b/tensorflow/core/lib/lmdb/BUILD
+# Description:
+#   lmdb test data packages.
+
+package(
+    licenses = ["notice"],  # Apache 2.0
+)
+
+filegroup(
+    name = "lmdb_testdata",
+    testonly = 1,
+    srcs = [
+        # A simple key-value store:
+        #   0 : 'b'
+        #   1 : 'b'
+        #   ...
+        #   9 : 'b'
+        # Which is then overwritten with:
+        #   0 : 'a'
+        #   1 : 'b'
+        #   ...
+        #   9 : 'j'
+        "testdata/data.mdb",
+        # LMDB, being a memory-mapped database, uses a different file format on
+        # big-endian systems.
+        "testdata/data_bigendian.mdb",
+    ],
+    visibility = ["//visibility:public"],
+)
--- a/tensorflow/core/platform/ram_file_system.h
+++ b/tensorflow/core/platform/ram_file_system.h
class RamFileSystem : public FileSystem {
                   FileStatistics* stat) override {
     mutex_lock m(mu_);
     auto it = fs_.lower_bound(fname);
-    if (it == fs_.end()) {
+    if (it == fs_.end() || !absl::StartsWith(it->first, fname)) {
       return errors::NotFound("");
     }

--- a/tensorflow/core/platform/ram_file_system_test.py
+++ b/tensorflow/core/platform/ram_file_system_test.py

 import numpy as np

+from tensorflow.python.eager import def_function
 from tensorflow.python.estimator.estimator import Estimator
 from tensorflow.python.estimator.model_fn import EstimatorSpec
 from tensorflow.python.estimator.run_config import RunConfig
 from tensorflow.python.framework import dtypes
 from tensorflow.python.framework import test_util
 from tensorflow.python.layers import core as core_layers
+from tensorflow.python.module import module
 from tensorflow.python.ops.losses import losses
 from tensorflow.python.platform import gfile
 from tensorflow.python.platform import test
+from tensorflow.python.saved_model import saved_model
 from tensorflow.python.training import adam
 from tensorflow.python.training import training_util

def test_glob(self):
     matches = ['ram://c/b/%d.txt' % i for i in range(10)]
     self.assertEqual(gfile.Glob('ram://c/b/*'), matches)

+  def test_file_exists(self):
+    with gfile.GFile('ram://exists/a/b/c.txt', 'w') as f:
+      f.write('')
+    self.assertTrue(gfile.Exists('ram://exists/a'))
+    self.assertTrue(gfile.Exists('ram://exists/a/b'))
+    self.assertTrue(gfile.Exists('ram://exists/a/b/c.txt'))
+
+    self.assertFalse(gfile.Exists('ram://exists/b'))
+    self.assertFalse(gfile.Exists('ram://exists/a/c'))
+    self.assertFalse(gfile.Exists('ram://exists/a/b/k'))
+
   def test_estimator(self):

     def model_fn(features, labels, mode, params):
def input_fn():
     estimator.train(input_fn=input_fn, steps=10)
     estimator.train(input_fn=input_fn, steps=10)

+  def test_savedmodel(self):
+    class MyModule(module.Module):
+
+      @def_function.function(input_signature=[])
+      def foo(self):
+        return constant_op.constant([1])
+
+    saved_model.save(MyModule(), 'ram://my_module')
+
+    loaded = saved_model.load('ram://my_module')
+    self.assertAllEqual(loaded.foo(), [1])
+

 if __name__ == '__main__':
   test.main()
--- a/tensorflow/core/profiler/convert/op_metrics_db_combiner.cc
+++ b/tensorflow/core/profiler/convert/op_metrics_db_combiner.cc
void CopyOpMetricsMetadata(const OpMetrics& src, OpMetrics* dst) {
   DCHECK(dst != nullptr);
   DCHECK_EQ(src.hlo_module_id(), dst->hlo_module_id());
   DCHECK_EQ(src.name(), dst->name());
+  if (dst->long_name().empty()) {
+    dst->set_long_name(src.long_name());
+  }
   if (dst->category().empty()) {
     dst->set_category(src.category());
   }
--- a/tensorflow/core/profiler/convert/op_stats_to_overview_page.cc
+++ b/tensorflow/core/profiler/convert/op_stats_to_overview_page.cc
OverviewPageAnalysis ComputeAnalysisResult(const OpStats& op_stats) {
   uint64 outside_compilation_device_op_time_ps = 0;
   for (const OpMetrics& metrics :
        op_stats.device_op_metrics_db().metrics_db()) {
-    if (!IsOutsideCompilationOp(metrics.provenance(), metrics.name())) continue;
+    if (!IsOutsideCompilationOp(metrics.provenance(), metrics.long_name()))
+      continue;
     outside_compilation_device_op_time_ps += metrics.self_time_ps();
   }
   uint64 num_total_tf_ops = num_host_tf_ops + num_device_tf_ops;
--- a/tensorflow/core/profiler/convert/op_stats_to_tf_stats.cc
+++ b/tensorflow/core/profiler/convert/op_stats_to_tf_stats.cc
namespace tensorflow {
 namespace profiler {
 namespace {

+// The maximum number of Tensorflow Ops displayed on Tensorflow Stats page.
+// 500 device side ops and 500 host side ops.
+const int kMaxNumOfOps = 500;
+
 TfStatsRecord ConvertOpMetricsToTfStatsRecord(
     bool on_device, const OpMetrics& metrics,
     double ridge_point_operational_intensity) {
TfStatsTable GenerateTfStatsTable(
     total_device_time_ps -= IdleTimePs(device_tf_metrics_db);
   }
   double total_device_time_us = PicosToMicros(total_device_time_ps);
-  for (const OpMetrics* metrics : SortedOpMetricsDb(device_tf_metrics_db)) {
+  for (const OpMetrics* metrics :
+       SortedOpMetricsDb(device_tf_metrics_db, kMaxNumOfOps)) {
     if (exclude_idle && IsIdleOp(*metrics)) continue;
     TfStatsRecord* record = tf_stats_table.add_tf_stats_record();
     *record = ConvertOpMetricsToTfStatsRecord(
TfStatsTable GenerateTfStatsTable(
     total_host_time_ps -= IdleTimePs(host_tf_metrics_db);
   }
   double total_host_time_us = PicosToMicros(total_host_time_ps);
-  for (const OpMetrics* metrics :
-       tensorflow::profiler::SortedOpMetricsDb(host_tf_metrics_db)) {
+  for (const OpMetrics* metrics : tensorflow::profiler::SortedOpMetricsDb(
+           host_tf_metrics_db, kMaxNumOfOps)) {
     if (exclude_idle && IsIdleOp(*metrics)) continue;
     TfStatsRecord* record = tf_stats_table.add_tf_stats_record();
     *record = ConvertOpMetricsToTfStatsRecord(
--- a/tensorflow/core/profiler/protobuf/op_metrics.proto
+++ b/tensorflow/core/profiler/protobuf/op_metrics.proto
message LayoutAnalysis {
 }

 // Metrics for an operation (accumulated over all occurrences).
-// Next ID: 20
+// Next ID: 21
 message OpMetrics {
   // HLO module id. 0 for TF ops.
   uint64 hlo_module_id = 13;
   // Name of this op.
   string name = 6;
+  // Long name of this op (e.g., HLO expression).
+  string long_name = 20;
   // Category of this op.
   string category = 11;
   // Provenance of this op (e.g., if HLO op, original TF op).
--- a/tensorflow/core/profiler/rpc/profiler_server.cc
+++ b/tensorflow/core/profiler/rpc/profiler_server.cc
limitations under the License.
 #include "tensorflow/core/profiler/rpc/profiler_service_impl.h"

 namespace tensorflow {
+namespace profiler {

 void ProfilerServer::StartProfilerServer(int32 port) {
   std::string server_address = absl::StrCat("[::]:", port);
ProfilerServer::~ProfilerServer() {
   }
 }

+}  // namespace profiler
 }  // namespace tensorflow
--- a/tensorflow/core/profiler/rpc/profiler_server.h
+++ b/tensorflow/core/profiler/rpc/profiler_server.h
limitations under the License.
 #include "tensorflow/core/profiler/profiler_service.grpc.pb.h"

 namespace tensorflow {
+namespace profiler {

 class ProfilerServer {
  public:
class ProfilerServer {
   std::unique_ptr<::grpc::Server> server_;
 };

+}  // namespace profiler
 }  // namespace tensorflow

 #endif  // TENSORFLOW_CORE_PROFILER_RPC_PROFILER_SERVER_H_
--- a/tensorflow/core/profiler/rpc/profiler_service_impl.cc
+++ b/tensorflow/core/profiler/rpc/profiler_service_impl.cc
limitations under the License.
 #include "tensorflow/core/profiler/protobuf/xplane.pb.h"

 namespace tensorflow {
+namespace profiler {
 namespace {

 const absl::string_view kXPlanePb = "xplane.pb";
std::unique_ptr<grpc::ProfilerService::Service> CreateProfilerService() {
   return absl::make_unique<ProfilerServiceImpl>();
 }

+}  // namespace profiler
+
+std::unique_ptr<grpc::ProfilerService::Service> CreateProfilerService() {
+  return absl::make_unique<profiler::ProfilerServiceImpl>();
+}
+
 }  // namespace tensorflow
--- a/tensorflow/core/profiler/rpc/profiler_service_impl.h
+++ b/tensorflow/core/profiler/rpc/profiler_service_impl.h
namespace tensorflow {

 std::unique_ptr<grpc::ProfilerService::Service> CreateProfilerService();

+namespace profiler {
+
+std::unique_ptr<grpc::ProfilerService::Service> CreateProfilerService();
+
+}  // namespace profiler
 }  // namespace tensorflow

 #endif  // TENSORFLOW_CORE_PROFILER_RPC_PROFILER_SERVICE_IMPL_H_
--- a/tensorflow/core/public/version.h
+++ b/tensorflow/core/public/version.h
limitations under the License.

 #define TF_GRAPH_DEF_VERSION_MIN_PRODUCER 0
 #define TF_GRAPH_DEF_VERSION_MIN_CONSUMER 0
-#define TF_GRAPH_DEF_VERSION 507  // Updated: 2020/8/28
+#define TF_GRAPH_DEF_VERSION 510  // Updated: 2020/8/31

 // Checkpoint compatibility versions (the versions field in SavedSliceMeta).
 //
--- a/tensorflow/lite/c/c_api.h
+++ b/tensorflow/lite/c/c_api.h
TFL_CAPI_EXPORT extern int32_t TfLiteInterpreterGetOutputTensorCount(
     const TfLiteInterpreter* interpreter);

 // Returns the tensor associated with the output index.
-// REQUIRES: 0 <= input_index < TfLiteInterpreterGetOutputTensorCount(tensor)
+// REQUIRES: 0 <= output_index < TfLiteInterpreterGetOutputTensorCount(tensor)
 //
 // NOTE: The shape and underlying data buffer for output tensors may be not
 // be available until after the output tensor has been both sized and allocated.
--- a/tensorflow/lite/c/common.h
+++ b/tensorflow/lite/c/common.h
void TfLiteFloatArrayFree(TfLiteFloatArray* a);
   }                                                                       \
 } while (0)

+#define TF_LITE_ENSURE_NEAR(context, a, b, epsilon)                          \
+  do {                                                                       \
+    auto delta = ((a) > (b)) ? ((a) - (b)) : ((b) - (a));                    \
+    if (delta > epsilon) {                                                   \
+      TF_LITE_KERNEL_LOG((context), "%s:%d %s not near %s (%f != %f)",       \
+                         __FILE__, __LINE__, #a, #b, static_cast<double>(a), \
+                         static_cast<double>(b));                            \
+      return kTfLiteError;                                                   \
+    }                                                                        \
+  } while (0)
+
 #define TF_LITE_ENSURE_OK(context, status) \
   do {                                     \
     const TfLiteStatus s = (status);       \
--- a/tensorflow/lite/delegates/gpu/cl/kernels/BUILD
+++ b/tensorflow/lite/delegates/gpu/cl/kernels/BUILD
cc_test(
     ],
 )

+cc_library(
+    name = "reduce",
+    srcs = ["reduce.cc"],
+    hdrs = ["reduce.h"],
+    deps = [
+        ":gpu_operation",
+        ":util",
+        "//tensorflow/lite/delegates/gpu/cl:precision",
+        "//tensorflow/lite/delegates/gpu/common:operations",
+        "//tensorflow/lite/delegates/gpu/common:status",
+        "//tensorflow/lite/delegates/gpu/common:types",
+    ],
+)
+
+cc_test(
+    name = "reduce_test",
+    srcs = ["reduce_test.cc"],
+    linkstatic = True,
+    tags = tf_gpu_tests_tags() + [
+        "linux",
+        "local",
+    ],
+    deps = [
+        ":cl_test",
+        ":reduce",
+        "//tensorflow/lite/delegates/gpu/common:operations",
+        "//tensorflow/lite/delegates/gpu/common:status",
+        "@com_google_googletest//:gtest_main",
+    ],
+)
+
 cc_library(
     name = "relu",
     srcs = ["relu.cc"],
test_suite(
         "padding_test",
         "pooling_test",
         "prelu_test",
+        "reduce_test",
         "relu_test",
         "reshape_test",
         "reshapex4_test",
new file mode 100644
index 0000000000000..4f889d4ff0ee4
--- /dev/null
+++ b/tensorflow/lite/delegates/gpu/cl/kernels/reduce.cc
+/* Copyright 2020 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+
+#include "tensorflow/lite/delegates/gpu/cl/kernels/reduce.h"
+
+#include <string>
+
+#include "tensorflow/lite/delegates/gpu/cl/kernels/util.h"
+#include "tensorflow/lite/delegates/gpu/cl/precision.h"
+#include "tensorflow/lite/delegates/gpu/common/operations.h"
+#include "tensorflow/lite/delegates/gpu/common/status.h"
+
+namespace tflite {
+namespace gpu {
+namespace cl {
+namespace {
+std::string GetReduceChannelsKernelCode(const OperationDef& op_def,
+                                        const OperationType& op_type) {
+  std::string c = GetCommonDefines(op_def.precision);
+  if (op_type == OperationType::ADD) {
+    c += "#define OP(a, b) ((a) + (b))\n";
+  } else if (op_type == OperationType::MUL) {
+    c += "#define OP(a, b) ((a) * (b))\n";
+  } else if (op_type == OperationType::MAXIMUM) {
+    c += "#define OP(a, b) max(a, b)\n";
+  } else if (op_type == OperationType::MINIMUM) {
+    c += "#define OP(a, b) min(a, b)\n";
+  }
+  c += "__kernel void main_function($0) {\n";
+  c += "int X = get_global_id(0);\n";
+  c += "int Y = get_global_id(1);\n";
+  c += "if (X >= args.dst_tensor.Width() || Y >= args.dst_tensor.Height()) "
+       "return;\n";
+  if (op_type == OperationType::ADD) {
+    c += "FLT4 reduced = (FLT4)(0.0f, 0.0f, 0.0f, 0.0f);\n";
+  } else if (op_type == OperationType::MUL) {
+    c += "FLT4 reduced = (FLT4)(1.0f, 1.0f, 1.0f, 1.0f);\n";
+  } else {
+    c += "FLT4 V0 = args.src_tensor.Read(X, Y, 0);\n";
+    c += "FLT4 reduced = (FLT4)(V0.x, V0.x, V0.x, V0.x);\n";
+  }
+  c += "int s = 0;\n";
+  c += "for (; s < args.src_tensor.Slices() - 1; ++s) {\n";
+  c += "FLT4 V = args.src_tensor.Read(X, Y, s);\n";
+  c += "reduced = OP(reduced, V);\n";
+  c += "}\n";
+  c += "FLT reduced_final = OP(OP(reduced.x, reduced.y), OP(reduced.z, "
+       "reduced.w));\n";
+  c += "FLT last_reduce;\n";
+  c += "FLT4 last_val = args.src_tensor.Read(X, Y, s);\n";
+  c += "int ch_rem = args.src_tensor.Channels() % 4;\n";
+  c += "if (ch_rem == 0) {\n";
+  c += "last_reduce = OP(OP(last_val.x, last_val.y), OP(last_val.z, "
+       "last_val.w));\n";
+  c += "} else if (ch_rem == 1) {\n";
+  c += "last_reduce = OP(OP(last_val.x, last_val.y), last_val.z);\n";
+  c += "} else if (ch_rem == 2) {\n";
+  c += "last_reduce = OP(last_val.x, last_val.y);\n";
+  c += "} else {\n";
+  c += "last_reduce = last_val.x;\n";
+  c += "}\n";
+  c += "reduced_final = OP(reduced_final, last_reduce);\n";
+  c += "FLT4 result = (FLT4)(reduced_final, 0.0f, 0.0f, 0.0f);\n";
+  c += "args.dst_tensor.Write(result, X, Y, 0);\n";
+  c += "}\n";
+  return c;
+}
+}  // namespace
+
+GPUOperation CreateReduce(const OperationDef& definition,
+                          const OperationType& op_type) {
+  GPUOperation op(definition);
+  auto src_desc = definition.src_tensors[0];
+  if (definition.IsBatchSupported()) {
+    src_desc.SetStateVar("BatchedWidth", "true");
+  }
+  op.AddSrcTensor("src_tensor", src_desc);
+  auto dst_desc = definition.dst_tensors[0];
+  if (definition.IsBatchSupported()) {
+    dst_desc.SetStateVar("BatchedWidth", "true");
+  }
+  op.AddDstTensor("dst_tensor", dst_desc);
+  op.code_ = GetReduceChannelsKernelCode(definition, op_type);
+  op.tensor_to_grid_ = TensorToGrid::kWBToX_HDToY_ZIs1;
+  return op;
+}
+
+}  // namespace cl
+}  // namespace gpu
+}  // namespace tflite
similarity index 55%
rename from tensorflow/c/eager/parallel_device/parallel_device_ops.cc
rename to tensorflow/lite/delegates/gpu/cl/kernels/reduce.h
--- a/tensorflow/c/eager/parallel_device/parallel_device_ops.cc
+++ b/tensorflow/lite/delegates/gpu/cl/kernels/reduce.h
See the License for the specific language governing permissions and
 limitations under the License.
 ==============================================================================*/

-#include "tensorflow/core/framework/common_shape_fns.h"
-#include "tensorflow/core/framework/op.h"
-
-// TODO(allenl): Figure out if we need this op, and if so whether we should move
-// it to core TF. Right now the eager C API does some checking of op
-// registrations before calling into custom devices, but we may be able to avoid
-// that.
-REGISTER_OP("DeviceID")
-    .Output("device_id: int64")
-    .SetIsStateful()
-    .SetShapeFn(tensorflow::shape_inference::ScalarShape);
+#ifndef TENSORFLOW_LITE_DELEGATES_GPU_CL_KERNELS_REDUCE_H_
+#define TENSORFLOW_LITE_DELEGATES_GPU_CL_KERNELS_REDUCE_H_
+
+#include "tensorflow/lite/delegates/gpu/cl/kernels/gpu_operation.h"
+#include "tensorflow/lite/delegates/gpu/common/operations.h"
+
+namespace tflite {
+namespace gpu {
+namespace cl {
+
+GPUOperation CreateReduce(const OperationDef& definition,
+                          const OperationType& op_type);
+
+}  // namespace cl
+}  // namespace gpu
+}  // namespace tflite
+
+#endif  // TENSORFLOW_LITE_DELEGATES_GPU_CL_KERNELS_REDUCE_H_
new file mode 100644
index 0000000000000..9275c451d3421
--- /dev/null
+++ b/tensorflow/lite/delegates/gpu/cl/kernels/reduce_test.cc
+/* Copyright 2020 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+
+#include "tensorflow/lite/delegates/gpu/cl/kernels/reduce.h"
+
+#include <cmath>
+#include <cstdlib>
+#include <vector>
+
+#include <gmock/gmock.h>
+#include <gtest/gtest.h>
+#include "tensorflow/lite/delegates/gpu/cl/kernels/cl_test.h"
+#include "tensorflow/lite/delegates/gpu/common/operations.h"
+#include "tensorflow/lite/delegates/gpu/common/status.h"
+
+using ::testing::FloatNear;
+using ::testing::Pointwise;
+
+namespace tflite {
+namespace gpu {
+namespace cl {
+namespace {
+
+TEST_F(OpenCLOperationTest, ReduceSumChannels) {
+  TensorFloat32 src_tensor;
+  src_tensor.shape = BHWC(1, 2, 1, 5);
+  src_tensor.data = {1.1, 2.1, 0.7, 0.3, 1.2, 3.1, 4.1, 0.0, 1.0, 4.4};
+
+  for (auto storage : env_.GetSupportedStorages()) {
+    for (auto precision : env_.GetSupportedPrecisions()) {
+      const float eps = precision == CalculationsPrecision::F32 ? 1e-6f : 1e-3f;
+      OperationDef op_def;
+      op_def.precision = precision;
+      auto data_type = DeduceDataTypeFromPrecision(precision);
+      op_def.src_tensors.push_back({data_type, storage, Layout::HWC});
+      op_def.dst_tensors.push_back({data_type, storage, Layout::HWC});
+      TensorFloat32 dst_tensor;
+      GPUOperation operation = CreateReduce(op_def, OperationType::ADD);
+      ASSERT_OK(ExecuteGPUOperation(src_tensor, creation_context_, &operation,
+                                    BHWC(1, 2, 1, 1), &dst_tensor));
+      EXPECT_THAT(dst_tensor.data, Pointwise(FloatNear(eps), {5.4f, 12.6f}));
+    }
+  }
+}
+
+TEST_F(OpenCLOperationTest, ReduceProductChannels) {
+  TensorFloat32 src_tensor;
+  src_tensor.shape = BHWC(1, 2, 1, 2);
+  src_tensor.data = {1.1, 2.0, 3.1, 4.0};
+
+  for (auto storage : env_.GetSupportedStorages()) {
+    for (auto precision : env_.GetSupportedPrecisions()) {
+      const float eps = precision == CalculationsPrecision::F32 ? 1e-6f : 1e-3f;
+      OperationDef op_def;
+      op_def.precision = precision;
+      auto data_type = DeduceDataTypeFromPrecision(precision);
+      op_def.src_tensors.push_back({data_type, storage, Layout::HWC});
+      op_def.dst_tensors.push_back({data_type, storage, Layout::HWC});
+      TensorFloat32 dst_tensor;
+      GPUOperation operation = CreateReduce(op_def, OperationType::MUL);
+      ASSERT_OK(ExecuteGPUOperation(src_tensor, creation_context_, &operation,
+                                    BHWC(1, 2, 1, 1), &dst_tensor));
+      EXPECT_THAT(dst_tensor.data, Pointwise(FloatNear(eps), {2.2f, 12.4f}));
+    }
+  }
+}
+
+TEST_F(OpenCLOperationTest, ReduceMaxChannels) {
+  TensorFloat32 src_tensor;
+  src_tensor.shape = BHWC(1, 2, 1, 6);
+  src_tensor.data = {1.1, 2.0, -0.3, -100.0, 32.6, 1.1,
+                     -3.1, -4.0, -5.0, -7.0, -2.0, -100.0};
+
+  for (auto storage : env_.GetSupportedStorages()) {
+    for (auto precision : env_.GetSupportedPrecisions()) {
+      const float eps = precision == CalculationsPrecision::F32 ? 1e-6f : 1e-3f;
+      OperationDef op_def;
+      op_def.precision = precision;
+      auto data_type = DeduceDataTypeFromPrecision(precision);
+      op_def.src_tensors.push_back({data_type, storage, Layout::HWC});
+      op_def.dst_tensors.push_back({data_type, storage, Layout::HWC});
+      TensorFloat32 dst_tensor;
+      GPUOperation operation = CreateReduce(op_def, OperationType::MAXIMUM);
+      ASSERT_OK(ExecuteGPUOperation(src_tensor, creation_context_, &operation,
+                                    BHWC(1, 2, 1, 1), &dst_tensor));
+      EXPECT_THAT(dst_tensor.data, Pointwise(FloatNear(eps), {32.6f, -2.0f}));
+    }
+  }
+}
+
+TEST_F(OpenCLOperationTest, ReduceMinChannels) {
+  TensorFloat32 src_tensor;
+  src_tensor.shape = BHWC(1, 2, 1, 6);
+  src_tensor.data = {1.1, 2.0, -0.3, -100.0, 32.6, 1.1,
+                     -3.1, -4.0, -5.0, -7.0, -2.0, 100.0};
+
+  for (auto storage : env_.GetSupportedStorages()) {
+    for (auto precision : env_.GetSupportedPrecisions()) {
+      const float eps = precision == CalculationsPrecision::F32 ? 1e-6f : 1e-3f;
+      OperationDef op_def;
+      op_def.precision = precision;
+      auto data_type = DeduceDataTypeFromPrecision(precision);
+      op_def.src_tensors.push_back({data_type, storage, Layout::HWC});
+      op_def.dst_tensors.push_back({data_type, storage, Layout::HWC});
+      TensorFloat32 dst_tensor;
+      GPUOperation operation = CreateReduce(op_def, OperationType::MINIMUM);
+      ASSERT_OK(ExecuteGPUOperation(src_tensor, creation_context_, &operation,
+                                    BHWC(1, 2, 1, 1), &dst_tensor));
+      EXPECT_THAT(dst_tensor.data, Pointwise(FloatNear(eps), {-100.0f, -7.0f}));
+    }
+  }
+}
+
+}  // namespace
+}  // namespace cl
+}  // namespace gpu
+}  // namespace tflite
--- a/tensorflow/lite/delegates/gpu/common/model_builder.cc
+++ b/tensorflow/lite/delegates/gpu/common/model_builder.cc
class ElementwiseOperationParser : public TFLiteOperationParser {
       case OperationType::ABS:
       case OperationType::COPY:
       case OperationType::COS:
+      case OperationType::ELU:
       case OperationType::EXP:
       case OperationType::LOG:
       case OperationType::RSQRT:
class ElementwiseOperationParser : public TFLiteOperationParser {
   bool IsTwoArgumentOperation() const {
     switch (operation_type_) {
       case OperationType::DIV:
+      case OperationType::MAXIMUM:
+      case OperationType::MINIMUM:
       case OperationType::POW:
       case OperationType::SQUARED_DIFF:
       case OperationType::SUB:
class ElementwiseOperationParser : public TFLiteOperationParser {

   bool IsTwoArgumentOperationWithConst() const {
     switch (operation_type_) {
-      case OperationType::MINIMUM:
+      case OperationType::DIV:
       case OperationType::MAXIMUM:
+      case OperationType::MINIMUM:
+      case OperationType::POW:
+      case OperationType::SQUARED_DIFF:
       case OperationType::SUB:
         return true;
       default:
class MulOperationParser : public TFLiteOperationParser {
     // The "larger" input tensor must be bound to 1st input and the "smaller"
     // input tensor ("mask") must be bound to 2nd input.
     if (runtime_tensor0 && runtime_tensor1) {
+      if (input0 == input1) {
+        // replace MUL(A, A) with POW(A, 2.0)
+        // TODO(b/166831113): Support the same inputs for operations.
+        node->operation.type = ToString(OperationType::POW);
+        ElementwiseAttributes attr;
+        attr.param = 2.0f;
+        node->operation.attributes = std::move(attr);
+        RETURN_IF_ERROR(reader->AddInput(node, 0));
+        return reader->AddOutputs(node);
+      }
+
       BHWC shape0;
       RETURN_IF_ERROR(ExtractTensorShape(*input0, &shape0));
       BHWC shape1;
--- a/tensorflow/lite/delegates/gpu/metal/kernels/BUILD
+++ b/tensorflow/lite/delegates/gpu/metal/kernels/BUILD
cc_library(
     srcs = ["conv.cc"],
     hdrs = ["conv.h"],
     deps = [
+        "//tensorflow/lite/delegates/gpu/common:data_type",
         "//tensorflow/lite/delegates/gpu/common:model",
         "//tensorflow/lite/delegates/gpu/common:operations",
         "//tensorflow/lite/delegates/gpu/common:shape",
--- a/tensorflow/lite/g3doc/performance/post_training_integer_quant.ipynb
+++ b/tensorflow/lite/g3doc/performance/post_training_integer_quant.ipynb
     "interpreter.invoke()\n",
     "output = interpreter.get_tensor(output_details[\"index\"])[0]\n",
     "\n",
-    "# Check if the output type is quantized, then rescale output data to float\n",
-    "if output_details['dtype'] == np.uint8:\n",
-    "  output_scale, output_zero_point = output_details[\"quantization\"]\n",
-    "  test_image = test_image.astype(np.float32)\n",
-    "  test_image = test_image / input_scale + input_zero_point\n",
-    "\n",
     "predictions[i] = output.argmax()\n",
     "\n",
     "return predictions\n"
--- a/tensorflow/lite/kernels/BUILD
+++ b/tensorflow/lite/kernels/BUILD
cc_library(
     name = "custom_ops",
     srcs = [
         "complex_support.cc",
+        "cumsum.cc",
         "rfft2d.cc",
     ],
     hdrs = ["custom_ops_register.h"],
cc_library(
         "//tensorflow/lite/kernels/internal:types",
         "//third_party/fft2d:fft2d_headers",
         "@fft2d",
+        "@flatbuffers",
         "@ruy//ruy/profiler:instrumentation",
     ],
 )
cc_test(
     ],
 )

+cc_test(
+    name = "cumsum_test",
+    srcs = ["cumsum_test.cc"],
+    deps = [
+        ":custom_ops",
+        ":test_main",
+        ":test_util",
+        "//tensorflow/lite:framework",
+        "//tensorflow/lite/schema:schema_fbs",
+        "//tensorflow/lite/testing:util",
+        "@com_google_googletest//:gtest",
+        "@flatbuffers",
+    ],
+)
+
 tflite_portable_test_suite_combined(combine_conditions = {"deps": [":test_main"]})
new file mode 100644
index 0000000000000..173de0959fa19
--- /dev/null
+++ b/tensorflow/lite/kernels/cumsum.cc
+/* Copyright 2020 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+
+#include "flatbuffers/flexbuffers.h"  // from @flatbuffers
+#include "tensorflow/lite/c/common.h"
+#include "tensorflow/lite/kernels/internal/optimized/optimized_ops.h"
+#include "tensorflow/lite/kernels/internal/tensor.h"
+#include "tensorflow/lite/kernels/internal/tensor_ctypes.h"
+#include "tensorflow/lite/kernels/kernel_util.h"
+
+// TODO(b/161933288): Promote this op to builtin-op when we can add new builtin
+// ops.
+
+namespace tflite {
+namespace ops {
+namespace custom {
+namespace cumsum {
+
+typedef struct {
+  bool exclusive;
+  bool reverse;
+} TfLiteCumsumParams;
+
+static const int kInputTensor = 0;
+static const int kAxisTensor = 1;
+static const int kOutputTensor = 0;
+
+void* Init(TfLiteContext* context, const char* buffer, size_t length) {
+  auto* data = new TfLiteCumsumParams;
+  const uint8_t* buffer_data = reinterpret_cast<const uint8_t*>(buffer);
+
+  const flexbuffers::Map& m = flexbuffers::GetRoot(buffer_data, length).AsMap();
+  data->exclusive = m["exclusive"].AsBool();
+  data->reverse = m["reverse"].AsBool();
AsBool ( ) ; <nl> + return data ; <nl> + } <nl> + <nl> + void Free ( TfLiteContext * context , void * buffer ) { <nl> + delete reinterpret_cast < TfLiteCumsumParams * > ( buffer ) ; <nl> + } <nl> + <nl> + TfLiteStatus Prepare ( TfLiteContext * context , TfLiteNode * node ) { <nl> + TF_LITE_ENSURE_EQ ( context , NumInputs ( node ) , 2 ) ; <nl> + TF_LITE_ENSURE_EQ ( context , NumOutputs ( node ) , 1 ) ; <nl> + <nl> + const TfLiteTensor * input = GetInput ( context , node , kInputTensor ) ; <nl> + const TfLiteTensor * axis = GetInput ( context , node , kAxisTensor ) ; <nl> + <nl> + TF_LITE_ENSURE ( context , <nl> + input - > type = = kTfLiteInt32 | | input - > type = = kTfLiteFloat32 ) ; <nl> + TF_LITE_ENSURE_EQ ( context , axis - > type , kTfLiteInt32 ) ; <nl> + <nl> + TF_LITE_ENSURE_EQ ( context , NumElements ( axis ) , 1 ) ; <nl> + <nl> + TF_LITE_ENSURE ( context , NumDimensions ( input ) > = 1 ) ; <nl> + <nl> + TfLiteTensor * output = GetOutput ( context , node , kOutputTensor ) ; <nl> + <nl> + TfLiteIntArray * output_shape = TfLiteIntArrayCopy ( input - > dims ) ; <nl> + return context - > ResizeTensor ( context , output , output_shape ) ; <nl> + } <nl> + <nl> + TfLiteStatus Eval ( TfLiteContext * context , TfLiteNode * node ) { <nl> + const TfLiteTensor * input = GetInput ( context , node , kInputTensor ) ; <nl> + const TfLiteTensor * axis_tensor = GetInput ( context , node , kAxisTensor ) ; <nl> + <nl> + TfLiteTensor * output = GetOutput ( context , node , kOutputTensor ) ; <nl> + <nl> + auto * params = reinterpret_cast < TfLiteCumsumParams * > ( node - > user_data ) ; <nl> + <nl> + int axis = * GetTensorData < int > ( axis_tensor ) ; <nl> + if ( axis < 0 ) axis + = NumDimensions ( input ) ; <nl> + <nl> + if ( axis < 0 | | axis > = NumDimensions ( input ) ) { <nl> + TF_LITE_KERNEL_LOG ( context , " Invalid axis : " , axis ) ; <nl> + return kTfLiteError ; <nl> + } <nl> + <nl> + switch ( input - > type ) { <nl> + case kTfLiteInt32 : { <nl> + optimized_ops : : CumSum ( GetTensorData < int > ( input ) , GetTensorShape ( input ) , <nl> + axis , params - > exclusive , params - > reverse , <nl> + GetTensorData < int > ( output ) ) ; <nl> + break ; <nl> + } <nl> + case kTfLiteFloat32 : { <nl> + optimized_ops : : CumSum ( GetTensorData < float > ( input ) , GetTensorShape ( input ) , <nl> + axis , params - > exclusive , params - > reverse , <nl> + GetTensorData < float > ( output ) ) ; <nl> + break ; <nl> + } <nl> + default : { <nl> + TF_LITE_KERNEL_LOG ( <nl> + context , <nl> + " Unsupported input type , cumsum only supports int32 & float32 . " ) ; <nl> + return kTfLiteError ; <nl> + } <nl> + } <nl> + <nl> + return kTfLiteOk ; <nl> + } <nl> + <nl> + } / / namespace cumsum <nl> + <nl> + TfLiteRegistration * Register_CUMSUM ( ) { <nl> + static TfLiteRegistration r = { cumsum : : Init , cumsum : : Free , cumsum : : Prepare , <nl> + cumsum : : Eval } ; <nl> + return & r ; <nl> + } <nl> + <nl> + } / / namespace custom <nl> + } / / namespace ops <nl> + } / / namespace tflite <nl> new file mode 100644 <nl> index 0000000000000 . . 092defdcba3c0 <nl> mmm / dev / null <nl> ppp b / tensorflow / lite / kernels / cumsum_test . cc <nl> <nl> + / * Copyright 2020 The TensorFlow Authors . All Rights Reserved . <nl> + <nl> + Licensed under the Apache License , Version 2 . 0 ( the " License " ) ; <nl> + you may not use this file except in compliance with the License . <nl> + You may obtain a copy of the License at <nl> + <nl> + http : / / www . apache . org / licenses / LICENSE - 2 . 
0 <nl> + <nl> + Unless required by applicable law or agreed to in writing , software <nl> + distributed under the License is distributed on an " AS IS " BASIS , <nl> + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND , either express or implied . <nl> + See the License for the specific language governing permissions and <nl> + limitations under the License . <nl> + = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = * / <nl> + <nl> + # include < vector > <nl> + <nl> + # include < gmock / gmock . h > <nl> + # include < gtest / gtest . h > <nl> + # include " flatbuffers / flexbuffers . h " / / from @ flatbuffers <nl> + # include " tensorflow / lite / interpreter . h " <nl> + # include " tensorflow / lite / kernels / custom_ops_register . h " <nl> + # include " tensorflow / lite / kernels / test_util . h " <nl> + # include " tensorflow / lite / schema / schema_generated . h " <nl> + # include " tensorflow / lite / testing / util . h " <nl> + <nl> + namespace tflite { <nl> + namespace ops { <nl> + namespace custom { <nl> + <nl> + TfLiteRegistration * Register_CUMSUM ( ) ; <nl> + <nl> + namespace { <nl> + <nl> + template < typename T > <nl> + class CumsumOpModel : public SingleOpModel { <nl> + public : <nl> + CumsumOpModel ( const TensorData & input , const TensorData & output , <nl> + bool exclusive , bool reverse ) { <nl> + input_ = AddInput ( input ) ; <nl> + axis_ = AddInput ( { TensorType_INT32 , { 1 } } ) ; <nl> + <nl> + output_ = AddOutput ( output ) ; <nl> + <nl> + flexbuffers : : Builder fbb ; <nl> + fbb . Map ( [ & ] ( ) { <nl> + fbb . Bool ( " exclusive " , exclusive ) ; <nl> + fbb . Bool ( " reverse " , reverse ) ; <nl> + } ) ; <nl> + fbb . Finish ( ) ; <nl> + SetCustomOp ( " Cumsum " , fbb . GetBuffer ( ) , Register_CUMSUM ) ; <nl> + <nl> + BuildInterpreter ( { GetShape ( input_ ) , GetShape ( axis_ ) } ) ; <nl> + } <nl> + <nl> + int input ( ) { return input_ ; } <nl> + int axis ( ) { return axis_ ; } <nl> + <nl> + std : : vector < T > GetOutput ( ) { return ExtractVector < T > ( output_ ) ; } <nl> + <nl> + private : <nl> + int input_ ; <nl> + int axis_ ; <nl> + int output_ ; <nl> + } ; <nl> + <nl> + TEST ( CumsumOpTest , SimpleIntTest ) { <nl> + CumsumOpModel < int32_t > m ( { TensorType_INT32 , { 2 , 4 } } , { TensorType_INT32 , { } } , <nl> + false , false ) ; <nl> + <nl> + m . PopulateTensor < int > ( m . input ( ) , { 1 , 2 , 3 , 4 , 5 , 6 , 7 , 8 } ) ; <nl> + m . PopulateTensor < int > ( m . axis ( ) , { 1 } ) ; <nl> + <nl> + m . Invoke ( ) ; <nl> + <nl> + EXPECT_THAT ( m . GetOutput ( ) , <nl> + testing : : ElementsAreArray ( { 1 , 3 , 6 , 10 , 5 , 11 , 18 , 26 } ) ) ; <nl> + } <nl> + <nl> + TEST ( CumsumOpTest , SimpleIntAxis0Test ) { <nl> + CumsumOpModel < int32_t > m ( { TensorType_INT32 , { 2 , 4 } } , { TensorType_INT32 , { } } , <nl> + false , false ) ; <nl> + <nl> + m . PopulateTensor < int > ( m . input ( ) , { 1 , 2 , 3 , 4 , 5 , 6 , 7 , 8 } ) ; <nl> + m . PopulateTensor < int > ( m . axis ( ) , { 0 } ) ; <nl> + <nl> + m . Invoke ( ) ; <nl> + <nl> + EXPECT_THAT ( m . GetOutput ( ) , <nl> + testing : : ElementsAreArray ( { 1 , 2 , 3 , 4 , 6 , 8 , 10 , 12 } ) ) ; <nl> + } <nl> + <nl> + TEST ( CumsumOpTest , Simple1DIntTest ) { <nl> + CumsumOpModel < int32_t > m ( { TensorType_INT32 , { 8 } } , { TensorType_INT32 , { } } , <nl> + false , false ) ; <nl> + <nl> + m . PopulateTensor < int > ( m . input ( ) , { 1 , 2 , 3 , 4 , 5 , 6 , 7 , 8 } ) ; <nl> + m . 
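Note: the cumsum kernel above receives its two options as a serialized flexbuffers map, which cumsum::Init decodes out of the custom_options buffer. As a minimal sketch of wiring the same kernel into a standalone interpreter — the op name "Cumsum" mirrors the test harness, and the function name here is illustrative, not part of the diff:

  #include "flatbuffers/flexbuffers.h"
  #include "tensorflow/lite/kernels/register.h"

  // Register the custom kernel under the name the converter emitted.
  void SetUpCumsumResolver(tflite::ops::builtin::BuiltinOpResolver* resolver) {
    resolver->AddCustom("Cumsum", tflite::ops::custom::Register_CUMSUM());
  }

  // Options travel as a flexbuffers map with exactly the two keys
  // cumsum::Init reads back ("exclusive" and "reverse").
  void PackCumsumOptions(flexbuffers::Builder* fbb) {
    fbb->Map([&]() {
      fbb->Bool("exclusive", false);  // each sum includes the current element
      fbb->Bool("reverse", false);    // accumulate from low to high index
    });
    fbb->Finish();
  }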
PopulateTensor < int > ( m . axis ( ) , { 0 } ) ; <nl> + <nl> + m . Invoke ( ) ; <nl> + <nl> + EXPECT_THAT ( m . GetOutput ( ) , <nl> + testing : : ElementsAreArray ( { 1 , 3 , 6 , 10 , 15 , 21 , 28 , 36 } ) ) ; <nl> + } <nl> + <nl> + TEST ( CumsumOpTest , SimpleIntReverseTest ) { <nl> + CumsumOpModel < int32_t > m ( { TensorType_INT32 , { 2 , 4 } } , { TensorType_INT32 , { } } , <nl> + false , true ) ; <nl> + <nl> + m . PopulateTensor < int > ( m . input ( ) , { 1 , 2 , 3 , 4 , 5 , 6 , 7 , 8 } ) ; <nl> + m . PopulateTensor < int > ( m . axis ( ) , { 1 } ) ; <nl> + <nl> + m . Invoke ( ) ; <nl> + <nl> + EXPECT_THAT ( m . GetOutput ( ) , <nl> + testing : : ElementsAreArray ( { 10 , 9 , 7 , 4 , 26 , 21 , 15 , 8 } ) ) ; <nl> + } <nl> + <nl> + TEST ( CumsumOpTest , SimpleIntExclusiveTest ) { <nl> + CumsumOpModel < int32_t > m ( { TensorType_INT32 , { 2 , 4 } } , { TensorType_INT32 , { } } , <nl> + true , false ) ; <nl> + <nl> + m . PopulateTensor < int > ( m . input ( ) , { 1 , 2 , 3 , 4 , 5 , 6 , 7 , 8 } ) ; <nl> + m . PopulateTensor < int > ( m . axis ( ) , { 1 } ) ; <nl> + <nl> + m . Invoke ( ) ; <nl> + <nl> + EXPECT_THAT ( m . GetOutput ( ) , <nl> + testing : : ElementsAreArray ( { 0 , 1 , 3 , 6 , 0 , 5 , 11 , 18 } ) ) ; <nl> + } <nl> + <nl> + TEST ( CumsumOpTest , SimpleFloatTest ) { <nl> + CumsumOpModel < float > m ( { TensorType_FLOAT32 , { 2 , 4 } } , { TensorType_FLOAT32 , { } } , <nl> + false , false ) ; <nl> + <nl> + m . PopulateTensor < float > ( m . input ( ) , { 1 , 2 , 3 , 4 , 5 , 6 , 7 , 8 } ) ; <nl> + m . PopulateTensor < int > ( m . axis ( ) , { 1 } ) ; <nl> + <nl> + m . Invoke ( ) ; <nl> + <nl> + EXPECT_THAT ( m . GetOutput ( ) , testing : : ElementsAreArray ( <nl> + ArrayFloatNear ( { 1 , 3 , 6 , 10 , 5 , 11 , 18 , 26 } ) ) ) ; <nl> + } <nl> + <nl> + } / / namespace <nl> + } / / namespace custom <nl> + } / / namespace ops <nl> + } / / namespace tflite <nl> mmm a / tensorflow / lite / kernels / custom_ops_register . h <nl> ppp b / tensorflow / lite / kernels / custom_ops_register . h <nl> namespace tflite { <nl> namespace ops { <nl> namespace custom { <nl> <nl> + TfLiteRegistration * Register_CUMSUM ( ) ; <nl> TfLiteRegistration * Register_RFFT2D ( ) ; <nl> TfLiteRegistration * Register_HASHTABLE ( ) ; <nl> TfLiteRegistration * Register_HASHTABLE_FIND ( ) ; <nl> TfLiteRegistration * Register_HASHTABLE_IMPORT ( ) ; <nl> TfLiteRegistration * Register_HASHTABLE_SIZE ( ) ; <nl> TfLiteRegistration * Register_REAL ( ) ; <nl> TfLiteRegistration * Register_IMAG ( ) ; <nl> - } <nl> + <nl> + } / / namespace custom <nl> } / / namespace ops <nl> } / / namespace tflite <nl> <nl> mmm a / tensorflow / lite / kernels / internal / BUILD <nl> ppp b / tensorflow / lite / kernels / internal / BUILD <nl> cc_library ( <nl> " reference / integer_ops / fully_connected . h " , <nl> " reference / integer_ops / l2normalization . h " , <nl> " reference / integer_ops / logistic . h " , <nl> + " reference / integer_ops / mean . h " , <nl> " reference / integer_ops / mul . h " , <nl> " reference / integer_ops / pooling . h " , <nl> " reference / integer_ops / tanh . h " , <nl> cc_library ( <nl> " / / conditions : default " : [ <nl> " reference / integer_ops / dequantize . h " , <nl> " reference / integer_ops / log_softmax . h " , <nl> - " reference / integer_ops / mean . h " , <nl> " reference / integer_ops / transpose_conv . h " , <nl> " reference / reference_ops . h " , <nl> " reference / string_comparisons . h " , <nl> mmm a / tensorflow / lite / kernels / internal / common . 
h <nl> ppp b / tensorflow / lite / kernels / internal / common . h <nl> inline void gen_lut ( const std : : function < double ( double ) > & func , double min , <nl> std : : min ( std : : max ( TfLiteRound ( func ( max ) * 32768 . 0 ) , - 32768 . 0 ) , 32767 . 0 ) ; <nl> } <nl> <nl> + / / generate INT16 LUT for function ( ) , e . g . , table exp ( x ) and 1 / ( 1 + x ) used in <nl> + / / softmax <nl> + inline void gen_lut ( const std : : function < float ( float ) > & func , float min , <nl> + float max , int16_t * table , const int num ) { <nl> + / / size of table should equal to num + 1 <nl> + / / last element only for slope calculation <nl> + float step = ( max - min ) / ( num - 1 ) ; <nl> + float half_step = step / 2 . 0f ; <nl> + for ( int i = 0 ; i < num - 1 ; i + + ) { <nl> + float sample_val = TfLiteRound ( func ( min + i * step ) * 32768 . 0f ) ; <nl> + float midpoint_interp_val = <nl> + TfLiteRound ( ( func ( min + ( i + 1 ) * step ) * 32768 . 0f + <nl> + TfLiteRound ( func ( min + i * step ) * 32768 . 0f ) ) / <nl> + 2 . 0f ) ; <nl> + float midpoint_val = <nl> + TfLiteRound ( func ( min + i * step + half_step ) * 32768 . 0f ) ; <nl> + float midpoint_err = midpoint_interp_val - midpoint_val ; <nl> + float bias = TfLiteRound ( midpoint_err / 2 . 0f ) ; <nl> + table [ i ] = std : : min ( std : : max ( sample_val - bias , - 32768 . 0f ) , 32767 . 0f ) ; <nl> + } <nl> + table [ num - 1 ] = std : : min ( <nl> + std : : max ( TfLiteRound ( func ( max ) * 32768 . 0f ) , - 32768 . 0f ) , 32767 . 0f ) ; <nl> + } <nl> + <nl> / / int16_t func table lookup , e . g . , lookup exp ( ) and 1 / ( 1 + x ) used in softmax <nl> inline int16_t generic_int16_table_lookup ( int16_t value , const int16_t * lut ) { <nl> / / 512 base value , lut [ 513 ] only for calculate slope <nl> mmm a / tensorflow / lite / kernels / internal / optimized / optimized_ops . h <nl> ppp b / tensorflow / lite / kernels / internal / optimized / optimized_ops . h <nl> inline void BroadcastMinimumDispatch ( const ArithmeticParams & params , <nl> MinimumElementwise , MinimumScalarBroadcast ) ; <nl> } <nl> <nl> + template < typename T > <nl> + void CumsumImpl ( const T * input_data , const RuntimeShape & shape , int axis , <nl> + bool exclusive , bool reverse , T * output_data ) { <nl> + Eigen : : array < Eigen : : DenseIndex , 3 > dims = { 1 , 1 , 1 } ; <nl> + <nl> + for ( int i = 0 ; i < axis ; + + i ) { <nl> + dims [ 0 ] * = shape . Dims ( i ) ; <nl> + } <nl> + dims [ 1 ] = shape . Dims ( axis ) ; <nl> + for ( int i = axis + 1 ; i < shape . DimensionsCount ( ) ; + + i ) { <nl> + dims [ 2 ] * = shape . Dims ( i ) ; <nl> + } <nl> + <nl> + typedef Eigen : : TensorMap < <nl> + Eigen : : Tensor < const T , 3 , Eigen : : RowMajor , Eigen : : DenseIndex > , <nl> + Eigen : : Aligned > <nl> + ConstTensor ; <nl> + typedef Eigen : : TensorMap < <nl> + Eigen : : Tensor < T , 3 , Eigen : : RowMajor , Eigen : : DenseIndex > , Eigen : : Aligned > <nl> + Tensor ; <nl> + ConstTensor input ( input_data , dims ) ; <nl> + Tensor output ( output_data , dims ) ; <nl> + <nl> + if ( reverse ) { <nl> + Eigen : : array < bool , 3 > reverse_idx = { false , true , false } ; <nl> + output = <nl> + input . reverse ( reverse_idx ) . cumsum ( 1 , exclusive ) . reverse ( reverse_idx ) ; <nl> + } else { <nl> + output = input . 
cumsum ( 1 , exclusive ) ; <nl> + } <nl> + } <nl> + <nl> + template < typename T > <nl> + void CumSum ( const T * input_data , const RuntimeShape & shape , int axis , <nl> + bool exclusive , bool reverse , T * output_data ) { <nl> + const int dim = shape . DimensionsCount ( ) ; <nl> + TFLITE_DCHECK_GE ( dim , 1 ) ; <nl> + CumsumImpl < T > ( input_data , shape , axis , exclusive , reverse , output_data ) ; <nl> + } <nl> + <nl> } / / namespace optimized_ops <nl> } / / namespace tflite <nl> <nl> mmm a / tensorflow / lite / kernels / internal / reference / integer_ops / mean . h <nl> ppp b / tensorflow / lite / kernels / internal / reference / integer_ops / mean . h <nl> namespace reference_integer_ops { <nl> template < typename integer_type > <nl> inline void Mean ( const tflite : : MeanParams & op_params , int32_t multiplier , <nl> int32_t shift , const RuntimeShape & unextended_input_shape , <nl> - const integer_type * input_data , int32 input_zero_point , <nl> + const integer_type * input_data , int32_t input_zero_point , <nl> const RuntimeShape & unextended_output_shape , <nl> - integer_type * output_data , int32 output_zero_point ) { <nl> + integer_type * output_data , int32_t output_zero_point ) { <nl> / / Current implementation only supports dimension equals 4 and simultaneous <nl> / / reduction over width and height . <nl> TFLITE_CHECK_EQ ( unextended_input_shape . DimensionsCount ( ) , 4 ) ; <nl> inline void Mean ( const tflite : : MeanParams & op_params , int32_t multiplier , <nl> <nl> for ( int out_b = 0 ; out_b < output_batch ; + + out_b ) { <nl> for ( int out_d = 0 ; out_d < output_depth ; + + out_d ) { <nl> - int32 acc = 0 ; <nl> + int32_t acc = 0 ; <nl> for ( int in_h = 0 ; in_h < input_height ; + + in_h ) { <nl> for ( int in_w = 0 ; in_w < input_width ; + + in_w ) { <nl> acc + = input_data [ Offset ( input_shape , out_b , in_h , in_w , out_d ) ] - <nl> mmm a / tensorflow / lite / kernels / internal / reference / reduce . h <nl> ppp b / tensorflow / lite / kernels / internal / reference / reduce . h <nl> inline bool Mean ( const T * input_data , const int * input_dims , <nl> } <nl> <nl> / / Calculate mean by dividing output_data by num of aggregated element . <nl> - U num_elements_in_axis = 1 ; <nl> + size_t num_elements_in_axis = 1 ; <nl> for ( int idx = 0 ; idx < num_resolved_axis ; + + idx ) { <nl> size_t current = static_cast < size_t > ( input_dims [ resolved_axis [ idx ] ] ) ; <nl> / / Overflow prevention . <nl> - if ( current > ( std : : numeric_limits < U > : : max ( ) / num_elements_in_axis ) ) { <nl> + if ( current > ( std : : numeric_limits < size_t > : : max ( ) / num_elements_in_axis ) ) { <nl> return false ; <nl> } <nl> num_elements_in_axis * = current ; <nl> inline bool QuantizedMeanOrSum ( const T * input_data , int32_t input_zero_point , <nl> } <nl> <nl> / / Calculate mean by dividing output_data by num of aggregated element . <nl> - U num_elements_in_axis = 1 ; <nl> + size_t num_elements_in_axis = 1 ; <nl> for ( int idx = 0 ; idx < num_resolved_axis ; + + idx ) { <nl> size_t current = static_cast < size_t > ( input_dims [ resolved_axis [ idx ] ] ) ; <nl> / / Overflow prevention . <nl> - if ( current > ( std : : numeric_limits < U > : : max ( ) / num_elements_in_axis ) ) { <nl> + if ( current > ( std : : numeric_limits < size_t > : : max ( ) / num_elements_in_axis ) ) { <nl> return false ; <nl> } <nl> num_elements_in_axis * = current ; <nl> mmm a / tensorflow / lite / kernels / internal / types . h <nl> ppp b / tensorflow / lite / kernels / internal / types . 
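For clarity, the (outer, axis, inner) flattening that CumsumImpl performs can be restated as a self-contained unit. A sketch under the assumption that only Eigen's unsupported Tensor module is available, with the element type fixed to float; for an input of shape {2, 2, 3, 2} and axis = 2, the collapsed dims come out as {4, 3, 2}:

  #include <unsupported/Eigen/CXX11/Tensor>

  // Cumulative sum along the middle dimension of a 3-D view
  // (outer, axis_len, inner) over a flat row-major buffer.
  void CumsumAlongMiddle(const float* in, float* out, int outer,
                         int axis_len, int inner, bool exclusive,
                         bool reverse) {
    Eigen::array<Eigen::DenseIndex, 3> dims = {outer, axis_len, inner};
    Eigen::TensorMap<Eigen::Tensor<const float, 3, Eigen::RowMajor>>
        input(in, dims);
    Eigen::TensorMap<Eigen::Tensor<float, 3, Eigen::RowMajor>>
        output(out, dims);
    if (reverse) {
      // Reverse along the axis, accumulate, then reverse back.
      Eigen::array<bool, 3> rev = {false, true, false};
      output = input.reverse(rev).cumsum(1, exclusive).reverse(rev);
    } else {
      output = input.cumsum(1, exclusive);
    }
  }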
h <nl> struct SoftmaxParams { <nl> int32_t zero_point ; <nl> float scale ; <nl> float * table ; <nl> + / / int16 LUT for exp ( x ) , where x uniform distributed between [ - 10 . 0 , 0 . 0 ] <nl> int16_t * exp_lut ; <nl> + / / int16 LUT for 1 / ( 1 + x ) , where x uniform distributed between [ 0 . 0 , 1 . 0 ] <nl> int16_t * one_over_one_plus_x_lut ; <nl> uint8_t * uint8_table1 ; <nl> uint8_t * uint8_table2 ; <nl> mmm a / tensorflow / lite / micro / kernels / reduce . cc <nl> ppp b / tensorflow / lite / micro / kernels / reduce . cc <nl> limitations under the License . <nl> # include " tensorflow / lite / c / builtin_op_data . h " <nl> # include " tensorflow / lite / c / common . h " <nl> # include " tensorflow / lite / kernels / internal / quantization_util . h " <nl> + # include " tensorflow / lite / kernels / internal / reference / integer_ops / mean . h " <nl> # include " tensorflow / lite / kernels / internal / tensor_ctypes . h " <nl> # include " tensorflow / lite / kernels / internal / types . h " <nl> # include " tensorflow / lite / kernels / kernel_util . h " <nl> namespace reduce { <nl> constexpr int kMaxNumberOfAxis = 4 ; <nl> constexpr int kMaxNumberOfReducedAxis = 2 ; <nl> <nl> + struct OpData { <nl> + int32_t multiplier ; <nl> + int shift ; <nl> + int temp_buffer_idx ; <nl> + int input_zp ; <nl> + float input_scale ; <nl> + int output_zp ; <nl> + float output_scale ; <nl> + } ; <nl> + <nl> + void * InitMean ( TfLiteContext * context , const char * buffer , size_t length ) { <nl> + return context - > AllocatePersistentBuffer ( context , sizeof ( OpData ) ) ; <nl> + } <nl> + <nl> TfLiteStatus PrepareSimple ( TfLiteContext * context , TfLiteNode * node ) { <nl> / / Inputs Tensor ( dtype depends on quantization ) : <nl> / / [ 0 ] = Input <nl> TfLiteStatus PrepareSimple ( TfLiteContext * context , TfLiteNode * node ) { <nl> } <nl> <nl> TfLiteStatus PrepareMeanOrSum ( TfLiteContext * context , TfLiteNode * node ) { <nl> + const TfLiteTensor * input = GetInput ( context , node , 0 ) ; <nl> + OpData * op_data = reinterpret_cast < OpData * > ( node - > user_data ) ; <nl> + const TfLiteTensor * output = GetOutput ( context , node , 0 ) ; <nl> + if ( input - > type = = kTfLiteInt8 ) { <nl> + const double real_multiplier = static_cast < double > ( input - > params . scale ) / <nl> + static_cast < double > ( output - > params . scale ) ; <nl> + QuantizeMultiplier ( real_multiplier , & op_data - > multiplier , & op_data - > shift ) ; <nl> + } <nl> + <nl> + int output_size = NumElements ( output ) ; <nl> + if ( input - > type = = kTfLiteInt8 | | input - > type = = kTfLiteUInt8 ) { <nl> + context - > RequestScratchBufferInArena ( context , output_size * sizeof ( int32_t ) , <nl> + & op_data - > temp_buffer_idx ) ; <nl> + op_data - > input_zp = input - > params . zero_point ; <nl> + op_data - > input_scale = input - > params . scale ; <nl> + op_data - > output_zp = output - > params . zero_point ; <nl> + op_data - > output_scale = output - > params . 
scale ; <nl> + } <nl> + <nl> TF_LITE_ENSURE_OK ( context , PrepareSimple ( context , node ) ) ; <nl> / / TODO ( b / 144955155 ) : Support uint8_t ( b / 144955155 ) and int8_t ( b / 144955018 ) <nl> return kTfLiteOk ; <nl> TfLiteStatus EvalMean ( TfLiteContext * context , TfLiteNode * node ) { <nl> TfLiteEvalTensor * output = tflite : : micro : : GetEvalOutput ( context , node , 0 ) ; <nl> TfLiteReducerParams * params = <nl> reinterpret_cast < TfLiteReducerParams * > ( node - > builtin_data ) ; <nl> + OpData * op_data = reinterpret_cast < OpData * > ( node - > user_data ) ; <nl> <nl> int num_axis = static_cast < int > ( ElementCount ( * axis - > dims ) ) ; <nl> int temp_index [ kMaxNumberOfAxis ] ; <nl> int resolved_axis [ kMaxNumberOfReducedAxis ] ; <nl> <nl> + tflite : : MeanParams op_params ; <nl> + ResolveAxis ( tflite : : micro : : GetTensorData < int > ( axis ) , num_axis , & op_params ) ; <nl> + / / TODO ( b / 146571391 ) : Support only 4D Input and 2D Axis for Mean until <nl> + / / scratch tensor allocation has been implemented in ( b / 132070898 ) <nl> + bool is_valid_inputs = ( input - > dims - > size = = 4 & & op_params . axis_count = = 2 & & <nl> + ( ( op_params . axis [ 0 ] = = 1 & & op_params . axis [ 1 ] = = 2 ) | | <nl> + ( op_params . axis [ 0 ] = = 2 & & op_params . axis [ 1 ] = = 1 ) ) ) ; <nl> + TF_LITE_ENSURE_MSG ( <nl> + context , is_valid_inputs = = true , <nl> + " Number of Input " <nl> + " dimensions ! = 4 OR the Axis is not either [ 1 , 2 ] or [ 2 , 1 ] " ) ; <nl> switch ( input - > type ) { <nl> case kTfLiteFloat32 : { <nl> - tflite : : MeanParams op_params ; <nl> - ResolveAxis ( tflite : : micro : : GetTensorData < int > ( axis ) , num_axis , <nl> - & op_params ) ; <nl> - / / TODO ( b / 146571391 ) : Support only 4D Input and 2D Axis for Mean until <nl> - / / scratch tensor allocation has been implemented in ( b / 132070898 ) <nl> - bool is_valid_inputs = <nl> - ( input - > dims - > size = = 4 & & op_params . axis_count = = 2 & & <nl> - ( ( op_params . axis [ 0 ] = = 1 & & op_params . axis [ 1 ] = = 2 ) | | <nl> - ( op_params . axis [ 0 ] = = 2 & & op_params . axis [ 1 ] = = 1 ) ) ) ; <nl> - TF_LITE_ENSURE_MSG ( <nl> - context , is_valid_inputs = = true , <nl> - " Number of Input " <nl> - " dimensions ! = 4 OR the Axis is not either [ 1 , 2 ] or [ 2 , 1 ] " ) ; <nl> / / TODO ( b / 139102329 ) : Handle the below special case in the combined <nl> / / reference method . <nl> / / Defer to specialized implementation for 4D Mean across axes 1 & 2 . 
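A short worked example of the requantization set up in PrepareMeanOrSum above: QuantizeMultiplier factors the real scale ratio input_scale / output_scale into a Q31 fixed-point multiplier and a power-of-two shift. The scales below are illustrative only (the int8 tests in this diff actually use equal scales, so their ratio is 1.0):

  #include "tensorflow/lite/kernels/internal/quantization_util.h"

  void ExampleMeanRequantization() {
    // e.g. input_scale = 0.5, output_scale = 0.25
    double real_multiplier = 0.5 / 0.25;  // = 2.0
    int32_t multiplier;
    int shift;
    tflite::QuantizeMultiplier(real_multiplier, &multiplier, &shift);
    // 2.0 normalizes to 0.5 * 2^2, so multiplier == 1 << 30
    // (0.5 in Q31) and shift == 2; the kernel applies both to
    // every accumulated per-channel sum before adding output_zp.
  }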
<nl> TfLiteStatus EvalMean ( TfLiteContext * context , TfLiteNode * node ) { <nl> tflite : : micro : : GetTensorData < float > ( output ) ) ) ; <nl> } <nl> } break ; <nl> + case kTfLiteInt8 : { <nl> + if ( params - > keep_dims ) { <nl> + reference_integer_ops : : Mean ( <nl> + op_params , op_data - > multiplier , op_data - > shift , <nl> + tflite : : micro : : GetTensorShape ( input ) , <nl> + tflite : : micro : : GetTensorData < int8_t > ( input ) , op_data - > input_zp , <nl> + tflite : : micro : : GetTensorShape ( output ) , <nl> + tflite : : micro : : GetTensorData < int8_t > ( output ) , op_data - > output_zp ) ; <nl> + } else if ( op_data - > input_zp = = op_data - > output_zp & & <nl> + op_data - > input_scale = = op_data - > output_scale ) { <nl> + int32_t * temp_buffer = static_cast < int32_t * > ( <nl> + context - > GetScratchBuffer ( context , op_data - > temp_buffer_idx ) ) ; <nl> + TF_LITE_ENSURE ( <nl> + context , <nl> + reference_ops : : Mean ( <nl> + tflite : : micro : : GetTensorData < int8_t > ( input ) , input - > dims - > data , <nl> + input - > dims - > size , tflite : : micro : : GetTensorData < int8_t > ( output ) , <nl> + output - > dims - > data , output - > dims - > size , <nl> + tflite : : micro : : GetTensorData < int > ( axis ) , num_axis , <nl> + params - > keep_dims , temp_index , resolved_axis , temp_buffer ) ) ; <nl> + } else { <nl> + int32_t * temp_buffer = static_cast < int32_t * > ( <nl> + context - > GetScratchBuffer ( context , op_data - > temp_buffer_idx ) ) ; <nl> + TF_LITE_ENSURE ( <nl> + context , <nl> + reference_ops : : QuantizedMeanOrSum ( <nl> + tflite : : micro : : GetTensorData < int8_t > ( input ) , op_data - > input_zp , <nl> + op_data - > input_scale , input - > dims - > data , input - > dims - > size , <nl> + tflite : : micro : : GetTensorData < int8_t > ( output ) , <nl> + op_data - > output_zp , op_data - > output_scale , output - > dims - > data , <nl> + output - > dims - > size , tflite : : micro : : GetTensorData < int > ( axis ) , <nl> + num_axis , params - > keep_dims , temp_index , resolved_axis , <nl> + temp_buffer , false ) ) ; <nl> + } <nl> + } break ; <nl> + case kTfLiteUInt8 : { <nl> + if ( params - > keep_dims ) { <nl> + reference_ops : : Mean ( op_params , tflite : : micro : : GetTensorShape ( input ) , <nl> + tflite : : micro : : GetTensorData < uint8_t > ( input ) , <nl> + op_data - > input_zp , op_data - > input_scale , <nl> + tflite : : micro : : GetTensorShape ( output ) , <nl> + tflite : : micro : : GetTensorData < uint8_t > ( output ) , <nl> + op_data - > output_zp , op_data - > output_scale ) ; <nl> + } else if ( op_data - > input_zp = = op_data - > output_zp & & <nl> + op_data - > input_scale = = op_data - > output_scale ) { <nl> + uint32_t * temp_buffer = static_cast < uint32_t * > ( <nl> + context - > GetScratchBuffer ( context , op_data - > temp_buffer_idx ) ) ; <nl> + TF_LITE_ENSURE ( <nl> + context , <nl> + reference_ops : : Mean ( tflite : : micro : : GetTensorData < uint8_t > ( input ) , <nl> + input - > dims - > data , input - > dims - > size , <nl> + tflite : : micro : : GetTensorData < uint8_t > ( output ) , <nl> + output - > dims - > data , output - > dims - > size , <nl> + tflite : : micro : : GetTensorData < int > ( axis ) , <nl> + num_axis , params - > keep_dims , temp_index , <nl> + resolved_axis , temp_buffer ) ) ; <nl> + } else { <nl> + uint32_t * temp_buffer = static_cast < uint32_t * > ( <nl> + context - > GetScratchBuffer ( context , op_data - > temp_buffer_idx ) ) ; <nl> + TF_LITE_ENSURE ( <nl> 
+ context , <nl> + reference_ops : : QuantizedMeanOrSum ( <nl> + tflite : : micro : : GetTensorData < uint8_t > ( input ) , op_data - > input_zp , <nl> + op_data - > input_scale , input - > dims - > data , input - > dims - > size , <nl> + tflite : : micro : : GetTensorData < uint8_t > ( output ) , <nl> + op_data - > output_zp , op_data - > output_scale , output - > dims - > data , <nl> + output - > dims - > size , tflite : : micro : : GetTensorData < int > ( axis ) , <nl> + num_axis , params - > keep_dims , temp_index , resolved_axis , <nl> + temp_buffer , false ) ) ; <nl> + } <nl> + } break ; <nl> default : <nl> / / TODO ( b / 144955155 ) : Support uint8_t ( b / 144955155 ) and int8_t ( b / 144955018 ) <nl> TF_LITE_ENSURE_MSG ( context , false , <nl> - " Currently , only float32 input type " <nl> + " Currently , only float32 , int8 or uint8 input type " <nl> " is supported . " ) ; <nl> } <nl> return kTfLiteOk ; <nl> TfLiteStatus EvalMean ( TfLiteContext * context , TfLiteNode * node ) { <nl> } / / namespace reduce <nl> <nl> TfLiteRegistration Register_MEAN ( ) { <nl> - return { / * init = * / nullptr , <nl> + return { / * init = * / reduce : : InitMean , <nl> / * free = * / nullptr , <nl> / * prepare = * / reduce : : PrepareMeanOrSum , <nl> / * invoke = * / reduce : : EvalMean , <nl> mmm a / tensorflow / lite / micro / kernels / reduce_test . cc <nl> ppp b / tensorflow / lite / micro / kernels / reduce_test . cc <nl> namespace testing { <nl> namespace { <nl> <nl> / / Common inputs and outputs . <nl> - / / static const int kInputElements4D = 24 ; <nl> + static const int kInputElements4D = 24 ; <nl> static const int kInputShape4D [ ] = { 4 , 2 , 2 , 3 , 2 } ; <nl> static const float kInputData4D [ ] = { <nl> 1 . 0 , 2 . 0 , 3 . 0 , 4 . 0 , 5 . 0 , 6 . 0 , 7 . 0 , 8 . 0 , 9 . 0 , 10 . 0 , 11 . 0 , 12 . 
0 , <nl> void TestMeanFloatInput4D ( const int * input_dims_data , const float * input_data , <nl> output_data , output_dims_count , params , tolerance ) ) ; <nl> } <nl> <nl> + template < typename T > <nl> + void TestMeanOpQuantized ( const int * input_dims_data , const float * input_data , <nl> + T * input_data_quant , float input_scale , <nl> + int input_zero_point , const int * axis_dims_data , <nl> + const int32_t * axis_data , const int * output_dims_data , <nl> + const float * expected_output_data , <nl> + T * output_data_quant , T * expected_output_data_quant , <nl> + float output_scale , int output_zero_point , <nl> + TfLiteReducerParams * params ) { <nl> + / / Convert dimesion arguments to TfLiteArrays <nl> + TfLiteIntArray * input_dims = IntArrayFromInts ( input_dims_data ) ; <nl> + TfLiteIntArray * axis_dims = IntArrayFromInts ( axis_dims_data ) ; <nl> + TfLiteIntArray * output_dims = IntArrayFromInts ( output_dims_data ) ; <nl> + <nl> + / / Get number of elements in input and output tensors <nl> + const int output_dims_count = ElementCount ( * output_dims ) ; <nl> + <nl> + / / Initialize tensors <nl> + constexpr int tensors_size = 3 ; <nl> + TfLiteTensor tensors [ ] = { <nl> + CreateQuantizedTensor ( input_data , input_data_quant , input_dims , <nl> + input_scale , input_zero_point ) , <nl> + CreateInt32Tensor ( axis_data , axis_dims ) , <nl> + CreateQuantizedTensor ( output_data_quant , output_dims , output_scale , <nl> + output_zero_point ) , <nl> + } ; <nl> + <nl> + / / Quantize expected output <nl> + tflite : : AsymmetricQuantize ( expected_output_data , expected_output_data_quant , <nl> + output_dims_count , output_scale , <nl> + output_zero_point ) ; <nl> + <nl> + TF_LITE_MICRO_EXPECT_EQ ( <nl> + kTfLiteOk , <nl> + ValidateReduceGoldens ( tensors , tensors_size , expected_output_data_quant , <nl> + output_data_quant , output_dims_count , params , 1 . 0 ) ) ; <nl> + } <nl> + <nl> } / / namespace <nl> } / / namespace testing <nl> } / / namespace tflite <nl> TF_LITE_MICRO_TEST ( MeanFloat4DKeepDims ) { <nl> & params ) ; <nl> } <nl> <nl> + TF_LITE_MICRO_TEST ( MeanInt84DKeepDims ) { <nl> + int8_t expected_output_data_quant [ tflite : : testing : : kOutputElements ] ; <nl> + int8_t output_data_quant [ tflite : : testing : : kOutputElements ] ; <nl> + int8_t input_data_quant [ tflite : : testing : : kInputElements4D ] ; <nl> + <nl> + float input_scale = 0 . 5f ; <nl> + int input_zero_point = 0 ; <nl> + float output_scale = 0 . 5f ; <nl> + int output_zero_point = 0 ; <nl> + <nl> + TfLiteReducerParams params = { <nl> + true / / keep_dims <nl> + } ; <nl> + <nl> + tflite : : testing : : TestMeanOpQuantized < int8_t > ( <nl> + tflite : : testing : : kInputShape4D , tflite : : testing : : kInputData4D , <nl> + input_data_quant , input_scale , input_zero_point , <nl> + tflite : : testing : : kAxisShape , tflite : : testing : : kAxisData , <nl> + tflite : : testing : : kOutputShape , tflite : : testing : : kGoldenData , <nl> + output_data_quant , expected_output_data_quant , output_scale , <nl> + output_zero_point , & params ) ; <nl> + } <nl> + <nl> + TF_LITE_MICRO_TEST ( MeanUInt84DKeepDims ) { <nl> + uint8_t expected_output_data_quant [ tflite : : testing : : kOutputElements ] ; <nl> + uint8_t output_data_quant [ tflite : : testing : : kOutputElements ] ; <nl> + uint8_t input_data_quant [ tflite : : testing : : kInputElements4D ] ; <nl> + <nl> + float input_scale = 0 . 5f ; <nl> + int input_zero_point = 128 ; <nl> + float output_scale = 0 . 
5f ; <nl> + int output_zero_point = 128 ; <nl> + <nl> + TfLiteReducerParams params = { <nl> + true / / keep_dims <nl> + } ; <nl> + <nl> + tflite : : testing : : TestMeanOpQuantized < uint8_t > ( <nl> + tflite : : testing : : kInputShape4D , tflite : : testing : : kInputData4D , <nl> + input_data_quant , input_scale , input_zero_point , <nl> + tflite : : testing : : kAxisShape , tflite : : testing : : kAxisData , <nl> + tflite : : testing : : kOutputShape , tflite : : testing : : kGoldenData , <nl> + output_data_quant , expected_output_data_quant , output_scale , <nl> + output_zero_point , & params ) ; <nl> + } <nl> + <nl> TF_LITE_MICRO_TEST ( MeanFloat4DWithoutKeepDims ) { <nl> const int kOutputShape [ ] = { 2 , 2 , 2 } ; <nl> float output_data [ tflite : : testing : : kOutputElements ] ; <nl> - <nl> TfLiteReducerParams params = { <nl> false / / keep_dims <nl> } ; <nl> TF_LITE_MICRO_TEST ( MeanFloat4DWithoutKeepDims ) { <nl> tflite : : testing : : kGoldenData , output_data , & params ) ; <nl> } <nl> <nl> + TF_LITE_MICRO_TEST ( MeanInt84DWithoutKeepDims ) { <nl> + int8_t expected_output_data_quant [ tflite : : testing : : kOutputElements ] ; <nl> + int8_t output_data_quant [ tflite : : testing : : kOutputElements ] ; <nl> + int8_t input_data_quant [ tflite : : testing : : kInputElements4D ] ; <nl> + <nl> + const int kOutputShape [ ] = { 2 , 2 , 2 } ; <nl> + TfLiteReducerParams params = { <nl> + false / / keep_dims <nl> + } ; <nl> + float input_scale = 0 . 5f ; <nl> + int input_zero_point = 0 ; <nl> + float output_scale = 0 . 5f ; <nl> + int output_zero_point = 0 ; <nl> + <nl> + tflite : : testing : : TestMeanOpQuantized < int8_t > ( <nl> + tflite : : testing : : kInputShape4D , tflite : : testing : : kInputData4D , <nl> + input_data_quant , input_scale , input_zero_point , <nl> + tflite : : testing : : kAxisShape , tflite : : testing : : kAxisData , kOutputShape , <nl> + tflite : : testing : : kGoldenData , output_data_quant , <nl> + expected_output_data_quant , output_scale , output_zero_point , & params ) ; <nl> + } <nl> + <nl> + TF_LITE_MICRO_TEST ( MeanUInt84DWithoutKeepDims ) { <nl> + uint8_t expected_output_data_quant [ tflite : : testing : : kOutputElements ] ; <nl> + uint8_t output_data_quant [ tflite : : testing : : kOutputElements ] ; <nl> + uint8_t input_data_quant [ tflite : : testing : : kInputElements4D ] ; <nl> + <nl> + const int kOutputShape [ ] = { 2 , 2 , 2 } ; <nl> + TfLiteReducerParams params = { <nl> + false / / keep_dims <nl> + } ; <nl> + float input_scale = 0 . 5f ; <nl> + int input_zero_point = 128 ; <nl> + float output_scale = 0 . 5f ; <nl> + int output_zero_point = 128 ; <nl> + <nl> + tflite : : testing : : TestMeanOpQuantized < uint8_t > ( <nl> + tflite : : testing : : kInputShape4D , tflite : : testing : : kInputData4D , <nl> + input_data_quant , input_scale , input_zero_point , <nl> + tflite : : testing : : kAxisShape , tflite : : testing : : kAxisData , kOutputShape , <nl> + tflite : : testing : : kGoldenData , output_data_quant , <nl> + expected_output_data_quant , output_scale , output_zero_point , & params ) ; <nl> + } <nl> + <nl> TF_LITE_MICRO_TEST ( MeanFloat4DWithoutKeepDimsWithPrecision ) { <nl> const int kInputShape4D [ ] = { 4 , 2 , 2 , 3 , 1 } ; <nl> const float kInputData4D [ ] = { 1 . 0 , 24 . 0 , 13 . 0 , 3 . 0 , 9 . 0 , 17 . 0 , <nl> TF_LITE_MICRO_TEST ( MeanFloat4DWithoutKeepDimsWithPrecision ) { <nl> const int kOutputShape [ ] = { 2 , 2 , 1 } ; <nl> const float kGoldenData [ ] = { 11 . 166667 , 19 . 
833334 } ; <nl> float output_data [ kOutputElements ] ; <nl> - <nl> TfLiteReducerParams params = { <nl> false / / keep_dims <nl> } ; <nl> TF_LITE_MICRO_TEST ( MeanFloat4DWithoutKeepDimsWithPrecision ) { <nl> & params ) ; <nl> } <nl> <nl> + TF_LITE_MICRO_TEST ( MeanInt84DWithoutKeepDimsWithPrecision ) { <nl> + const int kInputShape4D [ ] = { 4 , 2 , 2 , 3 , 1 } ; <nl> + const float kInputData4D [ ] = { 1 . 0 , 24 . 0 , 13 . 0 , 3 . 0 , 9 . 0 , 17 . 0 , <nl> + 11 . 0 , 36 . 0 , 14 . 0 , 19 . 0 , 17 . 0 , 22 . 0 } ; <nl> + const int kOutputShape [ ] = { 2 , 2 , 1 } ; <nl> + const float kGoldenData [ ] = { 11 . 166667 , 19 . 833334 } ; <nl> + TfLiteReducerParams params = { <nl> + false / / keep_dims <nl> + } ; <nl> + float input_scale = 0 . 5f ; <nl> + int input_zero_point = 0 ; <nl> + float output_scale = 0 . 5f ; <nl> + int output_zero_point = 0 ; <nl> + <nl> + int8_t output_data_quant [ 2 ] ; <nl> + int8_t expected_output_data_quant [ 2 ] ; <nl> + int8_t input_data_quant [ 12 ] ; <nl> + <nl> + tflite : : testing : : TestMeanOpQuantized < int8_t > ( <nl> + kInputShape4D , kInputData4D , input_data_quant , input_scale , <nl> + input_zero_point , tflite : : testing : : kAxisShape , tflite : : testing : : kAxisData , <nl> + kOutputShape , kGoldenData , output_data_quant , expected_output_data_quant , <nl> + output_scale , output_zero_point , & params ) ; <nl> + } <nl> + <nl> + TF_LITE_MICRO_TEST ( MeanUInt84DWithoutKeepDimsWithPrecision ) { <nl> + const int kInputShape4D [ ] = { 4 , 2 , 2 , 3 , 1 } ; <nl> + const float kInputData4D [ ] = { 1 . 0 , 24 . 0 , 13 . 0 , 3 . 0 , 9 . 0 , 17 . 0 , <nl> + 11 . 0 , 36 . 0 , 14 . 0 , 19 . 0 , 17 . 0 , 22 . 0 } ; <nl> + const int kOutputShape [ ] = { 2 , 2 , 1 } ; <nl> + const float kGoldenData [ ] = { 11 . 166667 , 19 . 833334 } ; <nl> + TfLiteReducerParams params = { <nl> + false / / keep_dims <nl> + } ; <nl> + <nl> + float input_scale = 0 . 5f ; <nl> + int input_zero_point = 128 ; <nl> + float output_scale = 0 . 5f ; <nl> + int output_zero_point = 128 ; <nl> + <nl> + uint8_t output_data_quant [ 2 ] ; <nl> + uint8_t expected_output_data_quant [ 2 ] ; <nl> + uint8_t input_data_quant [ 12 ] ; <nl> + <nl> + tflite : : testing : : TestMeanOpQuantized < uint8_t > ( <nl> + kInputShape4D , kInputData4D , input_data_quant , input_scale , <nl> + input_zero_point , tflite : : testing : : kAxisShape , tflite : : testing : : kAxisData , <nl> + kOutputShape , kGoldenData , output_data_quant , expected_output_data_quant , <nl> + output_scale , output_zero_point , & params ) ; <nl> + } <nl> TF_LITE_MICRO_TESTS_END <nl> mmm a / tensorflow / lite / micro / kernels / softmax . cc <nl> ppp b / tensorflow / lite / micro / kernels / softmax . cc <nl> namespace micro { <nl> namespace activations { <nl> namespace { <nl> <nl> + / / Softmax parameter data that persists in user_data <nl> + static constexpr int kInt16LUTArraySize = 513 ; <nl> + <nl> TfLiteStatus CalculateSoftmaxParams ( TfLiteContext * context , <nl> const TfLiteTensor * input , <nl> TfLiteTensor * output , <nl> const TfLiteSoftmaxParams * params , <nl> SoftmaxParams * op_data ) { <nl> - if ( input - > type = = kTfLiteUInt8 | | input - > type = = kTfLiteInt8 ) { <nl> + if ( input - > type = = kTfLiteUInt8 | | input - > type = = kTfLiteInt8 | | <nl> + input - > type = = kTfLiteInt16 ) { <nl> if ( input - > type = = kTfLiteUInt8 ) { <nl> TF_LITE_ENSURE_TYPES_EQ ( context , output - > type , kTfLiteUInt8 ) ; <nl> TF_LITE_ENSURE_EQ ( context , output - > params . 
zero_point , 0 ) ; <nl> - } else { <nl> + } else if ( input - > type = = kTfLiteInt16 ) { <nl> + TF_LITE_ENSURE_EQ ( context , output - > params . zero_point , 0 ) ; <nl> + TF_LITE_ENSURE_NEAR ( context , output - > params . scale , 1 . f / 32768 , <nl> + ( 0 . 001f * 1 . f / 32768 ) ) ; <nl> + } else { / / input - > type = = kTfLiteInt8 <nl> TF_LITE_ENSURE_TYPES_EQ ( context , input - > type , kTfLiteInt8 ) ; <nl> if ( output - > type = = kTfLiteInt16 ) { <nl> TF_LITE_ENSURE_EQ ( context , output - > params . zero_point , - 32768 ) ; <nl> - / / NOTE : Current int16_t softmax output does not require symmetric <nl> - / / scaling <nl> - / / - so no need to verify scale here . <nl> - } else { <nl> + TF_LITE_ENSURE_NEAR ( context , output - > params . scale , 1 . f / 65536 , <nl> + ( 0 . 001f * 1 . f / 65536 ) ) ; <nl> + } else { / / output - > type = = kTfLiteint8 <nl> TF_LITE_ENSURE_TYPES_EQ ( context , output - > type , kTfLiteInt8 ) ; <nl> TF_LITE_ENSURE_EQ ( context , output - > params . zero_point , - 128 ) ; <nl> TF_LITE_ENSURE ( context , output - > params . scale = = 1 . f / 256 ) ; <nl> TfLiteStatus CalculateSoftmaxParams ( TfLiteContext * context , <nl> <nl> static const int kScaledDiffIntegerBits = 5 ; <nl> <nl> - int input_left_shift ; <nl> - tflite : : PreprocessSoftmaxScaling ( <nl> - static_cast < double > ( params - > beta ) , <nl> - static_cast < double > ( input - > params . scale ) , kScaledDiffIntegerBits , <nl> - & op_data - > input_multiplier , & input_left_shift ) ; <nl> - op_data - > input_left_shift = input_left_shift ; <nl> - op_data - > diff_min = <nl> - - 1 . 0 * tflite : : CalculateInputRadius ( kScaledDiffIntegerBits , <nl> - op_data - > input_left_shift ) ; <nl> + / / Calculate input_multiplier and input_left_shift <nl> + if ( input - > type = = kTfLiteInt16 ) { <nl> + int input_left_shift ; <nl> + double input_scale_beta_rescale = <nl> + static_cast < double > ( input - > params . scale ) * <nl> + static_cast < double > ( params - > beta ) / <nl> + ( 10 . 0 / 65535 . 0 ) ; / / scale the input_diff such that [ - 65535 , 0 ] <nl> + / / correspond to [ - 10 . 0 , 0 . 0 ] <nl> + QuantizeMultiplier ( input_scale_beta_rescale , & op_data - > input_multiplier , <nl> + & input_left_shift ) ; <nl> + op_data - > input_left_shift = input_left_shift ; <nl> + } else { <nl> + int input_left_shift ; <nl> + tflite : : PreprocessSoftmaxScaling ( <nl> + static_cast < double > ( params - > beta ) , <nl> + static_cast < double > ( input - > params . scale ) , kScaledDiffIntegerBits , <nl> + & op_data - > input_multiplier , & input_left_shift ) ; <nl> + op_data - > input_left_shift = input_left_shift ; <nl> + op_data - > diff_min = <nl> + - 1 . 
0 * tflite : : CalculateInputRadius ( kScaledDiffIntegerBits , <nl> + op_data - > input_left_shift ) ; <nl> + } <nl> } else { <nl> TF_LITE_ENSURE_TYPES_EQ ( context , input - > type , kTfLiteFloat32 ) ; <nl> TF_LITE_ENSURE_TYPES_EQ ( context , output - > type , kTfLiteFloat32 ) ; <nl> void SoftmaxQuantized ( const TfLiteEvalTensor * input , TfLiteEvalTensor * output , <nl> tflite : : micro : : GetTensorData < uint8_t > ( input ) , <nl> tflite : : micro : : GetTensorShape ( output ) , <nl> tflite : : micro : : GetTensorData < uint8_t > ( output ) ) ; <nl> - } else { <nl> + } else if ( input - > type = = kTfLiteInt8 ) { <nl> if ( output - > type = = kTfLiteInt16 ) { <nl> tflite : : reference_ops : : Softmax ( <nl> op_data , tflite : : micro : : GetTensorShape ( input ) , <nl> void SoftmaxQuantized ( const TfLiteEvalTensor * input , TfLiteEvalTensor * output , <nl> tflite : : micro : : GetTensorShape ( output ) , <nl> tflite : : micro : : GetTensorData < int8_t > ( output ) ) ; <nl> } <nl> + } else { <nl> + tflite : : reference_ops : : SoftmaxInt16 ( <nl> + op_data , tflite : : micro : : GetTensorShape ( input ) , <nl> + tflite : : micro : : GetTensorData < int16_t > ( input ) , <nl> + tflite : : micro : : GetTensorShape ( output ) , <nl> + tflite : : micro : : GetTensorData < int16_t > ( output ) ) ; <nl> } <nl> } <nl> <nl> void * SoftmaxInit ( TfLiteContext * context , const char * buffer , size_t length ) { <nl> } <nl> <nl> TfLiteStatus SoftmaxPrepare ( TfLiteContext * context , TfLiteNode * node ) { <nl> - auto * params = static_cast < TfLiteSoftmaxParams * > ( node - > builtin_data ) ; <nl> - <nl> TF_LITE_ENSURE_EQ ( context , NumInputs ( node ) , 1 ) ; <nl> TF_LITE_ENSURE_EQ ( context , NumOutputs ( node ) , 1 ) ; <nl> const TfLiteTensor * input = GetInput ( context , node , 0 ) ; <nl> TF_LITE_ENSURE ( context , NumDimensions ( input ) > = 1 ) ; <nl> - <nl> TfLiteTensor * output = GetOutput ( context , node , 0 ) ; <nl> <nl> - TFLITE_DCHECK ( node - > user_data ! = nullptr ) ; <nl> - SoftmaxParams * data = static_cast < SoftmaxParams * > ( node - > user_data ) ; <nl> - return CalculateSoftmaxParams ( context , input , output , params , data ) ; <nl> + TF_LITE_ENSURE ( context , node - > user_data ! = nullptr ) ; <nl> + SoftmaxParams * op_data = static_cast < SoftmaxParams * > ( node - > user_data ) ; <nl> + / / Only allocate LUTs for KTfLiteInt16 data type <nl> + if ( input - > type = = kTfLiteInt16 ) { <nl> + void * raw_exp_lut = context - > AllocatePersistentBuffer ( <nl> + context , sizeof ( int16_t ) * kInt16LUTArraySize ) ; <nl> + TF_LITE_ENSURE ( context , raw_exp_lut ! = nullptr ) ; <nl> + op_data - > exp_lut = reinterpret_cast < int16_t * > ( raw_exp_lut ) ; <nl> + void * one_over_one_plus_x_lut = context - > AllocatePersistentBuffer ( <nl> + context , sizeof ( int16_t ) * kInt16LUTArraySize ) ; <nl> + TF_LITE_ENSURE ( context , one_over_one_plus_x_lut ! = nullptr ) ; <nl> + op_data - > one_over_one_plus_x_lut = <nl> + reinterpret_cast < int16_t * > ( one_over_one_plus_x_lut ) ; <nl> + } <nl> + <nl> + if ( output - > type = = kTfLiteInt16 ) { <nl> + TF_LITE_ENSURE ( context , input - > type = = kTfLiteInt8 | | <nl> + input - > type = = kTfLiteUInt8 | | <nl> + input - > type = = kTfLiteInt16 ) ; <nl> + } else { <nl> + TF_LITE_ENSURE_EQ ( context , input - > type , output - > type ) ; <nl> + } <nl> + <nl> + / / Populate LUT if required <nl> + if ( input - > type = = kTfLiteInt16 ) { <nl> + TF_LITE_ENSURE_EQ ( context , output - > params . 
zero_point , 0 ) ; <nl> + / / exp LUT only used on negative values <nl> + / / we consider exp ( - 10 . 0 ) is insignificant to accumulation <nl> + gen_lut ( [ ] ( float value ) { return std : : exp ( value ) ; } , - 10 . 0f , 0 . 0f , <nl> + op_data - > exp_lut , kInt16LUTArraySize ) ; <nl> + gen_lut ( [ ] ( float value ) { return 1 . 0f / ( 1 . 0f + value ) ; } , 0 . 0f , 1 . 0f , <nl> + op_data - > one_over_one_plus_x_lut , kInt16LUTArraySize ) ; <nl> + op_data - > zero_point = output - > params . zero_point ; <nl> + op_data - > scale = output - > params . scale ; <nl> + } <nl> + <nl> + auto * params = static_cast < TfLiteSoftmaxParams * > ( node - > builtin_data ) ; <nl> + return CalculateSoftmaxParams ( context , input , output , params , op_data ) ; <nl> } <nl> <nl> TfLiteStatus SoftmaxEval ( TfLiteContext * context , TfLiteNode * node ) { <nl> TfLiteStatus SoftmaxEval ( TfLiteContext * context , TfLiteNode * node ) { <nl> TfLiteEvalTensor * output = tflite : : micro : : GetEvalOutput ( context , node , 0 ) ; <nl> <nl> TFLITE_DCHECK ( node - > user_data ! = nullptr ) ; <nl> - SoftmaxParams * data = static_cast < SoftmaxParams * > ( node - > user_data ) ; <nl> + SoftmaxParams op_data = * static_cast < SoftmaxParams * > ( node - > user_data ) ; <nl> <nl> switch ( input - > type ) { <nl> case kTfLiteFloat32 : { <nl> - SoftmaxFloat ( input , output , * data ) ; <nl> + SoftmaxFloat ( input , output , op_data ) ; <nl> return kTfLiteOk ; <nl> } <nl> case kTfLiteInt8 : <nl> - case kTfLiteUInt8 : { <nl> - SoftmaxQuantized ( input , output , * data ) ; <nl> + case kTfLiteUInt8 : <nl> + case kTfLiteInt16 : { <nl> + SoftmaxQuantized ( input , output , op_data ) ; <nl> return kTfLiteOk ; <nl> } <nl> default : <nl> mmm a / tensorflow / lite / micro / kernels / softmax_test . cc <nl> ppp b / tensorflow / lite / micro / kernels / softmax_test . cc <nl> namespace { <nl> / / quantization parameters . <nl> const float output_scale_int8 = 1 . 0f / 256 . 0f ; <nl> const float output_scale_uint8 = 1 . 0f / 256 . 0f ; <nl> + const float output_scale_int16 = 1 . 0f / 32768 . 0f ; <nl> const int output_zero_point_int8 = - 128 ; <nl> const int output_zero_point_uint8 = 0 ; <nl> + const int output_zero_point_int16 = 0 ; <nl> + <nl> + / / Empirical tolerance in quantization space <nl> + const float tolerance_int16 = 7 . 0 ; <nl> <nl> / / 1 - dimensional test data . <nl> const int flat_size_1d = 5 ; <nl> void TestSoftmaxQuantized ( const int * input_dims_data , const float * input_data , <nl> int input_zero_point , const int * output_dims_data , <nl> const float * golden , T * golden_quantized , <nl> float output_scale , int output_zero_point , <nl> - T * output_data ) { <nl> + T * output_data , float tolerance = 1 . 0 ) { <nl> TfLiteIntArray * input_dims = IntArrayFromInts ( input_dims_data ) ; <nl> TfLiteIntArray * output_dims = IntArrayFromInts ( output_dims_data ) ; <nl> const int output_dims_count = ElementCount ( * output_dims ) ; <nl> void TestSoftmaxQuantized ( const int * input_dims_data , const float * input_data , <nl> output_zero_point ) ; <nl> <nl> ValidateSoftmaxGoldens ( tensors , tensors_size , output_data , golden_quantized , <nl> - output_dims_count , 1 . 0 ) ; <nl> + output_dims_count , tolerance ) ; <nl> } <nl> <nl> } / / namespace <nl> TF_LITE_MICRO_TEST ( Softmax1DQuantizedInt8ShouldMatchGolden ) { <nl> tflite : : testing : : output_zero_point_int8 , output_data ) ; <nl> } <nl> <nl> + TF_LITE_MICRO_TEST ( Softmax1DQuantizedInt16ShouldMatchGolden ) { <nl> + const float input_scale = 0 . 
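A note on the kTfLiteInt16 rescale computed above, shown as worked arithmetic. Assuming the test's input_scale = 0.1 and the default beta = 1.0 (both assumptions for illustration):

  // input_scale_beta_rescale = input_scale * beta / (10.0 / 65535.0)
  //                          = 0.1 * 1.0 * 6553.5
  //                          = 655.35
  //
  // QuantizeMultiplier then factors 655.35 into a Q31 multiplier and a
  // left shift, so the int16 difference (x - max), which lies in
  // [-65535, 0], lands in the exp LUT's domain [-10.0, 0.0] before the
  // 513-entry table lookup (the last entry exists only for slope
  // interpolation, as gen_lut's comment notes).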
1f ; <nl> + const int input_zero_point = 0 ; <nl> + <nl> + int16_t input_quantized [ tflite : : testing : : flat_size_1d ] ; <nl> + int16_t golden_quantized [ tflite : : testing : : flat_size_1d ] ; <nl> + int16_t output_data [ tflite : : testing : : flat_size_1d ] ; <nl> + tflite : : testing : : TestSoftmaxQuantized ( <nl> + tflite : : testing : : shape_1d , tflite : : testing : : input_data_1d , <nl> + input_quantized , input_scale , input_zero_point , tflite : : testing : : shape_1d , <nl> + tflite : : testing : : golden_1d , golden_quantized , <nl> + tflite : : testing : : output_scale_int16 , <nl> + tflite : : testing : : output_zero_point_int16 , output_data ) ; <nl> + } <nl> + <nl> TF_LITE_MICRO_TEST ( Softmax2DFloatShouldMatchGolden ) { <nl> float output_data [ tflite : : testing : : flat_size_2d ] ; <nl> tflite : : testing : : TestSoftmaxFloat ( <nl> TF_LITE_MICRO_TEST ( Softmax2DQuantizedInt8ShouldMatchGolden ) { <nl> tflite : : testing : : output_zero_point_int8 , output_data ) ; <nl> } <nl> <nl> + TF_LITE_MICRO_TEST ( Softmax2DQuantizedInt16ShouldMatchGolden ) { <nl> + const float input_scale = 0 . 1f ; <nl> + const int input_zero_point = 0 ; <nl> + <nl> + int16_t input_quantized [ tflite : : testing : : flat_size_2d ] ; <nl> + int16_t golden_quantized [ tflite : : testing : : flat_size_2d ] ; <nl> + int16_t output_data [ tflite : : testing : : flat_size_2d ] ; <nl> + tflite : : testing : : TestSoftmaxQuantized ( <nl> + tflite : : testing : : shape_2d , tflite : : testing : : input_data_2d , <nl> + input_quantized , input_scale , input_zero_point , tflite : : testing : : shape_2d , <nl> + tflite : : testing : : golden_2d , golden_quantized , <nl> + tflite : : testing : : output_scale_int16 , <nl> + tflite : : testing : : output_zero_point_int16 , output_data ) ; <nl> + } <nl> + <nl> TF_LITE_MICRO_TEST ( Softmax3DFloatShouldMatchGolden ) { <nl> float output_data [ tflite : : testing : : flat_size_3d ] ; <nl> tflite : : testing : : TestSoftmaxFloat ( <nl> TF_LITE_MICRO_TEST ( Softmax3DQuantizedInt8ShouldMatchGolden ) { <nl> tflite : : testing : : output_zero_point_int8 , output_data ) ; <nl> } <nl> <nl> + TF_LITE_MICRO_TEST ( Softmax3DQuantizedInt16ShouldMatchGolden ) { <nl> + const float input_scale = 0 . 1f ; <nl> + const int input_zero_point = 0 ; <nl> + <nl> + int16_t input_quantized [ tflite : : testing : : flat_size_3d ] ; <nl> + int16_t golden_quantized [ tflite : : testing : : flat_size_3d ] ; <nl> + int16_t output_data [ tflite : : testing : : flat_size_3d ] ; <nl> + tflite : : testing : : TestSoftmaxQuantized ( <nl> + tflite : : testing : : shape_3d , tflite : : testing : : input_data_3d , <nl> + input_quantized , input_scale , input_zero_point , tflite : : testing : : shape_3d , <nl> + tflite : : testing : : golden_3d , golden_quantized , <nl> + tflite : : testing : : output_scale_int16 , <nl> + tflite : : testing : : output_zero_point_int16 , output_data , <nl> + tflite : : testing : : tolerance_int16 ) ; <nl> + } <nl> + <nl> TF_LITE_MICRO_TEST ( Softmax4DFloatShouldMatchGolden ) { <nl> float output_data [ tflite : : testing : : flat_size_4d ] ; <nl> tflite : : testing : : TestSoftmaxFloat ( <nl> TF_LITE_MICRO_TEST ( Softmax4DQuantizedInt8ShouldMatchGolden ) { <nl> tflite : : testing : : output_zero_point_int8 , output_data ) ; <nl> } <nl> <nl> + TF_LITE_MICRO_TEST ( Softmax4DQuantizedInt16ShouldMatchGolden ) { <nl> + const float input_scale = 0 . 
1f ; <nl> + const int input_zero_point = 0 ; <nl> + <nl> + int16_t input_quantized [ tflite : : testing : : flat_size_4d ] ; <nl> + int16_t golden_quantized [ tflite : : testing : : flat_size_4d ] ; <nl> + int16_t output_data [ tflite : : testing : : flat_size_4d ] ; <nl> + tflite : : testing : : TestSoftmaxQuantized ( <nl> + tflite : : testing : : shape_4d , tflite : : testing : : input_data_4d , <nl> + input_quantized , input_scale , input_zero_point , tflite : : testing : : shape_4d , <nl> + tflite : : testing : : golden_4d , golden_quantized , <nl> + tflite : : testing : : output_scale_int16 , <nl> + tflite : : testing : : output_zero_point_int16 , output_data , <nl> + tflite : : testing : : tolerance_int16 ) ; <nl> + } <nl> TF_LITE_MICRO_TESTS_END <nl> mmm a / tensorflow / lite / micro / tools / make / Makefile <nl> ppp b / tensorflow / lite / micro / tools / make / Makefile <nl> tensorflow / lite / kernels / internal / reference / integer_ops / depthwise_conv . h \ <nl> tensorflow / lite / kernels / internal / reference / integer_ops / fully_connected . h \ <nl> tensorflow / lite / kernels / internal / reference / integer_ops / logistic . h \ <nl> tensorflow / lite / kernels / internal / reference / integer_ops / l2normalization . h \ <nl> + tensorflow / lite / kernels / internal / reference / integer_ops / mean . h \ <nl> tensorflow / lite / kernels / internal / reference / integer_ops / mul . h \ <nl> tensorflow / lite / kernels / internal / reference / integer_ops / pooling . h \ <nl> tensorflow / lite / kernels / internal / reference / integer_ops / tanh . h \ <nl> mmm a / tensorflow / lite / micro / tools / make / targets / esp32_makefile . inc <nl> ppp b / tensorflow / lite / micro / tools / make / targets / esp32_makefile . inc <nl> <nl> <nl> ifeq ( $ ( TARGET ) , esp ) <nl> TARGET_ARCH : = xtensa - esp32 <nl> + CCFLAGS : = $ ( filter - out - std = c11 , $ ( CCFLAGS ) ) <nl> + CFLAGS + = - std = c11 <nl> endif <nl> mmm a / tensorflow / lite / micro / tools / make / targets / hexagon_makefile . inc <nl> ppp b / tensorflow / lite / micro / tools / make / targets / hexagon_makefile . inc <nl> <nl> # Settings for Hexagon toolchain . <nl> # REQUIRED : <nl> - # - Hexagon SDK 3 . 5 Toolkit ( for hexagon - clang + + , hexagon - sim ) . <nl> - # - HEXAGON_SDK_PREFIX environment variable must be set to location of <nl> + # - Hexagon SDK 3 . 5 Toolkit ( for qurt , posix libs ) . <nl> + # HEXAGON_SDK_ROOT environment variable must be set to location of <nl> # Hexagon_SDK / < version > / on your machine . <nl> + # - Hexagon Tools root ( for hexagon - clang + + , hexagon - sim ) . <nl> + # The tool folder may be a part of the Hexagon SDK <nl> + # ( e . g . $ ( HEXAGON_SDK_ROOT ) / tools / HEXAGON_Tools ) or installed <nl> + # separately . <nl> + # HEXAGON_ROOT environment variable must be set to location of <nl> + # HEXAGON_Tools on your machine . <nl> + # - HEXAGON_TOOL_VER : The Hexagon tool version ( installed under HEXAGON_ROOT ) . <nl> + # For example : 8 . 3 . 07 <nl> # - HEXAGON_CPU_VER : The CPU version to use , will cause a compiler exception <nl> - # without providing a version . Acceptable values : v55 - v67 <nl> + # without providing a version . Valid values may vary depending on tools <nl> + # version , but generally in the range : v55 - v67 <nl> # <nl> # Unlike other targets , there is not currently a way to automatically download <nl> # the Hexagon SDK . 
For this reason , users are required to manually download <nl> <nl> ifeq ( $ ( TARGET ) , hexagon ) <nl> TARGET_ARCH : = hexagon <nl> <nl> - ifndef HEXAGON_SDK_PREFIX <nl> - $ ( error HEXAGON_SDK_PREFIX is undefined ) <nl> + ifndef HEXAGON_SDK_ROOT <nl> + $ ( error HEXAGON_SDK_ROOT is undefined ) <nl> + endif <nl> + <nl> + ifndef HEXAGON_TOOL_VER <nl> + $ ( error HEXAGON_TOOL_VER is undefined ) <nl> + endif <nl> + <nl> + ifndef HEXAGON_ROOT <nl> + $ ( error HEXAGON_ROOT is undefined ) <nl> endif <nl> <nl> ifndef HEXAGON_CPU_VER <nl> ifeq ( $ ( TARGET ) , hexagon ) <nl> - mcpu = $ ( HEXAGON_CPU_VER ) \ <nl> - m $ ( HEXAGON_CPU_VER ) <nl> <nl> + export PATH : = $ ( HEXAGON_ROOT ) / $ ( HEXAGON_TOOL_VER ) / Tools / bin : $ ( PATH ) <nl> TARGET_TOOLCHAIN_PREFIX : = hexagon - <nl> CXX_TOOL : = clang + + <nl> CC_TOOL : = clang <nl> ifeq ( $ ( TARGET ) , hexagon ) <nl> CCFLAGS + = $ ( PLATFORM_ARGS ) <nl> LDFLAGS + = \ <nl> - Wl , - - gc - sections - lhexagon \ <nl> - $ ( HEXAGON_SDK_PREFIX ) / tools / HEXAGON_Tools / 8 . 3 . 07 / Tools / target / hexagon / lib / v66 / libstdc + + . a <nl> + $ ( HEXAGON_ROOT ) / $ ( HEXAGON_TOOL_VER ) / Tools / target / hexagon / lib / v66 / libstdc + + . a <nl> <nl> INCLUDES + = \ <nl> - - I $ ( HEXAGON_SDK_PREFIX ) / libs / common / qurt / computev66 / include / posix \ <nl> - - I $ ( HEXAGON_SDK_PREFIX ) / libs / common / qurt / computev66 / include / qurt <nl> + - I $ ( HEXAGON_SDK_ROOT ) / libs / common / qurt / computev66 / include / posix \ <nl> + - I $ ( HEXAGON_SDK_ROOT ) / libs / common / qurt / computev66 / include / qurt <nl> <nl> TEST_SCRIPT : = tensorflow / lite / micro / testing / test_hexagon_binary . sh <nl> endif <nl> mmm a / tensorflow / lite / tools / benchmark / experimental / c / c_api_types . h <nl> ppp b / tensorflow / lite / tools / benchmark / experimental / c / c_api_types . h <nl> void TfLiteFloatArrayFree ( TfLiteFloatArray * a ) ; <nl> } \ <nl> } while ( 0 ) <nl> <nl> + # define TF_LITE_ENSURE_NEAR ( context , a , b , epsilon ) \ <nl> + do { \ <nl> + auto delta = ( ( a ) > ( b ) ) ? ( ( a ) - ( b ) ) : ( ( b ) - ( a ) ) ; \ <nl> + if ( delta > epsilon ) { \ <nl> + TF_LITE_KERNEL_LOG ( ( context ) , " % s : % d % s not near % s ( % f ! = % f ) " , \ <nl> + __FILE__ , __LINE__ , # a , # b , static_cast < double > ( a ) , \ <nl> + static_cast < double > ( b ) ) ; \ <nl> + return kTfLiteError ; \ <nl> + } \ <nl> + } while ( 0 ) <nl> + <nl> # define TF_LITE_ENSURE_OK ( context , status ) \ <nl> do { \ <nl> const TfLiteStatus s = ( status ) ; \ <nl> mmm a / tensorflow / python / BUILD <nl> ppp b / tensorflow / python / BUILD <nl> py_library ( <nl> " : pywrap_tf_session " , <nl> " : pywrap_tfe " , <nl> " : rnn_ops_gen " , <nl> - " : saver_test_utils " , <nl> " : script_ops " , <nl> " : sendrecv_ops_gen " , <nl> " : session_ops " , <nl> py_library ( <nl> " / / tensorflow / python / tools : module_util " , <nl> " / / tensorflow / python / tools / api / generator : create_python_api " , <nl> " / / tensorflow / python / tpu : tpu_noestimator " , <nl> + " / / tensorflow / python / training : saver_test_utils " , <nl> " / / tensorflow / python / types " , <nl> " / / third_party / py / numpy " , <nl> ] , <nl> tf_python_pybind_extension ( <nl> tf_python_pybind_extension ( <nl> name = " _pywrap_quantize_training " , <nl> srcs = [ <nl> - " training / quantize_training_wrapper . cc " , <nl> + " / / tensorflow / python / training : quantize_training_wrapper . 
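The TF_LITE_ENSURE_NEAR macro added above is what the micro softmax kernel uses to validate quantized output scales. A minimal usage sketch — the function name is illustrative, the macro arguments mirror the kernel's own check:

  // Fails with a logged message and kTfLiteError when
  // |scale - 1/32768| > 0.001 * (1/32768); otherwise falls through.
  TfLiteStatus CheckInt16SoftmaxOutputScale(TfLiteContext* context,
                                            const TfLiteTensor* output) {
    TF_LITE_ENSURE_NEAR(context, output->params.scale, 1.f / 32768,
                        (0.001f * 1.f / 32768));
    return kTfLiteOk;
  }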
cc " , <nl> ] , <nl> hdrs = [ " / / tensorflow / core / common_runtime : quantize_training_hdrs " ] , <nl> module_name = " _pywrap_quantize_training " , <nl> py_library ( <nl> ] , <nl> ) <nl> <nl> + cc_library ( <nl> + name = " py_context_manager " , <nl> + srcs = [ " framework / py_context_manager . cc " ] , <nl> + hdrs = [ " framework / py_context_manager . h " ] , <nl> + deps = [ <nl> + " : safe_pyobject_ptr " , <nl> + " / / tensorflow / core : lib " , # for core / platform / logging . h <nl> + " / / third_party / python_runtime : headers " , <nl> + ] , <nl> + ) <nl> + <nl> + # Pybind extension used by py_context_manager_test . <nl> + tf_python_pybind_extension ( <nl> + name = " _py_context_manager " , <nl> + srcs = [ " framework / py_context_manager_pybind . cc " ] , <nl> + module_name = " _py_context_manager " , <nl> + deps = [ <nl> + " : py_context_manager " , <nl> + " / / third_party / python_runtime : headers " , <nl> + " @ pybind11 " , <nl> + ] , <nl> + ) <nl> + <nl> + tf_py_test ( <nl> + name = " py_context_manager_test " , <nl> + srcs = [ " framework / py_context_manager_test . py " ] , <nl> + python_version = " PY3 " , <nl> + tags = [ " no_pip " ] , <nl> + tfrt_enabled = True , <nl> + deps = [ <nl> + " : _py_context_manager " , <nl> + ] , <nl> + ) <nl> + <nl> cc_library ( <nl> name = " op_def_util_cc " , <nl> srcs = [ " framework / op_def_util . cc " ] , <nl> tf_gen_op_wrapper_private_py ( <nl> " / / tensorflow / compiler / tests : __pkg__ " , <nl> " / / tensorflow / python / kernel_tests : __pkg__ " , <nl> " / / tensorflow / python / kernel_tests / v1_compat_tests : __pkg__ " , <nl> + " / / tensorflow / python / training : __pkg__ " , <nl> ] , <nl> deps = [ <nl> " / / tensorflow / c / kernels : bitcast_op_lib " , <nl> tf_gen_op_wrapper_private_py ( <nl> <nl> tf_gen_op_wrapper_private_py ( <nl> name = " checkpoint_ops_gen " , <nl> - visibility = [ " / / tensorflow / python / kernel_tests : __pkg__ " ] , <nl> + visibility = [ <nl> + " / / tensorflow / python / kernel_tests : __pkg__ " , <nl> + " / / tensorflow / python / training : __pkg__ " , <nl> + ] , <nl> ) <nl> <nl> tf_gen_op_wrapper_private_py ( <nl> tf_gen_op_wrapper_private_py ( <nl> visibility = [ <nl> " / / learning / brain / python / ops : __pkg__ " , <nl> " / / tensorflow / python / kernel_tests : __pkg__ " , <nl> + " / / tensorflow / python / training : __pkg__ " , <nl> ] , <nl> ) <nl> <nl> tf_gen_op_wrapper_private_py ( <nl> visibility = [ <nl> " / / learning / brain / python / ops : __pkg__ " , <nl> " / / tensorflow / python / kernel_tests : __pkg__ " , <nl> + " / / tensorflow / python / training : __pkg__ " , <nl> " / / tensorflow / python / training / tracking : __pkg__ " , <nl> ] , <nl> ) <nl> tf_gen_op_wrapper_private_py ( <nl> visibility = [ <nl> " / / learning / brain / python / ops : __pkg__ " , <nl> " / / tensorflow / python / kernel_tests : __pkg__ " , <nl> + " / / tensorflow / python / training : __pkg__ " , <nl> ] , <nl> ) <nl> <nl> tf_gen_op_wrapper_private_py ( <nl> visibility = [ <nl> " / / learning / brain / python / ops : __pkg__ " , <nl> " / / tensorflow / python / kernel_tests : __pkg__ " , <nl> + " / / tensorflow / python / training : __pkg__ " , <nl> ] , <nl> ) <nl> <nl> tf_gen_op_wrapper_private_py ( <nl> <nl> tf_gen_op_wrapper_private_py ( <nl> name = " training_ops_gen " , <nl> - out = " training / gen_training_ops . 
py " , <nl> + visibility = [ <nl> + " / / tensorflow / python / training : __pkg__ " , <nl> + ] , <nl> ) <nl> <nl> tf_gen_op_wrapper_private_py ( <nl> py_library ( <nl> ] , <nl> ) <nl> <nl> - py_library ( <nl> - name = " loss_scale " , <nl> - srcs = [ " training / experimental / loss_scale . py " ] , <nl> - srcs_version = " PY2AND3 " , <nl> - deps = [ <nl> - " : framework " , <nl> - " @ absl_py / / absl / testing : parameterized " , <nl> - ] , <nl> - ) <nl> - <nl> - py_library ( <nl> - name = " loss_scale_optimizer " , <nl> - srcs = [ " training / experimental / loss_scale_optimizer . py " ] , <nl> - srcs_version = " PY2AND3 " , <nl> - deps = [ <nl> - " : loss_scale " , <nl> - " / / tensorflow / python / distribute : distribute_lib " , <nl> - " @ absl_py / / absl / testing : parameterized " , <nl> - ] , <nl> - ) <nl> - <nl> - py_test ( <nl> - name = " loss_scale_optimizer_test " , <nl> - size = " small " , <nl> - srcs = [ " training / experimental / loss_scale_optimizer_test . py " ] , <nl> - python_version = " PY3 " , <nl> - deps = [ <nl> - " : client_testlib " , <nl> - " : loss_scale_optimizer " , <nl> - " / / tensorflow / python / distribute : mirrored_strategy " , <nl> - " / / tensorflow / python / distribute : one_device_strategy " , <nl> - " / / tensorflow / python / keras / mixed_precision / experimental : test_util " , <nl> - " @ absl_py / / absl / testing : parameterized " , <nl> - ] , <nl> - ) <nl> - <nl> - py_test ( <nl> - name = " loss_scale_test " , <nl> - size = " medium " , <nl> - srcs = [ " training / experimental / loss_scale_test . py " ] , <nl> - python_version = " PY3 " , <nl> - deps = [ <nl> - " : client_testlib " , <nl> - " : loss_scale " , <nl> - " / / tensorflow / python / distribute : mirrored_strategy " , <nl> - " / / tensorflow / python / distribute : one_device_strategy " , <nl> - " @ absl_py / / absl / testing : parameterized " , <nl> - ] , <nl> - ) <nl> - <nl> - py_library ( <nl> - name = " mixed_precision_global_state " , <nl> - srcs = [ " training / experimental / mixed_precision_global_state . py " ] , <nl> - srcs_version = " PY2AND3 " , <nl> - ) <nl> - <nl> - py_library ( <nl> - name = " mixed_precision " , <nl> - srcs = [ " training / experimental / mixed_precision . py " ] , <nl> - srcs_version = " PY2AND3 " , <nl> - deps = [ <nl> - " : config " , <nl> - " : loss_scale " , <nl> - " : loss_scale_optimizer " , <nl> - " : mixed_precision_global_state " , <nl> - " : util " , <nl> - ] , <nl> - ) <nl> - <nl> - cuda_py_test ( <nl> - name = " mixed_precision_test " , <nl> - size = " small " , <nl> - srcs = [ " training / experimental / mixed_precision_test . py " ] , <nl> - python_version = " PY3 " , <nl> - tfrt_enabled = True , <nl> - deps = [ <nl> - " : client_testlib " , <nl> - " : mixed_precision " , <nl> - " @ absl_py / / absl / testing : parameterized " , <nl> - ] , <nl> - ) <nl> - <nl> - py_library ( <nl> - name = " loss_scaling_gradient_tape " , <nl> - srcs = [ " training / experimental / loss_scaling_gradient_tape . py " ] , <nl> - srcs_version = " PY2AND3 " , <nl> - deps = [ <nl> - " : array_ops " , <nl> - " : loss_scale " , <nl> - " : unconnected_gradients " , <nl> - " : util " , <nl> - " / / tensorflow / python / distribute : distribute_lib " , <nl> - " / / tensorflow / python / eager : backprop " , <nl> - ] , <nl> - ) <nl> - <nl> - cuda_py_test ( <nl> - name = " loss_scaling_gradient_tape_test " , <nl> - size = " medium " , <nl> - srcs = [ " training / experimental / loss_scaling_gradient_tape_test . 
py " ] , <nl> - shard_count = 2 , <nl> - deps = [ <nl> - " : client_testlib " , <nl> - " : constant_op " , <nl> - " : framework_test_combinations_lib " , <nl> - " : loss_scale " , <nl> - " : loss_scaling_gradient_tape " , <nl> - " / / tensorflow / python / compat : v2_compat " , <nl> - " / / tensorflow / python / distribute : mirrored_strategy " , <nl> - " / / tensorflow / python / eager : def_function " , <nl> - " / / third_party / py / numpy " , <nl> - " @ absl_py / / absl / testing : parameterized " , <nl> - ] , <nl> - ) <nl> - <nl> py_library ( <nl> name = " math_grad " , <nl> srcs = [ " ops / math_grad . py " ] , <nl> py_library ( <nl> " : linalg_ops " , <nl> " : logging_ops " , <nl> " : lookup_ops " , <nl> - " : loss_scaling_gradient_tape " , <nl> " : manip_grad " , <nl> " : manip_ops " , <nl> " : math_grad " , <nl> py_library ( <nl> " / / tensorflow / python / ops / linalg / sparse " , <nl> " / / tensorflow / python / ops / ragged " , <nl> " / / tensorflow / python / ops / structured " , <nl> + " / / tensorflow / python / training / experimental : loss_scaling_gradient_tape " , <nl> ] , <nl> ) <nl> <nl> tf_py_test ( <nl> ) <nl> <nl> py_library ( <nl> - name = " training_lib " , <nl> - srcs = glob ( <nl> - [ " training / * * / * . py " ] , <nl> - exclude = [ <nl> - " * * / * test * " , <nl> - " training / tracking / * * / * . py " , <nl> - " training / saving / * * / * . py " , <nl> - # The following targets have their own build rules ( same name as the <nl> - # file ) : <nl> - " training / basic_session_run_hooks . py " , <nl> - " training / checkpoint_management . py " , <nl> - " training / distribute . py " , <nl> - " training / distribution_strategy_context . py " , <nl> - " training / saver . py " , <nl> - " training / session_run_hook . py " , <nl> - " training / training_util . py " , <nl> - ] , <nl> - ) , <nl> + name = " client " , <nl> + srcs = [ <nl> + " client / client_lib . py " , <nl> + " client / device_lib . py " , <nl> + " client / timeline . 
py " , <nl> + ] , <nl> srcs_version = " PY2AND3 " , <nl> deps = [ <nl> - " : array_ops " , <nl> - " : array_ops_gen " , <nl> - " : basic_session_run_hooks " , <nl> - " : checkpoint_management " , <nl> - " : checkpoint_ops_gen " , <nl> - " : client " , <nl> - " : control_flow_ops " , <nl> - " : data_flow_ops " , <nl> - " : device " , <nl> - " : device_spec " , <nl> - " : distribute " , <nl> + " : _pywrap_device_lib " , <nl> " : errors " , <nl> " : framework " , <nl> " : framework_for_generated_wrappers " , <nl> - " : framework_ops " , <nl> - " : gradients " , <nl> - " : init_ops " , <nl> - " : io_ops " , <nl> - " : layers_util " , <nl> - " : lookup_ops " , <nl> - " : loss_scale " , <nl> - " : loss_scale_optimizer " , <nl> - " : math_ops " , <nl> - " : mixed_precision " , <nl> " : platform " , <nl> - " : py_checkpoint_reader " , <nl> - " : pywrap_tensorflow " , <nl> - " : random_ops " , <nl> - " : resource_variable_ops " , <nl> - " : resources " , <nl> - " : saver " , <nl> - " : sdca_ops " , <nl> " : session " , <nl> - " : session_run_hook " , <nl> - " : sparse_ops " , <nl> - " : sparse_tensor " , <nl> - " : state_ops " , <nl> - " : summary " , <nl> - " : training_ops_gen " , <nl> - " : training_util " , <nl> + " : session_ops " , <nl> " : util " , <nl> - " : variable_scope " , <nl> - " : variables " , <nl> - " / / tensorflow / core : protos_all_py " , <nl> - " / / tensorflow / python / data / experimental / service : server_lib " , <nl> - " / / tensorflow / python / data / ops : dataset_ops " , <nl> - " / / tensorflow / python / distribute : distribute_coordinator_context " , <nl> - " / / tensorflow / python / distribute : distribute_lib " , <nl> - " / / tensorflow / python / distribute : reduce_util " , <nl> - " / / tensorflow / python / eager : backprop " , <nl> - " / / tensorflow / python / eager : context " , <nl> - " / / tensorflow / python / keras / optimizer_v2 : legacy_learning_rate_decay " , <nl> - " / / tensorflow / python / ops / losses " , <nl> " / / third_party / py / numpy " , <nl> " @ six_archive / / : six " , <nl> ] , <nl> ) <nl> <nl> - py_library ( <nl> - name = " training " , <nl> + # Leaf library : may not depend on anything else inside TensorFlow . <nl> + py_strict_library ( <nl> + name = " tf_export " , <nl> + srcs = [ " util / tf_export . py " ] , <nl> srcs_version = " PY2AND3 " , <nl> + visibility = [ " / / tensorflow : __subpackages__ " ] , <nl> deps = [ <nl> - " : training_lib " , <nl> - " / / tensorflow / python / training / tracking : base " , <nl> - " / / tensorflow / python / training / tracking : python_state " , <nl> - " / / tensorflow / python / training / tracking : util " , <nl> + " : tf_decorator " , <nl> ] , <nl> ) <nl> <nl> - # Dependency added and used by ClusterResolvers to avoid circular dependency between keras , distribute , and training . <nl> - py_library ( <nl> - name = " training_server_lib " , <nl> - srcs = [ " training / server_lib . py " ] , <nl> - srcs_version = " PY2AND3 " , <nl> + tf_py_test ( <nl> + name = " tf_export_test " , <nl> + srcs = [ " util / tf_export_test . py " ] , <nl> + python_version = " PY3 " , <nl> + tfrt_enabled = True , <nl> deps = [ <nl> - " : framework " , <nl> - " : pywrap_tf_session " , <nl> + " : client_testlib " , <nl> + " : platform " , <nl> " : util " , <nl> - " / / tensorflow / core : protos_all_py " , <nl> ] , <nl> ) <nl> <nl> - py_library ( <nl> - name = " py_checkpoint_reader " , <nl> - srcs = [ " training / py_checkpoint_reader . 
py " ] , <nl> - deps = [ <nl> - " : _pywrap_checkpoint_reader " , <nl> - " : dtypes " , <nl> - " : errors " , <nl> - " : util " , <nl> + # Leaf library : may not depend on anything else inside TensorFlow . <nl> + # TODO ( mdan ) : Move this utility outside of TF . <nl> + py_strict_library ( <nl> + name = " tf_decorator " , <nl> + srcs = [ <nl> + " util / tf_contextlib . py " , <nl> + " util / tf_decorator . py " , <nl> + " util / tf_inspect . py " , <nl> + ] , <nl> + srcs_version = " PY2AND3 " , <nl> + visibility = [ <nl> + " / / tensorflow : __subpackages__ " , <nl> + # TODO ( mdan ) : Remove these dependencies . <nl> + " / / third_party / py / tf_slim : __subpackages__ " , <nl> + " / / learning / deepmind / research / language / translation / lm : __subpackages__ " , <nl> ] , <nl> - ) <nl> - <nl> - py_library ( <nl> - name = " checkpoint_management " , <nl> - srcs = [ " training / checkpoint_management . py " ] , <nl> deps = [ <nl> - " : errors " , <nl> - " : lib " , <nl> - " : platform " , <nl> - " : protos_all_py " , <nl> - " : util " , <nl> - " / / tensorflow / core : protos_all_py " , <nl> + " : tf_stack " , <nl> + " @ six_archive / / : six " , <nl> ] , <nl> ) <nl> <nl> - py_library ( <nl> - name = " session_run_hook " , <nl> - srcs = [ " training / session_run_hook . py " ] , <nl> + # Leaf library : may not depend on anything else inside TensorFlow . <nl> + py_strict_library ( <nl> + name = " tf_stack " , <nl> + srcs = [ " util / tf_stack . py " ] , <nl> srcs_version = " PY2AND3 " , <nl> - deps = [ " : util " ] , <nl> + # TODO ( mdan ) : Remove public visibility . <nl> + visibility = [ " / / visibility : public " ] , <nl> + deps = [ <nl> + " : _tf_stack " , <nl> + " @ six_archive / / : six " , <nl> + ] , <nl> ) <nl> <nl> - py_library ( <nl> - name = " basic_session_run_hooks " , <nl> - srcs = [ " training / basic_session_run_hooks . py " ] , <nl> - srcs_version = " PY2AND3 " , <nl> + pybind_extension ( <nl> + name = " _tf_stack " , <nl> + srcs = [ " util / tf_stack . cc " ] , <nl> + # TODO ( b / 138203821 ) : change to " util . _tf_stack " once the bug is fixed . <nl> + module_name = " _tf_stack " , <nl> deps = [ <nl> - " : client " , <nl> - " : framework " , <nl> - " : platform " , <nl> - " : protos_all_py " , <nl> - " : session_run_hook " , <nl> - " : training_util " , <nl> - " : util " , <nl> - ] , <nl> - ) <nl> - <nl> - py_library ( <nl> - name = " saver " , <nl> - srcs = [ " training / saver . py " ] , <nl> - srcs_version = " PY2AND3 " , <nl> - deps = [ <nl> - " : array_ops " , <nl> - " : checkpoint_management " , <nl> - " : constant_op " , <nl> - " : control_flow_ops " , <nl> - " : device " , <nl> - " : errors " , <nl> - " : framework " , <nl> - " : framework_ops " , <nl> - " : io_ops " , <nl> - " : io_ops_gen " , <nl> - " : platform " , <nl> - " : py_checkpoint_reader " , <nl> - " : resource_variable_ops " , <nl> - " : session " , <nl> - " : state_ops " , <nl> - " : string_ops " , <nl> - " : training_util " , <nl> - " : util " , <nl> - " : variables " , <nl> - " / / tensorflow / core : protos_all_py " , <nl> - " / / tensorflow / python / eager : context " , <nl> - " / / tensorflow / python / training / saving : saveable_object " , <nl> - " / / tensorflow / python / training / saving : saveable_object_util " , <nl> - " / / tensorflow / python / training / tracking : base " , <nl> - " / / third_party / py / numpy " , <nl> - " @ six_archive / / : six " , <nl> - ] , <nl> - ) <nl> - <nl> - py_library ( <nl> - name = " distribute " , <nl> - srcs = [ <nl> - " training / distribute . 
py " , <nl> - " training / distribution_strategy_context . py " , <nl> - ] , <nl> - srcs_version = " PY2AND3 " , <nl> - deps = [ <nl> - " / / tensorflow / python / distribute : distribute_lib " , <nl> - ] , <nl> - ) <nl> - <nl> - tf_py_test ( <nl> - name = " evaluation_test " , <nl> - size = " small " , <nl> - srcs = [ " training / evaluation_test . py " ] , <nl> - python_version = " PY3 " , <nl> - shard_count = 3 , <nl> - tags = [ <nl> - " manual " , <nl> - " notap " , # Disabling until b / 33000128 and b / 33040312 are fixed . <nl> - ] , <nl> - deps = [ <nl> - " : array_ops " , <nl> - " : client " , <nl> - " : client_testlib " , <nl> - " : framework " , <nl> - " : framework_for_generated_wrappers " , <nl> - " : framework_test_lib " , <nl> - " : math_ops " , <nl> - " : metrics " , <nl> - " : platform " , <nl> - " : state_ops " , <nl> - " : summary " , <nl> - " : training " , <nl> - " : variables " , <nl> - " / / tensorflow / core : protos_all_py " , <nl> - " / / tensorflow / python / ops / losses " , <nl> - " / / third_party / py / numpy " , <nl> - ] , <nl> - ) <nl> - <nl> - py_library ( <nl> - name = " client " , <nl> - srcs = [ <nl> - " client / client_lib . py " , <nl> - " client / device_lib . py " , <nl> - " client / timeline . py " , <nl> - ] , <nl> - srcs_version = " PY2AND3 " , <nl> - deps = [ <nl> - " : _pywrap_device_lib " , <nl> - " : errors " , <nl> - " : framework " , <nl> - " : framework_for_generated_wrappers " , <nl> - " : platform " , <nl> - " : session " , <nl> - " : session_ops " , <nl> - " : util " , <nl> - " / / third_party / py / numpy " , <nl> - " @ six_archive / / : six " , <nl> - ] , <nl> - ) <nl> - <nl> - # Leaf library : may not depend on anything else inside TensorFlow . <nl> - py_strict_library ( <nl> - name = " tf_export " , <nl> - srcs = [ " util / tf_export . py " ] , <nl> - srcs_version = " PY2AND3 " , <nl> - visibility = [ " / / tensorflow : __subpackages__ " ] , <nl> - deps = [ <nl> - " : tf_decorator " , <nl> - ] , <nl> - ) <nl> - <nl> - tf_py_test ( <nl> - name = " tf_export_test " , <nl> - srcs = [ " util / tf_export_test . py " ] , <nl> - python_version = " PY3 " , <nl> - tfrt_enabled = True , <nl> - deps = [ <nl> - " : client_testlib " , <nl> - " : platform " , <nl> - " : util " , <nl> - ] , <nl> - ) <nl> - <nl> - # Leaf library : may not depend on anything else inside TensorFlow . <nl> - # TODO ( mdan ) : Move this utility outside of TF . <nl> - py_strict_library ( <nl> - name = " tf_decorator " , <nl> - srcs = [ <nl> - " util / tf_contextlib . py " , <nl> - " util / tf_decorator . py " , <nl> - " util / tf_inspect . py " , <nl> - ] , <nl> - srcs_version = " PY2AND3 " , <nl> - visibility = [ <nl> - " / / tensorflow : __subpackages__ " , <nl> - # TODO ( mdan ) : Remove these dependencies . <nl> - " / / third_party / py / tf_slim : __subpackages__ " , <nl> - " / / learning / deepmind / research / language / translation / lm : __subpackages__ " , <nl> - ] , <nl> - deps = [ <nl> - " : tf_stack " , <nl> - " @ six_archive / / : six " , <nl> - ] , <nl> - ) <nl> - <nl> - # Leaf library : may not depend on anything else inside TensorFlow . <nl> - py_strict_library ( <nl> - name = " tf_stack " , <nl> - srcs = [ " util / tf_stack . py " ] , <nl> - srcs_version = " PY2AND3 " , <nl> - # TODO ( mdan ) : Remove public visibility . 
<nl> - visibility = [ " / / visibility : public " ] , <nl> - deps = [ <nl> - " : _tf_stack " , <nl> - " @ six_archive / / : six " , <nl> - ] , <nl> - ) <nl> - <nl> - pybind_extension ( <nl> - name = " _tf_stack " , <nl> - srcs = [ " util / tf_stack . cc " ] , <nl> - # TODO ( b / 138203821 ) : change to " util . _tf_stack " once the bug is fixed . <nl> - module_name = " _tf_stack " , <nl> - deps = [ <nl> - " / / third_party / python_runtime : headers " , # buildcleaner : keep <nl> - " @ pybind11 " , <nl> + " / / third_party / python_runtime : headers " , # buildcleaner : keep <nl> + " @ pybind11 " , <nl> ] , <nl> ) <nl> <nl> tf_proto_library ( <nl> " framework / cpp_shape_inference . proto " , <nl> ] , <nl> ) , <nl> + protodeps = [ " / / tensorflow / python / training : checkpoint_state " ] , <nl> visibility = visibility , <nl> ) <nl> <nl> py_library ( <nl> ] , <nl> ) <nl> <nl> - tf_py_test ( <nl> - name = " server_lib_test " , <nl> - size = " small " , <nl> - srcs = [ " training / server_lib_test . py " ] , <nl> - grpc_enabled = True , <nl> - python_version = " PY3 " , <nl> - tags = [ <nl> - " noasan " , # TODO ( b / 161236904 ) : flaky timeout in trying to start gRPC server <nl> - ] , <nl> - tfrt_enabled = True , <nl> + py_library ( <nl> + name = " timeline " , <nl> + srcs = [ " client / timeline . py " ] , <nl> + srcs_version = " PY2AND3 " , <nl> + visibility = [ " / / visibility : public " ] , <nl> deps = [ <nl> - " : array_ops " , <nl> - " : client " , <nl> - " : client_testlib " , <nl> - " : data_flow_ops " , <nl> - " : errors " , <nl> - " : framework_for_generated_wrappers " , <nl> - " : math_ops " , <nl> - " : training " , <nl> - " : variables " , <nl> - " / / tensorflow / core : protos_all_py " , <nl> - " / / third_party / py / numpy " , <nl> + " : platform " , <nl> ] , <nl> ) <nl> <nl> - tf_py_test ( <nl> - name = " server_lib_multiple_containers_test " , <nl> - size = " small " , <nl> - srcs = [ " training / server_lib_multiple_containers_test . py " ] , <nl> - grpc_enabled = True , <nl> - python_version = " PY3 " , <nl> - tfrt_enabled = True , <nl> + # Just used by tests . <nl> + tf_cuda_library ( <nl> + name = " construction_fails_op " , <nl> + srcs = [ " client / test_construction_fails_op . cc " ] , <nl> deps = [ <nl> - " : array_ops " , <nl> - " : client " , <nl> - " : client_testlib " , <nl> - " : data_flow_ops " , <nl> - " : errors " , <nl> - " : framework_for_generated_wrappers " , <nl> - " : math_ops " , <nl> - " : training " , <nl> - " : variables " , <nl> - " / / tensorflow / core : protos_all_py " , <nl> - " / / third_party / py / numpy " , <nl> + " / / tensorflow / core " , <nl> + " / / tensorflow / core : framework " , <nl> + " / / tensorflow / core : lib " , <nl> + " / / tensorflow / core : protos_all_cc " , <nl> ] , <nl> + alwayslink = 1 , <nl> ) <nl> <nl> tf_py_test ( <nl> - name = " server_lib_same_variables_clear_container_test " , <nl> - size = " small " , <nl> - srcs = [ " training / server_lib_same_variables_clear_container_test . py " ] , <nl> + name = " session_test " , <nl> + size = " medium " , <nl> + srcs = [ " client / session_test . 
py " ] , <nl> grpc_enabled = True , <nl> python_version = " PY3 " , <nl> - tfrt_enabled = True , <nl> - deps = [ <nl> - " : array_ops " , <nl> - " : client " , <nl> - " : client_testlib " , <nl> - " : data_flow_ops " , <nl> - " : errors " , <nl> - " : framework_for_generated_wrappers " , <nl> - " : math_ops " , <nl> - " : training " , <nl> - " : variables " , <nl> - " / / tensorflow / core : protos_all_py " , <nl> - " / / third_party / py / numpy " , <nl> + tags = [ <nl> + " no_gpu " , # b / 127001953 <nl> + " no_pip_gpu " , # testInteractivePlacePrunedGraph fails on invalid assumption about GPU ops . <nl> + " no_windows " , <nl> ] , <nl> - ) <nl> - <nl> - tf_py_test ( <nl> - name = " server_lib_same_variables_clear_test " , <nl> - size = " small " , <nl> - srcs = [ " training / server_lib_same_variables_clear_test . py " ] , <nl> - grpc_enabled = True , <nl> - python_version = " PY3 " , <nl> tfrt_enabled = True , <nl> deps = [ <nl> " : array_ops " , <nl> " : client " , <nl> - " : client_testlib " , <nl> + " : config " , <nl> + " : control_flow_ops " , <nl> " : data_flow_ops " , <nl> " : errors " , <nl> + " : framework " , <nl> " : framework_for_generated_wrappers " , <nl> + " : framework_test_lib " , <nl> " : math_ops " , <nl> + " : platform_test " , <nl> + " : state_ops " , <nl> " : training " , <nl> + " : util " , <nl> " : variables " , <nl> - " / / tensorflow / core : protos_all_py " , <nl> " / / third_party / py / numpy " , <nl> + " @ six_archive / / : six " , <nl> ] , <nl> ) <nl> <nl> tf_py_test ( <nl> - name = " server_lib_same_variables_no_clear_test " , <nl> + name = " session_clusterspec_prop_test " , <nl> size = " small " , <nl> - srcs = [ " training / server_lib_same_variables_no_clear_test . py " ] , <nl> + srcs = [ " client / session_clusterspec_prop_test . py " ] , <nl> grpc_enabled = True , <nl> python_version = " PY3 " , <nl> - tfrt_enabled = True , <nl> + tags = [ <nl> + " no_gpu " , <nl> + " no_oss " , <nl> + " no_pip " , <nl> + " no_pip_gpu " , <nl> + " notap " , <nl> + ] , <nl> deps = [ <nl> " : array_ops " , <nl> " : client " , <nl> " : client_testlib " , <nl> - " : data_flow_ops " , <nl> - " : errors " , <nl> + " : framework " , <nl> " : framework_for_generated_wrappers " , <nl> - " : math_ops " , <nl> - " : training " , <nl> - " : variables " , <nl> - " / / tensorflow / core : protos_all_py " , <nl> - " / / third_party / py / numpy " , <nl> - ] , <nl> - ) <nl> - <nl> - tf_py_test ( <nl> - name = " server_lib_sparse_job_test " , <nl> - size = " small " , <nl> - srcs = [ " training / server_lib_sparse_job_test . py " ] , <nl> - grpc_enabled = True , <nl> - python_version = " PY3 " , <nl> - tfrt_enabled = True , <nl> - deps = [ <nl> - " : array_ops " , <nl> - " : client " , <nl> - " : client_testlib " , <nl> - " : data_flow_ops " , <nl> - " : errors " , <nl> - " : framework_for_generated_wrappers " , <nl> - " : math_ops " , <nl> - " : training " , <nl> - " : variables " , <nl> - " / / tensorflow / core : protos_all_py " , <nl> - " / / third_party / py / numpy " , <nl> - ] , <nl> - ) <nl> - <nl> - cuda_py_test ( <nl> - name = " localhost_cluster_performance_test " , <nl> - size = " medium " , <nl> - srcs = [ <nl> - " training / localhost_cluster_performance_test . py " , <nl> - ] , <nl> - grpc_enabled = True , <nl> - python_version = " PY3 " , <nl> - tags = [ <nl> - " no_oss " , # Test flaky due to port collisions . 
<nl> - " oss_serial " , <nl> - ] , <nl> - tfrt_enabled = True , <nl> - deps = [ <nl> - " : client " , <nl> - " : client_testlib " , <nl> - " : distributed_framework_test_lib " , <nl> - " : framework_for_generated_wrappers " , <nl> - " : partitioned_variables " , <nl> - " : training " , <nl> - " : variable_scope " , <nl> - " : variables " , <nl> - " / / third_party / py / numpy " , <nl> - ] , <nl> - ) <nl> - <nl> - tf_py_test ( <nl> - name = " sync_replicas_optimizer_test " , <nl> - size = " medium " , <nl> - srcs = [ <nl> - " training / sync_replicas_optimizer_test . py " , <nl> - ] , <nl> - grpc_enabled = True , <nl> - python_version = " PY3 " , <nl> - tags = [ <nl> - " no_oss " , # Test flaky due to port collisions . <nl> - " notsan " , # data race due to b / 62910646 <nl> - " oss_serial " , <nl> - ] , <nl> - tfrt_enabled = True , <nl> - deps = [ <nl> - " : client_testlib " , <nl> - " : framework_for_generated_wrappers " , <nl> - " : training " , <nl> - " : variables " , <nl> - ] , <nl> - ) <nl> - <nl> - py_library ( <nl> - name = " timeline " , <nl> - srcs = [ " client / timeline . py " ] , <nl> - srcs_version = " PY2AND3 " , <nl> - visibility = [ " / / visibility : public " ] , <nl> - deps = [ <nl> - " : platform " , <nl> - ] , <nl> - ) <nl> - <nl> - # Just used by tests . <nl> - tf_cuda_library ( <nl> - name = " construction_fails_op " , <nl> - srcs = [ " client / test_construction_fails_op . cc " ] , <nl> - deps = [ <nl> - " / / tensorflow / core " , <nl> - " / / tensorflow / core : framework " , <nl> - " / / tensorflow / core : lib " , <nl> - " / / tensorflow / core : protos_all_cc " , <nl> - ] , <nl> - alwayslink = 1 , <nl> - ) <nl> - <nl> - tf_py_test ( <nl> - name = " session_test " , <nl> - size = " medium " , <nl> - srcs = [ " client / session_test . py " ] , <nl> - grpc_enabled = True , <nl> - python_version = " PY3 " , <nl> - tags = [ <nl> - " no_gpu " , # b / 127001953 <nl> - " no_pip_gpu " , # testInteractivePlacePrunedGraph fails on invalid assumption about GPU ops . <nl> - " no_windows " , <nl> - ] , <nl> - tfrt_enabled = True , <nl> - deps = [ <nl> - " : array_ops " , <nl> - " : client " , <nl> - " : config " , <nl> - " : control_flow_ops " , <nl> - " : data_flow_ops " , <nl> - " : errors " , <nl> - " : framework " , <nl> - " : framework_for_generated_wrappers " , <nl> - " : framework_test_lib " , <nl> - " : math_ops " , <nl> - " : platform_test " , <nl> - " : state_ops " , <nl> - " : training " , <nl> - " : util " , <nl> - " : variables " , <nl> - " / / third_party / py / numpy " , <nl> - " @ six_archive / / : six " , <nl> - ] , <nl> - ) <nl> - <nl> - tf_py_test ( <nl> - name = " session_clusterspec_prop_test " , <nl> - size = " small " , <nl> - srcs = [ " client / session_clusterspec_prop_test . py " ] , <nl> - grpc_enabled = True , <nl> - python_version = " PY3 " , <nl> - tags = [ <nl> - " no_gpu " , <nl> - " no_oss " , <nl> - " no_pip " , <nl> - " no_pip_gpu " , <nl> - " notap " , <nl> - ] , <nl> - deps = [ <nl> - " : array_ops " , <nl> - " : client " , <nl> - " : client_testlib " , <nl> - " : framework " , <nl> - " : framework_for_generated_wrappers " , <nl> - " : framework_test_lib " , <nl> - " : math_ops " , <nl> - " : platform_test " , <nl> - " : state_ops " , <nl> - " : training " , <nl> - " : util " , <nl> - " : variables " , <nl> - " / / third_party / py / numpy " , <nl> - ] , <nl> - ) <nl> - <nl> - tf_py_test ( <nl> - name = " session_list_devices_test " , <nl> - size = " small " , <nl> - srcs = [ " client / session_list_devices_test . 
py " ] , <nl> - grpc_enabled = True , <nl> - python_version = " PY3 " , <nl> - tags = [ <nl> - " no_gpu " , <nl> - " no_pip_gpu " , <nl> - " notsan " , # data race due to b / 62910646 <nl> - ] , <nl> - tfrt_enabled = True , <nl> - deps = [ <nl> - " : client " , <nl> - " : framework " , <nl> - " : framework_test_lib " , <nl> - " : platform_test " , <nl> - " : training " , <nl> - ] , <nl> - ) <nl> - <nl> - tf_py_test ( <nl> - name = " session_partial_run_test " , <nl> - size = " small " , <nl> - srcs = [ " client / session_partial_run_test . py " ] , <nl> - grpc_enabled = True , <nl> - python_version = " PY3 " , <nl> - tags = [ <nl> - " no_gpu " , <nl> - " no_windows " , <nl> - ] , <nl> - tfrt_enabled = True , <nl> - deps = [ <nl> - " : array_ops " , <nl> - " : client " , <nl> - " : errors " , <nl> - " : framework " , <nl> - " : framework_for_generated_wrappers " , <nl> - " : framework_test_lib " , <nl> - " : math_ops " , <nl> - " : platform_test " , <nl> - " : training " , <nl> - " : util " , <nl> - " @ six_archive / / : six " , <nl> - ] , <nl> - ) <nl> - <nl> - cuda_py_test ( <nl> - name = " timeline_test " , <nl> - size = " small " , <nl> - srcs = [ " client / timeline_test . py " ] , <nl> - python_version = " PY3 " , <nl> - tags = [ <nl> - " gpu_cupti " , <nl> - " no_gpu " , # b / 154742661 <nl> - ] , <nl> - tfrt_enabled = True , <nl> - xla_enable_strict_auto_jit = False , # Graph structure is different with autojit <nl> - deps = [ <nl> - " : client " , <nl> - " : client_testlib " , <nl> - " : framework_for_generated_wrappers " , <nl> - " : math_ops " , <nl> - " / / tensorflow / core : protos_all_py " , <nl> - ] , <nl> - ) <nl> - <nl> - cuda_py_test ( <nl> - name = " virtual_gpu_test " , <nl> - size = " small " , <nl> - srcs = [ " client / virtual_gpu_test . py " ] , <nl> - python_version = " PY3 " , <nl> - tags = [ <nl> - " no_gpu " , # b / 127386241 <nl> - " no_windows_gpu " , <nl> - ] , <nl> - tfrt_enabled = True , <nl> - deps = [ <nl> - " : client " , <nl> - " : client_testlib " , <nl> - " : framework_for_generated_wrappers " , <nl> - " : math_ops " , <nl> - " / / tensorflow / core : protos_all_py " , <nl> - ] , <nl> - ) <nl> - <nl> - tf_py_test ( <nl> - name = " c_api_util_test " , <nl> - size = " small " , <nl> - srcs = [ " framework / c_api_util_test . py " ] , <nl> - python_version = " PY3 " , <nl> - tfrt_enabled = True , <nl> - deps = [ <nl> - " : c_api_util " , <nl> - " : framework_test_lib " , <nl> - " : platform_test " , <nl> - ] , <nl> - ) <nl> - <nl> - tf_py_test ( <nl> - name = " graph_util_test " , <nl> - size = " small " , <nl> - srcs = [ " framework / graph_util_test . py " ] , <nl> - python_version = " PY3 " , <nl> - tfrt_enabled = True , <nl> - deps = [ <nl> - " : client " , <nl> - " : client_testlib " , <nl> - " : control_flow_v2_toggles " , <nl> - " : framework " , <nl> - " : framework_for_generated_wrappers " , <nl> - " : math_ops " , <nl> - " : state_ops_gen " , <nl> - " : variable_scope " , <nl> - " : variables " , <nl> - " / / tensorflow / core : protos_all_py " , <nl> - ] , <nl> - ) <nl> - <nl> - tf_py_test ( <nl> - name = " convert_to_constants_test " , <nl> - size = " small " , <nl> - srcs = [ " framework / convert_to_constants_test . 
py " ] , <nl> - python_version = " PY3 " , <nl> - tags = [ " no_rocm " ] , <nl> - tfrt_enabled = True , <nl> - deps = [ <nl> - " : client_testlib " , <nl> - " : control_flow_v2_toggles " , <nl> - " : convert_to_constants " , <nl> " : framework_test_lib " , <nl> - " : math_ops " , <nl> - ] , <nl> - ) <nl> - <nl> - tf_py_test ( <nl> - name = " bfloat16_test " , <nl> - size = " small " , <nl> - srcs = [ " lib / core / bfloat16_test . py " ] , <nl> - python_version = " PY3 " , <nl> - tfrt_enabled = True , <nl> - deps = [ <nl> - " : client_testlib " , <nl> - " : lib " , <nl> - " : pywrap_tensorflow " , <nl> - ] , <nl> - ) <nl> - <nl> - tf_py_test ( <nl> - name = " file_io_test " , <nl> - size = " small " , <nl> - srcs = [ " lib / io / file_io_test . py " ] , <nl> - python_version = " PY3 " , <nl> - tags = [ <nl> - " no_rocm " , <nl> - " no_windows " , <nl> - ] , <nl> - tfrt_enabled = True , <nl> - deps = [ <nl> - " : client_testlib " , <nl> - " : errors " , <nl> - " : lib " , <nl> - ] , <nl> - ) <nl> - <nl> - tf_py_test ( <nl> - name = " tf_record_test " , <nl> - size = " small " , <nl> - srcs = [ " lib / io / tf_record_test . py " ] , <nl> - python_version = " PY3 " , <nl> - tfrt_enabled = True , <nl> - deps = [ <nl> - " : client_testlib " , <nl> - " : errors " , <nl> - " : lib " , <nl> - " : util " , <nl> - ] , <nl> - ) <nl> - <nl> - cuda_py_test ( <nl> - name = " adam_test " , <nl> - size = " medium " , <nl> - srcs = [ " training / adam_test . py " ] , <nl> - python_version = " PY3 " , <nl> - tfrt_enabled = True , <nl> - deps = [ <nl> - " : array_ops " , <nl> - " : client_testlib " , <nl> - " : framework " , <nl> - " : math_ops " , <nl> - " : platform " , <nl> - " : platform_test " , <nl> - " : training " , <nl> - " / / third_party / py / numpy " , <nl> - ] , <nl> - ) <nl> - <nl> - cuda_py_test ( <nl> - name = " moving_averages_test " , <nl> - size = " small " , <nl> - srcs = [ <nl> - " training / moving_averages_test . py " , <nl> - ] , <nl> - python_version = " PY3 " , <nl> - tags = [ <nl> - " no_windows " , # b / 139083295 : bfloat16 tests fail on Windows <nl> - " notsan " , <nl> - ] , <nl> - tfrt_enabled = True , <nl> - deps = [ <nl> - " : array_ops " , <nl> - " : client_testlib " , <nl> - " : constant_op " , <nl> - " : dtypes " , <nl> - " : framework_for_generated_wrappers " , <nl> - " : framework_ops " , <nl> - " : training " , <nl> - " : variable_scope " , <nl> - " : variables " , <nl> - ] , <nl> - ) <nl> - <nl> - cuda_py_tests ( <nl> - name = " training_tests " , <nl> - size = " medium " , <nl> - srcs = [ <nl> - " training / adadelta_test . py " , <nl> - " training / adagrad_da_test . py " , <nl> - " training / adagrad_test . py " , <nl> - " training / basic_loops_test . py " , <nl> - " training / coordinator_test . py " , <nl> - " training / device_setter_test . py " , <nl> - " training / ftrl_test . py " , <nl> - " training / gradient_descent_test . py " , <nl> - " training / momentum_test . py " , <nl> - " training / optimizer_test . py " , <nl> - " training / proximal_adagrad_test . py " , <nl> - " training / proximal_gradient_descent_test . py " , <nl> - " training / quantize_training_test . py " , <nl> - " training / queue_runner_test . py " , <nl> - " training / rmsprop_test . py " , <nl> - " training / slot_creator_test . py " , <nl> - " training / tensorboard_logging_test . py " , <nl> - " training / training_ops_test . 
py " , <nl> - ] , <nl> - python_version = " PY3 " , <nl> - deps = [ <nl> - " : array_ops " , <nl> - " : client " , <nl> - " : client_testlib " , <nl> - " : control_flow_ops " , <nl> - " : data_flow_ops " , <nl> - " : data_flow_ops_gen " , <nl> - " : embedding_ops " , <nl> - " : errors " , <nl> - " : framework " , <nl> - " : framework_for_generated_wrappers " , <nl> - " : framework_test_lib " , <nl> - " : gradients " , <nl> - " : lookup_ops " , <nl> - " : math_ops " , <nl> - " : nn_grad " , <nl> - " : nn_ops " , <nl> - " : partitioned_variables " , <nl> - " : platform " , <nl> - " : platform_test " , <nl> - " : pywrap_tensorflow " , <nl> - " : random_ops " , <nl> - " : resource_variable_ops " , <nl> - " : resources " , <nl> - " : sparse_ops " , <nl> - " : state_ops " , <nl> - " : state_ops_gen " , <nl> - " : summary " , <nl> - " : training " , <nl> - " : util " , <nl> - " : variable_scope " , <nl> - " : variables " , <nl> - " / / tensorflow / core : protos_all_py " , <nl> - " / / third_party / py / numpy " , <nl> - " @ six_archive / / : six " , <nl> - ] , <nl> - ) <nl> - <nl> - py_library ( <nl> - name = " saver_test_utils " , <nl> - srcs = [ " training / saver_test_utils . py " ] , <nl> - srcs_version = " PY2AND3 " , <nl> - deps = [ <nl> - " : lookup_ops_gen " , <nl> - " : training " , <nl> - ] , <nl> - ) <nl> - <nl> - cuda_py_test ( <nl> - name = " saver_test " , <nl> - size = " medium " , <nl> - srcs = [ <nl> - " training / saver_test . py " , <nl> - ] , <nl> - python_version = " PY3 " , <nl> - tags = [ " multi_gpu " ] , <nl> - deps = [ <nl> - " : array_ops " , <nl> - " : client_testlib " , <nl> - " : control_flow_ops " , <nl> - " : data_flow_ops " , <nl> - " : errors " , <nl> - " : gradients " , <nl> - " : math_ops " , <nl> - " : nn_grad " , <nl> - " : nn_ops " , <nl> - " : partitioned_variables " , <nl> - " : platform " , <nl> - " : platform_test " , <nl> - " : py_checkpoint_reader " , <nl> - " : random_ops " , <nl> - " : resource_variable_ops " , <nl> - " : saver_test_utils " , <nl> - " : sparse_ops " , <nl> - " : summary " , <nl> - " : training " , <nl> - " : util " , <nl> - " : variable_scope " , <nl> - " : variables " , <nl> - " / / tensorflow / core : protos_all_py " , <nl> - " / / tensorflow / python / data / ops : dataset_ops " , <nl> - " / / third_party / py / numpy " , <nl> - " @ six_archive / / : six " , <nl> - ] , <nl> - ) <nl> - <nl> - cuda_py_test ( <nl> - name = " checkpoint_management_test " , <nl> - size = " small " , <nl> - srcs = [ <nl> - " training / checkpoint_management_test . 
py " , <nl> - ] , <nl> - python_version = " PY3 " , <nl> - deps = [ <nl> - " : array_ops " , <nl> - " : client_testlib " , <nl> - " : control_flow_ops " , <nl> - " : data_flow_ops " , <nl> - " : errors " , <nl> - " : gradients " , <nl> - " : math_ops " , <nl> - " : nn_grad " , <nl> - " : nn_ops " , <nl> - " : partitioned_variables " , <nl> - " : platform " , <nl> - " : platform_test " , <nl> - " : pywrap_tensorflow " , <nl> - " : random_ops " , <nl> - " : resource_variable_ops " , <nl> - " : saver_test_utils " , <nl> - " : sparse_ops " , <nl> - " : summary " , <nl> - " : training " , <nl> - " : util " , <nl> - " : variable_scope " , <nl> - " : variables " , <nl> - " / / tensorflow / core : protos_all_py " , <nl> - " / / tensorflow / python / data / ops : dataset_ops " , <nl> - " / / third_party / py / numpy " , <nl> - " @ six_archive / / : six " , <nl> - ] , <nl> - ) <nl> - <nl> - tf_py_test ( <nl> - name = " saver_large_variable_test " , <nl> - size = " medium " , <nl> - srcs = [ " training / saver_large_variable_test . py " ] , <nl> - python_version = " PY3 " , <nl> - tags = [ <nl> - " manual " , <nl> - " noasan " , # http : / / b / 30379628 <nl> - " notsan " , # http : / / b / 30379628 <nl> - ] , <nl> - tfrt_enabled = True , <nl> - deps = [ <nl> - " : client " , <nl> - " : client_testlib " , <nl> - " : errors " , <nl> - " : framework_for_generated_wrappers " , <nl> - " : training " , <nl> - " : variables " , <nl> - " / / tensorflow / core : protos_all_py " , <nl> - ] , <nl> - ) <nl> - <nl> - tf_py_test ( <nl> - name = " saver_large_partitioned_variable_test " , <nl> - size = " medium " , <nl> - srcs = [ " training / saver_large_partitioned_variable_test . py " ] , <nl> - python_version = " PY3 " , <nl> - tags = [ <nl> - " noasan " , # http : / / b / 30782289 <nl> - " notsan " , # http : / / b / 30782289 <nl> - ] , <nl> - tfrt_enabled = True , <nl> - deps = [ <nl> - " : client " , <nl> - " : client_testlib " , <nl> - " : framework_for_generated_wrappers " , <nl> - " : partitioned_variables " , <nl> + " : math_ops " , <nl> + " : platform_test " , <nl> + " : state_ops " , <nl> " : training " , <nl> + " : util " , <nl> " : variables " , <nl> + " / / third_party / py / numpy " , <nl> ] , <nl> ) <nl> <nl> - cuda_py_test ( <nl> - name = " session_manager_test " , <nl> - size = " medium " , # TODO ( irving ) : Can this be made small ? <nl> - srcs = [ " training / session_manager_test . py " ] , <nl> + tf_py_test ( <nl> + name = " session_list_devices_test " , <nl> + size = " small " , <nl> + srcs = [ " client / session_list_devices_test . py " ] , <nl> grpc_enabled = True , <nl> - main = " training / session_manager_test . py " , <nl> python_version = " PY3 " , <nl> + tags = [ <nl> + " no_gpu " , <nl> + " no_pip_gpu " , <nl> + " notsan " , # data race due to b / 62910646 <nl> + ] , <nl> tfrt_enabled = True , <nl> deps = [ <nl> - " : array_ops " , <nl> " : client " , <nl> - " : client_testlib " , <nl> - " : control_flow_ops " , <nl> - " : errors " , <nl> - " : framework_for_generated_wrappers " , <nl> - " : platform " , <nl> + " : framework " , <nl> + " : framework_test_lib " , <nl> + " : platform_test " , <nl> " : training " , <nl> - " : variables " , <nl> ] , <nl> ) <nl> <nl> tf_py_test ( <nl> - name = " supervisor_test " , <nl> + name = " session_partial_run_test " , <nl> size = " small " , <nl> - srcs = [ " training / supervisor_test . py " ] , <nl> + srcs = [ " client / session_partial_run_test . 
py " ] , <nl> grpc_enabled = True , <nl> python_version = " PY3 " , <nl> - tags = [ " no_windows " ] , <nl> + tags = [ <nl> + " no_gpu " , <nl> + " no_windows " , <nl> + ] , <nl> tfrt_enabled = True , <nl> deps = [ <nl> " : array_ops " , <nl> - " : checkpoint_management " , <nl> - " : client_testlib " , <nl> + " : client " , <nl> " : errors " , <nl> " : framework " , <nl> " : framework_for_generated_wrappers " , <nl> - " : io_ops " , <nl> - " : parsing_ops " , <nl> - " : platform " , <nl> - " : saver " , <nl> - " : summary " , <nl> + " : framework_test_lib " , <nl> + " : math_ops " , <nl> + " : platform_test " , <nl> " : training " , <nl> - " : variables " , <nl> - " / / tensorflow / core : protos_all_py " , <nl> + " : util " , <nl> + " @ six_archive / / : six " , <nl> ] , <nl> ) <nl> <nl> - tf_py_test ( <nl> - name = " basic_session_run_hooks_test " , <nl> - size = " medium " , <nl> - srcs = [ " training / basic_session_run_hooks_test . py " ] , <nl> + cuda_py_test ( <nl> + name = " timeline_test " , <nl> + size = " small " , <nl> + srcs = [ " client / timeline_test . py " ] , <nl> python_version = " PY3 " , <nl> tags = [ <nl> - " no_pip " , # Relies on contrib <nl> - " no_windows " , <nl> - " notsan " , # intermittent races on a few percent of runs <nl> + " gpu_cupti " , <nl> + " no_gpu " , # b / 154742661 <nl> ] , <nl> tfrt_enabled = True , <nl> + xla_enable_strict_auto_jit = False , # Graph structure is different with autojit <nl> deps = [ <nl> " : client " , <nl> " : client_testlib " , <nl> - " : control_flow_ops " , <nl> - " : fake_summary_writer " , <nl> - " : framework " , <nl> " : framework_for_generated_wrappers " , <nl> - " : nn_grad " , <nl> - " : platform " , <nl> - " : state_ops " , <nl> - " : summary " , <nl> - " : training " , <nl> - " : variable_scope " , <nl> - " : variables " , <nl> + " : math_ops " , <nl> " / / tensorflow / core : protos_all_py " , <nl> ] , <nl> ) <nl> <nl> - tf_py_test ( <nl> - name = " checkpoint_utils_test " , <nl> + cuda_py_test ( <nl> + name = " virtual_gpu_test " , <nl> size = " small " , <nl> - srcs = [ " training / checkpoint_utils_test . py " ] , <nl> + srcs = [ " client / virtual_gpu_test . py " ] , <nl> python_version = " PY3 " , <nl> tags = [ <nl> - " manual " , <nl> - " no_cuda_on_cpu_tap " , <nl> - " no_oss " , <nl> - " no_windows " , <nl> - " notap " , <nl> + " no_gpu " , # b / 127386241 <nl> + " no_windows_gpu " , <nl> ] , <nl> + tfrt_enabled = True , <nl> deps = [ <nl> " : client " , <nl> " : client_testlib " , <nl> " : framework_for_generated_wrappers " , <nl> - " : io_ops " , <nl> - " : partitioned_variables " , <nl> - " : platform " , <nl> - " : resource_variable_ops " , <nl> - " : state_ops " , <nl> - " : training " , <nl> - " : variable_scope " , <nl> - " : variables " , <nl> + " : math_ops " , <nl> + " / / tensorflow / core : protos_all_py " , <nl> ] , <nl> ) <nl> <nl> tf_py_test ( <nl> - name = " checkpoint_ops_test " , <nl> + name = " c_api_util_test " , <nl> size = " small " , <nl> - srcs = [ " training / checkpoint_ops_test . py " ] , <nl> + srcs = [ " framework / c_api_util_test . 
py " ] , <nl> python_version = " PY3 " , <nl> tfrt_enabled = True , <nl> deps = [ <nl> - " : checkpoint_ops_gen " , <nl> - " : client " , <nl> - " : client_testlib " , <nl> - " : framework_for_generated_wrappers " , <nl> - " : io_ops " , <nl> - " : partitioned_variables " , <nl> - " : platform " , <nl> - " : pywrap_tensorflow " , <nl> - " : state_ops " , <nl> - " : training " , <nl> - " : variable_scope " , <nl> - " : variables " , <nl> + " : c_api_util " , <nl> + " : framework_test_lib " , <nl> + " : platform_test " , <nl> ] , <nl> ) <nl> <nl> tf_py_test ( <nl> - name = " warm_starting_util_test " , <nl> - size = " medium " , <nl> - srcs = [ " training / warm_starting_util_test . py " ] , <nl> + name = " graph_util_test " , <nl> + size = " small " , <nl> + srcs = [ " framework / graph_util_test . py " ] , <nl> python_version = " PY3 " , <nl> tfrt_enabled = True , <nl> deps = [ <nl> - " : array_ops " , <nl> + " : client " , <nl> " : client_testlib " , <nl> - " : dtypes " , <nl> - " : framework_ops " , <nl> - " : init_ops " , <nl> - " : training " , <nl> + " : control_flow_v2_toggles " , <nl> + " : framework " , <nl> + " : framework_for_generated_wrappers " , <nl> + " : math_ops " , <nl> + " : state_ops_gen " , <nl> " : variable_scope " , <nl> " : variables " , <nl> - " / / third_party / py / numpy " , <nl> + " / / tensorflow / core : protos_all_py " , <nl> ] , <nl> ) <nl> <nl> tf_py_test ( <nl> - name = " monitored_session_test " , <nl> - size = " medium " , <nl> - srcs = [ " training / monitored_session_test . py " ] , <nl> - tags = [ <nl> - " no_pip " , <nl> - " notsan " , # b / 67945581 <nl> - ] , <nl> + name = " convert_to_constants_test " , <nl> + size = " small " , <nl> + srcs = [ " framework / convert_to_constants_test . py " ] , <nl> + python_version = " PY3 " , <nl> + tags = [ " no_rocm " ] , <nl> tfrt_enabled = True , <nl> deps = [ <nl> - " : array_ops " , <nl> - " : checkpoint_management " , <nl> " : client_testlib " , <nl> - " : control_flow_ops " , <nl> - " : errors " , <nl> - " : framework_for_generated_wrappers " , <nl> - " : resource_variable_ops " , <nl> - " : saver " , <nl> - " : session " , <nl> - " : state_ops " , <nl> - " : summary " , <nl> - " : training " , <nl> - " : variables " , <nl> - " / / tensorflow / core : protos_all_py " , <nl> - " / / tensorflow / python / distribute : collective_all_reduce_strategy " , <nl> - " / / tensorflow / python / distribute : distribute_coordinator " , <nl> + " : control_flow_v2_toggles " , <nl> + " : convert_to_constants " , <nl> + " : framework_test_lib " , <nl> + " : math_ops " , <nl> ] , <nl> ) <nl> <nl> - py_library ( <nl> - name = " training_util " , <nl> - srcs = [ " training / training_util . py " ] , <nl> - srcs_version = " PY2AND3 " , <nl> + tf_py_test ( <nl> + name = " bfloat16_test " , <nl> + size = " small " , <nl> + srcs = [ " lib / core / bfloat16_test . py " ] , <nl> + python_version = " PY3 " , <nl> + tfrt_enabled = True , <nl> deps = [ <nl> - " : dtypes " , <nl> - " : framework " , <nl> - " : framework_ops " , <nl> - " : init_ops " , <nl> - " : platform " , <nl> - " : resource_variable_ops " , <nl> - " : state_ops " , <nl> - " : util " , <nl> - " : variable_scope " , <nl> - " : variables " , <nl> - " / / tensorflow / python / eager : context " , <nl> + " : client_testlib " , <nl> + " : lib " , <nl> + " : pywrap_tensorflow " , <nl> ] , <nl> ) <nl> <nl> tf_py_test ( <nl> - name = " training_util_test " , <nl> + name = " file_io_test " , <nl> size = " small " , <nl> - srcs = [ " training / training_util_test . 
py " ] , <nl> + srcs = [ " lib / io / file_io_test . py " ] , <nl> python_version = " PY3 " , <nl> + tags = [ <nl> + " no_rocm " , <nl> + " no_windows " , <nl> + ] , <nl> tfrt_enabled = True , <nl> deps = [ <nl> " : client_testlib " , <nl> - " : framework " , <nl> - " : platform " , <nl> - " : training " , <nl> - " : training_util " , <nl> - " : variables " , <nl> + " : errors " , <nl> + " : lib " , <nl> ] , <nl> ) <nl> <nl> tf_py_test ( <nl> - name = " input_test " , <nl> - size = " medium " , <nl> - srcs = [ " training / input_test . py " ] , <nl> + name = " tf_record_test " , <nl> + size = " small " , <nl> + srcs = [ " lib / io / tf_record_test . py " ] , <nl> python_version = " PY3 " , <nl> tfrt_enabled = True , <nl> deps = [ <nl> - " : array_ops " , <nl> " : client_testlib " , <nl> " : errors " , <nl> - " : framework " , <nl> - " : framework_for_generated_wrappers " , <nl> - " : math_ops " , <nl> - " : platform " , <nl> - " : training " , <nl> + " : lib " , <nl> " : util " , <nl> - " : variables " , <nl> - " / / third_party / py / numpy " , <nl> ] , <nl> ) <nl> <nl> tf_python_pybind_extension ( <nl> module_name = " _pywrap_parallel_device " , <nl> visibility = [ " / / tensorflow / python / distribute / parallel_device : __pkg__ " ] , <nl> deps = [ <nl> + " : pybind11_lib " , <nl> + " : pybind11_status " , <nl> " / / tensorflow / core : framework_headers_lib " , <nl> " / / tensorflow / core : lib_headers_for_pybind " , <nl> " / / tensorflow / core : protos_all_cc " , <nl> - " / / tensorflow / python : pybind11_lib " , <nl> - " / / tensorflow / python : pybind11_status " , <nl> " / / third_party / python_runtime : headers " , <nl> " @ pybind11 " , <nl> ] , <nl> cuda_py_test ( <nl> " : client_testlib " , <nl> ] , <nl> ) <nl> + <nl> + alias ( <nl> + name = " basic_session_run_hooks " , <nl> + actual = " / / tensorflow / python / training : basic_session_run_hooks " , <nl> + ) <nl> + <nl> + alias ( <nl> + name = " checkpoint_management " , <nl> + actual = " / / tensorflow / python / training : checkpoint_management " , <nl> + ) <nl> + <nl> + alias ( <nl> + name = " distribute " , <nl> + actual = " / / tensorflow / python / training : distribute " , <nl> + ) <nl> + <nl> + alias ( <nl> + name = " py_checkpoint_reader " , <nl> + actual = " / / tensorflow / python / training : py_checkpoint_reader " , <nl> + ) <nl> + <nl> + alias ( <nl> + name = " saver " , <nl> + actual = " / / tensorflow / python / training : saver " , <nl> + ) <nl> + <nl> + alias ( <nl> + name = " session_run_hook " , <nl> + actual = " / / tensorflow / python / training : session_run_hook " , <nl> + ) <nl> + <nl> + alias ( <nl> + name = " training " , <nl> + actual = " / / tensorflow / python / training : training " , <nl> + ) <nl> + <nl> + alias ( <nl> + name = " training_lib " , <nl> + actual = " / / tensorflow / python / training : training_lib " , <nl> + ) <nl> + <nl> + alias ( <nl> + name = " training_server_lib " , <nl> + actual = " / / tensorflow / python / training : server_lib " , <nl> + ) <nl> + <nl> + alias ( <nl> + name = " training_util " , <nl> + actual = " / / tensorflow / python / training : training_util " , <nl> + ) <nl> + <nl> + alias ( <nl> + name = " loss_scale " , <nl> + actual = " / / tensorflow / python / training / experimental : loss_scale " , <nl> + ) <nl> + <nl> + alias ( <nl> + name = " loss_scale_optimizer " , <nl> + actual = " / / tensorflow / python / training / experimental : loss_scale_optimizer " , <nl> + ) <nl> + <nl> + alias ( <nl> + name = " mixed_precision " , <nl> + actual = " / / 
tensorflow / python / training / experimental : mixed_precision " , <nl> + ) <nl> + <nl> + alias ( <nl> + name = " mixed_precision_global_state " , <nl> + actual = " / / tensorflow / python / training / experimental : mixed_precision_global_state " , <nl> + ) <nl> + <nl> + alias ( <nl> + name = " loss_scaling_gradient_tape " , <nl> + actual = " / / tensorflow / python / training / experimental : loss_scaling_gradient_tape " , <nl> + ) <nl> + <nl> + py_library ( <nl> + name = " learning_rate_decay " , <nl> + # This rule depends on a target that only python : __pkg__ has visibility for . <nl> + srcs = [ " / / tensorflow / python / training : learning_rate_decay . py " ] , <nl> + srcs_version = " PY2AND3 " , <nl> + deps = [ " / / tensorflow / python / keras / optimizer_v2 : legacy_learning_rate_decay " ] , <nl> + ) <nl> + <nl> + py_test ( <nl> + name = " loss_scale_optimizer_test " , <nl> + size = " small " , <nl> + # This test currently depends on rules only python : __pkg__ has visibility for . <nl> + srcs = [ " / / tensorflow / python / training / experimental : loss_scale_optimizer_test . py " ] , <nl> + python_version = " PY3 " , <nl> + deps = [ <nl> + " / / tensorflow / python : client_testlib " , <nl> + " / / tensorflow / python / distribute : mirrored_strategy " , <nl> + " / / tensorflow / python / distribute : one_device_strategy " , <nl> + " / / tensorflow / python / keras / mixed_precision / experimental : test_util " , <nl> + " / / tensorflow / python / training / experimental : loss_scale_optimizer " , <nl> + " @ absl_py / / absl / testing : parameterized " , <nl> + ] , <nl> + ) <nl> mmm a / tensorflow / python / compat / compat . py <nl> ppp b / tensorflow / python / compat / compat . py <nl> <nl> # This value changes every day with an automatic CL . It can be modified in code <nl> # via ` forward_compatibility_horizon ( ) ` or with the environment variable <nl> # TF_FORWARD_COMPATIBILITY_DELTA_DAYS , which is added to the compatibility date . <nl> - _FORWARD_COMPATIBILITY_HORIZON = datetime . date ( 2020 , 8 , 28 ) <nl> + _FORWARD_COMPATIBILITY_HORIZON = datetime . date ( 2020 , 8 , 31 ) <nl> _FORWARD_COMPATIBILITY_DELTA_DAYS_VAR_NAME = " TF_FORWARD_COMPATIBILITY_DELTA_DAYS " <nl> _FORWARD_COMPATIBILITY_DATE_NUMBER = None <nl> <nl> mmm a / tensorflow / python / data / experimental / kernel_tests / compression_ops_test . py <nl> ppp b / tensorflow / python / data / experimental / kernel_tests / compression_ops_test . py <nl> <nl> from __future__ import division <nl> from __future__ import print_function <nl> <nl> + from collections import namedtuple <nl> from absl . testing import parameterized <nl> <nl> from tensorflow . python . data . experimental . ops import compression_ops <nl> <nl> from tensorflow . python . data . util import structure <nl> from tensorflow . python . framework import combinations <nl> from tensorflow . python . framework import sparse_tensor <nl> + from tensorflow . python . ops . ragged import ragged_factory_ops <nl> from tensorflow . python . platform import test <nl> <nl> <nl> def _test_objects ( ) : <nl> + <nl> + Item = namedtuple ( " Item " , " id name " ) <nl> + <nl> return [ <nl> combinations . NamedObject ( " int " , 1 ) , <nl> combinations . NamedObject ( " string " , " dog " ) , <nl> combinations . NamedObject ( " tuple " , ( 1 , 1 ) ) , <nl> + combinations . NamedObject ( " nested_tuple " , ( ( 1 , 1 ) , ( 2 , 2 ) ) ) , <nl> + combinations . NamedObject ( " named_tuple " , Item ( id = 1 , name = " item1 " ) ) , <nl> + combinations . 
NamedObject ( " unicode " , " アヒル " ) , <nl> + combinations . NamedObject ( <nl> + " nested_named_tuple " , <nl> + ( Item ( id = 1 , name = " item1 " ) , Item ( id = 2 , name = " item2 " ) ) ) , <nl> combinations . NamedObject ( " int_string_tuple " , ( 1 , " dog " ) ) , <nl> combinations . NamedObject ( <nl> " sparse " , <nl> def _test_objects ( ) : <nl> ] <nl> <nl> <nl> + def _test_v2_eager_only_objects ( ) : <nl> + return [ <nl> + combinations . NamedObject ( <nl> + " ragged " , <nl> + ragged_factory_ops . constant ( [ [ 0 , 1 , 2 , 3 ] , [ 4 , 5 ] , [ 6 , 7 , 8 ] , [ 9 ] ] ) ) , <nl> + combinations . NamedObject ( <nl> + " sparse_ragged_structured " , { <nl> + " sparse " : <nl> + sparse_tensor . SparseTensorValue ( <nl> + indices = [ [ 0 , 0 ] , [ 1 , 2 ] ] , <nl> + values = [ 1 , 2 ] , <nl> + dense_shape = [ 3 , 4 ] ) , <nl> + " ragged " : <nl> + ragged_factory_ops . constant ( [ [ 0 , 1 , 2 , 3 ] , [ 9 ] ] ) <nl> + } ) <nl> + ] <nl> + <nl> + <nl> class CompressionOpsTest ( test_base . DatasetTestBase , parameterized . TestCase ) : <nl> <nl> @ combinations . generate ( <nl> combinations . times ( test_base . default_test_combinations ( ) , <nl> - combinations . combine ( element = _test_objects ( ) ) ) ) <nl> + combinations . combine ( element = _test_objects ( ) ) ) + <nl> + combinations . times ( <nl> + test_base . v2_eager_only_combinations ( ) , <nl> + combinations . combine ( element = _test_v2_eager_only_objects ( ) ) ) ) <nl> def testCompression ( self , element ) : <nl> element = element . _obj <nl> <nl> def testCompression ( self , element ) : <nl> <nl> @ combinations . generate ( <nl> combinations . times ( test_base . default_test_combinations ( ) , <nl> - combinations . combine ( element = _test_objects ( ) ) ) ) <nl> + combinations . combine ( element = _test_objects ( ) ) ) + <nl> + combinations . times ( <nl> + test_base . v2_eager_only_combinations ( ) , <nl> + combinations . combine ( element = _test_v2_eager_only_objects ( ) ) ) ) <nl> def testDatasetCompression ( self , element ) : <nl> element = element . _obj <nl> <nl> mmm a / tensorflow / python / data / experimental / service / server_lib_wrapper . cc <nl> ppp b / tensorflow / python / data / experimental / service / server_lib_wrapper . cc <nl> PYBIND11_MODULE ( _pywrap_server_lib , m ) { <nl> } <nl> std : : unique_ptr < tensorflow : : data : : DispatchGrpcDataServer > server ; <nl> tensorflow : : Status status = <nl> - tensorflow : : data : : NewDispatchServer ( config , & server ) ; <nl> + tensorflow : : data : : NewDispatchServer ( config , server ) ; <nl> tensorflow : : MaybeRaiseFromStatus ( status ) ; <nl> return server ; <nl> } , <nl> PYBIND11_MODULE ( _pywrap_server_lib , m ) { <nl> } <nl> std : : unique_ptr < tensorflow : : data : : WorkerGrpcDataServer > server ; <nl> tensorflow : : Status status = <nl> - tensorflow : : data : : NewWorkerServer ( config , & server ) ; <nl> + tensorflow : : data : : NewWorkerServer ( config , server ) ; <nl> tensorflow : : MaybeRaiseFromStatus ( status ) ; <nl> return server ; <nl> } , <nl> mmm a / tensorflow / python / data / kernel_tests / test_base . py <nl> ppp b / tensorflow / python / data / kernel_tests / test_base . py <nl> def graph_only_combinations ( ) : <nl> <nl> <nl> def v2_only_combinations ( ) : <nl> - " " " Returns the default test combinations for v1 only tf . data tests . " " " <nl> + " " " Returns the default test combinations for v2 only tf . data tests . " " " <nl> return combinations . 
combine ( tf_api_version = 2 , mode = [ " eager " , " graph " ] ) <nl> <nl> <nl> + def v2_eager_only_combinations ( ) : <nl> + " " " Returns the default test combinations for v2 eager only tf . data tests . " " " <nl> + return combinations . combine ( tf_api_version = 2 , mode = " eager " ) <nl> + <nl> + <nl> class DatasetTestBase ( test . TestCase ) : <nl> " " " Base class for dataset tests . " " " <nl> <nl> mmm a / tensorflow / python / distribute / BUILD <nl> ppp b / tensorflow / python / distribute / BUILD <nl> exports_files ( [ " LICENSE " ] ) <nl> py_library ( <nl> name = " distribute_test_lib_pip " , <nl> deps = [ <nl> + " : all_reduce " , <nl> " : combinations " , <nl> " : multi_worker_test_base " , <nl> " : single_loss_example " , <nl> py_library ( <nl> srcs = [ " cross_device_utils . py " ] , <nl> srcs_version = " PY2AND3 " , <nl> deps = [ <nl> - " : all_reduce " , <nl> " : values " , <nl> " / / tensorflow / python : array_ops " , <nl> " / / tensorflow / python : collective_ops " , <nl> py_library ( <nl> ] , <nl> srcs_version = " PY2AND3 " , <nl> deps = [ <nl> + " : all_reduce " , <nl> " : cross_device_ops " , <nl> " : distribute_lib " , <nl> " : mirrored_strategy " , <nl> mmm a / tensorflow / python / distribute / client / utils . py <nl> ppp b / tensorflow / python / distribute / client / utils . py <nl> def start_server ( cluster_resolver , protocol ) : <nl> " " " Start a server and block the process from exiting . " " " <nl> # This function is for multi - processing test or users who would like to have <nl> # every job run the same binary for simplicity . <nl> - assert ( cluster_resolver . task_type = = ' worker ' or <nl> - cluster_resolver . task_type = = ' ps ' ) <nl> + if not ( cluster_resolver . task_type = = ' worker ' or <nl> + cluster_resolver . task_type = = ' ps ' ) : <nl> + raise ValueError ( ' Unexpected task_type to start a server : { } ' . format ( <nl> + cluster_resolver . task_type ) ) <nl> + <nl> server = server_lib . Server ( <nl> cluster_resolver . cluster_spec ( ) . as_cluster_def ( ) , <nl> job_name = cluster_resolver . task_type , <nl> mmm a / tensorflow / python / distribute / cross_device_ops . py <nl> ppp b / tensorflow / python / distribute / cross_device_ops . py <nl> def reduce_non_distributed_value ( <nl> reduce_op , value , destinations , num_replicas_in_graph ) : <nl> " " " Reduce a non - DistributedValue ` value ` to ` destinations ` . " " " <nl> if isinstance ( value , value_lib . DistributedValues ) : <nl> - raise ValueError ( " You are passing a ` DistributedValue ` to " <nl> + raise ValueError ( " You are passing a ` DistributedValues ` to " <nl> " ` reduce_non_distributed_value ` , which is not allowed . " ) <nl> <nl> # If the same value is present on all replicas then the PerReplica value will <nl> def _simple_reduce ( per_replica_value , reduce_to_device , accumulation_fn , <nl> <nl> @ tf_export ( " distribute . CrossDeviceOps " ) <nl> class CrossDeviceOps ( object ) : <nl> - " " " Base class for cross - device reduction and broadcasting algorithms . " " " <nl> + " " " Base class for cross - device reduction and broadcasting algorithms . <nl> + <nl> + The main purpose of this class is to be passed to <nl> + ` tf . distribute . MirroredStrategy ` in order to choose among different cross <nl> + device communication implementations . Prefer using the methods of <nl> + ` tf . distribute . Strategy ` instead of the ones of this class . <nl> + <nl> + Implementations : <nl> + * ` tf . distribute . ReductionToOneDevice ` <nl> + * ` tf . 
distribute . NcclAllReduce ` <nl> + * ` tf . distribute . HierarchicalCopyAllReduce ` <nl> + " " " <nl> <nl> def __init__ ( self ) : <nl> pass <nl> def reduce ( self , <nl> experimental_hints = None ) : <nl> " " " Reduce ` per_replica_value ` to ` destinations ` . <nl> <nl> - It runs the reduction operation defined by ` reduce_op ` and put the <nl> - result on ` destinations ` . <nl> + See ` tf . distribute . StrategyExtended . reduce_to ` . This can only be called in <nl> + the cross - replica context . <nl> <nl> Args : <nl> - reduce_op : An instance of ` tf . distribute . ReduceOp ` that indicates how <nl> - per_replica_value will be reduced . <nl> - per_replica_value : A ` tf . distribute . DistributedValues ` object or a tensor <nl> - with device set . <nl> - destinations : the reduction destinations . <nl> - experimental_hints : A ` tf . distrbute . experimental . CollectiveHints ` . Hints <nl> - to perform collective operations . <nl> + reduce_op : a ` tf . distribute . ReduceOp ` specifying how values should be <nl> + combined . <nl> + per_replica_value : a ` tf . distribute . DistributedValues ` , or a ` tf . Tensor ` <nl> + like object . <nl> + destinations : a ` tf . distribute . DistributedValues ` , a ` tf . Variable ` , a <nl> + ` tf . Tensor ` alike object , or a device string . It specifies the devices <nl> + to reduce to . To perform an all - reduce , pass the same to ` value ` and <nl> + ` destinations ` . Note that if it ' s a ` tf . Variable ` , the value is reduced <nl> + to the devices of that variable , and this method doesn ' t update the <nl> + variable . <nl> + experimental_hints : a ` tf . distribute . experimental . CollectiveHints ` . See <nl> + ` tf . distribute . experimental . CollectiveHints ` for details . <nl> <nl> Returns : <nl> - a Mirrored object . <nl> + A ` tf . Tensor ` or ` tf . distribute . DistributedValues ` . <nl> <nl> Raises : <nl> - ValueError : if per_replica_value can ' t be converted to a PerReplica <nl> - object or if destinations aren ' t strings , Variables or DistributedValues <nl> + ValueError : if per_replica_value can ' t be converted to a <nl> + ` tf . distribute . DistributedValues ` or if destinations is not a string , <nl> + ` tf . Variable ` or ` tf . distribute . DistributedValues ` . <nl> " " " <nl> if not isinstance ( per_replica_value , value_lib . DistributedValues ) : <nl> per_replica_value = _make_tensor_into_per_replica ( per_replica_value ) <nl> def batch_reduce ( self , <nl> reduce_op , <nl> value_destination_pairs , <nl> experimental_hints = None ) : <nl> - " " " Reduce PerReplica objects in a batch . <nl> - <nl> - Reduce each first element in ` value_destination_pairs ` to each second <nl> - element which indicates the destinations . <nl> + " " " Reduce values to destinations in batches . <nl> <nl> - This can be faster than multiple individual ` reduce ` s because we can <nl> - fuse several tensors into one or multiple packs before reduction . <nl> + See ` tf . distribute . StrategyExtended . batch_reduce_to ` . This can only be <nl> + called in the cross - replica context . <nl> <nl> Args : <nl> - reduce_op : An instance of ` tf . distribute . ReduceOp ` that indicates how the <nl> - ` per_replica_value ` will be reduced . <nl> - value_destination_pairs : A list or a tuple of PerReplica objects ( or <nl> - tensors with device set if there is one device ) and destinations . <nl> - experimental_hints : A ` tf . distrbute . experimental . CollectiveHints ` . Hints <nl> - to perform collective operations . 
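For reference, the reduce path documented in this hunk can be driven end to end as in the following minimal sketch. It is illustrative only: the choice of ` ReductionToOneDevice ` , the replica function , and running on whatever devices are visible locally are assumptions of the sketch , not part of this change .

` ` `
# Minimal sketch of CrossDeviceOps + reduce, assuming a local machine;
# MirroredStrategy falls back to a single replica when no GPU is present.
import tensorflow as tf

strategy = tf.distribute.MirroredStrategy(
    cross_device_ops=tf.distribute.ReductionToOneDevice())

def replica_fn():
  ctx = tf.distribute.get_replica_context()
  # Each replica contributes 1.0; the all-reduced result is the replica count.
  return ctx.all_reduce(tf.distribute.ReduceOp.SUM, tf.constant(1.0))

per_replica = strategy.run(replica_fn)
# Cross-replica context: reduce the per-replica value onto one device.
total = strategy.reduce(tf.distribute.ReduceOp.SUM, per_replica, axis=None)
` ` `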
<nl> + reduce_op : a ` tf . distribute . ReduceOp ` specifying how values should be <nl> + combined . <nl> + value_destination_pairs : a sequence of ( value , destinations ) pairs . See <nl> + ` tf . distribute . CrossDeviceOps . reduce ` for descriptions . <nl> + experimental_hints : a ` tf . distribute . experimental . CollectiveHints ` . See <nl> + ` tf . distribute . experimental . CollectiveHints ` for details . <nl> <nl> Returns : <nl> - a list of Mirrored objects . <nl> + A list of ` tf . Tensor ` or ` tf . distribute . DistributedValues ` , one per pair <nl> + in ` value_destination_pairs ` . <nl> <nl> Raises : <nl> ValueError : if ` value_destination_pairs ` is not an iterable of <nl> - tuples of PerReplica objects and destinations . <nl> + tuples of ` tf . distribute . DistributedValues ` and destinations . <nl> " " " <nl> # TODO ( yuefengz ) : if destinations are different , split into several <nl> # ` _batch_reduce ` invocations . <nl> def batch_reduce ( self , <nl> experimental_hints ) <nl> <nl> def broadcast ( self , tensor , destinations ) : <nl> - " " " Broadcast the ` tensor ` to destinations . <nl> + " " " Broadcast ` tensor ` to ` destinations ` . <nl> + <nl> + This can only be called in the cross - replica context . <nl> <nl> Args : <nl> - tensor : the tensor to broadcast . <nl> - destinations : the broadcast destinations . <nl> + tensor : a ` tf . Tensor ` like object . The value to broadcast . <nl> + destinations : a ` tf . distribute . DistributedValues ` , a ` tf . Variable ` , a <nl> + ` tf . Tensor ` alike object , or a device string . It specifies the devices <nl> + to broadcast to . Note that if it ' s a ` tf . Variable ` , the value is <nl> + broadcasted to the devices of that variable , this method doesn ' t update <nl> + the variable . <nl> <nl> Returns : <nl> - a Mirrored object . <nl> + A ` tf . Tensor ` or ` tf . distribute . DistributedValues ` . <nl> " " " <nl> validate_destinations ( destinations ) <nl> return self . broadcast_implementation ( tensor , destinations ) <nl> def broadcast ( self , tensor , destinations ) : <nl> @ doc_controls . for_subclass_implementers <nl> def reduce_implementation ( self , reduce_op , per_replica_value , destinations , <nl> experimental_hints ) : <nl> - " " " The implementation of reduce of ` per_replica_value ` to ` destinations ` . <nl> + " " " Implementation of ` reduce ` . <nl> <nl> Overriding this method is useful for subclass implementers . <nl> <nl> - It runs the reduction operation defined by ` reduce_op ` and put the <nl> - result on ` destinations ` . <nl> - <nl> Args : <nl> - reduce_op : An instance ` tf . distribute . ReduceOp ` that indicates of how <nl> - per_replica_value will be reduced . <nl> - per_replica_value : A PerReplica object or a tensor with device set . <nl> - destinations : the reduction destinations . <nl> - experimental_hints : A ` tf . distrbute . experimental . CollectiveHints ` . Hints <nl> - to perform collective operations . <nl> + reduce_op : a ` tf . distribute . ReduceOp ` specifying how values should be <nl> + combined . <nl> + per_replica_value : a ` tf . distribute . DistributedValues ` , or a ` tf . Tensor ` <nl> + like object . <nl> + destinations : a ` tf . distribute . DistributedValues ` , a ` tf . Variable ` , a <nl> + ` tf . Tensor ` alike object , or a device string . It specifies the devices <nl> + to reduce to . To perform an all - reduce , pass the same to ` value ` and <nl> + ` destinations ` . Note that if it ' s a ` tf . 
Variable ` , the value is reduced <nl> + to the devices of that variable , this method doesn ' t update the <nl> + variable . <nl> + experimental_hints : a ` tf . distribute . experimental . CollectiveHints ` . See <nl> + ` tf . distribute . experimental . CollectiveHints ` for details . <nl> <nl> Returns : <nl> - a Mirrored object . <nl> + A ` tf . Tensor ` or ` tf . distribute . DistributedValues ` . <nl> <nl> Raises : <nl> - ValueError : if per_replica_value can ' t be converted to a PerReplica <nl> - object . <nl> + ValueError : if per_replica_value can ' t be converted to a <nl> + ` tf . distribute . DistributedValues ` or if destinations is not a string , <nl> + ` tf . Variable ` or ` tf . distribute . DistributedValues ` . <nl> " " " <nl> raise NotImplementedError ( <nl> " _reduce method must be implemented in descendants . " ) <nl> def reduce_implementation ( self , reduce_op , per_replica_value , destinations , <nl> @ doc_controls . for_subclass_implementers <nl> def batch_reduce_implementation ( self , reduce_op , value_destination_pairs , <nl> experimental_hints ) : <nl> - " " " Implementation of reduce PerReplica objects in a batch . <nl> + " " " Implementation of ` batch_reduce ` . <nl> <nl> Overriding this method is useful for subclass implementers . <nl> <nl> - Reduce each first element in ` value_destination_pairs ` to each second <nl> - element which indicates the destinations . <nl> - <nl> Args : <nl> - reduce_op : An instance of ` tf . distribute . ReduceOp ` that indicates how <nl> - per_replica_value will be reduced . <nl> - value_destination_pairs : An iterable of tuples of PerReplica objects <nl> - ( or tensors with device set if there is one device ) and destinations . <nl> - experimental_hints : A ` tf . distrbute . experimental . CollectiveHints ` . Hints <nl> + reduce_op : a ` tf . distribute . ReduceOp ` specifying how values should be <nl> + combined . <nl> + value_destination_pairs : a sequence of ( value , destinations ) pairs . See <nl> + ` reduce ` for descriptions . <nl> + experimental_hints : a ` tf . distribute . experimental . CollectiveHints ` . Hints <nl> to perform collective operations . <nl> <nl> Returns : <nl> - a list of Mirrored objects . <nl> + A list of ` tf . Tensor ` or ` tf . distribute . DistributedValues ` , one per pair <nl> + in ` value_destination_pairs ` . <nl> <nl> Raises : <nl> ValueError : if ` value_destination_pairs ` is not an iterable of <nl> - tuples of PerReplica objects and destinations <nl> + tuples of ` tf . distribute . DistributedValues ` and destinations . <nl> " " " <nl> raise NotImplementedError ( <nl> " batch_reduce_implementation method must be implemented in descendants . " <nl> def batch_reduce_implementation ( self , reduce_op , value_destination_pairs , <nl> <nl> @ doc_controls . for_subclass_implementers <nl> def broadcast_implementation ( self , tensor , destinations ) : <nl> - " " " Implementation of broadcast the ` tensor ` to destinations . <nl> + " " " Implementation of ` broadcast ` . <nl> <nl> Args : <nl> - tensor : the tensor to broadcast . <nl> - destinations : the broadcast destinations . <nl> + tensor : a ` tf . Tensor ` like object . The value to broadcast . <nl> + destinations : a ` tf . distribute . DistributedValues ` , a ` tf . Variable ` , a <nl> + ` tf . Tensor ` alike object , or a device string . It specifies the devices <nl> + to broadcast to . Note that if it ' s a ` tf . 
Variable ` , the value is <nl> + broadcasted to the devices of that variable , this method doesn ' t update <nl> + the variable . <nl> <nl> Returns : <nl> - a Mirrored object . <nl> + A ` tf . Tensor ` or ` tf . distribute . DistributedValues ` . <nl> " " " <nl> return simple_broadcast ( tensor , destinations , always_mirrored = True ) <nl> <nl> <nl> @ tf_export ( " distribute . ReductionToOneDevice " ) <nl> class ReductionToOneDevice ( CrossDeviceOps ) : <nl> - " " " Always do reduction to one device first and then do broadcasting . <nl> + " " " A CrossDeviceOps implementation that copies values to one device to reduce . <nl> + <nl> + This implementation always copies values to one device to reduce them , then <nl> + broadcast reduced values to the destinations . It doesn ' t support efficient <nl> + batching . <nl> <nl> - Batch reduction is done by reduction on each element one by one . <nl> + Here is how you can use ` ReductionToOneDevice ` in <nl> + ` tf . distribute . MirroredStrategy ` : <nl> <nl> ` ` ` <nl> - mirrored_strategy = tf . distribute . MirroredStrategy ( <nl> + strategy = tf . distribute . MirroredStrategy ( <nl> cross_device_ops = tf . distribute . ReductionToOneDevice ( ) ) <nl> ` ` ` <nl> " " " <nl> def __init__ ( self , reduce_to_device = None , accumulation_fn = None ) : <nl> <nl> Args : <nl> reduce_to_device : the intermediate device to reduce to . If None , reduce <nl> - to the first device in ` destinations ` of the ` reduce ( ) ` method . <nl> - accumulation_fn : a function that does accumulation . If None , then <nl> + to the first device in ` destinations ` of the ` reduce ` method . <nl> + accumulation_fn : a function that does accumulation . If None , <nl> ` tf . math . add_n ` is used . <nl> " " " <nl> self . reduce_to_device = reduce_to_device <nl> def _unpack_tensors ( reduced , tensor_packer = None ) : <nl> <nl> <nl> class AllReduceCrossDeviceOps ( CrossDeviceOps ) : <nl> - " " " Reduction using all - reduce . " " " <nl> + " " " All - reduce implementation of CrossDeviceOps . <nl> <nl> - def __init__ ( self , all_reduce_alg = " nccl " , num_packs = 1 ) : <nl> - " " " All - reduce implementation of CrossDeviceOps . <nl> + It performs all - reduce when applicable using NCCL or hierarchical copy . For <nl> + the batch API , tensors will be repacked or aggregated for more efficient <nl> + cross - device transportation . <nl> + <nl> + For reduces that are not all - reduce , it falls back to <nl> + ` tf . distribute . ReductionToOneDevice ` . <nl> + " " " <nl> <nl> - Before performing all - reduce , tensors will be packed for more efficient <nl> - cross - device transportation . <nl> + def __init__ ( self , all_reduce_alg = " nccl " , num_packs = 1 ) : <nl> + " " " Initializes the object . <nl> <nl> Args : <nl> all_reduce_alg : the all - reduce algorithm to use , currently only " nccl " or <nl> " hierarchical_copy " are supported . <nl> - num_packs : If non - zero , pack values into ` num_packs ` splits . <nl> + num_packs : a non - negative integer . The number of packs to split values <nl> + into . If zero , no packing will be done . <nl> " " " <nl> self . _all_reduce_alg = all_reduce_alg <nl> self . _num_packs = num_packs <nl> def _do_batch_all_reduce_sparse ( self , reduce_op , sparse_values ) : <nl> <nl> @ tf_export ( " distribute . NcclAllReduce " ) <nl> class NcclAllReduce ( AllReduceCrossDeviceOps ) : <nl> - " " " Reduction using NCCL all - reduce . " " " <nl> + " " " NCCL all - reduce implementation of CrossDeviceOps . 
<nl> <nl> - def __init__ ( self , num_packs = 1 ) : <nl> - " " " NCCL all - reduce implementation of CrossDeviceOps . <nl> + It uses Nvidia NCCL for all - reduce . For the batch API , tensors will be <nl> + repacked or aggregated for more efficient cross - device transportation . <nl> + <nl> + For reduces that are not all - reduce , it falls back to <nl> + ` tf . distribute . ReductionToOneDevice ` . <nl> + <nl> + Here is how you can use ` NcclAllReduce ` in ` tf . distribute . MirroredStrategy ` : <nl> <nl> - It uses Nvidia NCCL for all - reduce . Before performing all - reduce , tensors <nl> - will be repacked or aggregated for more efficient cross - device <nl> - transportation . <nl> + <nl> + ` ` ` <nl> + strategy = tf . distribute . MirroredStrategy ( <nl> + cross_device_ops = tf . distribute . NcclAllReduce ( ) ) <nl> + ` ` ` <nl> + " " " <nl> + <nl> + def __init__ ( self , num_packs = 1 ) : <nl> + " " " Initializes the object . <nl> <nl> Args : <nl> - num_packs : values will be packed in this many splits . ` num_packs ` should <nl> - be greater than or equals 0 . When it is zero , no packing will be done . <nl> + num_packs : a non - negative integer . The number of packs to split values <nl> + into . If zero , no packing will be done . <nl> <nl> Raises : <nl> - ValueError if ` num_packs ` is negative . <nl> + ValueError : if ` num_packs ` is negative . <nl> " " " <nl> if num_packs < 0 : <nl> raise ValueError ( <nl> def __init__ ( self , num_packs = 1 ) : <nl> <nl> @ tf_export ( " distribute . HierarchicalCopyAllReduce " ) <nl> class HierarchicalCopyAllReduce ( AllReduceCrossDeviceOps ) : <nl> - " " " Reduction using hierarchical copy all - reduce . <nl> + " " " Hierarchical copy all - reduce implementation of CrossDeviceOps . <nl> <nl> It reduces to one GPU along edges in some hierarchy and broadcasts back to <nl> - each GPU along the same path . Before performing all - reduce , tensors will be <nl> - repacked or aggregated for more efficient cross - device transportation . <nl> + each GPU along the same path . For the batch API , tensors will be repacked or <nl> + aggregated for more efficient cross - device transportation . <nl> <nl> This is a reduction created for Nvidia DGX - 1 which assumes GPUs connects like <nl> that on DGX - 1 machine . If you have different GPU inter - connections , it is <nl> likely that it would be slower than ` tf . distribute . ReductionToOneDevice ` . <nl> + <nl> + For reduces that are not all - reduce , it falls back to <nl> + ` tf . distribute . ReductionToOneDevice ` . <nl> + <nl> + Here is how you can use ` HierarchicalCopyAllReduce ` in <nl> + ` tf . distribute . MirroredStrategy ` : <nl> + <nl> + ` ` ` <nl> + strategy = tf . distribute . MirroredStrategy ( <nl> + cross_device_ops = tf . distribute . HierarchicalCopyAllReduce ( ) ) <nl> + ` ` ` <nl> " " " <nl> <nl> def __init__ ( self , num_packs = 1 ) : <nl> " " " Initializes the object . <nl> <nl> Args : <nl> - num_packs : values will be packed in this many splits . ` num_packs ` should <nl> - be greater than or equals 0 . When it is zero , no packing will be done . <nl> + num_packs : a non - negative integer . The number of packs to split values <nl> + into . If zero , no packing will be done . <nl> <nl> Raises : <nl> ValueError if ` num_packs ` is negative . <nl> mmm a / tensorflow / python / distribute / cross_device_utils . py <nl> ppp b / tensorflow / python / distribute / cross_device_utils . 
py <nl> <nl> from __future__ import division <nl> from __future__ import print_function <nl> <nl> - import collections as pycoll <nl> import copy <nl> import threading <nl> <nl> - from tensorflow . python . distribute import all_reduce <nl> from tensorflow . python . distribute import values as value_lib <nl> from tensorflow . python . eager import backprop <nl> from tensorflow . python . eager import context <nl> from tensorflow . python . framework import device as pydev <nl> - from tensorflow . python . framework import dtypes <nl> from tensorflow . python . framework import ops <nl> from tensorflow . python . ops import array_ops <nl> from tensorflow . python . ops import collective_ops <nl> def aggregate_single_gradient_using_copy ( grad_and_vars , use_mean , <nl> return ( grad , v ) , None <nl> <nl> <nl> - def group_device_names ( devices , group_size ) : <nl> - " " " Group device names into groups of group_size . <nl> - <nl> - Args : <nl> - devices : a list of canonical device strings . <nl> - group_size : integer which is equal to or greater than 1 . <nl> - <nl> - Returns : <nl> - list of lists of devices , where each inner list is group_size long , <nl> - and each device appears at least once in an inner list . If <nl> - len ( devices ) % group_size = = 0 then each device will appear exactly once . <nl> - <nl> - Raises : <nl> - ValueError : if group_size > len ( devices ) <nl> - " " " <nl> - num_devices = len ( devices ) <nl> - if group_size > num_devices : <nl> - raise ValueError ( <nl> - ' only % d devices , but group_size = % d ' % ( num_devices , group_size ) ) <nl> - num_groups = ( <nl> - num_devices / / group_size + ( 1 if ( num_devices % group_size ! = 0 ) else 0 ) ) <nl> - groups = [ [ ] for i in range ( num_groups ) ] <nl> - for i in range ( num_groups * group_size ) : <nl> - groups [ i % num_groups ] . append ( devices [ i % num_devices ] ) <nl> - return groups <nl> - <nl> - <nl> - def split_grads_by_size ( threshold_size , device_grads ) : <nl> - " " " Break gradients into two sets according to tensor size . <nl> - <nl> - Args : <nl> - threshold_size : int size cutoff for small vs large tensor . <nl> - device_grads : List of lists of ( gradient , variable ) tuples . The outer <nl> - list is over devices . The inner list is over individual gradients . <nl> - <nl> - Returns : <nl> - small_grads : Subset of device_grads where shape is < = threshold_size <nl> - elements . <nl> - large_grads : Subset of device_grads where shape is > threshold_size <nl> - elements . <nl> - " " " <nl> - small_grads = [ ] <nl> - large_grads = [ ] <nl> - for dl in device_grads : <nl> - small_dl = [ ] <nl> - large_dl = [ ] <nl> - for ( g , v ) in dl : <nl> - tensor_size = g . get_shape ( ) . num_elements ( ) <nl> - if tensor_size < = threshold_size : <nl> - small_dl . append ( [ g , v ] ) <nl> - else : <nl> - large_dl . append ( [ g , v ] ) <nl> - if small_dl : <nl> - small_grads . append ( small_dl ) <nl> - if large_dl : <nl> - large_grads . append ( large_dl ) <nl> - return small_grads , large_grads <nl> - <nl> - <nl> # TODO ( yuefengz ) : use random key starts to avoid reusing keys ? <nl> class CollectiveKeys ( object ) : <nl> " " " Class that manages collective keys . <nl> def densify_and_all_reduce ( ) : <nl> return out_slices_list <nl> <nl> <nl> - def sum_grad_and_var_all_reduce ( grad_and_vars , <nl> - num_workers , <nl> - alg , <nl> - gpu_indices , <nl> - aux_devices = None , <nl> - num_shards = 1 ) : <nl> - " " " Apply all - reduce algorithm over specified gradient tensors . 
" " " <nl> - with ops . name_scope ( ' allreduce ' ) : <nl> - # Note that each grad_and_vars looks like the following : <nl> - # ( ( grad0_gpu0 , var0_gpu0 ) , . . . , ( grad0_gpuN , var0_gpuN ) ) <nl> - scaled_grads = [ g for g , _ in grad_and_vars ] <nl> - if alg = = ' nccl ' : <nl> - summed_grads = nccl_ops . all_sum ( scaled_grads ) <nl> - elif alg = = ' xring ' : <nl> - summed_grads = all_reduce . build_ring_all_reduce ( <nl> - scaled_grads , num_workers , num_shards , gpu_indices , math_ops . add ) <nl> - elif alg = = ' nccl / xring ' : <nl> - summed_grads = all_reduce . build_nccl_then_ring ( scaled_grads , num_shards , <nl> - math_ops . add ) <nl> - elif alg = = ' nccl / rechd ' : <nl> - summed_grads = all_reduce . build_nccl_then_recursive_hd ( <nl> - scaled_grads , math_ops . add ) <nl> - elif alg = = ' nccl / pscpu ' : <nl> - summed_grads = all_reduce . build_nccl_then_shuffle ( <nl> - scaled_grads , aux_devices , math_ops . add , math_ops . add_n ) <nl> - elif alg = = ' pscpu / pscpu ' : <nl> - second_gather_devices = aux_devices [ : num_shards ] <nl> - summed_grads = all_reduce . build_shuffle_then_shuffle ( <nl> - scaled_grads , aux_devices , second_gather_devices , math_ops . add_n ) <nl> - elif alg in [ ' pscpu ' , ' psgpu ' ] : <nl> - summed_grads = all_reduce . build_shuffle_all_reduce ( <nl> - scaled_grads , aux_devices , math_ops . add_n ) <nl> - else : <nl> - raise ValueError ( ' unsupported all_reduce alg : ' , alg ) <nl> - <nl> - result = [ ] <nl> - for ( _ , v ) , g in zip ( grad_and_vars , summed_grads ) : <nl> - result . append ( [ g , v ] ) <nl> - return result <nl> - <nl> - <nl> - def sum_gradients_all_reduce ( dev_prefixes , replica_grads , num_workers , alg , <nl> - num_shards , gpu_indices ) : <nl> - " " " Apply all - reduce algorithm over specified gradient tensors . <nl> - <nl> - Args : <nl> - dev_prefixes : list of prefix strings to use to generate PS device names . <nl> - replica_grads : the gradients to reduce . <nl> - num_workers : number of worker processes across entire job . <nl> - alg : the all - reduce algorithm to apply . <nl> - num_shards : alg - specific sharding factor . <nl> - gpu_indices : indices of local GPUs in order usable for ring - reduce . <nl> - <nl> - Returns : <nl> - list of reduced tensors <nl> - " " " <nl> - alg_contains_shuffle = any ( n in alg for n in [ ' pscpu ' , ' psgpu ' ] ) <nl> - is_hierarchical = ' / ' in alg <nl> - if ' pscpu ' in alg : <nl> - aux_devices = [ prefix + ' / cpu : 0 ' for prefix in dev_prefixes ] <nl> - elif ' psgpu ' in alg : <nl> - aux_devices = [ <nl> - prefix + ' / gpu : % d ' % i <nl> - for i in range ( len ( gpu_indices ) ) <nl> - for prefix in dev_prefixes <nl> - ] <nl> - else : <nl> - aux_devices = [ ' / job : localhost / cpu : 0 ' ] <nl> - # Auxiliary devices for hierarchical all - reduces . <nl> - aux_device_groups = group_device_names ( <nl> - aux_devices , num_shards if alg_contains_shuffle else 1 ) <nl> - group_index = 0 <nl> - reduced_gv_list = [ ] <nl> - for grad_and_vars in zip ( * replica_grads ) : <nl> - reduced_gv_list . 
append ( <nl> - sum_grad_and_var_all_reduce ( <nl> - grad_and_vars , num_workers , alg , gpu_indices , aux_devices <nl> - if is_hierarchical else aux_device_groups [ group_index ] , num_shards ) ) <nl> - group_index = ( group_index + 1 ) % len ( aux_device_groups ) <nl> - new_replica_grads = [ list ( x ) for x in zip ( * reduced_gv_list ) ] <nl> - return new_replica_grads <nl> - <nl> - <nl> - def extract_ranges ( index_list , range_size_limit = 32 ) : <nl> - " " " Extract consecutive ranges and singles from index_list . <nl> - <nl> - Args : <nl> - index_list : List of monotone increasing non - negative integers . <nl> - range_size_limit : Largest size range to return . If a larger <nl> - consecutive range exists , it will be returned as multiple <nl> - ranges . <nl> - <nl> - Returns : <nl> - ( ranges , singles ) where ranges is a list of [ first , last ] pairs of <nl> - consecutive elements in index_list , and singles is all of the <nl> - other elements , in original order . <nl> - " " " <nl> - if not index_list : <nl> - return [ ] , [ ] <nl> - first = index_list [ 0 ] <nl> - last = first <nl> - ranges = [ ] <nl> - singles = [ ] <nl> - for i in index_list [ 1 : ] : <nl> - if i = = last + 1 and ( last - first ) < = range_size_limit : <nl> - last = i <nl> - else : <nl> - if last > first : <nl> - ranges . append ( [ first , last ] ) <nl> - else : <nl> - singles . append ( first ) <nl> - first = i <nl> - last = i <nl> - if last > first : <nl> - ranges . append ( [ first , last ] ) <nl> - else : <nl> - singles . append ( first ) <nl> - return ranges , singles <nl> - <nl> - <nl> - GradPackTuple = pycoll . namedtuple ( ' GradPackTuple ' , ' indices vars shapes ' ) <nl> - <nl> - <nl> - def pack_range ( key , packing , grad_vars , rng ) : <nl> - " " " Form the concatenation of a specified range of gradient tensors . <nl> - <nl> - Args : <nl> - key : Value under which to store meta - data in packing that will be used <nl> - later to restore the grad_var list structure . <nl> - packing : Dict holding data describing packed ranges of small tensors . <nl> - grad_vars : List of ( grad , var ) pairs for one replica . <nl> - rng : A pair of integers giving the first , last indices of a consecutive <nl> - range of tensors to be packed . <nl> - <nl> - Returns : <nl> - A tensor that is the concatenation of all the specified small tensors . <nl> - " " " <nl> - to_pack = grad_vars [ rng [ 0 ] : rng [ 1 ] + 1 ] <nl> - members = [ ] <nl> - variables = [ ] <nl> - restore_shapes = [ ] <nl> - with ops . name_scope ( ' pack ' ) : <nl> - for g , v in to_pack : <nl> - variables . append ( v ) <nl> - restore_shapes . append ( g . shape ) <nl> - with ops . device ( g . device ) : <nl> - members . append ( array_ops . reshape ( g , [ - 1 ] ) ) <nl> - packing [ key ] = GradPackTuple ( <nl> - indices = range ( rng [ 0 ] , rng [ 1 ] + 1 ) , <nl> - vars = variables , <nl> - shapes = restore_shapes ) <nl> - with ops . device ( members [ 0 ] . device ) : <nl> - return array_ops . concat ( members , 0 ) <nl> - <nl> - <nl> - def unpack_grad_tuple ( gv , gpt ) : <nl> - " " " Unpack a previously packed collection of gradient tensors . <nl> - <nl> - Args : <nl> - gv : A ( grad , var ) pair to be unpacked . <nl> - gpt : A GradPackTuple describing the packing operation that produced gv . <nl> - <nl> - Returns : <nl> - A list of ( grad , var ) pairs corresponding to the values that were <nl> - originally packed into gv , maybe following subsequent operations like <nl> - reduction . <nl> - " " " <nl> - elt_widths = [ x . 
num_elements ( ) for x in gpt . shapes ] <nl> - with ops . device ( gv [ 0 ] . device ) : <nl> - with ops . name_scope ( ' unpack ' ) : <nl> - splits = array_ops . split ( gv [ 0 ] , elt_widths ) <nl> - unpacked_gv = [ ] <nl> - for idx , s in enumerate ( splits ) : <nl> - unpacked_gv . append ( ( array_ops . reshape ( s , gpt . shapes [ idx ] ) , <nl> - gpt . vars [ idx ] ) ) <nl> - return unpacked_gv <nl> - <nl> - <nl> - def pack_small_tensors ( replica_grads , max_bytes = 0 , max_group = 0 ) : <nl> - " " " Concatenate small gradient tensors together for reduction . <nl> - <nl> - Args : <nl> - replica_grads : List of lists of ( gradient , variable ) tuples . <nl> - max_bytes : Int giving max number of bytes in a tensor that <nl> - may be considered small . <nl> - max_group : Int giving max number of small tensors that may be <nl> - concatenated into one new tensor . <nl> - <nl> - Returns : <nl> - new_replica_grads , packing where new_replica_grads is identical to <nl> - replica_grads except that all feasible small_tensors have been removed <nl> - from their places and concatenated into larger tensors that are <nl> - now in the front of the list for each replica , and packing contains <nl> - the data necessary to restore the replica_grads structure . <nl> - <nl> - Look through the first replica for gradients of the same type ( float ) , <nl> - and small size , that are all sequential . For each such group , <nl> - replace by a new tensor that is a flattened concatenation . Note <nl> - that the corresponding variable will be absent , which doesn ' t matter <nl> - because it isn ' t used during all - reduce . <nl> - <nl> - Requires : <nl> - Every gv_list in replicas must have isomorphic structure including identical <nl> - tensor sizes and types . <nl> - " " " <nl> - small_indices = [ ] <nl> - large_indices = [ ] <nl> - for idx , ( g , _ ) in enumerate ( replica_grads [ 0 ] ) : <nl> - if g . dtype = = dtypes . float32 and ( 4 * g . shape . num_elements ( ) ) < = max_bytes : <nl> - small_indices . append ( idx ) <nl> - else : <nl> - large_indices . append ( idx ) <nl> - small_ranges , small_singles = extract_ranges ( <nl> - small_indices , range_size_limit = max_group ) <nl> - large_indices = sorted ( large_indices + small_singles ) <nl> - num_gv = len ( replica_grads [ 0 ] ) <nl> - packing = { } <nl> - if small_ranges : <nl> - new_replica_grads = [ ] <nl> - for dev_idx , gv_list in enumerate ( replica_grads ) : <nl> - assert len ( gv_list ) = = num_gv <nl> - new_gv_list = [ ] <nl> - for r in small_ranges : <nl> - key = ' % d : % d ' % ( dev_idx , len ( new_gv_list ) ) <nl> - new_gv_list . append ( ( pack_range ( key , packing , gv_list , r ) , <nl> - ' packing_var_placeholder ' ) ) <nl> - for i in large_indices : <nl> - new_gv_list . append ( gv_list [ i ] ) <nl> - new_replica_grads . append ( new_gv_list ) <nl> - return new_replica_grads , packing <nl> - else : <nl> - return replica_grads , None <nl> - <nl> - <nl> - def unpack_small_tensors ( replica_grads , packing ) : <nl> - " " " Undo the structure alterations to replica_grads done by pack_small_tensors . <nl> - <nl> - Args : <nl> - replica_grads : List of List of ( grad , var ) tuples . <nl> - packing : A dict generated by pack_small_tensors describing the changes <nl> - it made to replica_grads . <nl> - <nl> - Returns : <nl> - new_replica_grads : identical to replica_grads except that concatenations <nl> - of small tensors have been split apart and returned to their original <nl> - positions , paired with their original variables . 
<nl> - " " " <nl> - if not packing : <nl> - return replica_grads <nl> - new_replica_grads = [ ] <nl> - num_devices = len ( replica_grads ) <nl> - num_packed = len ( packing . keys ( ) ) / / num_devices <nl> - for dev_idx , gv_list in enumerate ( replica_grads ) : <nl> - gv_list = list ( gv_list ) <nl> - new_gv_list = gv_list [ num_packed : ] <nl> - for i in range ( num_packed ) : <nl> - k = ' % d : % d ' % ( dev_idx , i ) <nl> - gpt = packing [ k ] <nl> - gv = unpack_grad_tuple ( gv_list [ i ] , gpt ) <nl> - for gi , idx in enumerate ( gpt . indices ) : <nl> - assert idx = = gpt . indices [ gi ] <nl> - new_gv_list . insert ( idx , gv [ gi ] ) <nl> - new_replica_grads . append ( new_gv_list ) <nl> - return new_replica_grads <nl> - <nl> - <nl> def aggregate_tensors_or_indexed_slices ( values , accumulation_fn = math_ops . add_n ) : <nl> " " " Aggregate tensors using ` accumulation_fn ` and IndexedSlices via concat . " " " <nl> if any ( isinstance ( v , ops . IndexedSlices ) for v in values ) : <nl> def copy_tensor_or_indexed_slices_to_device ( value , device ) : <nl> return result <nl> <nl> <nl> - def contains_indexed_slices ( value ) : <nl> - " " " Check whether the value is ` IndexedSlices ` or contains ` IndexedSlices ` . " " " <nl> - if isinstance ( value , ops . IndexedSlices ) : <nl> - return True <nl> - elif isinstance ( value , ( list , tuple ) ) and value : <nl> - return any ( contains_indexed_slices ( v ) for v in value ) <nl> - elif isinstance ( value , value_lib . DistributedValues ) : <nl> - return contains_indexed_slices ( value . values ) <nl> - else : <nl> - return False <nl> - <nl> - <nl> def is_indexed_slices ( value ) : <nl> if isinstance ( value , ops . IndexedSlices ) : <nl> return True <nl> mmm a / tensorflow / python / distribute / cross_device_utils_test . py <nl> ppp b / tensorflow / python / distribute / cross_device_utils_test . py <nl> def testDivideIndexedSlices ( self ) : <nl> def testIsIndexedSlices ( self ) : <nl> t = math_ops . _as_indexed_slices ( <nl> constant_op . constant ( [ [ 1 . , 2 . ] , [ 0 , 0 ] , [ 3 . , 4 . ] ] ) ) <nl> - self . assertTrue ( cross_device_utils . contains_indexed_slices ( t ) ) <nl> - <nl> - @ test_util . run_in_graph_and_eager_modes <nl> - def testContainsIndexedSlices_List ( self ) : <nl> - t0 = math_ops . _as_indexed_slices ( <nl> - constant_op . constant ( [ [ 1 . , 2 . ] , [ 0 , 0 ] , [ 3 . , 4 . ] ] ) ) <nl> - t1 = math_ops . _as_indexed_slices ( <nl> - constant_op . constant ( [ [ 0 . , 0 . ] , [ 5 , 6 ] , [ 7 . , 8 . ] ] ) ) <nl> - self . assertTrue ( cross_device_utils . contains_indexed_slices ( [ t0 , t1 ] ) ) <nl> - <nl> - @ test_util . run_in_graph_and_eager_modes <nl> - def testContainsIndexedSlices_Tuple ( self ) : <nl> - t0 = math_ops . _as_indexed_slices ( <nl> - constant_op . constant ( [ [ 1 . , 2 . ] , [ 0 , 0 ] , [ 3 . , 4 . ] ] ) ) <nl> - t1 = math_ops . _as_indexed_slices ( <nl> - constant_op . constant ( [ [ 0 . , 0 . ] , [ 5 , 6 ] , [ 7 . , 8 . ] ] ) ) <nl> - self . assertTrue ( cross_device_utils . contains_indexed_slices ( ( t0 , t1 ) ) ) <nl> - <nl> - @ test_util . run_in_graph_and_eager_modes <nl> - def testContainsIndexedSlices_PerReplica ( self ) : <nl> - t0 = math_ops . _as_indexed_slices ( <nl> - constant_op . constant ( [ [ 1 . , 2 . ] , [ 0 , 0 ] , [ 3 . , 4 . ] ] ) ) <nl> - t1 = math_ops . _as_indexed_slices ( <nl> - constant_op . constant ( [ [ 0 . , 0 . ] , [ 5 , 6 ] , [ 7 . , 8 . ] ] ) ) <nl> - per_replica = value_lib . PerReplica ( ( t0 , t1 ) ) <nl> - self . assertTrue ( cross_device_utils . 
contains_indexed_slices ( per_replica ) ) <nl> + self . assertTrue ( cross_device_utils . is_indexed_slices ( t ) ) <nl> <nl> @ combinations . generate ( combinations . combine ( <nl> mode = [ " graph " , " eager " ] , <nl> mmm a / tensorflow / python / distribute / distribute_lib . py <nl> ppp b / tensorflow / python / distribute / distribute_lib . py <nl> def reduce_to ( self , reduce_op , value , destinations , experimental_hints = None ) : <nl> < tf . Variable ' Variable : 0 ' shape = ( ) dtype = float32 , numpy = 1 . 0 > <nl> <nl> Args : <nl> - reduce_op : a ` tf . distribute . ReduceOp ` or string . How to reduce the value . <nl> - value : a ` tf . distribute . DistributedValue ` , or a ` tf . Tensor ` like object . <nl> - destinations : a ` tf . distribute . DistributedValue ` , a ` tf . Variable ` , a <nl> + reduce_op : a ` tf . distribute . ReduceOp ` value specifying how values should <nl> + be combined . Allows using string representation of the enum such as <nl> + " SUM " , " MEAN " . <nl> + value : a ` tf . distribute . DistributedValues ` , or a ` tf . Tensor ` like object . <nl> + destinations : a ` tf . distribute . DistributedValues ` , a ` tf . Variable ` , a <nl> ` tf . Tensor ` alike object , or a device string . It specifies the devices <nl> to reduce to . To perform an all - reduce , pass the same to ` value ` and <nl> ` destinations ` . Note that if it ' s a ` tf . Variable ` , the value is reduced <nl> - to the devices of that variable , this method doesn ' t update the variable . <nl> - experimental_hints : a ` tf . distrbute . experimental . CollectiveHints ` . Hints <nl> - to perform collective operations . See <nl> - ` tf . distrbute . experimental . CollectiveHints ` for details . <nl> + to the devices of that variable , and this method doesn ' t update the <nl> + variable . <nl> + experimental_hints : a ` tf . distribute . experimental . CollectiveHints ` . See <nl> + ` tf . distribute . experimental . CollectiveHints ` for details . <nl> <nl> Returns : <nl> A tensor or value reduced to ` destinations ` . <nl> def batch_reduce_to ( self , <nl> < tf . Variable ' Variable : 0 ' shape = ( ) dtype = float32 , numpy = 1 . 0 > <nl> <nl> Args : <nl> - reduce_op : a ` tf . distribute . ReduceOp ` . How to reduce the value . <nl> + reduce_op : a ` tf . distribute . ReduceOp ` value specifying how values should <nl> + be combined . Allows using string representation of the enum such as <nl> + " SUM " , " MEAN " . <nl> value_destination_pairs : a sequence of ( value , destinations ) pairs . See <nl> - ` reduce_to ( ) ` for descriptions . <nl> - experimental_hints : a ` tf . distrbute . experimental . CollectiveHints ` . Hints <nl> - to perform collective operations . <nl> + ` tf . distribute . Strategy . reduce_to ` for descriptions . <nl> + experimental_hints : a ` tf . distribute . experimental . CollectiveHints ` . See <nl> + ` tf . distribute . experimental . CollectiveHints ` for details . <nl> <nl> Returns : <nl> A list of reduced values , one per pair in ` value_destination_pairs ` . <nl> def all_reduce ( self , reduce_op , value , experimental_hints = None ) : <nl> to the host in cross - replica context . <nl> <nl> Args : <nl> - reduce_op : a ` tf . distribute . ReduceOp ` enum or its string form , which <nl> - specifies how to reduce the value . <nl> + reduce_op : a ` tf . distribute . ReduceOp ` value specifying how values should <nl> + be combined . Allows using string representation of the enum such as <nl> + " SUM " , " MEAN " . 
<nl> value : a nested structure of ` tf . Tensor ` which ` tf . nest . flatten ` accepts . <nl> The structure and the shapes of the ` tf . Tensor ` need to be same on all <nl> replicas . <nl> mmm a / tensorflow / python / distribute / parallel_device / BUILD <nl> ppp b / tensorflow / python / distribute / parallel_device / BUILD <nl> <nl> - load ( " / / tensorflow : tensorflow . bzl " , " tf_custom_op_library " , " tf_gen_op_wrapper_py " ) <nl> - load ( " / / tensorflow : tensorflow . bzl " , " tf_custom_op_py_library " ) <nl> - <nl> package ( <nl> default_visibility = [ " / / tensorflow : internal " ] , <nl> licenses = [ " notice " ] , # Apache 2 . 0 <nl> py_library ( <nl> srcs = [ " parallel_device . py " ] , <nl> srcs_version = " PY2AND3 " , <nl> deps = [ <nl> - " : parallel_device_ops " , <nl> " : saving " , <nl> " / / tensorflow / python : _pywrap_parallel_device " , <nl> " / / tensorflow / python / distribute : device_util " , <nl> py_library ( <nl> deps = [ " / / tensorflow / python : framework_ops " ] , <nl> ) <nl> <nl> - tf_gen_op_wrapper_py ( <nl> - name = " parallel_device_ops_py " , <nl> - out = " gen_parallel_device_ops . py " , <nl> - deps = [ " / / tensorflow / c / eager / parallel_device : parallel_device_ops " ] , <nl> - ) <nl> - <nl> - tf_custom_op_library ( <nl> - name = " _parallel_device_ops . so " , <nl> - srcs = [ " / / tensorflow / c / eager / parallel_device : parallel_device_ops_srcs " ] , <nl> - ) <nl> - <nl> - tf_custom_op_py_library ( <nl> - name = " parallel_device_ops " , <nl> - dso = [ " : _parallel_device_ops . so " ] , <nl> - kernels = [ " / / tensorflow / c / eager / parallel_device : parallel_device_ops " ] , <nl> - visibility = [ " / / tensorflow : internal " ] , <nl> - deps = [ " : parallel_device_ops_py " ] , <nl> - ) <nl> - <nl> py_test ( <nl> name = " parallel_device_test " , <nl> srcs = [ " parallel_device_test . py " ] , <nl> mmm a / tensorflow / python / distribute / parallel_device / parallel_device . py <nl> ppp b / tensorflow / python / distribute / parallel_device / parallel_device . py <nl> <nl> <nl> from tensorflow . python import _pywrap_parallel_device <nl> from tensorflow . python . distribute import device_util <nl> - from tensorflow . python . distribute . parallel_device import gen_parallel_device_ops <nl> from tensorflow . python . distribute . parallel_device import saving <nl> from tensorflow . python . eager import context <nl> - from tensorflow . python . framework import load_library <nl> + from tensorflow . python . framework import constant_op <nl> from tensorflow . python . framework import ops <nl> - from tensorflow . python . platform import resource_loader <nl> + from tensorflow . python . ops import array_ops <nl> from tensorflow . python . tpu . ops import tpu_ops <nl> <nl> - load_library . load_op_library ( <nl> - resource_loader . get_path_to_datafile ( " _parallel_device_ops . so " ) ) <nl> - <nl> _next_device_number = 0 <nl> _next_device_number_lock = threading . Lock ( ) <nl> <nl> <nl> # TODO ( allenl ) : Expand this docstring once things like getting components on and <nl> # off the device are stable . <nl> + # <nl> + # TODO ( allenl ) : Make multi - client work ; we need an offset for device IDs , and an <nl> + # indication of how many other devices there are total for collectives which <nl> + # don ' t have a number of participants hard - coded in their attributes . <nl> class ParallelDevice ( object ) : <nl> " " " A device which executes operations in parallel . 
" " " <nl> <nl> def __init__ ( self , components ) : <nl> device , device_info = _pywrap_parallel_device . GetParallelDeviceCapsules ( <nl> self . _name , self . components ) <nl> context . register_custom_device ( device , self . _name , device_info ) <nl> - with ops . device ( self . _name ) : <nl> - self . _device_ids = gen_parallel_device_ops . device_id ( ) <nl> + self . _device_ids = None <nl> self . _device_scope = None <nl> self . _saving_scope = None <nl> <nl> def device_ids ( self ) : <nl> Returns : <nl> A parallel tensor containing 0 on the first device , 1 on the second , etc . <nl> " " " <nl> + if self . _device_ids is None : <nl> + # device_ids may be called from inside a tf . function , in which case the <nl> + # function captures the eager tensor . We can ' t pack tensors in a function <nl> + # at the moment , and even if we could we don ' t want to hold on to a <nl> + # symbolic tensor , so we need to init_scope out of the function <nl> + # temporarily . <nl> + with ops . init_scope ( ) : <nl> + # TODO ( allenl ) : Functions which capture eager device ID tensors won ' t be <nl> + # saveable in SavedModels . Ideally we ' d run a DeviceID op every time <nl> + # device IDs are required , with functions using the op in their bodies <nl> + # but not hard - coding a fixed number of devices ( so they can be re - used <nl> + # with a different replica count ) . <nl> + device_ids_list = [ ] <nl> + for index , device in enumerate ( self . components ) : <nl> + with ops . device ( device ) : <nl> + # The identity op ensures each device ID tensor is placed on its <nl> + # device . <nl> + device_ids_list . append ( <nl> + array_ops . identity ( constant_op . constant ( index ) ) ) <nl> + self . _device_ids = self . pack ( device_ids_list ) <nl> + <nl> return self . _device_ids <nl> <nl> def _assert_eager ( self ) : <nl> new file mode 100644 <nl> index 0000000000000 . . b895701d84f04 <nl> mmm / dev / null <nl> ppp b / tensorflow / python / framework / py_context_manager . cc <nl> <nl> + / * Copyright 2020 The TensorFlow Authors . All Rights Reserved . <nl> + <nl> + Licensed under the Apache License , Version 2 . 0 ( the " License " ) ; <nl> + you may not use this file except in compliance with the License . <nl> + You may obtain a copy of the License at <nl> + <nl> + http : / / www . apache . org / licenses / LICENSE - 2 . 0 <nl> + <nl> + Unless required by applicable law or agreed to in writing , software <nl> + distributed under the License is distributed on an " AS IS " BASIS , <nl> + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND , either express or implied . <nl> + See the License for the specific language governing permissions and <nl> + limitations under the License . <nl> + = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = * / <nl> + # include " tensorflow / python / framework / py_context_manager . h " <nl> + <nl> + # include < map > <nl> + <nl> + # include " tensorflow / core / platform / logging . h " <nl> + <nl> + namespace tensorflow { <nl> + <nl> + bool PyContextManager : : Enter ( PyObject * py_context_manager ) { <nl> + if ( context_manager_ ) { <nl> + PyErr_SetString ( <nl> + PyExc_ValueError , <nl> + " tensorflow : : PyContextManager : : Enter must be called at most once . " ) ; <nl> + } <nl> + if ( ! py_context_manager ) return false ; <nl> + context_manager_ . reset ( py_context_manager ) ; <nl> + static char _enter [ ] = " __enter__ " ; <nl> + var_ . 
reset ( PyObject_CallMethod ( context_manager_ . get ( ) , _enter , nullptr ) ) ; <nl> + return var_ ! = nullptr ; <nl> + } <nl> + <nl> + PyContextManager : : ~ PyContextManager ( ) { <nl> + if ( var_ ) { <nl> + static char _exit [ ] = " __exit__ " ; <nl> + static char _ooo [ ] = " OOO " ; <nl> + if ( PyErr_Occurred ( ) ) { <nl> + PyObject * type , * value , * traceback ; <nl> + PyErr_Fetch ( & type , & value , & traceback ) ; <nl> + value = value ? value : Py_None ; <nl> + traceback = traceback ? traceback : Py_None ; <nl> + Safe_PyObjectPtr result ( PyObject_CallMethod ( <nl> + context_manager_ . get ( ) , _exit , _ooo , type , value , traceback ) ) ; <nl> + if ( result ) { <nl> + if ( PyObject_IsTrue ( result . get ( ) ) ) { <nl> + PyErr_SetString ( <nl> + PyExc_ValueError , <nl> + " tensorflow : : PyContextManager : : Enter does not support " <nl> + " context managers that suppress exceptions . " ) ; <nl> + } else { <nl> + PyErr_Restore ( type , value , traceback ) ; <nl> + } <nl> + } <nl> + } else { <nl> + PyObject * result = PyObject_CallMethod ( context_manager_ . get ( ) , _exit , <nl> + _ooo , Py_None , Py_None , Py_None ) ; <nl> + if ( result ) { <nl> + Py_DECREF ( result ) ; <nl> + } else { <nl> + LOG ( ERROR ) <nl> + < < " A context manager wrapped by tensorflow : : PyContextManager " <nl> + " raised a new exception from its __exit__ method . This behavior " <nl> + " is not supported by PyContextManager , and the exception is " <nl> + " being suppressed . " ; <nl> + PyErr_Clear ( ) ; <nl> + } <nl> + } <nl> + } <nl> + } <nl> + <nl> + } / / namespace tensorflow <nl> new file mode 100644 <nl> index 0000000000000 . . 6c15fccaf0764 <nl> mmm / dev / null <nl> ppp b / tensorflow / python / framework / py_context_manager . h <nl> <nl> + / * Copyright 2020 The TensorFlow Authors . All Rights Reserved . <nl> + <nl> + Licensed under the Apache License , Version 2 . 0 ( the " License " ) ; <nl> + you may not use this file except in compliance with the License . <nl> + You may obtain a copy of the License at <nl> + <nl> + http : / / www . apache . org / licenses / LICENSE - 2 . 0 <nl> + <nl> + Unless required by applicable law or agreed to in writing , software <nl> + distributed under the License is distributed on an " AS IS " BASIS , <nl> + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND , either express or implied . <nl> + See the License for the specific language governing permissions and <nl> + limitations under the License . <nl> + = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = * / <nl> + # ifndef TENSORFLOW_PYTHON_FRAMEWORK_PY_CONTEXT_MANAGER_H_ <nl> + # define TENSORFLOW_PYTHON_FRAMEWORK_PY_CONTEXT_MANAGER_H_ <nl> + <nl> + # include < Python . h > <nl> + <nl> + # include < string > <nl> + <nl> + # include " tensorflow / python / lib / core / safe_pyobject_ptr . h " <nl> + <nl> + namespace tensorflow { <nl> + <nl> + / / Class that wraps a Python context manager , and calls the ` __enter__ ` and <nl> + / / ` __exit__ ` methods at appropriate times : <nl> + / / <nl> + / / * When ` PyContextManager : : Enter ( cm ) ` is called , the context manager ` cm ` <nl> + / / is stored , and ` cm . __enter__ ` is called . The result can be retrieved <nl> + / / with ` PyContextManager : : var ( ) ` . <nl> + / / * When the ` PyContextManager ` is destroyed , then ` cm . __exit__ ` is called <nl> + / / ( with information about any active exception ) . 
<nl> + / / * ` PyContextManager : : Enter ( cm ) ` may be called at most once . If <nl> + / / ` PyContextManager : : Enter ( ) ` is never called , then the destructor is a <nl> + / / no - op ( i . e . , ` __exit__ ` is not called ) . <nl> + / / <nl> + / / PyContextManager places two restrictions on the wrapped context managers : <nl> + / / <nl> + / / 1 . The context manager may not suppress exceptions - - i . e . , ` __exit__ ` <nl> + / / may not return a True value . If it does , then a new exception will be <nl> + / / set , indicating that this is unsupported . <nl> + / / 2 . The context manager may not raise an exception from ` __exit__ ` if an <nl> + / / exception is not active when it is called . If it does , then an error <nl> + / / message will be logged , indicating that this is unsupported , and the <nl> + / / exception will be suppressed . <nl> + / / <nl> + / / These restrictions are both intended to ensure that the state of <nl> + / / PyErr_Occurred is unchanged by PyContextManager ' s destructor . This is <nl> + / / important , because changing the state of PyErr_Occurred in the destructor <nl> + / / would mean that we are returning a nullptr with no exception set , or <nl> + / / returning a non - null value with an exception set ( both of which are invalid ) . <nl> + class PyContextManager { <nl> + public : <nl> + / / Calls ` py_context_manager . __enter__ ( ) ` , and stores the result in ` var ` . <nl> + / / Return true if ` __enter__ ` succeeds , or false if ` __enter__ ` raises an <nl> + / / exception . ( Also returns false if ` py_context_manager ` is nullptr . ) <nl> + / / <nl> + / / Steals a reference to ` py_context_manager ` . ( This reference is deleted <nl> + / / when the destructor is called . ) <nl> + bool Enter ( PyObject * py_context_manager ) ; <nl> + <nl> + / / Calls ` py_context_manager . __exit__ ( ) ` . <nl> + ~ PyContextManager ( ) ; <nl> + <nl> + / / Returns the variable returned by ` context_manager . __enter__ ( ) ` . <nl> + / / ( This is the ` var ` bound by ` with context_manager as var ` . ) <nl> + / / Returns a borrowed reference . <nl> + PyObject * var ( ) { return var_ . get ( ) ; } <nl> + <nl> + protected : <nl> + Safe_PyObjectPtr context_manager_ ; <nl> + Safe_PyObjectPtr var_ ; <nl> + } ; <nl> + <nl> + } / / namespace tensorflow <nl> + <nl> + # endif / / TENSORFLOW_PYTHON_FRAMEWORK_PY_CONTEXT_MANAGER_H_ <nl> new file mode 100644 <nl> index 0000000000000 . . 34565145444d8 <nl> mmm / dev / null <nl> ppp b / tensorflow / python / framework / py_context_manager_pybind . cc <nl> <nl> + / * Copyright 2020 The TensorFlow Authors . All Rights Reserved . <nl> + <nl> + Licensed under the Apache License , Version 2 . 0 ( the " License " ) ; <nl> + you may not use this file except in compliance with the License . <nl> + You may obtain a copy of the License at <nl> + <nl> + http : / / www . apache . org / licenses / LICENSE - 2 . 0 <nl> + <nl> + Unless required by applicable law or agreed to in writing , software <nl> + distributed under the License is distributed on an " AS IS " BASIS , <nl> + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND , either express or implied . <nl> + See the License for the specific language governing permissions and <nl> + limitations under the License . <nl> + = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = * / <nl> + # include " pybind11 / pybind11 . h " <nl> + # include " pybind11 / stl . 
h " <nl> + # include " tensorflow / python / framework / py_context_manager . h " <nl> + <nl> + namespace py = pybind11 ; <nl> + <nl> + namespace { <nl> + <nl> + / / Test harness for PyContextManager . Creates a PyContextManager ` cm ` that <nl> + / / wraps ` context_manager ` , calls ` cm . Enter ( ) ` , and then calls ` body_func ` <nl> + / / with ` cm . var ( ) ` . Returns the result of the function . <nl> + py : : handle TestPyContextManager ( py : : handle context_manager , <nl> + py : : handle body_func ) { <nl> + tensorflow : : Safe_PyObjectPtr result ; <nl> + { <nl> + tensorflow : : PyContextManager cm ; <nl> + Py_INCREF ( context_manager . ptr ( ) ) ; / / cm . Enter steals a reference . <nl> + if ( ! cm . Enter ( context_manager . ptr ( ) ) ) { <nl> + throw py : : error_already_set ( ) ; <nl> + } <nl> + result . reset ( <nl> + PyObject_CallFunctionObjArgs ( body_func . ptr ( ) , cm . var ( ) , nullptr ) ) ; <nl> + } <nl> + / / cm gets destroyed here . <nl> + <nl> + if ( result ) { <nl> + return result . release ( ) ; <nl> + } else { <nl> + throw py : : error_already_set ( ) ; <nl> + } <nl> + } <nl> + <nl> + } / / namespace <nl> + <nl> + PYBIND11_MODULE ( _py_context_manager , m ) { <nl> + m . def ( " test_py_context_manager " , TestPyContextManager ) ; <nl> + } <nl> new file mode 100644 <nl> index 0000000000000 . . 60c72a806ae5b <nl> mmm / dev / null <nl> ppp b / tensorflow / python / framework / py_context_manager_test . py <nl> <nl> + # Copyright 2020 The TensorFlow Authors . All Rights Reserved . <nl> + # <nl> + # Licensed under the Apache License , Version 2 . 0 ( the " License " ) ; <nl> + # you may not use this file except in compliance with the License . <nl> + # You may obtain a copy of the License at <nl> + # <nl> + # http : / / www . apache . org / licenses / LICENSE - 2 . 0 <nl> + # <nl> + # Unless required by applicable law or agreed to in writing , software <nl> + # distributed under the License is distributed on an " AS IS " BASIS , <nl> + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND , either express or implied . <nl> + # See the License for the specific language governing permissions and <nl> + # limitations under the License . <nl> + # = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = <nl> + " " " Tests for tensorflow . python . framework . _py_context_manager . " " " <nl> + <nl> + from __future__ import absolute_import <nl> + from __future__ import division <nl> + from __future__ import print_function <nl> + <nl> + from tensorflow . python import _py_context_manager <nl> + from tensorflow . python . framework import test_util <nl> + from tensorflow . python . platform import googletest <nl> + <nl> + <nl> + class TestContextManager ( object ) : <nl> + <nl> + def __init__ ( self , behavior = " basic " ) : <nl> + self . log = [ ] <nl> + self . behavior = behavior <nl> + <nl> + def __enter__ ( self ) : <nl> + self . log . append ( " __enter__ ( ) " ) <nl> + if self . behavior = = " raise_from_enter " : <nl> + raise ValueError ( " exception in __enter__ " ) <nl> + return " var " <nl> + <nl> + def __exit__ ( self , ex_type , ex_value , ex_tb ) : <nl> + self . log . append ( " __exit__ ( % s , % s , % s ) " % ( ex_type , ex_value , ex_tb ) ) <nl> + if self . behavior = = " raise_from_exit " : <nl> + raise ValueError ( " exception in __exit__ " ) <nl> + if self . 
behavior = = " suppress_exception " : <nl> + return True <nl> + <nl> + <nl> + # Expected log when the body doesn ' t raise an exception . <nl> + NO_EXCEPTION_LOG = " " " \ <nl> + __enter__ ( ) <nl> + body ( ' var ' ) <nl> + __exit__ ( None , None , None ) " " " <nl> + <nl> + # Expected log when the body does raise an exception . ( Regular expression . ) <nl> + EXCEPTION_LOG = " " " \ <nl> + __enter__ \ \ ( \ \ ) <nl> + body \ \ ( ' var ' \ \ ) <nl> + __exit__ \ \ ( < class ' ValueError ' > , Foo , < traceback object . * > \ \ ) " " " <nl> + <nl> + <nl> + class PyContextManagerTest ( test_util . TensorFlowTestCase ) : <nl> + <nl> + def testBasic ( self ) : <nl> + cm = TestContextManager ( ) <nl> + <nl> + def body ( var ) : <nl> + cm . log . append ( " body ( % r ) " % var ) <nl> + <nl> + _py_context_manager . test_py_context_manager ( cm , body ) <nl> + self . assertEqual ( " \ n " . join ( cm . log ) , NO_EXCEPTION_LOG ) <nl> + <nl> + def testBodyRaisesException ( self ) : <nl> + cm = TestContextManager ( ) <nl> + <nl> + def body ( var ) : <nl> + cm . log . append ( " body ( % r ) " % var ) <nl> + raise ValueError ( " Foo " ) <nl> + <nl> + with self . assertRaisesRegexp ( ValueError , " Foo " ) : <nl> + _py_context_manager . test_py_context_manager ( cm , body ) <nl> + self . assertRegex ( " \ n " . join ( cm . log ) , EXCEPTION_LOG ) <nl> + <nl> + def testEnterRaisesException ( self ) : <nl> + cm = TestContextManager ( " raise_from_enter " ) <nl> + <nl> + def body ( var ) : <nl> + cm . log . append ( " body ( % r ) " % var ) <nl> + <nl> + with self . assertRaisesRegexp ( ValueError , " exception in __enter__ " ) : <nl> + _py_context_manager . test_py_context_manager ( cm , body ) <nl> + self . assertEqual ( " \ n " . join ( cm . log ) , " __enter__ ( ) " ) <nl> + <nl> + # Test behavior in unsupported case where __exit__ raises an exception . <nl> + def testExitRaisesException ( self ) : <nl> + cm = TestContextManager ( " raise_from_exit " ) <nl> + <nl> + def body ( var ) : <nl> + cm . log . append ( " body ( % r ) " % var ) <nl> + <nl> + # Note : this does * not * raise an exception ( but does log a warning ) : <nl> + _py_context_manager . test_py_context_manager ( cm , body ) <nl> + self . assertEqual ( " \ n " . join ( cm . log ) , NO_EXCEPTION_LOG ) <nl> + <nl> + # Test behavior in unsupported case where __exit__ suppresses exception . <nl> + def testExitSuppressesException ( self ) : <nl> + cm = TestContextManager ( " suppress_exception " ) <nl> + <nl> + def body ( var ) : <nl> + cm . log . append ( " body ( % r ) " % var ) <nl> + raise ValueError ( " Foo " ) <nl> + <nl> + with self . assertRaisesRegexp ( <nl> + ValueError , " tensorflow : : PyContextManager : : Enter does not support " <nl> + " context managers that suppress exception " ) : <nl> + _py_context_manager . test_py_context_manager ( cm , body ) <nl> + self . assertRegex ( " \ n " . join ( cm . log ) , EXCEPTION_LOG ) <nl> + <nl> + <nl> + if __name__ = = " __main__ " : <nl> + googletest . main ( ) <nl> mmm a / tensorflow / python / keras / backend . py <nl> ppp b / tensorflow / python / keras / backend . py <nl> def categorical_crossentropy ( target , output , from_logits = False , axis = - 1 ) : <nl> from_logits : Boolean , whether ` output ` is the <nl> result of a softmax , or is a tensor of logits . <nl> axis : Int specifying the channels axis . 
` axis = - 1 ` corresponds to data <nl> - format ` channels_last ' , and ` axis = 1 ` corresponds to data format <nl> + format ` channels_last ` , and ` axis = 1 ` corresponds to data format <nl> ` channels_first ` . <nl> <nl> Returns : <nl> def sparse_categorical_crossentropy ( target , output , from_logits = False , axis = - 1 ) : <nl> from_logits : Boolean , whether ` output ` is the <nl> result of a softmax , or is a tensor of logits . <nl> axis : Int specifying the channels axis . ` axis = - 1 ` corresponds to data <nl> - format ` channels_last ' , and ` axis = 1 ` corresponds to data format <nl> + format ` channels_last ` , and ` axis = 1 ` corresponds to data format <nl> ` channels_first ` . <nl> <nl> Returns : <nl> mmm a / tensorflow / python / keras / callbacks . py <nl> ppp b / tensorflow / python / keras / callbacks . py <nl> def on_train_batch_begin ( self , batch , logs = None ) : <nl> <nl> Subclasses should override for any actions to run . <nl> <nl> + Note that if the ` steps_per_execution ` argument to ` compile ` in <nl> + ` tf . keras . Model ` is set to ` N ` , this method will only be called every ` N ` <nl> + batches . <nl> + <nl> Arguments : <nl> batch : Integer , index of batch within the current epoch . <nl> logs : Dict , contains the return value of ` model . train_step ` . Typically , <nl> def on_train_batch_end ( self , batch , logs = None ) : <nl> <nl> Subclasses should override for any actions to run . <nl> <nl> + Note that if the ` steps_per_execution ` argument to ` compile ` in <nl> + ` tf . keras . Model ` is set to ` N ` , this method will only be called every ` N ` <nl> + batches . <nl> + <nl> Arguments : <nl> batch : Integer , index of batch within the current epoch . <nl> logs : Dict . Aggregated metric results up until this batch . <nl> def on_test_batch_begin ( self , batch , logs = None ) : <nl> <nl> Subclasses should override for any actions to run . <nl> <nl> + Note that if the ` steps_per_execution ` argument to ` compile ` in <nl> + ` tf . keras . Model ` is set to ` N ` , this method will only be called every ` N ` <nl> + batches . <nl> + <nl> Arguments : <nl> batch : Integer , index of batch within the current epoch . <nl> logs : Dict , contains the return value of ` model . test_step ` . Typically , <nl> def on_test_batch_end ( self , batch , logs = None ) : <nl> <nl> Subclasses should override for any actions to run . <nl> <nl> + Note that if the ` steps_per_execution ` argument to ` compile ` in <nl> + ` tf . keras . Model ` is set to ` N ` , this method will only be called every ` N ` <nl> + batches . <nl> + <nl> Arguments : <nl> batch : Integer , index of batch within the current epoch . <nl> logs : Dict . Aggregated metric results up until this batch . <nl> def on_predict_batch_begin ( self , batch , logs = None ) : <nl> <nl> Subclasses should override for any actions to run . <nl> <nl> + Note that if the ` steps_per_execution ` argument to ` compile ` in <nl> + ` tf . keras . Model ` is set to ` N ` , this method will only be called every ` N ` <nl> + batches . <nl> + <nl> Arguments : <nl> batch : Integer , index of batch within the current epoch . <nl> logs : Dict , contains the return value of ` model . predict_step ` , <nl> def on_predict_batch_end ( self , batch , logs = None ) : <nl> <nl> Subclasses should override for any actions to run . <nl> <nl> + Note that if the ` steps_per_execution ` argument to ` compile ` in <nl> + ` tf . keras . Model ` is set to ` N ` , this method will only be called every ` N ` <nl> + batches . 
<nl> + <nl> Arguments : <nl> batch : Integer , index of batch within the current epoch . <nl> logs : Dict . Aggregated metric results up until this batch . <nl> mmm a / tensorflow / python / kernel_tests / BUILD <nl> ppp b / tensorflow / python / kernel_tests / BUILD <nl> cuda_py_test ( <nl> name = " dynamic_partition_op_test " , <nl> size = " medium " , <nl> srcs = [ " dynamic_partition_op_test . py " ] , <nl> + tags = [ <nl> + " multi_and_single_gpu " , <nl> + ] , <nl> tfrt_enabled = True , <nl> deps = [ <nl> " / / tensorflow / python : array_ops " , <nl> mmm a / tensorflow / python / kernel_tests / array_ops_test . py <nl> ppp b / tensorflow / python / kernel_tests / array_ops_test . py <nl> def testTypeErrorResource ( self ) : <nl> with self . assertRaises ( ValueError ) : <nl> sess . run ( v [ : ] . assign ( too_small_val ) ) <nl> <nl> + @ test_util . disable_xla ( " b / 123559667 " ) <nl> @ test_util . run_in_graph_and_eager_modes <nl> def testTensorStridedSliceUpdateWithInputForward ( self ) : <nl> " " " Tests tensor_strided_slice_update with input - forwarding taking effect . " " " <nl> def assign ( x ) : <nl> return gen_array_ops . tensor_strided_slice_update ( y , [ 0 ] , [ 1 ] , [ 1 ] , [ 0 ] ) <nl> self . assertAllEqual ( [ 0 , 1 ] , self . evaluate ( assign ( array_ops . zeros ( [ 2 ] ) ) ) ) <nl> <nl> + @ test_util . disable_xla ( " b / 123559667 " ) <nl> @ test_util . run_in_graph_and_eager_modes <nl> def testTensorStridedSliceUpdateNoInputForward ( self ) : <nl> " " " Tests tensor_strided_slice_update with no input - forwarding . " " " <nl> def testTensorStridedSliceUpdateNoInputForward ( self ) : <nl> ans = y + z <nl> self . assertAllClose ( [ 1 . 6 , 2 . 6 ] , self . evaluate ( ans ) ) <nl> <nl> + @ test_util . disable_xla ( " b / 123559667 " ) <nl> def testTensorStridedSliceUpdateGradSimple ( self ) : <nl> original = constant_op . constant ( [ 0 . 2 , 0 . 3 ] ) <nl> updates = constant_op . constant ( [ 0 . 4 ] ) <nl> def testTensorStridedSliceUpdateGradSimple ( self ) : <nl> ( [ 4 ] , [ 5 ] , [ 3 ] , [ 1 ] , [ 3 ] , 1 , 0 , 0 , 0 , 0 ) , <nl> ( [ 2 , 2 , 3 , 2 ] , [ 0 , 0 , 1 ] , [ 1 , 0 , 2 ] , [ 1 , 0 , 1 ] , [ 2 , 3 ] , 0 , 0 , 2 , 0 , 5 ) <nl> ] ) ) <nl> + @ test_util . disable_xla ( " b / 123559667 " ) <nl> def testTensorStridedSliceUpdateGrad ( <nl> self , shape , begin , end , strides , updates_shape , * args ) : <nl> with self . cached_session ( ) : <nl> mmm a / tensorflow / python / kernel_tests / conv_ops_test . py <nl> ppp b / tensorflow / python / kernel_tests / conv_ops_test . py <nl> def _VerifyDilatedConvValues ( self , tensor_in_sizes , filter_in_sizes , strides , <nl> data_format , use_gpu ) <nl> expected_results . append ( expected ) <nl> computed_results . append ( computed ) <nl> - tolerance = 1e - 2 if use_gpu else 1e - 5 <nl> - expected_values = self . evaluate ( expected_results ) <nl> - computed_values = self . evaluate ( computed_results ) <nl> - for e_value , c_value in zip ( expected_values , computed_values ) : <nl> - tf_logging . debug ( " expected = % s " , e_value ) <nl> - tf_logging . debug ( " actual = % s " , c_value ) <nl> - self . assertAllClose ( <nl> - e_value . flatten ( ) , c_value . flatten ( ) , atol = tolerance , rtol = rtol ) <nl> + tolerance = 1e - 2 if use_gpu else 1e - 5 <nl> + expected_values = self . evaluate ( expected_results ) <nl> + computed_values = self . evaluate ( computed_results ) <nl> + for e_value , c_value in zip ( expected_values , computed_values ) : <nl> + tf_logging . 
debug ( " expected = % s " , e_value ) <nl> + tf_logging . debug ( " actual = % s " , c_value ) <nl> + self . assertAllClose ( <nl> + e_value . flatten ( ) , c_value . flatten ( ) , atol = tolerance , rtol = rtol ) <nl> <nl> def _VerifyValues ( self , <nl> tensor_in_sizes , <nl> mmm a / tensorflow / python / kernel_tests / dynamic_partition_op_test . py <nl> ppp b / tensorflow / python / kernel_tests / dynamic_partition_op_test . py <nl> <nl> import numpy as np <nl> from six . moves import xrange # pylint : disable = redefined - builtin <nl> <nl> + from tensorflow . python . framework import config <nl> from tensorflow . python . framework import constant_op <nl> from tensorflow . python . framework import dtypes <nl> + from tensorflow . python . framework import ops <nl> from tensorflow . python . framework import test_util <nl> from tensorflow . python . ops import array_ops <nl> from tensorflow . python . ops import data_flow_ops <nl> def testCUBBug ( self ) : <nl> res = self . evaluate ( partitioned ) <nl> self . assertEqual ( res [ - 1 ] . shape [ 0 ] , 192 ) <nl> <nl> + # see https : / / github . com / tensorflow / tensorflow / issues / 42500 <nl> + def testMultiGPU ( self ) : <nl> + device_list = config . list_logical_devices ( " GPU " ) <nl> + results = [ ] <nl> + for device in device_list : <nl> + with ops . device ( device . name ) : <nl> + data = constant_op . constant ( np . zeros ( ( 1000 , ) ) ) <nl> + partitions = constant_op . constant ( np . arange ( 1000 , dtype = np . int32 ) % 10 ) <nl> + result = data_flow_ops . dynamic_partition ( data , partitions , 10 ) <nl> + results . append ( self . evaluate ( result ) ) <nl> + if device_list : <nl> + self . assertAllEqual ( results , np . zeros ( ( len ( device_list ) , 10 , 100 ) ) ) <nl> + <nl> <nl> if __name__ = = " __main__ " : <nl> test . main ( ) <nl> mmm a / tensorflow / python / profiler / internal / profiler_wrapper . cc <nl> ppp b / tensorflow / python / profiler / internal / profiler_wrapper . cc <nl> limitations under the License . <nl> # include < memory > <nl> <nl> # include " absl / memory / memory . h " <nl> + # include " absl / strings / match . h " <nl> # include " absl / strings / numbers . h " <nl> # include " pybind11 / pybind11 . h " <nl> # include " pybind11 / pytypes . h " <nl> tensorflow : : Status ValidateHostPortPair ( const std : : string & host_port ) { <nl> / / Must be host : port , port must be a number , host must not contain a ' / ' , <nl> / / host also must not be empty . <nl> if ( parts . size ( ) ! = 2 | | ! absl : : SimpleAtoi ( parts [ 1 ] , & port ) | | <nl> - parts [ 0 ] . find ( " / " ) ! = std : : string : : npos | | parts [ 0 ] . empty ( ) ) { <nl> + absl : : StrContains ( parts [ 0 ] , " / " ) | | parts [ 0 ] . empty ( ) ) { <nl> return tensorflow : : errors : : InvalidArgument ( <nl> " Could not interpret \ " " , host_port , " \ " as a host - port pair . " ) ; <nl> } <nl> PYBIND11_MODULE ( _pywrap_profiler , m ) { <nl> . def ( " export_to_tb " , & ProfilerSessionWrapper : : ExportToTensorBoard ) ; <nl> <nl> m . def ( " start_server " , [ ] ( int port ) { <nl> - auto profiler_server = absl : : make_unique < tensorflow : : ProfilerServer > ( ) ; <nl> + auto profiler_server = <nl> + absl : : make_unique < tensorflow : : profiler : : ProfilerServer > ( ) ; <nl> profiler_server - > StartProfilerServer ( port ) ; <nl> / / Intentionally release profiler server . Should transfer ownership to <nl> / / caller instead . 
<nl> mmm a / tensorflow / python / saved_model / BUILD <nl> ppp b / tensorflow / python / saved_model / BUILD <nl> tf_py_test ( <nl> " / / tensorflow / python : framework_test_lib " , <nl> " / / tensorflow / python : lib " , <nl> " / / tensorflow / python : math_ops " , <nl> - " / / tensorflow / python : saver_test_utils " , <nl> " / / tensorflow / python : session " , <nl> " / / tensorflow / python : state_ops " , <nl> " / / tensorflow / python : test_ops " , <nl> " / / tensorflow / python : training " , <nl> " / / tensorflow / python : util " , <nl> " / / tensorflow / python : variables " , <nl> + " / / tensorflow / python / training : saver_test_utils " , <nl> ] , <nl> ) <nl> <nl> py_strict_library ( <nl> " / / tensorflow / python : platform " , <nl> " / / tensorflow / python : saver " , <nl> " / / tensorflow / python : sparse_tensor " , <nl> - " / / tensorflow / python : training_lib " , <nl> " / / tensorflow / python / eager : context " , <nl> " / / tensorflow / python / eager : lift_to_graph " , <nl> " / / tensorflow / python / eager : wrap_function " , <nl> + " / / tensorflow / python / training : monitored_session " , <nl> " / / tensorflow / python / training / tracking " , <nl> ] , <nl> ) <nl> new file mode 100644 <nl> index 0000000000000 . . 0e864b176d66c <nl> mmm / dev / null <nl> ppp b / tensorflow / python / training / BUILD <nl> <nl> + load ( " / / tensorflow / core / platform : build_config . bzl " , " tf_proto_library " ) <nl> + load ( " / / tensorflow : tensorflow . bzl " , " tf_py_test " ) <nl> + <nl> + # buildifier : disable = same - origin - load <nl> + load ( " / / tensorflow : tensorflow . bzl " , " cuda_py_test " ) <nl> + <nl> + # buildifier : disable = same - origin - load <nl> + load ( " / / tensorflow : tensorflow . bzl " , " cuda_py_tests " ) <nl> + <nl> + package ( <nl> + default_visibility = [ " / / tensorflow : internal " ] , <nl> + licenses = [ " notice " ] , # Apache 2 . 0 <nl> + ) <nl> + <nl> + exports_files ( <nl> + # Used in a pybind extension whose rule must be in tensorflow / python <nl> + [ " quantize_training_wrapper . cc " ] , <nl> + visibility = [ " / / tensorflow / python : __pkg__ " ] , <nl> + ) <nl> + <nl> + exports_files ( <nl> + # Used in a rule which visibility limits to tensorflow / python <nl> + [ " learning_rate_decay . py " ] , <nl> + visibility = [ " / / tensorflow / python : __pkg__ " ] , <nl> + ) <nl> + <nl> + # Files which have their own BUILD rules , but which for compatibility with <nl> + # strict dep checking need to be direct dependencies of training_lib . Do not add <nl> + # any new files to this list . <nl> + filegroup ( <nl> + name = " deprecated_inclusions_in_training_lib " , <nl> + srcs = [ <nl> + " adadelta . py " , <nl> + " adagrad . py " , <nl> + " adagrad_da . py " , <nl> + " adam . py " , <nl> + " basic_loops . py " , <nl> + " checkpoint_ops . py " , <nl> + " checkpoint_utils . py " , <nl> + " coordinator . py " , <nl> + " device_setter . py " , <nl> + " evaluation . py " , <nl> + " ftrl . py " , <nl> + " gradient_descent . py " , <nl> + " input . py " , <nl> + " learning_rate_decay . py " , <nl> + " momentum . py " , <nl> + " monitored_session . py " , <nl> + " moving_averages . py " , <nl> + " optimizer . py " , <nl> + " proximal_adagrad . py " , <nl> + " proximal_gradient_descent . py " , <nl> + " py_checkpoint_reader . py " , <nl> + " quantize_training . py " , <nl> + " queue_runner . py " , <nl> + " queue_runner_impl . py " , <nl> + " rmsprop . py " , <nl> + " server_lib . py " , <nl> + " session_manager . 
py " , <nl> + " slot_creator . py " , <nl> + " summary_io . py " , <nl> + " supervisor . py " , <nl> + " sync_replicas_optimizer . py " , <nl> + " tensorboard_logging . py " , <nl> + " training . py " , <nl> + " training_ops . py " , <nl> + " warm_starting_util . py " , <nl> + ] , <nl> + visibility = [ " / / tensorflow / python / training : __pkg__ " ] , <nl> + ) <nl> + <nl> + py_library ( <nl> + name = " training_lib " , <nl> + srcs = [ <nl> + " __init__ . py " , <nl> + " training . py " , <nl> + " : deprecated_inclusions_in_training_lib " , <nl> + ] , <nl> + srcs_version = " PY2AND3 " , <nl> + deps = [ <nl> + " : adadelta " , <nl> + " : adagrad " , <nl> + " : adagrad_da " , <nl> + " : adam " , <nl> + " : basic_loops " , <nl> + " : basic_session_run_hooks " , <nl> + " : checkpoint_management " , <nl> + " : checkpoint_utils " , <nl> + " : coordinator " , <nl> + " : device_setter " , <nl> + " : ftrl " , <nl> + " : gradient_descent " , <nl> + " : input " , <nl> + " : momentum " , <nl> + " : monitored_session " , <nl> + " : moving_averages " , <nl> + " : optimizer " , <nl> + " : proximal_adagrad " , <nl> + " : proximal_gradient_descent " , <nl> + " : py_checkpoint_reader " , <nl> + " : quantize_training " , <nl> + " : queue_runner " , <nl> + " : rmsprop " , <nl> + " : saver " , <nl> + " : server_lib " , <nl> + " : session_manager " , <nl> + " : session_run_hook " , <nl> + " : summary_io " , <nl> + " : supervisor " , <nl> + " : sync_replicas_optimizer " , <nl> + " : tensorboard_logging " , <nl> + " : training_util " , <nl> + " : warm_starting_util " , <nl> + " / / tensorflow / python : learning_rate_decay " , <nl> + " / / tensorflow / python : sdca_ops " , <nl> + " / / tensorflow / python : tf_export " , <nl> + " / / tensorflow / python / training / experimental : loss_scale_optimizer " , <nl> + " / / tensorflow / python / training / experimental : mixed_precision " , <nl> + ] , <nl> + ) <nl> + <nl> + py_library ( <nl> + name = " training " , <nl> + srcs_version = " PY2AND3 " , <nl> + deps = [ <nl> + " : training_lib " , <nl> + " / / tensorflow / python / training / tracking : base " , <nl> + " / / tensorflow / python / training / tracking : python_state " , <nl> + " / / tensorflow / python / training / tracking : util " , <nl> + ] , <nl> + ) <nl> + <nl> + py_library ( <nl> + name = " adadelta " , <nl> + srcs = [ " adadelta . py " ] , <nl> + srcs_version = " PY2AND3 " , <nl> + deps = [ <nl> + " : optimizer " , <nl> + " : training_ops " , <nl> + " / / tensorflow / python : framework_ops " , <nl> + " / / tensorflow / python : math_ops " , <nl> + " / / tensorflow / python : tf_export " , <nl> + ] , <nl> + ) <nl> + <nl> + py_library ( <nl> + name = " adagrad_da " , <nl> + srcs = [ " adagrad_da . py " ] , <nl> + srcs_version = " PY2AND3 " , <nl> + deps = [ <nl> + " : optimizer " , <nl> + " : training_ops " , <nl> + " / / tensorflow / python : array_ops " , <nl> + " / / tensorflow / python : constant_op " , <nl> + " / / tensorflow / python : framework_ops " , <nl> + " / / tensorflow / python : math_ops " , <nl> + " / / tensorflow / python : tf_export " , <nl> + ] , <nl> + ) <nl> + <nl> + py_library ( <nl> + name = " adagrad " , <nl> + srcs = [ " adagrad . 
py " ] , <nl> + srcs_version = " PY2AND3 " , <nl> + deps = [ <nl> + " : optimizer " , <nl> + " : training_ops " , <nl> + " / / tensorflow / python : array_ops " , <nl> + " / / tensorflow / python : array_ops_gen " , <nl> + " / / tensorflow / python : framework_ops " , <nl> + " / / tensorflow / python : init_ops " , <nl> + " / / tensorflow / python : math_ops " , <nl> + " / / tensorflow / python : tf_export " , <nl> + ] , <nl> + ) <nl> + <nl> + py_library ( <nl> + name = " adam " , <nl> + srcs = [ " adam . py " ] , <nl> + srcs_version = " PY2AND3 " , <nl> + deps = [ <nl> + " : optimizer " , <nl> + " : training_ops " , <nl> + " / / tensorflow / python : control_flow_ops " , <nl> + " / / tensorflow / python : framework_ops " , <nl> + " / / tensorflow / python : math_ops " , <nl> + " / / tensorflow / python : resource_variable_ops " , <nl> + " / / tensorflow / python : state_ops " , <nl> + " / / tensorflow / python : tf_export " , <nl> + " / / tensorflow / python / eager : context " , <nl> + ] , <nl> + ) <nl> + <nl> + py_library ( <nl> + name = " basic_loops " , <nl> + srcs = [ " basic_loops . py " ] , <nl> + srcs_version = " PY2AND3 " , <nl> + deps = [ <nl> + " / / tensorflow / python : errors " , <nl> + " / / tensorflow / python : tf_export " , <nl> + ] , <nl> + ) <nl> + <nl> + py_library ( <nl> + name = " checkpoint_ops " , <nl> + srcs = [ " checkpoint_ops . py " ] , <nl> + srcs_version = " PY2AND3 " , <nl> + deps = [ <nl> + " / / tensorflow / python : array_ops " , <nl> + " / / tensorflow / python : checkpoint_ops_gen " , <nl> + " / / tensorflow / python : dtypes " , <nl> + " / / tensorflow / python : framework_ops " , <nl> + " / / tensorflow / python : init_ops " , <nl> + " / / tensorflow / python : math_ops " , <nl> + ] , <nl> + ) <nl> + <nl> + py_library ( <nl> + name = " checkpoint_utils " , <nl> + srcs = [ " checkpoint_utils . py " ] , <nl> + srcs_version = " PY2AND3 " , <nl> + deps = [ <nl> + " : checkpoint_management " , <nl> + " : py_checkpoint_reader " , <nl> + " / / tensorflow / python : framework_ops " , <nl> + " / / tensorflow / python : io_ops " , <nl> + " / / tensorflow / python : platform " , <nl> + " / / tensorflow / python : resource_variable_ops " , <nl> + " / / tensorflow / python : tf_export " , <nl> + " / / tensorflow / python : variable_scope " , <nl> + " / / tensorflow / python : variables " , <nl> + " / / tensorflow / python / distribute : distribute_lib " , <nl> + " / / tensorflow / python / training / saving : saveable_object_util " , <nl> + " @ six_archive / / : six " , <nl> + ] , <nl> + ) <nl> + <nl> + py_library ( <nl> + name = " coordinator " , <nl> + srcs = [ " coordinator . py " ] , <nl> + srcs_version = " PY2AND3 " , <nl> + deps = [ <nl> + " / / tensorflow / python : errors " , <nl> + " / / tensorflow / python : platform " , <nl> + " / / tensorflow / python : tf_export " , <nl> + " / / tensorflow / python : util " , <nl> + " @ six_archive / / : six " , <nl> + ] , <nl> + ) <nl> + <nl> + py_library ( <nl> + name = " device_setter " , <nl> + srcs = [ " device_setter . py " ] , <nl> + srcs_version = " PY2AND3 " , <nl> + deps = [ <nl> + " : server_lib " , <nl> + " / / tensorflow / python : device " , <nl> + " / / tensorflow / python : platform " , <nl> + " / / tensorflow / python : tf_export " , <nl> + " @ six_archive / / : six " , <nl> + ] , <nl> + ) <nl> + <nl> + py_library ( <nl> + name = " distribution_strategy_context " , <nl> + srcs = [ " distribution_strategy_context . 
py " ] , <nl> + srcs_version = " PY2AND3 " , <nl> + deps = [ " / / tensorflow / python / distribute : distribute_lib " ] , <nl> + ) <nl> + <nl> + py_library ( <nl> + name = " evaluation " , <nl> + srcs = [ " evaluation . py " ] , <nl> + srcs_version = " PY2AND3 " , <nl> + deps = [ <nl> + " : basic_session_run_hooks " , <nl> + " : monitored_session " , <nl> + " : session_run_hook " , <nl> + " / / tensorflow / python : array_ops " , <nl> + " / / tensorflow / python : dtypes " , <nl> + " / / tensorflow / python : framework_ops " , <nl> + " / / tensorflow / python : init_ops " , <nl> + " / / tensorflow / python : math_ops " , <nl> + " / / tensorflow / python : platform " , <nl> + " / / tensorflow / python : state_ops " , <nl> + " / / tensorflow / python : variable_scope " , <nl> + ] , <nl> + ) <nl> + <nl> + py_library ( <nl> + name = " ftrl " , <nl> + srcs = [ " ftrl . py " ] , <nl> + srcs_version = " PY2AND3 " , <nl> + deps = [ <nl> + " : optimizer " , <nl> + " : training_ops " , <nl> + " / / tensorflow / python : constant_op " , <nl> + " / / tensorflow / python : framework_ops " , <nl> + " / / tensorflow / python : math_ops " , <nl> + " / / tensorflow / python : tf_export " , <nl> + ] , <nl> + ) <nl> + <nl> + py_library ( <nl> + name = " gradient_descent " , <nl> + srcs = [ " gradient_descent . py " ] , <nl> + srcs_version = " PY2AND3 " , <nl> + deps = [ <nl> + " : optimizer " , <nl> + " : training_ops " , <nl> + " / / tensorflow / python : framework_ops " , <nl> + " / / tensorflow / python : math_ops " , <nl> + " / / tensorflow / python : resource_variable_ops " , <nl> + " / / tensorflow / python : tf_export " , <nl> + ] , <nl> + ) <nl> + <nl> + py_library ( <nl> + name = " input " , <nl> + srcs = [ " input . py " ] , <nl> + srcs_version = " PY2AND3 " , <nl> + deps = [ <nl> + " : queue_runner " , <nl> + " / / tensorflow / python : array_ops " , <nl> + " / / tensorflow / python : constant_op " , <nl> + " / / tensorflow / python : control_flow_ops " , <nl> + " / / tensorflow / python : data_flow_ops " , <nl> + " / / tensorflow / python : dtypes " , <nl> + " / / tensorflow / python : framework_ops " , <nl> + " / / tensorflow / python : io_ops " , <nl> + " / / tensorflow / python : layers_util " , <nl> + " / / tensorflow / python : math_ops " , <nl> + " / / tensorflow / python : random_ops " , <nl> + " / / tensorflow / python : sparse_ops " , <nl> + " / / tensorflow / python : sparse_tensor " , <nl> + " / / tensorflow / python : summary " , <nl> + " / / tensorflow / python : tensor_shape " , <nl> + " / / tensorflow / python : tf_export " , <nl> + " / / tensorflow / python : util " , <nl> + " / / tensorflow / python : variable_scope " , <nl> + " / / tensorflow / python / eager : context " , <nl> + " @ six_archive / / : six " , <nl> + ] , <nl> + ) <nl> + <nl> + py_library ( <nl> + name = " momentum " , <nl> + srcs = [ " momentum . py " ] , <nl> + srcs_version = " PY2AND3 " , <nl> + deps = [ <nl> + " : optimizer " , <nl> + " : training_ops " , <nl> + " / / tensorflow / python : framework_ops " , <nl> + " / / tensorflow / python : math_ops " , <nl> + " / / tensorflow / python : tf_export " , <nl> + ] , <nl> + ) <nl> + <nl> + py_library ( <nl> + name = " moving_averages " , <nl> + srcs = [ " moving_averages . 
py " ] , <nl> + srcs_version = " PY2AND3 " , <nl> + deps = [ <nl> + " : slot_creator " , <nl> + " / / tensorflow / python : control_flow_ops " , <nl> + " / / tensorflow / python : dtypes " , <nl> + " / / tensorflow / python : framework_ops " , <nl> + " / / tensorflow / python : init_ops " , <nl> + " / / tensorflow / python : math_ops " , <nl> + " / / tensorflow / python : state_ops " , <nl> + " / / tensorflow / python : tf_export " , <nl> + " / / tensorflow / python : variable_scope " , <nl> + " / / tensorflow / python : variables " , <nl> + " / / tensorflow / python / distribute : distribute_lib " , <nl> + " / / tensorflow / python / distribute : reduce_util " , <nl> + ] , <nl> + ) <nl> + <nl> + py_library ( <nl> + name = " optimizer " , <nl> + srcs = [ " optimizer . py " ] , <nl> + srcs_version = " PY2AND3 " , <nl> + deps = [ <nl> + " : slot_creator " , <nl> + " / / tensorflow / python : array_ops " , <nl> + " / / tensorflow / python : control_flow_ops " , <nl> + " / / tensorflow / python : dtypes " , <nl> + " / / tensorflow / python : framework_ops " , <nl> + " / / tensorflow / python : gradients " , <nl> + " / / tensorflow / python : math_ops " , <nl> + " / / tensorflow / python : resource_variable_ops " , <nl> + " / / tensorflow / python : state_ops " , <nl> + " / / tensorflow / python : tf_export " , <nl> + " / / tensorflow / python : util " , <nl> + " / / tensorflow / python : variable_scope " , <nl> + " / / tensorflow / python : variables " , <nl> + " / / tensorflow / python / distribute : distribute_lib " , <nl> + " / / tensorflow / python / distribute : reduce_util " , <nl> + " / / tensorflow / python / eager : backprop " , <nl> + " / / tensorflow / python / eager : context " , <nl> + " / / tensorflow / python / training / tracking : base " , <nl> + " @ six_archive / / : six " , <nl> + ] , <nl> + ) <nl> + <nl> + py_library ( <nl> + name = " proximal_adagrad " , <nl> + srcs = [ " proximal_adagrad . py " ] , <nl> + srcs_version = " PY2AND3 " , <nl> + deps = [ <nl> + " : optimizer " , <nl> + " : training_ops " , <nl> + " / / tensorflow / python : constant_op " , <nl> + " / / tensorflow / python : framework_ops " , <nl> + " / / tensorflow / python : math_ops " , <nl> + " / / tensorflow / python : tf_export " , <nl> + ] , <nl> + ) <nl> + <nl> + py_library ( <nl> + name = " proximal_gradient_descent " , <nl> + srcs = [ " proximal_gradient_descent . py " ] , <nl> + srcs_version = " PY2AND3 " , <nl> + deps = [ <nl> + " : optimizer " , <nl> + " : training_ops " , <nl> + " / / tensorflow / python : framework_ops " , <nl> + " / / tensorflow / python : math_ops " , <nl> + " / / tensorflow / python : tf_export " , <nl> + ] , <nl> + ) <nl> + <nl> + py_library ( <nl> + name = " quantize_training " , <nl> + srcs = [ " quantize_training . py " ] , <nl> + srcs_version = " PY2AND3 " , <nl> + deps = [ <nl> + " / / tensorflow / python : _pywrap_quantize_training " , <nl> + " / / tensorflow / python : tf_export " , <nl> + " / / tensorflow / python : util " , <nl> + ] , <nl> + ) <nl> + <nl> + py_library ( <nl> + name = " queue_runner_impl " , <nl> + srcs = [ " queue_runner_impl . 
py " ] , <nl> + srcs_version = " PY2AND3 " , <nl> + deps = [ <nl> + " / / tensorflow / python : errors " , <nl> + " / / tensorflow / python : framework_ops " , <nl> + " / / tensorflow / python : platform " , <nl> + " / / tensorflow / python : session " , <nl> + " / / tensorflow / python : tf_export " , <nl> + " / / tensorflow / python : util " , <nl> + " / / tensorflow / python / eager : context " , <nl> + ] , <nl> + ) <nl> + <nl> + py_library ( <nl> + name = " queue_runner " , <nl> + srcs = [ " queue_runner . py " ] , <nl> + srcs_version = " PY2AND3 " , <nl> + deps = [ " : queue_runner_impl " ] , <nl> + ) <nl> + <nl> + py_library ( <nl> + name = " rmsprop " , <nl> + srcs = [ " rmsprop . py " ] , <nl> + srcs_version = " PY2AND3 " , <nl> + deps = [ <nl> + " : optimizer " , <nl> + " : training_ops " , <nl> + " / / tensorflow / python : array_ops " , <nl> + " / / tensorflow / python : framework_ops " , <nl> + " / / tensorflow / python : init_ops " , <nl> + " / / tensorflow / python : math_ops " , <nl> + " / / tensorflow / python : tf_export " , <nl> + ] , <nl> + ) <nl> + <nl> + py_library ( <nl> + name = " session_manager " , <nl> + srcs = [ " session_manager . py " ] , <nl> + srcs_version = " PY2AND3 " , <nl> + deps = [ <nl> + " : checkpoint_management " , <nl> + " / / tensorflow / python : errors " , <nl> + " / / tensorflow / python : framework_ops " , <nl> + " / / tensorflow / python : platform " , <nl> + " / / tensorflow / python : session " , <nl> + " / / tensorflow / python : tf_export " , <nl> + " / / tensorflow / python / distribute : distribute_lib " , <nl> + " / / third_party / py / numpy " , <nl> + ] , <nl> + ) <nl> + <nl> + py_library ( <nl> + name = " slot_creator " , <nl> + srcs = [ " slot_creator . py " ] , <nl> + srcs_version = " PY2AND3 " , <nl> + deps = [ <nl> + " / / tensorflow / python : array_ops " , <nl> + " / / tensorflow / python : init_ops " , <nl> + " / / tensorflow / python : resource_variable_ops " , <nl> + " / / tensorflow / python : variable_scope " , <nl> + " / / tensorflow / python : variables " , <nl> + " / / tensorflow / python / distribute : distribute_lib " , <nl> + ] , <nl> + ) <nl> + <nl> + py_library ( <nl> + name = " summary_io " , <nl> + srcs = [ " summary_io . py " ] , <nl> + srcs_version = " PY2AND3 " , <nl> + deps = [ <nl> + " / / tensorflow / python : util " , <nl> + ] , <nl> + ) <nl> + <nl> + py_library ( <nl> + name = " sync_replicas_optimizer " , <nl> + srcs = [ " sync_replicas_optimizer . py " ] , <nl> + srcs_version = " PY2AND3 " , <nl> + deps = [ <nl> + " : optimizer " , <nl> + " : queue_runner " , <nl> + " : session_manager " , <nl> + " : session_run_hook " , <nl> + " / / tensorflow / python : array_ops " , <nl> + " / / tensorflow / python : control_flow_ops " , <nl> + " / / tensorflow / python : data_flow_ops " , <nl> + " / / tensorflow / python : framework_ops " , <nl> + " / / tensorflow / python : platform " , <nl> + " / / tensorflow / python : state_ops " , <nl> + " / / tensorflow / python : tf_export " , <nl> + " / / tensorflow / python : util " , <nl> + " / / tensorflow / python : variable_scope " , <nl> + " / / tensorflow / python : variables " , <nl> + " / / tensorflow / python / distribute : distribute_lib " , <nl> + ] , <nl> + ) <nl> + <nl> + py_library ( <nl> + name = " tensorboard_logging " , <nl> + srcs = [ " tensorboard_logging . 
py " ] , <nl> + srcs_version = " PY2AND3 " , <nl> + deps = [ <nl> + " / / tensorflow / python : platform " , <nl> + ] , <nl> + ) <nl> + <nl> + py_library ( <nl> + name = " training_ops " , <nl> + srcs = [ <nl> + " gen_training_ops . py " , <nl> + " training_ops . py " , <nl> + ] , <nl> + srcs_version = " PY2AND3 " , <nl> + deps = [ <nl> + " / / tensorflow / python : training_ops_gen " , <nl> + ] , <nl> + ) <nl> + <nl> + py_library ( <nl> + name = " warm_starting_util " , <nl> + srcs = [ " warm_starting_util . py " ] , <nl> + srcs_version = " PY2AND3 " , <nl> + deps = [ <nl> + " : checkpoint_ops " , <nl> + " : checkpoint_utils " , <nl> + " : saver " , <nl> + " / / tensorflow / python : errors " , <nl> + " / / tensorflow / python : framework_ops " , <nl> + " / / tensorflow / python : platform " , <nl> + " / / tensorflow / python : state_ops " , <nl> + " / / tensorflow / python : tf_export " , <nl> + " / / tensorflow / python : variable_scope " , <nl> + " / / tensorflow / python : variables " , <nl> + " / / tensorflow / python / training / saving : saveable_object_util " , <nl> + " @ six_archive / / : six " , <nl> + ] , <nl> + ) <nl> + <nl> + py_library ( <nl> + name = " distribute " , <nl> + srcs = [ <nl> + " distribute . py " , <nl> + " distribution_strategy_context . py " , <nl> + ] , <nl> + srcs_version = " PY2AND3 " , <nl> + deps = [ <nl> + " / / tensorflow / python / distribute : distribute_lib " , <nl> + ] , <nl> + ) <nl> + <nl> + tf_py_test ( <nl> + name = " server_lib_test " , <nl> + size = " small " , <nl> + srcs = [ " server_lib_test . py " ] , <nl> + grpc_enabled = True , <nl> + python_version = " PY3 " , <nl> + tags = [ <nl> + " noasan " , # TODO ( b / 161236904 ) : flaky timeout in trying to start gRPC server <nl> + ] , <nl> + tfrt_enabled = True , <nl> + deps = [ <nl> + " / / tensorflow / core : protos_all_py " , <nl> + " / / tensorflow / python : array_ops " , <nl> + " / / tensorflow / python : client " , <nl> + " / / tensorflow / python : client_testlib " , <nl> + " / / tensorflow / python : data_flow_ops " , <nl> + " / / tensorflow / python : errors " , <nl> + " / / tensorflow / python : framework_for_generated_wrappers " , <nl> + " / / tensorflow / python : math_ops " , <nl> + " / / tensorflow / python : training " , <nl> + " / / tensorflow / python : variables " , <nl> + " / / third_party / py / numpy " , <nl> + ] , <nl> + ) <nl> + <nl> + tf_py_test ( <nl> + name = " server_lib_multiple_containers_test " , <nl> + size = " small " , <nl> + srcs = [ " server_lib_multiple_containers_test . py " ] , <nl> + grpc_enabled = True , <nl> + python_version = " PY3 " , <nl> + tfrt_enabled = True , <nl> + deps = [ <nl> + " / / tensorflow / core : protos_all_py " , <nl> + " / / tensorflow / python : array_ops " , <nl> + " / / tensorflow / python : client " , <nl> + " / / tensorflow / python : client_testlib " , <nl> + " / / tensorflow / python : data_flow_ops " , <nl> + " / / tensorflow / python : errors " , <nl> + " / / tensorflow / python : framework_for_generated_wrappers " , <nl> + " / / tensorflow / python : math_ops " , <nl> + " / / tensorflow / python : variables " , <nl> + " / / third_party / py / numpy " , <nl> + ] , <nl> + ) <nl> + <nl> + tf_py_test ( <nl> + name = " server_lib_same_variables_clear_container_test " , <nl> + size = " small " , <nl> + srcs = [ " server_lib_same_variables_clear_container_test . 
py " ] , <nl> + grpc_enabled = True , <nl> + python_version = " PY3 " , <nl> + tfrt_enabled = True , <nl> + deps = [ <nl> + " / / tensorflow / core : protos_all_py " , <nl> + " / / tensorflow / python : array_ops " , <nl> + " / / tensorflow / python : client " , <nl> + " / / tensorflow / python : client_testlib " , <nl> + " / / tensorflow / python : data_flow_ops " , <nl> + " / / tensorflow / python : errors " , <nl> + " / / tensorflow / python : framework_for_generated_wrappers " , <nl> + " / / tensorflow / python : math_ops " , <nl> + " / / tensorflow / python : variables " , <nl> + " / / third_party / py / numpy " , <nl> + ] , <nl> + ) <nl> + <nl> + tf_py_test ( <nl> + name = " server_lib_same_variables_clear_test " , <nl> + size = " small " , <nl> + srcs = [ " server_lib_same_variables_clear_test . py " ] , <nl> + grpc_enabled = True , <nl> + python_version = " PY3 " , <nl> + tfrt_enabled = True , <nl> + deps = [ <nl> + " / / tensorflow / core : protos_all_py " , <nl> + " / / tensorflow / python : array_ops " , <nl> + " / / tensorflow / python : client " , <nl> + " / / tensorflow / python : client_testlib " , <nl> + " / / tensorflow / python : data_flow_ops " , <nl> + " / / tensorflow / python : errors " , <nl> + " / / tensorflow / python : framework_for_generated_wrappers " , <nl> + " / / tensorflow / python : math_ops " , <nl> + " / / tensorflow / python : variables " , <nl> + " / / third_party / py / numpy " , <nl> + ] , <nl> + ) <nl> + <nl> + tf_py_test ( <nl> + name = " server_lib_same_variables_no_clear_test " , <nl> + size = " small " , <nl> + srcs = [ " server_lib_same_variables_no_clear_test . py " ] , <nl> + grpc_enabled = True , <nl> + python_version = " PY3 " , <nl> + tfrt_enabled = True , <nl> + deps = [ <nl> + " / / tensorflow / core : protos_all_py " , <nl> + " / / tensorflow / python : array_ops " , <nl> + " / / tensorflow / python : client " , <nl> + " / / tensorflow / python : client_testlib " , <nl> + " / / tensorflow / python : data_flow_ops " , <nl> + " / / tensorflow / python : errors " , <nl> + " / / tensorflow / python : framework_for_generated_wrappers " , <nl> + " / / tensorflow / python : math_ops " , <nl> + " / / tensorflow / python : variables " , <nl> + " / / third_party / py / numpy " , <nl> + ] , <nl> + ) <nl> + <nl> + tf_py_test ( <nl> + name = " server_lib_sparse_job_test " , <nl> + size = " small " , <nl> + srcs = [ " server_lib_sparse_job_test . py " ] , <nl> + grpc_enabled = True , <nl> + python_version = " PY3 " , <nl> + tfrt_enabled = True , <nl> + deps = [ <nl> + " / / tensorflow / core : protos_all_py " , <nl> + " / / tensorflow / python : array_ops " , <nl> + " / / tensorflow / python : client " , <nl> + " / / tensorflow / python : client_testlib " , <nl> + " / / tensorflow / python : data_flow_ops " , <nl> + " / / tensorflow / python : errors " , <nl> + " / / tensorflow / python : framework_for_generated_wrappers " , <nl> + " / / tensorflow / python : math_ops " , <nl> + " / / tensorflow / python : variables " , <nl> + " / / third_party / py / numpy " , <nl> + ] , <nl> + ) <nl> + <nl> + cuda_py_test ( <nl> + name = " localhost_cluster_performance_test " , <nl> + size = " medium " , <nl> + srcs = [ <nl> + " localhost_cluster_performance_test . py " , <nl> + ] , <nl> + grpc_enabled = True , <nl> + python_version = " PY3 " , <nl> + tags = [ <nl> + " no_oss " , # Test flaky due to port collisions . 
<nl> + " oss_serial " , <nl> + ] , <nl> + tfrt_enabled = True , <nl> + deps = [ <nl> + " : device_setter " , <nl> + " / / tensorflow / python : client_testlib " , <nl> + " / / tensorflow / python : distributed_framework_test_lib " , <nl> + " / / tensorflow / python : framework_for_generated_wrappers " , <nl> + " / / tensorflow / python : partitioned_variables " , <nl> + " / / tensorflow / python : session " , <nl> + " / / tensorflow / python : variable_scope " , <nl> + " / / tensorflow / python : variables " , <nl> + " / / third_party / py / numpy " , <nl> + ] , <nl> + ) <nl> + <nl> + tf_py_test ( <nl> + name = " sync_replicas_optimizer_test " , <nl> + size = " medium " , <nl> + srcs = [ <nl> + " sync_replicas_optimizer_test . py " , <nl> + ] , <nl> + grpc_enabled = True , <nl> + python_version = " PY3 " , <nl> + tags = [ <nl> + " no_oss " , # Test flaky due to port collisions . <nl> + " notsan " , # data race due to b / 62910646 <nl> + " oss_serial " , <nl> + ] , <nl> + tfrt_enabled = True , <nl> + deps = [ <nl> + " / / tensorflow / python : client_testlib " , <nl> + " / / tensorflow / python : framework_for_generated_wrappers " , <nl> + " / / tensorflow / python : variables " , <nl> + ] , <nl> + ) <nl> + <nl> + tf_py_test ( <nl> + name = " evaluation_test " , <nl> + size = " small " , <nl> + srcs = [ " evaluation_test . py " ] , <nl> + python_version = " PY3 " , <nl> + shard_count = 3 , <nl> + tags = [ <nl> + " manual " , <nl> + " notap " , # Disabling until b / 33000128 and b / 33040312 are fixed . <nl> + ] , <nl> + deps = [ <nl> + " / / tensorflow / core : protos_all_py " , <nl> + " / / tensorflow / python : array_ops " , <nl> + " / / tensorflow / python : client " , <nl> + " / / tensorflow / python : client_testlib " , <nl> + " / / tensorflow / python : framework " , <nl> + " / / tensorflow / python : framework_for_generated_wrappers " , <nl> + " / / tensorflow / python : framework_test_lib " , <nl> + " / / tensorflow / python : math_ops " , <nl> + " / / tensorflow / python : metrics " , <nl> + " / / tensorflow / python : platform " , <nl> + " / / tensorflow / python : state_ops " , <nl> + " / / tensorflow / python : summary " , <nl> + " / / tensorflow / python : variables " , <nl> + " / / tensorflow / python / ops / losses " , <nl> + " / / third_party / py / numpy " , <nl> + ] , <nl> + ) <nl> + <nl> + py_library ( <nl> + name = " py_checkpoint_reader " , <nl> + srcs = [ " py_checkpoint_reader . py " ] , <nl> + deps = [ <nl> + " / / tensorflow / python : _pywrap_checkpoint_reader " , <nl> + " / / tensorflow / python : dtypes " , <nl> + " / / tensorflow / python : errors " , <nl> + " / / tensorflow / python : tf_export " , <nl> + " / / tensorflow / python : util " , <nl> + ] , <nl> + ) <nl> + <nl> + tf_proto_library ( <nl> + name = " checkpoint_state " , <nl> + srcs = [ " checkpoint_state . proto " ] , <nl> + cc_api_version = 2 , <nl> + ) <nl> + <nl> + py_library ( <nl> + name = " checkpoint_management " , <nl> + srcs = [ " checkpoint_management . 
py " ] , <nl> + deps = [ <nl> + " : training_util " , <nl> + " / / tensorflow / python : errors " , <nl> + " / / tensorflow / python : framework_ops " , <nl> + " / / tensorflow / python : lib " , <nl> + " / / tensorflow / python : platform " , <nl> + " / / tensorflow / python : tf_export " , <nl> + " / / tensorflow / python : util " , <nl> + " / / tensorflow / python : variable_scope " , <nl> + " / / tensorflow / python / eager : context " , <nl> + ] , <nl> + ) <nl> + <nl> + cuda_py_test ( <nl> + name = " checkpoint_management_test " , <nl> + size = " small " , <nl> + srcs = [ <nl> + " checkpoint_management_test . py " , <nl> + ] , <nl> + python_version = " PY3 " , <nl> + deps = [ <nl> + " : checkpoint_management " , <nl> + " : saver " , <nl> + " / / tensorflow / python : client_testlib " , <nl> + " / / tensorflow / python : dtypes " , <nl> + " / / tensorflow / python : framework_ops " , <nl> + " / / tensorflow / python : framework_test_lib " , <nl> + " / / tensorflow / python : lib " , <nl> + " / / tensorflow / python : platform " , <nl> + " / / tensorflow / python : variables " , <nl> + " / / tensorflow / python / eager : context " , <nl> + " / / tensorflow / python / training / tracking : util " , <nl> + ] , <nl> + ) <nl> + <nl> + py_library ( <nl> + name = " saver " , <nl> + srcs = [ " saver . py " ] , <nl> + srcs_version = " PY2AND3 " , <nl> + deps = [ <nl> + " : checkpoint_management " , <nl> + " : py_checkpoint_reader " , <nl> + " : training_util " , <nl> + " / / tensorflow / python : array_ops " , <nl> + " / / tensorflow / python : constant_op " , <nl> + " / / tensorflow / python : control_flow_ops " , <nl> + " / / tensorflow / python : device " , <nl> + " / / tensorflow / python : errors " , <nl> + " / / tensorflow / python : framework " , <nl> + " / / tensorflow / python : framework_ops " , <nl> + " / / tensorflow / python : io_ops " , <nl> + " / / tensorflow / python : io_ops_gen " , <nl> + " / / tensorflow / python : platform " , <nl> + " / / tensorflow / python : session " , <nl> + " / / tensorflow / python : string_ops " , <nl> + " / / tensorflow / python : tf_export " , <nl> + " / / tensorflow / python : util " , <nl> + " / / tensorflow / python : variables " , <nl> + " / / tensorflow / python / eager : context " , <nl> + " / / tensorflow / python / training / saving : saveable_object " , <nl> + " / / tensorflow / python / training / saving : saveable_object_util " , <nl> + " / / tensorflow / python / training / tracking : base " , <nl> + " / / third_party / py / numpy " , <nl> + ] , <nl> + ) <nl> + <nl> + py_library ( <nl> + name = " saver_test_utils " , <nl> + srcs = [ " saver_test_utils . py " ] , <nl> + srcs_version = " PY2AND3 " , <nl> + deps = [ <nl> + " : saver " , <nl> + " / / tensorflow / python : dtypes " , <nl> + " / / tensorflow / python : framework_ops " , <nl> + " / / tensorflow / python : lookup_ops_gen " , <nl> + " / / tensorflow / python / eager : context " , <nl> + ] , <nl> + ) <nl> + <nl> + cuda_py_test ( <nl> + name = " saver_test " , <nl> + size = " medium " , <nl> + srcs = [ <nl> + " saver_test . 
py " , <nl> + ] , <nl> + python_version = " PY3 " , <nl> + tags = [ " multi_gpu " ] , <nl> + deps = [ <nl> + " : adam " , <nl> + " : checkpoint_management " , <nl> + " : gradient_descent " , <nl> + " : py_checkpoint_reader " , <nl> + " : queue_runner_impl " , <nl> + " : saver " , <nl> + " : saver_test_utils " , <nl> + " / / tensorflow / core : protos_all_py " , <nl> + " / / tensorflow / python : array_ops " , <nl> + " / / tensorflow / python : client_testlib " , <nl> + " / / tensorflow / python : constant_op " , <nl> + " / / tensorflow / python : control_flow_ops " , <nl> + " / / tensorflow / python : data_flow_ops " , <nl> + " / / tensorflow / python : dtypes " , <nl> + " / / tensorflow / python : errors " , <nl> + " / / tensorflow / python : framework " , <nl> + " / / tensorflow / python : framework_ops " , <nl> + " / / tensorflow / python : framework_test_lib " , <nl> + " / / tensorflow / python : function " , <nl> + " / / tensorflow / python : gradients_impl " , <nl> + " / / tensorflow / python : lib " , <nl> + " / / tensorflow / python : math_ops " , <nl> + " / / tensorflow / python : nn_grad " , <nl> + " / / tensorflow / python : nn_ops " , <nl> + " / / tensorflow / python : partitioned_variables " , <nl> + " / / tensorflow / python : platform " , <nl> + " / / tensorflow / python : random_ops " , <nl> + " / / tensorflow / python : resource_variable_ops " , <nl> + " / / tensorflow / python : session " , <nl> + " / / tensorflow / python : sparse_ops " , <nl> + " / / tensorflow / python : summary " , <nl> + " / / tensorflow / python : util " , <nl> + " / / tensorflow / python : variable_scope " , <nl> + " / / tensorflow / python : variables " , <nl> + " / / tensorflow / python / data / ops : dataset_ops " , <nl> + " / / tensorflow / python / data / ops : iterator_ops " , <nl> + " / / tensorflow / python / eager : context " , <nl> + " / / tensorflow / python / training / tracking : base " , <nl> + " / / third_party / py / numpy " , <nl> + " @ six_archive / / : six " , <nl> + ] , <nl> + ) <nl> + <nl> + tf_py_test ( <nl> + name = " saver_large_variable_test " , <nl> + size = " medium " , <nl> + srcs = [ " saver_large_variable_test . py " ] , <nl> + python_version = " PY3 " , <nl> + tags = [ <nl> + " manual " , <nl> + " noasan " , # http : / / b / 30379628 <nl> + " notsan " , # http : / / b / 30379628 <nl> + ] , <nl> + tfrt_enabled = True , <nl> + deps = [ <nl> + " / / tensorflow / core : protos_all_py " , <nl> + " / / tensorflow / python : client " , <nl> + " / / tensorflow / python : client_testlib " , <nl> + " / / tensorflow / python : errors " , <nl> + " / / tensorflow / python : framework_for_generated_wrappers " , <nl> + " / / tensorflow / python : variables " , <nl> + ] , <nl> + ) <nl> + <nl> + tf_py_test ( <nl> + name = " saver_large_partitioned_variable_test " , <nl> + size = " medium " , <nl> + srcs = [ " saver_large_partitioned_variable_test . py " ] , <nl> + python_version = " PY3 " , <nl> + tags = [ <nl> + " noasan " , # http : / / b / 30782289 <nl> + " notsan " , # http : / / b / 30782289 <nl> + ] , <nl> + tfrt_enabled = True , <nl> + deps = [ <nl> + " / / tensorflow / python : client " , <nl> + " / / tensorflow / python : client_testlib " , <nl> + " / / tensorflow / python : framework_for_generated_wrappers " , <nl> + " / / tensorflow / python : partitioned_variables " , <nl> + " / / tensorflow / python : variables " , <nl> + ] , <nl> + ) <nl> + <nl> + py_library ( <nl> + name = " basic_session_run_hooks " , <nl> + srcs = [ " basic_session_run_hooks . 
py " ] , <nl> + srcs_version = " PY2AND3 " , <nl> + deps = [ <nl> + " : session_run_hook " , <nl> + " : summary_io " , <nl> + " : training_util " , <nl> + " / / tensorflow / python : client " , <nl> + " / / tensorflow / python : dtypes " , <nl> + " / / tensorflow / python : errors " , <nl> + " / / tensorflow / python : framework " , <nl> + " / / tensorflow / python : framework_ops " , <nl> + " / / tensorflow / python : init_ops " , <nl> + " / / tensorflow / python : platform " , <nl> + " / / tensorflow / python : tf_export " , <nl> + " / / tensorflow / python : variable_scope " , <nl> + " / / third_party / py / numpy " , <nl> + " @ six_archive / / : six " , <nl> + ] , <nl> + ) <nl> + <nl> + py_library ( <nl> + name = " session_run_hook " , <nl> + srcs = [ " session_run_hook . py " ] , <nl> + srcs_version = " PY2AND3 " , <nl> + deps = [ " / / tensorflow / python : tf_export " ] , <nl> + ) <nl> + <nl> + py_library ( <nl> + name = " supervisor " , <nl> + srcs = [ " supervisor . py " ] , <nl> + deps = [ <nl> + " : coordinator " , <nl> + " : saver " , <nl> + " : session_manager " , <nl> + " : training_util " , <nl> + " / / tensorflow / python : control_flow_ops " , <nl> + " / / tensorflow / python : dtypes " , <nl> + " / / tensorflow / python : framework " , <nl> + " / / tensorflow / python : framework_ops " , <nl> + " / / tensorflow / python : lookup_ops " , <nl> + " / / tensorflow / python : platform " , <nl> + " / / tensorflow / python : summary " , <nl> + " / / tensorflow / python : tf_export " , <nl> + " / / tensorflow / python : util " , <nl> + " / / tensorflow / python : variables " , <nl> + " / / tensorflow / python / eager : context " , <nl> + ] , <nl> + ) <nl> + <nl> + tf_py_test ( <nl> + name = " supervisor_test " , <nl> + size = " small " , <nl> + srcs = [ " supervisor_test . py " ] , <nl> + grpc_enabled = True , <nl> + python_version = " PY3 " , <nl> + tags = [ " no_windows " ] , <nl> + tfrt_enabled = True , <nl> + deps = [ <nl> + " : checkpoint_management " , <nl> + " : saver " , <nl> + " : supervisor " , <nl> + " : training " , <nl> + " / / tensorflow / core : protos_all_py " , <nl> + " / / tensorflow / python : array_ops " , <nl> + " / / tensorflow / python : client_testlib " , <nl> + " / / tensorflow / python : errors " , <nl> + " / / tensorflow / python : framework " , <nl> + " / / tensorflow / python : framework_for_generated_wrappers " , <nl> + " / / tensorflow / python : io_ops " , <nl> + " / / tensorflow / python : parsing_ops " , <nl> + " / / tensorflow / python : platform " , <nl> + " / / tensorflow / python : summary " , <nl> + " / / tensorflow / python : variables " , <nl> + ] , <nl> + ) <nl> + <nl> + py_library ( <nl> + name = " server_lib " , <nl> + srcs = [ " server_lib . py " ] , <nl> + srcs_version = " PY2AND3 " , <nl> + deps = [ <nl> + " / / tensorflow / python : errors " , <nl> + " / / tensorflow / python : pywrap_tf_session " , <nl> + " / / tensorflow / python : tf_export " , <nl> + " / / tensorflow / python : util " , <nl> + ] , <nl> + ) <nl> + <nl> + py_library ( <nl> + name = " training_util " , <nl> + srcs = [ " training_util . 
py " ] , <nl> + srcs_version = " PY2AND3 " , <nl> + deps = [ <nl> + " / / tensorflow / python : dtypes " , <nl> + " / / tensorflow / python : framework " , <nl> + " / / tensorflow / python : framework_ops " , <nl> + " / / tensorflow / python : init_ops " , <nl> + " / / tensorflow / python : platform " , <nl> + " / / tensorflow / python : resource_variable_ops " , <nl> + " / / tensorflow / python : state_ops " , <nl> + " / / tensorflow / python : tf_export " , <nl> + " / / tensorflow / python : variable_scope " , <nl> + " / / tensorflow / python : variables " , <nl> + " / / tensorflow / python / eager : context " , <nl> + ] , <nl> + ) <nl> + <nl> + tf_py_test ( <nl> + name = " training_util_test " , <nl> + size = " small " , <nl> + srcs = [ " training_util_test . py " ] , <nl> + python_version = " PY3 " , <nl> + tfrt_enabled = True , <nl> + deps = [ <nl> + " : training_util " , <nl> + " / / tensorflow / python : client_testlib " , <nl> + " / / tensorflow / python : framework " , <nl> + " / / tensorflow / python : platform " , <nl> + " / / tensorflow / python : variables " , <nl> + ] , <nl> + ) <nl> + <nl> + cuda_py_test ( <nl> + name = " adam_test " , <nl> + size = " medium " , <nl> + srcs = [ " adam_test . py " ] , <nl> + python_version = " PY3 " , <nl> + tfrt_enabled = True , <nl> + deps = [ <nl> + " : adam " , <nl> + " / / tensorflow / python : array_ops " , <nl> + " / / tensorflow / python : client_testlib " , <nl> + " / / tensorflow / python : constant_op " , <nl> + " / / tensorflow / python : dtypes " , <nl> + " / / tensorflow / python : framework_ops " , <nl> + " / / tensorflow / python : framework_test_lib " , <nl> + " / / tensorflow / python : math_ops " , <nl> + " / / tensorflow / python : resource_variable_ops " , <nl> + " / / tensorflow / python : session " , <nl> + " / / tensorflow / python : variables " , <nl> + " / / tensorflow / python / eager : context " , <nl> + " / / third_party / py / numpy " , <nl> + ] , <nl> + ) <nl> + <nl> + cuda_py_test ( <nl> + name = " moving_averages_test " , <nl> + size = " small " , <nl> + srcs = [ <nl> + " moving_averages_test . py " , <nl> + ] , <nl> + python_version = " PY3 " , <nl> + tags = [ <nl> + " no_windows " , # b / 139083295 : bfloat16 tests fail on Windows <nl> + " notsan " , <nl> + ] , <nl> + tfrt_enabled = True , <nl> + deps = [ <nl> + " : moving_averages " , <nl> + " : saver " , <nl> + " / / tensorflow / python : array_ops " , <nl> + " / / tensorflow / python : client_testlib " , <nl> + " / / tensorflow / python : constant_op " , <nl> + " / / tensorflow / python : dtypes " , <nl> + " / / tensorflow / python : framework_ops " , <nl> + " / / tensorflow / python : framework_test_lib " , <nl> + " / / tensorflow / python : state_ops_gen " , <nl> + " / / tensorflow / python : variable_scope " , <nl> + " / / tensorflow / python : variables " , <nl> + " / / tensorflow / python / eager : context " , <nl> + ] , <nl> + ) <nl> + <nl> + cuda_py_tests ( <nl> + name = " training_tests " , <nl> + size = " medium " , <nl> + srcs = [ <nl> + " adadelta_test . py " , <nl> + " adagrad_da_test . py " , <nl> + " adagrad_test . py " , <nl> + " basic_loops_test . py " , <nl> + " coordinator_test . py " , <nl> + " device_setter_test . py " , <nl> + " ftrl_test . py " , <nl> + " gradient_descent_test . py " , <nl> + " momentum_test . py " , <nl> + " optimizer_test . py " , <nl> + " proximal_adagrad_test . py " , <nl> + " proximal_gradient_descent_test . py " , <nl> + " quantize_training_test . py " , <nl> + " queue_runner_test . 
py " , <nl> + " rmsprop_test . py " , <nl> + " slot_creator_test . py " , <nl> + " tensorboard_logging_test . py " , <nl> + " training_ops_test . py " , <nl> + ] , <nl> + python_version = " PY3 " , <nl> + deps = [ <nl> + " : training " , <nl> + " / / tensorflow / core : protos_all_py " , <nl> + " / / tensorflow / python : array_ops " , <nl> + " / / tensorflow / python : client " , <nl> + " / / tensorflow / python : client_testlib " , <nl> + " / / tensorflow / python : control_flow_ops " , <nl> + " / / tensorflow / python : data_flow_ops " , <nl> + " / / tensorflow / python : data_flow_ops_gen " , <nl> + " / / tensorflow / python : embedding_ops " , <nl> + " / / tensorflow / python : errors " , <nl> + " / / tensorflow / python : framework " , <nl> + " / / tensorflow / python : framework_for_generated_wrappers " , <nl> + " / / tensorflow / python : framework_test_lib " , <nl> + " / / tensorflow / python : gradients " , <nl> + " / / tensorflow / python : lookup_ops " , <nl> + " / / tensorflow / python : math_ops " , <nl> + " / / tensorflow / python : nn_grad " , <nl> + " / / tensorflow / python : nn_ops " , <nl> + " / / tensorflow / python : partitioned_variables " , <nl> + " / / tensorflow / python : platform " , <nl> + " / / tensorflow / python : platform_test " , <nl> + " / / tensorflow / python : pywrap_tensorflow " , <nl> + " / / tensorflow / python : random_ops " , <nl> + " / / tensorflow / python : resource_variable_ops " , <nl> + " / / tensorflow / python : resources " , <nl> + " / / tensorflow / python : sparse_ops " , <nl> + " / / tensorflow / python : state_ops " , <nl> + " / / tensorflow / python : state_ops_gen " , <nl> + " / / tensorflow / python : summary " , <nl> + " / / tensorflow / python : util " , <nl> + " / / tensorflow / python : variable_scope " , <nl> + " / / tensorflow / python : variables " , <nl> + " / / third_party / py / numpy " , <nl> + " @ six_archive / / : six " , <nl> + ] , <nl> + ) <nl> + <nl> + cuda_py_test ( <nl> + name = " session_manager_test " , <nl> + size = " medium " , # TODO ( irving ) : Can this be made small ? <nl> + srcs = [ " session_manager_test . py " ] , <nl> + grpc_enabled = True , <nl> + main = " session_manager_test . py " , <nl> + python_version = " PY3 " , <nl> + tfrt_enabled = True , <nl> + deps = [ <nl> + " : checkpoint_management " , <nl> + " : saver " , <nl> + " : server_lib " , <nl> + " : session_manager " , <nl> + " / / tensorflow / python : array_ops " , <nl> + " / / tensorflow / python : client_testlib " , <nl> + " / / tensorflow / python : control_flow_ops " , <nl> + " / / tensorflow / python : errors " , <nl> + " / / tensorflow / python : framework_for_generated_wrappers " , <nl> + " / / tensorflow / python : framework_test_lib " , <nl> + " / / tensorflow / python : platform " , <nl> + " / / tensorflow / python : session " , <nl> + " / / tensorflow / python : variable_scope " , <nl> + " / / tensorflow / python : variables " , <nl> + ] , <nl> + ) <nl> + <nl> + tf_py_test ( <nl> + name = " basic_session_run_hooks_test " , <nl> + size = " medium " , <nl> + srcs = [ " basic_session_run_hooks_test . 
py " ] , <nl> + python_version = " PY3 " , <nl> + tags = [ <nl> + " no_pip " , # Relies on contrib <nl> + " no_windows " , <nl> + " notsan " , # intermittent races on a few percent of runs <nl> + ] , <nl> + tfrt_enabled = True , <nl> + deps = [ <nl> + " / / tensorflow / core : protos_all_py " , <nl> + " / / tensorflow / python : client " , <nl> + " / / tensorflow / python : client_testlib " , <nl> + " / / tensorflow / python : control_flow_ops " , <nl> + " / / tensorflow / python : fake_summary_writer " , <nl> + " / / tensorflow / python : framework " , <nl> + " / / tensorflow / python : framework_for_generated_wrappers " , <nl> + " / / tensorflow / python : nn_grad " , <nl> + " / / tensorflow / python : platform " , <nl> + " / / tensorflow / python : state_ops " , <nl> + " / / tensorflow / python : summary " , <nl> + " / / tensorflow / python : variable_scope " , <nl> + " / / tensorflow / python : variables " , <nl> + ] , <nl> + ) <nl> + <nl> + tf_py_test ( <nl> + name = " checkpoint_utils_test " , <nl> + size = " small " , <nl> + srcs = [ " checkpoint_utils_test . py " ] , <nl> + python_version = " PY3 " , <nl> + tags = [ <nl> + " manual " , <nl> + " no_cuda_on_cpu_tap " , <nl> + " no_oss " , <nl> + " no_windows " , <nl> + " notap " , <nl> + ] , <nl> + deps = [ <nl> + " / / tensorflow / python : client " , <nl> + " / / tensorflow / python : client_testlib " , <nl> + " / / tensorflow / python : framework_for_generated_wrappers " , <nl> + " / / tensorflow / python : io_ops " , <nl> + " / / tensorflow / python : partitioned_variables " , <nl> + " / / tensorflow / python : platform " , <nl> + " / / tensorflow / python : resource_variable_ops " , <nl> + " / / tensorflow / python : state_ops " , <nl> + " / / tensorflow / python : variable_scope " , <nl> + " / / tensorflow / python : variables " , <nl> + ] , <nl> + ) <nl> + <nl> + tf_py_test ( <nl> + name = " checkpoint_ops_test " , <nl> + size = " small " , <nl> + srcs = [ " checkpoint_ops_test . py " ] , <nl> + python_version = " PY3 " , <nl> + tfrt_enabled = True , <nl> + deps = [ <nl> + " / / tensorflow / python : checkpoint_ops_gen " , <nl> + " / / tensorflow / python : client " , <nl> + " / / tensorflow / python : client_testlib " , <nl> + " / / tensorflow / python : framework_for_generated_wrappers " , <nl> + " / / tensorflow / python : io_ops " , <nl> + " / / tensorflow / python : partitioned_variables " , <nl> + " / / tensorflow / python : platform " , <nl> + " / / tensorflow / python : pywrap_tensorflow " , <nl> + " / / tensorflow / python : state_ops " , <nl> + " / / tensorflow / python : variable_scope " , <nl> + " / / tensorflow / python : variables " , <nl> + ] , <nl> + ) <nl> + <nl> + tf_py_test ( <nl> + name = " warm_starting_util_test " , <nl> + size = " medium " , <nl> + srcs = [ " warm_starting_util_test . py " ] , <nl> + python_version = " PY3 " , <nl> + tfrt_enabled = True , <nl> + deps = [ <nl> + " / / tensorflow / python : array_ops " , <nl> + " / / tensorflow / python : client_testlib " , <nl> + " / / tensorflow / python : dtypes " , <nl> + " / / tensorflow / python : framework_ops " , <nl> + " / / tensorflow / python : init_ops " , <nl> + " / / tensorflow / python : variable_scope " , <nl> + " / / tensorflow / python : variables " , <nl> + " / / third_party / py / numpy " , <nl> + ] , <nl> + ) <nl> + <nl> + py_library ( <nl> + name = " monitored_session " , <nl> + srcs = [ " monitored_session . 
py " ] , <nl> + srcs_version = " PY2AND3 " , <nl> + deps = [ <nl> + " : basic_session_run_hooks " , <nl> + " : coordinator " , <nl> + " : queue_runner " , <nl> + " : saver " , <nl> + " : session_manager " , <nl> + " : session_run_hook " , <nl> + " / / tensorflow / python : array_ops " , <nl> + " / / tensorflow / python : control_flow_ops " , <nl> + " / / tensorflow / python : errors " , <nl> + " / / tensorflow / python : framework_ops " , <nl> + " / / tensorflow / python : lookup_ops " , <nl> + " / / tensorflow / python : platform " , <nl> + " / / tensorflow / python : resources " , <nl> + " / / tensorflow / python : summary " , <nl> + " / / tensorflow / python : tf_export " , <nl> + " / / tensorflow / python : util " , <nl> + " / / tensorflow / python : variables " , <nl> + " / / tensorflow / python / distribute : distribute_coordinator_context " , <nl> + " @ six_archive / / : six " , <nl> + ] , <nl> + ) <nl> + <nl> + tf_py_test ( <nl> + name = " monitored_session_test " , <nl> + size = " medium " , <nl> + srcs = [ " monitored_session_test . py " ] , <nl> + tags = [ <nl> + " no_pip " , <nl> + " notsan " , # b / 67945581 <nl> + ] , <nl> + tfrt_enabled = True , <nl> + deps = [ <nl> + " : checkpoint_management " , <nl> + " : monitored_session " , <nl> + " / / tensorflow / core : protos_all_py " , <nl> + " / / tensorflow / python : array_ops " , <nl> + " / / tensorflow / python : client_testlib " , <nl> + " / / tensorflow / python : control_flow_ops " , <nl> + " / / tensorflow / python : errors " , <nl> + " / / tensorflow / python : framework_for_generated_wrappers " , <nl> + " / / tensorflow / python : resource_variable_ops " , <nl> + " / / tensorflow / python : saver " , <nl> + " / / tensorflow / python : session " , <nl> + " / / tensorflow / python : state_ops " , <nl> + " / / tensorflow / python : summary " , <nl> + " / / tensorflow / python : variables " , <nl> + " / / tensorflow / python / distribute : collective_all_reduce_strategy " , <nl> + " / / tensorflow / python / distribute : distribute_coordinator " , <nl> + ] , <nl> + ) <nl> + <nl> + tf_py_test ( <nl> + name = " input_test " , <nl> + size = " medium " , <nl> + srcs = [ " input_test . py " ] , <nl> + python_version = " PY3 " , <nl> + tfrt_enabled = True , <nl> + deps = [ <nl> + " / / tensorflow / python : array_ops " , <nl> + " / / tensorflow / python : client_testlib " , <nl> + " / / tensorflow / python : errors " , <nl> + " / / tensorflow / python : framework " , <nl> + " / / tensorflow / python : framework_for_generated_wrappers " , <nl> + " / / tensorflow / python : math_ops " , <nl> + " / / tensorflow / python : platform " , <nl> + " / / tensorflow / python : util " , <nl> + " / / tensorflow / python : variables " , <nl> + " / / third_party / py / numpy " , <nl> + ] , <nl> + ) <nl> new file mode 100644 <nl> index 0000000000000 . . 4f897881ec6b3 <nl> mmm / dev / null <nl> ppp b / tensorflow / python / training / experimental / BUILD <nl> <nl> + load ( " / / tensorflow : tensorflow . bzl " , " cuda_py_test " ) <nl> + <nl> + package ( <nl> + default_visibility = [ " / / tensorflow : internal " ] , <nl> + licenses = [ " notice " ] , # Apache 2 . 0 <nl> + ) <nl> + <nl> + py_library ( <nl> + name = " loss_scale " , <nl> + srcs = [ " loss_scale . 
py " ] , <nl> + srcs_version = " PY2AND3 " , <nl> + deps = [ <nl> + " / / tensorflow / python : framework " , <nl> + " @ absl_py / / absl / testing : parameterized " , <nl> + ] , <nl> + ) <nl> + <nl> + py_library ( <nl> + name = " loss_scale_optimizer " , <nl> + srcs = [ " loss_scale_optimizer . py " ] , <nl> + srcs_version = " PY2AND3 " , <nl> + deps = [ <nl> + " : loss_scale " , <nl> + " / / tensorflow / python / distribute : distribute_lib " , <nl> + " @ absl_py / / absl / testing : parameterized " , <nl> + ] , <nl> + ) <nl> + <nl> + # The test currently requires visibility only granted to tensorflow / python : __pkg__ <nl> + exports_files ( <nl> + [ " loss_scale_optimizer_test . py " ] , <nl> + visibility = [ " / / tensorflow / python : __pkg__ " ] , <nl> + ) <nl> + <nl> + py_test ( <nl> + name = " loss_scale_test " , <nl> + size = " medium " , <nl> + srcs = [ " loss_scale_test . py " ] , <nl> + python_version = " PY3 " , <nl> + deps = [ <nl> + " : loss_scale " , <nl> + " / / tensorflow / python : client_testlib " , <nl> + " / / tensorflow / python / distribute : mirrored_strategy " , <nl> + " / / tensorflow / python / distribute : one_device_strategy " , <nl> + " @ absl_py / / absl / testing : parameterized " , <nl> + ] , <nl> + ) <nl> + <nl> + py_library ( <nl> + name = " mixed_precision_global_state " , <nl> + srcs = [ " mixed_precision_global_state . py " ] , <nl> + srcs_version = " PY2AND3 " , <nl> + ) <nl> + <nl> + py_library ( <nl> + name = " mixed_precision " , <nl> + srcs = [ " mixed_precision . py " ] , <nl> + srcs_version = " PY2AND3 " , <nl> + deps = [ <nl> + " : loss_scale " , <nl> + " : loss_scale_optimizer " , <nl> + " : mixed_precision_global_state " , <nl> + " / / tensorflow / python : config " , <nl> + " / / tensorflow / python : util " , <nl> + ] , <nl> + ) <nl> + <nl> + cuda_py_test ( <nl> + name = " mixed_precision_test " , <nl> + size = " small " , <nl> + srcs = [ " mixed_precision_test . py " ] , <nl> + python_version = " PY3 " , <nl> + tfrt_enabled = True , <nl> + deps = [ <nl> + " : mixed_precision " , <nl> + " / / tensorflow / python : client_testlib " , <nl> + " @ absl_py / / absl / testing : parameterized " , <nl> + ] , <nl> + ) <nl> + <nl> + py_library ( <nl> + name = " loss_scaling_gradient_tape " , <nl> + srcs = [ " loss_scaling_gradient_tape . py " ] , <nl> + srcs_version = " PY2AND3 " , <nl> + deps = [ <nl> + " : loss_scale " , <nl> + " / / tensorflow / python : array_ops " , <nl> + " / / tensorflow / python : unconnected_gradients " , <nl> + " / / tensorflow / python : util " , <nl> + " / / tensorflow / python / distribute : distribute_lib " , <nl> + " / / tensorflow / python / eager : backprop " , <nl> + ] , <nl> + ) <nl> + <nl> + cuda_py_test ( <nl> + name = " loss_scaling_gradient_tape_test " , <nl> + size = " medium " , <nl> + srcs = [ " loss_scaling_gradient_tape_test . py " ] , <nl> + shard_count = 2 , <nl> + deps = [ <nl> + " : loss_scale " , <nl> + " : loss_scaling_gradient_tape " , <nl> + " / / tensorflow / python : client_testlib " , <nl> + " / / tensorflow / python : constant_op " , <nl> + " / / tensorflow / python : framework_test_combinations_lib " , <nl> + " / / tensorflow / python / compat : v2_compat " , <nl> + " / / tensorflow / python / distribute : mirrored_strategy " , <nl> + " / / tensorflow / python / eager : def_function " , <nl> + " / / third_party / py / numpy " , <nl> + " @ absl_py / / absl / testing : parameterized " , <nl> + ] , <nl> + ) <nl> new file mode 100644 <nl> index 0000000000000 . . 
5590b5056f8c4 <nl> mmm / dev / null <nl> ppp b / tensorflow / python / training / gen_training_ops . py <nl> <nl> + # Copyright 2015 The TensorFlow Authors . All Rights Reserved . <nl> + # <nl> + # Licensed under the Apache License , Version 2 . 0 ( the " License " ) ; <nl> + # you may not use this file except in compliance with the License . <nl> + # You may obtain a copy of the License at <nl> + # <nl> + # http : / / www . apache . org / licenses / LICENSE - 2 . 0 <nl> + # <nl> + # Unless required by applicable law or agreed to in writing , software <nl> + # distributed under the License is distributed on an " AS IS " BASIS , <nl> + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND , either express or implied . <nl> + # See the License for the specific language governing permissions and <nl> + # limitations under the License . <nl> + # = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = <nl> + <nl> + " " " Python wrappers for training ops . " " " <nl> + # NOTE ( allenl ) : The generated op wrappers for training ops were originally in <nl> + # training / gen_training_ops . py . They moved to ops / gen_training_ops . py when <nl> + # training / became a module , and this is an alias to avoid breaking existing <nl> + # imports . <nl> + <nl> + from __future__ import absolute_import <nl> + from __future__ import division <nl> + from __future__ import print_function <nl> + <nl> + # go / tf - wildcard - import <nl> + # pylint : disable = wildcard - import <nl> + from tensorflow . python . ops . gen_training_ops import * <nl> + # pylint : enable = wildcard - import <nl> mmm a / tensorflow / python / training / training_ops . py <nl> ppp b / tensorflow / python / training / training_ops . py <nl> <nl> from __future__ import division <nl> from __future__ import print_function <nl> <nl> - from tensorflow . python . training import gen_training_ops # pylint : disable = unused - import <nl> + from tensorflow . python . ops import gen_training_ops # pylint : disable = unused - import <nl> # go / tf - wildcard - import <nl> # pylint : disable = wildcard - import <nl> - from tensorflow . python . training . gen_training_ops import * <nl> + from tensorflow . python . ops . gen_training_ops import * <nl> # pylint : enable = wildcard - import <nl> mmm a / tensorflow / tools / def_file_filter / symbols_pybind . txt <nl> ppp b / tensorflow / tools / def_file_filter / symbols_pybind . txt <nl> tensorflow : : ProfilerSession : : Status <nl> tensorflow : : ProfilerSession : : ~ ProfilerSession <nl> <nl> [ profiler_server_impl ] # profiler <nl> - tensorflow : : ProfilerServer : : StartProfilerServer <nl> - tensorflow : : ProfilerServer : : ~ ProfilerServer <nl> + tensorflow : : profiler : : ProfilerServer : : StartProfilerServer <nl> + tensorflow : : profiler : : ProfilerServer : : ~ ProfilerServer <nl> <nl> [ profiler_client_impl ] # profiler <nl> tensorflow : : profiler : : ProfileGrpc <nl> mmm a / tensorflow / tools / pip_package / pip_smoke_test . py <nl> ppp b / tensorflow / tools / pip_package / pip_smoke_test . 
py <nl> def BuildPyTestDependencies ( ) : <nl> " / / tensorflow / python : tf_optimizer " , <nl> " / / tensorflow / python : compare_test_proto_py " , <nl> " / / tensorflow / core : image_testdata " , <nl> - " / / tensorflow / core : lmdb_testdata " , <nl> + " / / tensorflow / core / lib / lmdb : lmdb_testdata " , <nl> " / / tensorflow / core / kernels / cloud : bigquery_reader_ops " , <nl> " / / tensorflow / python / debug : grpc_tensorflow_server . par " , <nl> " / / tensorflow / python / feature_column : vocabulary_testdata " , <nl> mmm a / tensorflow / tools / pip_package / setup . py <nl> ppp b / tensorflow / tools / pip_package / setup . py <nl> def find_files ( pattern , root ) : <nl> version = _VERSION . replace ( ' - ' , ' ' ) , <nl> description = DOCLINES [ 0 ] , <nl> long_description = ' \ n ' . join ( DOCLINES [ 2 : ] ) , <nl> + long_description_content_type = " text / markdown " , <nl> url = ' https : / / www . tensorflow . org / ' , <nl> download_url = ' https : / / github . com / tensorflow / tensorflow / tags ' , <nl> author = ' Google Inc . ' , <nl> mmm a / tensorflow / workspace . bzl <nl> ppp b / tensorflow / workspace . bzl <nl> def tf_repositories ( path_prefix = " " , tf_repo_name = " " ) : <nl> ) <nl> <nl> # Check out LLVM and MLIR from llvm - project . <nl> - LLVM_COMMIT = " 85dacca29f8280fe72ee00adf3678ba06a9e2348 " <nl> - LLVM_SHA256 = " e10aeac0b9d37d195fba2d435f9824eed0de3177c312c4c33142eb0843534f68 " <nl> + LLVM_COMMIT = " 1d01fc100bb5bef5f5eaf92520b2e52f64ee1d6e " <nl> + LLVM_SHA256 = " a8a2503b98945e91e55df114a7b3739c88c900cf14839fba2221fd1f8cfc1d5a " <nl> LLVM_URLS = [ <nl> " https : / / storage . googleapis . com / mirror . tensorflow . org / github . com / llvm / llvm - project / archive / { commit } . tar . gz " . format ( commit = LLVM_COMMIT ) , <nl> " https : / / github . com / llvm / llvm - project / archive / { commit } . tar . gz " . format ( commit = LLVM_COMMIT ) , <nl> mmm a / third_party / llvm / llvm . autogenerated . BUILD <nl> ppp b / third_party / llvm / llvm . autogenerated . BUILD <nl> cc_library ( <nl> " : IPO " , <nl> " : InstCombine " , <nl> " : Instrumentation " , <nl> + " : ObjCARC " , <nl> " : Scalar " , <nl> " : Support " , <nl> " : Target " , <nl> | Merge remote - tracking branch ' upstream / master ' into fix - 42648 | tensorflow/tensorflow | 71e00fff8251d0f34e4fc5fe2421b77f74afaa4d | 2020-08-31T17:01:34Z |
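[Note] The TensorFlow row above keeps tensorflow/python/training/gen_training_ops.py alive as a thin alias module that wildcard-imports the generated wrappers from their new home under tensorflow/python/ops, so pre-existing import paths keep working after the move. The same compatibility-shim idea, sketched in C++ terms — the file paths below are hypothetical illustrations, not taken from the commit:

// old/include/training_ops.h -- compatibility shim left behind after a code move.
// Translation units that still #include the old path keep compiling unchanged;
// the real declarations now live at the new path.
#pragma once
#include "new/include/training_ops.h"  // single forwarding include, no other content
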
mmm a / js / common / bootstrap / monkeypatches . js <nl> ppp b / js / common / bootstrap / monkeypatches . js <nl> <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> <nl> Object . defineProperty ( Object . prototype , ' _shallowCopy ' , { <nl> - set ( value ) { <nl> - if ( this = = = Object . prototype ) { <nl> - throw new TypeError ( ` Can not override Object . prototype . _shallowCopy ! ` ) ; <nl> - } <nl> - Object . defineProperty ( this , ' _shallowCopy ' , { <nl> - configurable : true , <nl> - enumerable : true , <nl> - writable : true , <nl> - value <nl> - } ) ; <nl> - } , <nl> get ( ) { <nl> var self = this ; <nl> return Object . prototype . propertyKeys . reduce ( function ( previous , key ) { <nl> Object . defineProperty ( Object . prototype , ' propertyKeys ' , { <nl> return Object . keys ( this ) . filter ( function ( key ) { <nl> return ( key . charAt ( 0 ) ! = = ' _ ' & & key . charAt ( 0 ) ! = = ' $ ' ) ; <nl> } ) ; <nl> - } , <nl> - set ( value ) { <nl> - if ( this = = = Object . prototype ) { <nl> - throw new TypeError ( ` Can not override Object . prototype . propertyKeys ! ` ) ; <nl> - } <nl> - Object . defineProperty ( this , ' propertyKeys ' , { <nl> - configurable : true , <nl> - enumerable : true , <nl> - writable : true , <nl> - value <nl> - } ) ; <nl> - } , <nl> + } <nl> } ) ; <nl> <nl> <nl> | Unbreak shallowCopy | arangodb/arangodb | 4af0c67c5cffc874655092ce86776f8ba5f2c606 | 2016-05-02T15:19:01Z |
mmm a / scene / gui / control . cpp <nl> ppp b / scene / gui / control . cpp <nl> bool Control : : _set ( const StringName & p_name , const Variant & p_value ) { <nl> <nl> if ( name . begins_with ( " custom_icons / " ) ) { <nl> String dname = name . get_slicec ( ' / ' , 1 ) ; <nl> + if ( data . icon_override . has ( dname ) ) { <nl> + data . icon_override [ dname ] - > disconnect ( " changed " , this , " _override_changed " ) ; <nl> + } <nl> data . icon_override . erase ( dname ) ; <nl> notification ( NOTIFICATION_THEME_CHANGED ) ; <nl> - update ( ) ; <nl> } else if ( name . begins_with ( " custom_shaders / " ) ) { <nl> String dname = name . get_slicec ( ' / ' , 1 ) ; <nl> + if ( data . shader_override . has ( dname ) ) { <nl> + data . shader_override [ dname ] - > disconnect ( " changed " , this , " _override_changed " ) ; <nl> + } <nl> data . shader_override . erase ( dname ) ; <nl> notification ( NOTIFICATION_THEME_CHANGED ) ; <nl> - update ( ) ; <nl> } else if ( name . begins_with ( " custom_styles / " ) ) { <nl> String dname = name . get_slicec ( ' / ' , 1 ) ; <nl> + if ( data . style_override . has ( dname ) ) { <nl> + data . style_override [ dname ] - > disconnect ( " changed " , this , " _override_changed " ) ; <nl> + } <nl> data . style_override . erase ( dname ) ; <nl> notification ( NOTIFICATION_THEME_CHANGED ) ; <nl> - update ( ) ; <nl> } else if ( name . begins_with ( " custom_fonts / " ) ) { <nl> String dname = name . get_slicec ( ' / ' , 1 ) ; <nl> if ( data . font_override . has ( dname ) ) { <nl> - _unref_font ( data . font_override [ dname ] ) ; <nl> + data . font_override [ dname ] - > disconnect ( " changed " , this , " _override_changed " ) ; <nl> } <nl> data . font_override . erase ( dname ) ; <nl> notification ( NOTIFICATION_THEME_CHANGED ) ; <nl> - update ( ) ; <nl> } else if ( name . begins_with ( " custom_colors / " ) ) { <nl> String dname = name . get_slicec ( ' / ' , 1 ) ; <nl> data . color_override . erase ( dname ) ; <nl> notification ( NOTIFICATION_THEME_CHANGED ) ; <nl> - update ( ) ; <nl> } else if ( name . begins_with ( " custom_constants / " ) ) { <nl> String dname = name . get_slicec ( ' / ' , 1 ) ; <nl> data . constant_override . erase ( dname ) ; <nl> notification ( NOTIFICATION_THEME_CHANGED ) ; <nl> - update ( ) ; <nl> } else <nl> return false ; <nl> <nl> } else { <nl> if ( name . begins_with ( " custom_icons / " ) ) { <nl> String dname = name . get_slicec ( ' / ' , 1 ) ; <nl> - notification ( NOTIFICATION_THEME_CHANGED ) ; <nl> add_icon_override ( dname , p_value ) ; <nl> } else if ( name . begins_with ( " custom_shaders / " ) ) { <nl> String dname = name . get_slicec ( ' / ' , 1 ) ; <nl> add_shader_override ( dname , p_value ) ; <nl> - notification ( NOTIFICATION_THEME_CHANGED ) ; <nl> } else if ( name . begins_with ( " custom_styles / " ) ) { <nl> String dname = name . get_slicec ( ' / ' , 1 ) ; <nl> add_style_override ( dname , p_value ) ; <nl> - notification ( NOTIFICATION_THEME_CHANGED ) ; <nl> } else if ( name . begins_with ( " custom_fonts / " ) ) { <nl> String dname = name . get_slicec ( ' / ' , 1 ) ; <nl> add_font_override ( dname , p_value ) ; <nl> - notification ( NOTIFICATION_THEME_CHANGED ) ; <nl> } else if ( name . begins_with ( " custom_colors / " ) ) { <nl> String dname = name . get_slicec ( ' / ' , 1 ) ; <nl> add_color_override ( dname , p_value ) ; <nl> - notification ( NOTIFICATION_THEME_CHANGED ) ; <nl> } else if ( name . begins_with ( " custom_constants / " ) ) { <nl> String dname = name . 
get_slicec ( ' / ' , 1 ) ; <nl> add_constant_override ( dname , p_value ) ; <nl> - notification ( NOTIFICATION_THEME_CHANGED ) ; <nl> } else <nl> return false ; <nl> } <nl> Rect2 Control : : get_anchorable_rect ( ) const { <nl> void Control : : add_icon_override ( const StringName & p_name , const Ref < Texture > & p_icon ) { <nl> <nl> ERR_FAIL_COND ( p_icon . is_null ( ) ) ; <nl> + if ( data . icon_override . has ( p_name ) ) { <nl> + data . icon_override [ p_name ] - > disconnect ( " changed " , this , " _override_changed " ) ; <nl> + } <nl> data . icon_override [ p_name ] = p_icon ; <nl> + if ( data . icon_override [ p_name ] . is_valid ( ) ) { <nl> + data . icon_override [ p_name ] - > connect ( " changed " , this , " _override_changed " , Vector < Variant > ( ) , CONNECT_REFERENCE_COUNTED ) ; <nl> + } <nl> notification ( NOTIFICATION_THEME_CHANGED ) ; <nl> - update ( ) ; <nl> } <nl> <nl> void Control : : add_shader_override ( const StringName & p_name , const Ref < Shader > & p_shader ) { <nl> ERR_FAIL_COND ( p_shader . is_null ( ) ) ; <nl> + if ( data . shader_override . has ( p_name ) ) { <nl> + data . shader_override [ p_name ] - > disconnect ( " changed " , this , " _override_changed " ) ; <nl> + } <nl> data . shader_override [ p_name ] = p_shader ; <nl> + if ( data . shader_override [ p_name ] . is_valid ( ) ) { <nl> + data . shader_override [ p_name ] - > connect ( " changed " , this , " _override_changed " , Vector < Variant > ( ) , CONNECT_REFERENCE_COUNTED ) ; <nl> + } <nl> notification ( NOTIFICATION_THEME_CHANGED ) ; <nl> - update ( ) ; <nl> } <nl> void Control : : add_style_override ( const StringName & p_name , const Ref < StyleBox > & p_style ) { <nl> <nl> ERR_FAIL_COND ( p_style . is_null ( ) ) ; <nl> + if ( data . style_override . has ( p_name ) ) { <nl> + data . style_override [ p_name ] - > disconnect ( " changed " , this , " _override_changed " ) ; <nl> + } <nl> data . style_override [ p_name ] = p_style ; <nl> + if ( data . style_override [ p_name ] . is_valid ( ) ) { <nl> + data . style_override [ p_name ] - > connect ( " changed " , this , " _override_changed " , Vector < Variant > ( ) , CONNECT_REFERENCE_COUNTED ) ; <nl> + } <nl> + <nl> notification ( NOTIFICATION_THEME_CHANGED ) ; <nl> - update ( ) ; <nl> } <nl> <nl> void Control : : add_font_override ( const StringName & p_name , const Ref < Font > & p_font ) { <nl> <nl> ERR_FAIL_COND ( p_font . is_null ( ) ) ; <nl> if ( data . font_override . has ( p_name ) ) { <nl> - _unref_font ( data . font_override [ p_name ] ) ; <nl> + data . font_override [ p_name ] - > disconnect ( " changed " , this , " _override_changed " ) ; <nl> } <nl> data . font_override [ p_name ] = p_font ; <nl> - <nl> - if ( p_font . is_valid ( ) ) { <nl> - _ref_font ( p_font ) ; <nl> + if ( data . font_override [ p_name ] . is_valid ( ) ) { <nl> + data . font_override [ p_name ] - > connect ( " changed " , this , " _override_changed " , Vector < Variant > ( ) , CONNECT_REFERENCE_COUNTED ) ; <nl> } <nl> <nl> notification ( NOTIFICATION_THEME_CHANGED ) ; <nl> - update ( ) ; <nl> } <nl> void Control : : add_color_override ( const StringName & p_name , const Color & p_color ) { <nl> <nl> data . color_override [ p_name ] = p_color ; <nl> notification ( NOTIFICATION_THEME_CHANGED ) ; <nl> - update ( ) ; <nl> } <nl> void Control : : add_constant_override ( const StringName & p_name , int p_constant ) { <nl> <nl> data . 
constant_override [ p_name ] = p_constant ; <nl> notification ( NOTIFICATION_THEME_CHANGED ) ; <nl> - update ( ) ; <nl> } <nl> <nl> void Control : : set_focus_mode ( FocusMode p_focus_mode ) { <nl> void Control : : _propagate_theme_changed ( CanvasItem * p_at , Control * p_owner , bool <nl> c - > data . theme_owner = p_owner ; <nl> } <nl> c - > notification ( NOTIFICATION_THEME_CHANGED ) ; <nl> - c - > update ( ) ; <nl> } <nl> } <nl> <nl> float Control : : get_rotation_degrees ( ) const { <nl> return Math : : rad2deg ( get_rotation ( ) ) ; <nl> } <nl> <nl> - / / needed to update the control if the font changes . . <nl> - void Control : : _ref_font ( Ref < Font > p_sc ) { <nl> + void Control : : _override_changed ( ) { <nl> <nl> - if ( ! data . font_refcount . has ( p_sc ) ) { <nl> - data . font_refcount [ p_sc ] = 1 ; <nl> - p_sc - > connect ( " changed " , this , " _font_changed " ) ; <nl> - } else { <nl> - data . font_refcount [ p_sc ] + = 1 ; <nl> - } <nl> - } <nl> - <nl> - void Control : : _unref_font ( Ref < Font > p_sc ) { <nl> - <nl> - ERR_FAIL_COND ( ! data . font_refcount . has ( p_sc ) ) ; <nl> - data . font_refcount [ p_sc ] - - ; <nl> - if ( data . font_refcount [ p_sc ] = = 0 ) { <nl> - p_sc - > disconnect ( " changed " , this , " _font_changed " ) ; <nl> - data . font_refcount . erase ( p_sc ) ; <nl> - } <nl> - } <nl> - <nl> - void Control : : _font_changed ( ) { <nl> - <nl> - update ( ) ; <nl> notification ( NOTIFICATION_THEME_CHANGED ) ; <nl> - minimum_size_changed ( ) ; / / fonts affect minimum size pretty much almost always <nl> + minimum_size_changed ( ) ; / / overrides are likely to affect minimum size <nl> } <nl> <nl> void Control : : set_pivot_offset ( const Vector2 & p_pivot ) { <nl> void Control : : _bind_methods ( ) { <nl> <nl> ClassDB : : bind_method ( D_METHOD ( " _theme_changed " ) , & Control : : _theme_changed ) ; <nl> <nl> - ClassDB : : bind_method ( D_METHOD ( " _font_changed " ) , & Control : : _font_changed ) ; <nl> + ClassDB : : bind_method ( D_METHOD ( " _override_changed " ) , & Control : : _override_changed ) ; <nl> <nl> BIND_VMETHOD ( MethodInfo ( " _gui_input " , PropertyInfo ( Variant : : OBJECT , " event " , PROPERTY_HINT_RESOURCE_TYPE , " InputEvent " ) ) ) ; <nl> BIND_VMETHOD ( MethodInfo ( Variant : : VECTOR2 , " _get_minimum_size " ) ) ; <nl> mmm a / scene / gui / control . h <nl> ppp b / scene / gui / control . h <nl> class Control : public CanvasItem { <nl> HashMap < StringName , Ref < Font > > font_override ; <nl> HashMap < StringName , Color > color_override ; <nl> HashMap < StringName , int > constant_override ; <nl> - Map < Ref < Font > , int > font_refcount ; <nl> <nl> } data ; <nl> <nl> class Control : public CanvasItem { <nl> void _size_changed ( ) ; <nl> String _get_tooltip ( ) const ; <nl> <nl> - void _ref_font ( Ref < Font > p_sc ) ; <nl> - void _unref_font ( Ref < Font > p_sc ) ; <nl> - void _font_changed ( ) ; <nl> + void _override_changed ( ) ; <nl> <nl> void _update_canvas_item_transform ( ) ; <nl> <nl> mmm a / scene / resources / style_box . cpp <nl> ppp b / scene / resources / style_box . cpp <nl> int StyleBoxLine : : get_thickness ( ) const { <nl> <nl> void StyleBoxLine : : set_vertical ( bool p_vertical ) { <nl> vertical = p_vertical ; <nl> + emit_changed ( ) ; <nl> } <nl> bool StyleBoxLine : : is_vertical ( ) const { <nl> return vertical ; <nl> | Merge pull request from bojidar - bg / 25904 - update - stylebox - change | godotengine/godot | 4a9c0ed8d9ca42e198cb8f8fdf3a60c08bad4fef | 2019-02-25T16:23:15Z |
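[Note] The Godot commit above replaces the per-font refcount bookkeeping with one uniform rule for every theme override: before storing a new resource, disconnect the old resource's "changed" signal; after storing it, connect the new one (reference-counted) to a single _override_changed handler that re-runs the theme-changed notification and minimum-size recalculation. A minimal, framework-free C++ sketch of that swap-and-reconnect pattern — Resource, Control, and the callback plumbing here are stand-ins, not Godot's API:

// Sketch only: models "disconnect old, connect new, notify" with std::function.
#include <functional>
#include <map>
#include <memory>
#include <string>
#include <utility>
#include <vector>

struct Resource {
    std::vector<std::function<void()>> changed_listeners;
    void connect_changed(std::function<void()> cb) { changed_listeners.push_back(std::move(cb)); }
    void disconnect_changed() { changed_listeners.clear(); }  // stand-in for disconnect("changed", ...)
    void emit_changed() { for (auto& cb : changed_listeners) cb(); }
};

struct Control {
    std::map<std::string, std::shared_ptr<Resource>> style_override;

    void add_style_override(const std::string& name, std::shared_ptr<Resource> style) {
        auto it = style_override.find(name);
        if (it != style_override.end() && it->second)
            it->second->disconnect_changed();          // drop the hook on the old resource
        style_override[name] = style;
        if (style)                                     // re-listen on the new resource
            style->connect_changed([this] { _override_changed(); });
        _override_changed();                           // the override itself changed right now, too
    }

    void _override_changed() {
        // In the real diff: NOTIFICATION_THEME_CHANGED plus minimum_size_changed(),
        // because overrides are likely to affect layout, not just appearance.
    }
};
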
mmm a / Marlin / src / feature / I2CPositionEncoder . cpp <nl> ppp b / Marlin / src / feature / I2CPositionEncoder . cpp <nl> bool I2CPositionEncoder : : test_axis ( ) { <nl> <nl> stepper . synchronize ( ) ; <nl> <nl> - planner . buffer_line ( startCoord [ X_AXIS ] , startCoord [ Y_AXIS ] , startCoord [ Z_AXIS ] , <nl> + planner . buffer_line ( startCoord [ X_AXIS ] , startCoord [ Y_AXIS ] , startCoord [ Z_AXIS ] , <nl> stepper . get_axis_position_mm ( E_AXIS ) , feedrate , 0 ) ; <nl> stepper . synchronize ( ) ; <nl> <nl> void I2CPositionEncoder : : calibrate_steps_mm ( const uint8_t iter ) { <nl> startCoord [ encoderAxis ] = startDistance ; <nl> endCoord [ encoderAxis ] = endDistance ; <nl> <nl> - LOOP_L_N ( i , iter ) { <nl> - stepper . synchronize ( ) ; <nl> + stepper . synchronize ( ) ; <nl> <nl> - planner . buffer_line ( startCoord [ X_AXIS ] , startCoord [ Y_AXIS ] , startCoord [ Z_AXIS ] , <nl> + LOOP_L_N ( i , iter ) { <nl> + planner . buffer_line ( startCoord [ X_AXIS ] , startCoord [ Y_AXIS ] , startCoord [ Z_AXIS ] , <nl> stepper . get_axis_position_mm ( E_AXIS ) , feedrate , 0 ) ; <nl> stepper . synchronize ( ) ; <nl> <nl> void I2CPositionEncoder : : calibrate_steps_mm ( const uint8_t iter ) { <nl> <nl> / / do_blocking_move_to ( endCoord [ X_AXIS ] , endCoord [ Y_AXIS ] , endCoord [ Z_AXIS ] ) ; <nl> <nl> - planner . buffer_line ( endCoord [ X_AXIS ] , endCoord [ Y_AXIS ] , endCoord [ Z_AXIS ] , <nl> + planner . buffer_line ( endCoord [ X_AXIS ] , endCoord [ Y_AXIS ] , endCoord [ Z_AXIS ] , <nl> stepper . get_axis_position_mm ( E_AXIS ) , feedrate , 0 ) ; <nl> stepper . synchronize ( ) ; <nl> <nl> mmm a / Marlin / src / feature / pause . cpp <nl> ppp b / Marlin / src / feature / pause . cpp <nl> static void do_pause_e_move ( const float & length , const float & fr ) { <nl> set_destination_from_current ( ) ; <nl> destination [ E_AXIS ] + = length / planner . e_factor [ active_extruder ] ; <nl> planner . buffer_line_kinematic ( destination , fr , active_extruder ) ; <nl> - stepper . synchronize ( ) ; <nl> set_current_from_destination ( ) ; <nl> + stepper . synchronize ( ) ; <nl> } <nl> <nl> / * * <nl> bool pause_print ( const float & retract , const point_t & park_point , const float & u <nl> # endif <nl> print_job_timer . pause ( ) ; <nl> <nl> - / / Wait for synchronize steppers <nl> - stepper . synchronize ( ) ; <nl> - <nl> / / Save current position <nl> COPY ( resume_position , current_position ) ; <nl> <nl> + / / Wait for buffered blocks to complete <nl> + stepper . synchronize ( ) ; <nl> + <nl> / / Initial retract before move to filament change position <nl> if ( retract & & thermalManager . hotEnoughToExtrude ( active_extruder ) ) <nl> do_pause_e_move ( retract , PAUSE_PARK_RETRACT_FEEDRATE ) ; <nl> mmm a / Marlin / src / gcode / bedlevel / G26 . cpp <nl> ppp b / Marlin / src / gcode / bedlevel / G26 . cpp <nl> void move_to ( const float & rx , const float & ry , const float & z , const float & e_de <nl> destination [ E_AXIS ] = current_position [ E_AXIS ] ; <nl> <nl> G26_line_to_destination ( feed_value ) ; <nl> - <nl> - stepper . synchronize ( ) ; <nl> set_destination_from_current ( ) ; <nl> } <nl> <nl> void move_to ( const float & rx , const float & ry , const float & z , const float & e_de <nl> destination [ E_AXIS ] + = e_delta ; <nl> <nl> G26_line_to_destination ( feed_value ) ; <nl> - <nl> - stepper . 
synchronize ( ) ; <nl> set_destination_from_current ( ) ; <nl> } <nl> <nl> inline bool prime_nozzle ( ) { <nl> if ( Total_Prime > = EXTRUDE_MAXLENGTH ) return G26_ERR ; <nl> # endif <nl> G26_line_to_destination ( planner . max_feedrate_mm_s [ E_AXIS ] / 15 . 0 ) ; <nl> - <nl> + set_destination_from_current ( ) ; <nl> stepper . synchronize ( ) ; / / Without this synchronize , the purge is more consistent , <nl> / / but because the planner has a buffer , we won ' t be able <nl> / / to stop as quickly . So we put up with the less smooth <nl> / / action to give the user a more responsive ' Stop ' . <nl> - set_destination_from_current ( ) ; <nl> - idle ( ) ; <nl> } <nl> <nl> wait_for_release ( ) ; <nl> inline bool prime_nozzle ( ) { <nl> set_destination_from_current ( ) ; <nl> destination [ E_AXIS ] + = g26_prime_length ; <nl> G26_line_to_destination ( planner . max_feedrate_mm_s [ E_AXIS ] / 15 . 0 ) ; <nl> - stepper . synchronize ( ) ; <nl> set_destination_from_current ( ) ; <nl> retract_filament ( destination ) ; <nl> } <nl> void GcodeSuite : : G26 ( ) { <nl> <nl> if ( current_position [ Z_AXIS ] < Z_CLEARANCE_BETWEEN_PROBES ) { <nl> do_blocking_move_to_z ( Z_CLEARANCE_BETWEEN_PROBES ) ; <nl> - stepper . synchronize ( ) ; <nl> set_current_from_destination ( ) ; <nl> } <nl> <nl> mmm a / Marlin / src / gcode / bedlevel / abl / G29 . cpp <nl> ppp b / Marlin / src / gcode / bedlevel / abl / G29 . cpp <nl> void GcodeSuite : : G29 ( ) { <nl> # if ENABLED ( DEBUG_LEVELING_FEATURE ) <nl> if ( DEBUGGING ( LEVELING ) ) SERIAL_ECHOLNPAIR ( " Z Probe End Script : " , Z_PROBE_END_SCRIPT ) ; <nl> # endif <nl> - enqueue_and_echo_commands_P ( PSTR ( Z_PROBE_END_SCRIPT ) ) ; <nl> stepper . synchronize ( ) ; <nl> + enqueue_and_echo_commands_P ( PSTR ( Z_PROBE_END_SCRIPT ) ) ; <nl> # endif <nl> <nl> / / Auto Bed Leveling is complete ! Enable if possible . <nl> mmm a / Marlin / src / gcode / control / M80_M81 . cpp <nl> ppp b / Marlin / src / gcode / control / M80_M81 . cpp <nl> void GcodeSuite : : M81 ( ) { <nl> safe_delay ( 1000 ) ; / / Wait 1 second before switching off <nl> <nl> # if HAS_SUICIDE <nl> - stepper . synchronize ( ) ; <nl> suicide ( ) ; <nl> # elif HAS_POWER_SWITCH <nl> PSU_OFF ( ) ; <nl> mmm a / Marlin / src / gcode / host / M114 . cpp <nl> ppp b / Marlin / src / gcode / host / M114 . cpp <nl> <nl> <nl> void report_current_position_detail ( ) { <nl> <nl> - stepper . synchronize ( ) ; <nl> - <nl> SERIAL_PROTOCOLPGM ( " \ nLogical : " ) ; <nl> const float logical [ XYZ ] = { <nl> LOGICAL_X_POSITION ( current_position [ X_AXIS ] ) , <nl> <nl> report_xyz ( delta ) ; <nl> # endif <nl> <nl> + stepper . synchronize ( ) ; <nl> + <nl> SERIAL_PROTOCOLPGM ( " Stepper : " ) ; <nl> LOOP_XYZE ( i ) { <nl> SERIAL_CHAR ( ' ' ) ; <nl> mmm a / Marlin / src / gcode / lcd / M0_M1 . cpp <nl> ppp b / Marlin / src / gcode / lcd / M0_M1 . cpp <nl> void GcodeSuite : : M0_M1 ( ) { <nl> <nl> const bool has_message = ! hasP & & ! hasS & & args & & * args ; <nl> <nl> + stepper . synchronize ( ) ; <nl> + <nl> # if ENABLED ( ULTIPANEL ) <nl> <nl> if ( has_message ) <nl> void GcodeSuite : : M0_M1 ( ) { <nl> KEEPALIVE_STATE ( PAUSED_FOR_USER ) ; <nl> wait_for_user = true ; <nl> <nl> - stepper . synchronize ( ) ; <nl> - <nl> if ( ms > 0 ) { <nl> ms + = millis ( ) ; / / wait until this time for a click <nl> while ( PENDING ( millis ( ) , ms ) & & wait_for_user ) idle ( ) ; <nl> mmm a / Marlin / src / module / motion . cpp <nl> ppp b / Marlin / src / module / motion . 
cpp <nl> void do_blocking_move_to ( const float rx , const float ry , const float rz , const f <nl> <nl> # endif <nl> <nl> - stepper . synchronize ( ) ; <nl> - <nl> feedrate_mm_s = old_feedrate_mm_s ; <nl> <nl> # if ENABLED ( DEBUG_LEVELING_FEATURE ) <nl> if ( DEBUGGING ( LEVELING ) ) SERIAL_ECHOLNPGM ( " < < < do_blocking_move_to " ) ; <nl> # endif <nl> + <nl> + stepper . synchronize ( ) ; <nl> } <nl> void do_blocking_move_to_x ( const float & rx , const float & fr_mm_s / * = 0 . 0 * / ) { <nl> do_blocking_move_to ( rx , current_position [ Y_AXIS ] , current_position [ Z_AXIS ] , fr_mm_s ) ; <nl> float soft_endstop_min [ XYZ ] = { X_MIN_BED , Y_MIN_BED , Z_MIN_POS } , <nl> current_position [ Y_AXIS ] , current_position [ Z_AXIS ] , current_position [ E_AXIS ] , <nl> planner . max_feedrate_mm_s [ X_AXIS ] , 1 <nl> ) ; <nl> - SYNC_PLAN_POSITION_KINEMATIC ( ) ; <nl> stepper . synchronize ( ) ; <nl> + SYNC_PLAN_POSITION_KINEMATIC ( ) ; <nl> extruder_duplication_enabled = true ; <nl> active_extruder_parked = false ; <nl> # if ENABLED ( DEBUG_LEVELING_FEATURE ) <nl> static void do_homing_move ( const AxisEnum axis , const float distance , const floa <nl> planner . buffer_line ( delta [ A_AXIS ] , delta [ B_AXIS ] , delta [ C_AXIS ] , current_position [ E_AXIS ] , fr_mm_s ? fr_mm_s : homing_feedrate ( axis ) , active_extruder ) ; <nl> # else <nl> sync_plan_position ( ) ; <nl> - current_position [ axis ] = distance ; <nl> + current_position [ axis ] = distance ; / / Set delta / cartesian axes directly <nl> planner . buffer_line ( current_position [ X_AXIS ] , current_position [ Y_AXIS ] , current_position [ Z_AXIS ] , current_position [ E_AXIS ] , fr_mm_s ? fr_mm_s : homing_feedrate ( axis ) , active_extruder ) ; <nl> # endif <nl> <nl> | Adjust usage of stepper . synchronize | MarlinFirmware/Marlin | bfe223e1209af4e50917e752124c7215a4c38953 | 2018-05-06T06:24:14Z |
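[Note] The Marlin commit above is about ordering relative to the planner buffer: planner.buffer_line(...) only enqueues a move, so logical state (current_position, resume_position) can be updated immediately, while stepper.synchronize() should be called only at the points where the code genuinely needs the physical machine to have caught up — e.g. right before reading stepper positions, not before copying the logical position. A toy C++ model of that ordering; Planner and Stepper here are simplified stand-ins, not Marlin's real classes:

// Sketch only: a blocking synchronize() drains a non-blocking move queue.
#include <cstdio>
#include <deque>

struct Move { float target_mm; };

struct Planner {
    std::deque<Move> buffer;
    void buffer_line(float target_mm) { buffer.push_back({target_mm}); }  // non-blocking enqueue
};

struct Stepper {
    float position_mm = 0.0f;
    void synchronize(Planner& p) {  // blocks until every queued block has "run"
        while (!p.buffer.empty()) { position_mm = p.buffer.front().target_mm; p.buffer.pop_front(); }
    }
};

int main() {
    Planner planner; Stepper stepper;

    planner.buffer_line(10.0f);
    float logical_pos = 10.0f;             // logical state is valid as soon as the move is queued...
    float resume_position = logical_pos;   // ...so snapshots like resume_position need no wait
    stepper.synchronize(planner);          // wait only here, where the *physical* position matters
    std::printf("resume=%.1f physical=%.1f\n", resume_position, stepper.position_mm);
    return 0;
}
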
new file mode 100644 <nl> index 00000000000 . . 1c67ac1cbbf <nl> mmm / dev / null <nl> ppp b / bin / README <nl> @ @ - 0 , 0 + 1 @ @ <nl> + This file just exists to keep the bin / directory in git . <nl> deleted file mode 100755 <nl> index e9c4f57fc6c . . 00000000000 <nl> mmm a / bin / crutch . php <nl> ppp / dev / null <nl> <nl> - # ! / usr / local / bin / php <nl> - < ? php <nl> - <nl> - define ( ' MSG_MAX_SIZE ' , 10 * 1024 * 1024 ) ; <nl> - <nl> - $ p = $ argv [ 1 ] ; <nl> - $ q = msg_get_queue ( ftok ( $ p , ' a ' ) ) ; <nl> - if ( $ q = = = false ) { <nl> - die ( ' failed to get the message queue at ' . $ p ) ; <nl> - } <nl> - <nl> - if ( ! msg_send ( $ q , 2 , " CRUTCH " ) ) { <nl> - die ( ' failed to send startup signal ' ) ; <nl> - } <nl> - <nl> - $ Objects = array ( ) ; / / All resources or objects we have created so far <nl> - $ ObjectIndex = 1 ; <nl> - while ( true ) { <nl> - $ msg = null ; <nl> - if ( msg_receive ( $ q , 1 , $ type , MSG_MAX_SIZE , $ msg ) ) { <nl> - if ( $ type < = 0 ) break ; <nl> - <nl> - $ i = 0 ; <nl> - $ func = $ msg [ $ i + + ] ; <nl> - $ schema = $ msg [ $ i + + ] ; <nl> - $ count = $ msg [ $ i + + ] ; <nl> - $ args = $ msg [ $ i + + ] ; <nl> - <nl> - if ( $ schema ) { <nl> - foreach ( $ schema as $ index = > $ param ) { <nl> - if ( $ index > = 0 & & $ param [ 0 ] = = ' O ' & & $ args [ $ index ] ) { <nl> - $ original_msg [ $ index ] = $ args [ $ index ] ; <nl> - $ args [ $ index ] = $ Objects [ $ args [ $ index ] ] ; <nl> - } <nl> - } <nl> - } <nl> - <nl> - $ expr = ' $ ret = $ func ( ' ; <nl> - for ( $ i = 0 ; $ i < $ count ; $ i + + ) { <nl> - if ( $ i > 0 ) $ expr . = ' , ' ; <nl> - $ expr . = ' $ args [ ' . $ i . ' ] ' ; <nl> - } <nl> - $ expr . = ' ) ; ' ; <nl> - eval ( $ expr ) ; <nl> - <nl> - if ( $ schema ) { <nl> - $ refs = array ( ) ; <nl> - foreach ( $ schema as $ index = > $ param ) { <nl> - if ( $ param = = ' R ' / * Reference * / ) { <nl> - $ refs [ $ index ] = $ args [ $ index ] ; <nl> - } else if ( $ param = = ' OO ' / * Object / Resource Output * / ) { <nl> - if ( $ index < 0 ) { <nl> - if ( $ ret ) { <nl> - $ Objects [ $ ObjectIndex ] = $ ret ; <nl> - $ ret = $ ObjectIndex + + ; <nl> - } <nl> - } else { <nl> - $ newobj = $ args [ $ index ] ; <nl> - $ oldobj = $ Objects [ $ original_msg [ $ index ] ] ; <nl> - if ( $ newobj & & $ newobj ! = = $ oldobj ) { <nl> - $ Objects [ $ ObjectIndex ] = $ newobj ; <nl> - $ refs [ $ index ] = $ ObjectIndex + + ; <nl> - } <nl> - } <nl> - } <nl> - } <nl> - } <nl> - <nl> - $ response = array ( $ ret , $ refs ) ; <nl> - if ( ! msg_send ( $ q , 2 , $ response ) ) break ; <nl> - } <nl> - } <nl> deleted file mode 100644 <nl> index 9644a24722f . . 00000000000 <nl> mmm a / bin / distcc_timer . php <nl> ppp / dev / null <nl> <nl> - < ? php <nl> - <nl> - $ file = isset ( $ argv [ 1 ] ) ? $ argv [ 1 ] : ' / tmp / distcc_timer . log ' ; <nl> - $ lines = array ( ) ; <nl> - exec ( " cat $ file " , $ lines ) ; <nl> - $ time = array ( ) ; <nl> - $ machines = array ( ) ; <nl> - foreach ( $ lines as $ line ) { <nl> - preg_match ( ' / ( [ ^ ] + ) @ ( [ ^ ] + ) / ' , $ line , $ m ) ; <nl> - $ filename = $ m [ 1 ] ; <nl> - $ machine = $ m [ 2 ] ; <nl> - if ( ! 
isset ( $ machines [ $ machine ] ) ) $ machines [ $ machine ] = 0 ; <nl> - + + $ machines [ $ machine ] ; <nl> - preg_match ( ' / pp start : + ( [ 0 - 9 ] + ) / ' , $ line , $ m ) ; $ pp0 = $ m [ 1 ] ; <nl> - preg_match ( ' / pp end : + ( [ 0 - 9 ] + ) / ' , $ line , $ m ) ; $ pp1 = $ m [ 1 ] ; <nl> - preg_match ( ' / remote start : + ( [ 0 - 9 ] + ) / ' , $ line , $ m ) ; $ cc0 = $ m [ 1 ] ; <nl> - preg_match ( ' / remote end : + ( [ 0 - 9 ] + ) / ' , $ line , $ m ) ; $ cc1 = $ m [ 1 ] ; <nl> - $ preprocessing = $ pp1 - $ pp0 ; <nl> - $ compiling = $ cc1 - $ cc0 ; <nl> - if ( $ preprocessing > = 10 ) { <nl> - echo " preprocessing : $ preprocessing , compiling : $ compiling - - > $ filename \ n " ; <nl> - } <nl> - $ time [ $ filename ] = $ compiling ; <nl> - } <nl> - <nl> - function cmp ( $ a , $ b ) { <nl> - if ( $ a = = = $ b ) return 0 ; <nl> - return ( $ a > $ b ) ? 1 : - 1 ; <nl> - } <nl> - <nl> - uasort ( $ time , ' cmp ' ) ; <nl> - foreach ( $ time as $ filename = > $ compiling ) { <nl> - echo " compiling : $ compiling - - > $ filename \ n " ; <nl> - } <nl> - var_dump ( $ machines ) ; <nl> deleted file mode 100644 <nl> index 804a51d3ea1 . . 00000000000 <nl> mmm a / bin / droptables . sql <nl> ppp / dev / null <nl> <nl> - drop table run ; <nl> - drop table dep ; <nl> - drop table err ; <nl> deleted file mode 100644 <nl> index c3c8d634d40 . . 00000000000 <nl> mmm a / bin / exe_profile . php <nl> ppp / dev / null <nl> <nl> - < ? php <nl> - <nl> - $ server = $ argv [ 1 ] ; <nl> - $ password = @ $ argv [ 2 ] ; <nl> - $ polling = isset ( $ argv [ 3 ] ) ? $ argv [ 3 ] : 1 ; <nl> - <nl> - $ total = array ( ) ; <nl> - $ total [ 0 ] = $ total [ 1 ] = $ total [ 2 ] = $ total [ 3 ] = $ total [ 4 ] = 0 ; <nl> - while ( true ) { <nl> - $ ret = shell_exec ( " GET http : / / $ server / prof - exe ? auth = $ password " ) ; <nl> - $ nums = json_decode ( $ ret ) ; <nl> - for ( $ i = 0 ; $ i < count ( $ nums ) ; $ i + + ) { <nl> - $ num = $ nums [ $ i ] ; <nl> - if ( $ num = = - 1 ) break ; <nl> - <nl> - $ total [ $ num ] + = $ nums [ + + $ i ] ; <nl> - } <nl> - <nl> - $ sum = / * $ total [ 1 ] + * / $ total [ 2 ] + $ total [ 3 ] ; <nl> - <nl> - $ out = ' ' ; <nl> - / / $ out . = sprintf ( " Server : % 2d % % \ t " , ( int ) ( $ total [ 1 ] * 100 / $ sum ) ) ; <nl> - $ out . = sprintf ( " Extension : % 2d % % \ t " , ( int ) ( $ total [ 2 ] * 100 / $ sum ) ) ; <nl> - $ out . = sprintf ( " User : % 2d % % \ n " , ( int ) ( $ total [ 3 ] * 100 / $ sum ) ) ; <nl> - echo $ out ; <nl> - <nl> - sleep ( $ polling ) ; <nl> - } <nl> deleted file mode 100644 <nl> index b077655bcc2 . . 00000000000 <nl> mmm a / bin / hdf . el <nl> ppp / dev / null <nl> <nl> - <nl> - ; ; Better HDF read / write experience in emacs ( 9 / 25 / 09 hzhao @ facebook . com ) <nl> - <nl> - ( require ' font - lock ) <nl> - <nl> - ( defvar hdf - mode - hook nil ) <nl> - ( add - to - list ' auto - mode - alist ' ( " \ \ . hdf \ \ ' " . hdf - mode ) ) <nl> - <nl> - ( defvar hdf - indent - level 2 <nl> - " Defines 2 spaces for HDF indentation . " ) <nl> - <nl> - ; ; syntax coloring <nl> - ; ; http : / / www . gnu . org / software / emacs / elisp / html_node / Faces - for - Font - Lock . html <nl> - ( defconst hdf - font - lock - keywords <nl> - ( list <nl> - ' ( " ^ [ \ t ] * \ \ ( [ \ \ # \ \ - ] include \ \ ) [ \ t ] + \ \ ( . * \ \ ) " <nl> - ( 1 font - lock - keyword - face ) <nl> - ( 2 font - lock - string - face ) ) ; ; include <nl> - ' ( " ^ [ \ t ] * # . * $ " . 
font - lock - comment - face ) ; ; comments <nl> - ' ( " ^ [ \ t ] * \ \ ( [ a - z0 - 9_ \ \ . \ \ * ] + \ \ ) [ \ t ] * \ \ ( ! = \ \ ) [ \ t ] * \ \ ( . * \ \ ) " <nl> - ( 1 font - lock - variable - name - face ) <nl> - ( 2 font - lock - function - name - face ) ) ; ; shell commands <nl> - ' ( " ^ [ \ t ] * \ \ ( [ a - z0 - 9_ \ \ . \ \ * ] + \ \ ) [ \ t ] * \ \ ( : = \ \ ) [ \ t ] * \ \ ( [ a - z0 - 9 \ \ . ] + \ \ ) [ \ t ] * $ " <nl> - ( 1 font - lock - variable - name - face ) <nl> - ( 2 font - lock - function - name - face ) <nl> - ( 3 font - lock - variable - name - face ) ) ; ; node copying <nl> - ' ( " ^ [ \ t ] * \ \ ( [ a - z0 - 9_ \ \ . \ \ * ] + \ \ ) [ \ t ] * = [ \ t ] * \ \ ( true \ \ | false \ \ | yes \ \ | no \ \ | on \ \ | off \ \ ) [ \ t ] * $ " <nl> - ( 1 font - lock - variable - name - face ) <nl> - ( 2 font - lock - keyword - face ) ) ; ; booleans <nl> - ' ( " ^ [ \ t ] * \ \ ( [ a - z0 - 9_ \ \ . \ \ * ] + \ \ ) [ \ t ] * = [ \ t ] * \ \ ( [ 0 - 9 ] + \ \ ) [ \ t ] * $ " <nl> - ( 1 font - lock - variable - name - face ) <nl> - ( 2 font - lock - constant - face ) ) ; ; numbers <nl> - ' ( " ^ [ \ t ] * \ \ ( [ a - z0 - 9_ \ \ . \ \ * ] + \ \ ) [ \ t ] * = [ \ t ] * \ \ ( . * \ \ ) " <nl> - ( 1 font - lock - variable - name - face ) ) ; ; strings <nl> - ' ( " ^ [ \ t ] * \ \ ( [ a - z0 - 9_ \ \ . \ \ * ] + \ \ ) [ \ t ] * [ { = ] [ \ t ] * $ " <nl> - ( 1 font - lock - variable - name - face ) ) ; ; nodes <nl> - ' ( " ^ [ \ t ] * \ \ ( [ a - z0 - 9_ \ \ . \ \ * ] + \ \ ) [ \ t ] * \ \ ( : \ \ ) [ \ t ] * \ \ ( [ a - z0 - 9 \ \ . ] + \ \ ) [ \ t ] * $ " <nl> - ( 1 font - lock - variable - name - face ) <nl> - ( 2 font - lock - function - name - face ) <nl> - ( 3 font - lock - variable - name - face ) ) ; ; node aliases <nl> - ' ( " ^ [ \ t ] * \ \ ( @ \ \ ) \ \ ( [ a - z0 - 9_ \ \ . ] + \ \ ) [ \ t ] * $ " <nl> - ( 1 font - lock - function - name - face ) <nl> - ( 2 font - lock - variable - name - face ) ) ; ; node inheritance <nl> - ) <nl> - " Hdf Keywords " ) <nl> - <nl> - ; ; indentation <nl> - ( defun hdf - indent - line ( ) <nl> - " Indent current line as HDF code . " <nl> - ( interactive ) <nl> - ( beginning - of - line ) <nl> - ( if ( bobp ) <nl> - ( indent - line - to 0 ) <nl> - ( progn <nl> - ( if ( looking - at " ^ [ \ t ] * } " ) <nl> - ( save - excursion <nl> - ( forward - line - 1 ) <nl> - ( while ( and ( not ( bobp ) ) ( looking - at " ^ [ \ t ] * $ " ) ) <nl> - ( forward - line - 1 ) ) <nl> - ( if ( looking - at " ^ [ \ t ] * \ \ ( [ a - z0 - 9_ \ \ . \ \ * ] + \ \ ) [ \ t ] * { " ) <nl> - ( setq cur - indent ( current - indentation ) ) <nl> - ( progn <nl> - ( setq cur - indent ( - ( current - indentation ) hdf - indent - level ) ) <nl> - ( if ( < cur - indent 0 ) <nl> - ( indent - line - to 0 ) ) ) ) ) <nl> - ( save - excursion <nl> - ( forward - line - 1 ) <nl> - ( while ( and ( not ( bobp ) ) ( looking - at " ^ [ \ t ] * $ " ) ) <nl> - ( forward - line - 1 ) ) <nl> - ( if ( looking - at " ^ [ \ t ] * \ \ ( [ a - z0 - 9_ \ \ . 
\ \ * ] + \ \ ) [ \ t ] * { " ) <nl> - ( setq cur - indent ( + ( current - indentation ) hdf - indent - level ) ) <nl> - ( setq cur - indent ( current - indentation ) ) ) ) ) <nl> - ( if cur - indent <nl> - ( indent - line - to cur - indent ) <nl> - ( indent - line - to 0 ) ) ) ) ) <nl> - <nl> - ( defun hdf - mode ( ) <nl> - " Mode for editing HDF files " <nl> - ( interactive ) <nl> - ( kill - all - local - variables ) <nl> - ( set ( make - local - variable ' font - lock - defaults ) <nl> - ' ( hdf - font - lock - keywords nil , 1 ) ) <nl> - ( setq major - mode ' hdf - mode ) <nl> - ( setq mode - name " HDF " ) <nl> - ( run - hooks ' hdf - mode - hook ) <nl> - ( set ( make - local - variable ' indent - line - function ) ' hdf - indent - line ) <nl> - ) <nl> - ( provide ' hdf - mode ) <nl> deleted file mode 100644 <nl> index 1da279281e7 . . 00000000000 <nl> mmm a / bin / ppp . php <nl> ppp / dev / null <nl> <nl> - < ? php <nl> - define ( ' STARTER_MARKER ' , " namespace hphp_impl_starter { } \ n " ) ; <nl> - define ( ' SPLITTER_MARKER ' , " namespace hphp_impl_splitter { } \ n " ) ; <nl> - <nl> - $ inputDir = preg_replace ( ' # / $ # ' , ' ' , $ argv [ 1 ] ) ; / / stripping trailing slash <nl> - <nl> - $ files = array ( ) ; <nl> - exec ( " cd $ inputDir & & find cpp - name * . cpp " , $ files ) ; <nl> - <nl> - $ sizes = array ( ) ; <nl> - $ clusterSize = calculate_cluster_size ( $ sizes , $ inputDir , $ files ) ; <nl> - <nl> - $ merges = $ splits = $ noops = array ( ) ; <nl> - compute_merge_splits ( $ merges , $ splits , $ sizes , $ clusterSize ) ; <nl> - <nl> - / / hzhao : I didn ' t find merge helped that much , so output splits only <nl> - print " splits { \ n " ; <nl> - foreach ( $ splits as $ file = > $ count ) { <nl> - print " * { \ n " ; <nl> - print " name = $ file \ n " ; <nl> - print " count = $ count \ n " ; <nl> - print " } \ n " ; <nl> - } <nl> - print " } \ n " ; <nl> - <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - <nl> - function calculate_cluster_size ( & $ sizes , $ inputDir , $ files ) { <nl> - $ total = 0 ; <nl> - $ sizes = array ( ) ; <nl> - foreach ( $ files as $ file ) { <nl> - $ pp = shell_exec ( " make - C $ inputDir $ file . 
pp " ) ; <nl> - $ pos = strpos ( $ pp , STARTER_MARKER ) ; <nl> - if ( $ pos = = = false ) { <nl> - exit ( " Unable to find ImplStarter mark in $ file \ n " ) ; <nl> - } <nl> - $ size = strlen ( $ pp ) - $ pos ; <nl> - $ sizes [ $ file ] = $ size ; <nl> - $ total + = $ size ; <nl> - } <nl> - exec ( " make - C $ inputDir clobber " ) ; <nl> - <nl> - return ( int ) ( $ total / count ( $ sizes ) ) ; <nl> - } <nl> - <nl> - function compute_merge_splits ( & $ merges , & $ splits , $ sizes , $ clusterSize ) { <nl> - $ merge = array ( ) ; $ merge_size = 0 ; <nl> - foreach ( $ sizes as $ file = > $ size ) { <nl> - if ( $ size > = $ clusterSize ) { <nl> - $ splits [ $ file ] = ceil ( $ size / $ clusterSize ) ; <nl> - } else if ( $ size < $ clusterSize ) { <nl> - if ( $ merge_size + $ size > $ clusterSize ) { <nl> - $ merges [ ] = $ merge ; <nl> - $ merge = array ( ) ; <nl> - $ merge_size = 0 ; <nl> - } <nl> - $ merge [ $ file ] = $ size ; <nl> - $ merge_size + = $ size ; <nl> - } else { <nl> - $ noops [ $ file ] = $ size ; <nl> - } <nl> - } <nl> - if ( $ merge ) { <nl> - $ merges [ ] = $ merge ; <nl> - } <nl> - } <nl> - <nl> - function merge_files ( $ inputDir , $ merges ) { <nl> - $ i = 0 ; <nl> - foreach ( $ merges as $ merge ) { <nl> - if ( count ( $ merge ) > 1 ) { <nl> - + + $ i ; <nl> - $ target = sprintf ( " cpp / merge . % 03d . cpp " , $ i ) ; <nl> - $ f = fopen ( " $ inputDir / $ target " , " w + " ) ; <nl> - <nl> - $ new_contents = ' ' ; <nl> - foreach ( $ merge as $ file = > $ size ) { <nl> - $ content = file_get_contents ( " $ inputDir / $ file " ) ; <nl> - $ pos = strpos ( $ content , STARTER_MARKER ) ; <nl> - fwrite ( $ f , $ content , $ pos ) ; <nl> - $ new_contents . = substr ( $ content , $ pos + strlen ( STARTER_MARKER ) ) ; <nl> - } <nl> - <nl> - fwrite ( $ f , $ new_contents ) ; <nl> - fclose ( $ f ) ; <nl> - system ( " cd $ inputDir & & rm - f " . implode ( ' ' , array_keys ( $ merge ) ) ) ; <nl> - } <nl> - } <nl> - } <nl> - <nl> - function split_files ( $ inputDir , $ splits ) { <nl> - foreach ( $ splits as $ file = > $ count ) { <nl> - $ content = file_get_contents ( " $ inputDir / $ file " ) ; <nl> - $ header_size = $ pos = strpos ( $ content , STARTER_MARKER ) ; <nl> - $ pos0 = $ pos + strlen ( STARTER_MARKER ) ; <nl> - <nl> - $ chunk_size = ( int ) ( ( strlen ( $ content ) - $ pos ) / $ count ) ; <nl> - for ( $ i = 0 ; $ i < $ count ; $ i + + ) { <nl> - $ pos = $ pos0 + $ chunk_size ; <nl> - $ pos = @ strpos ( $ content , SPLITTER_MARKER , $ pos ) ; <nl> - if ( $ pos = = false ) { <nl> - $ pos = strlen ( $ content ) ; <nl> - } <nl> - $ f = fopen ( " $ inputDir / $ file . $ i . cpp " , " w + " ) ; <nl> - fwrite ( $ f , $ content , $ header_size ) ; <nl> - <nl> - if ( $ i > 0 ) fwrite ( $ f , " namespace HPHP { \ n " ) ; <nl> - fwrite ( $ f , substr ( $ content , $ pos0 , $ pos - $ pos0 ) ) ; <nl> - <nl> - $ pos0 = $ pos + strlen ( SPLITTER_MARKER ) ; <nl> - if ( $ pos0 > strlen ( $ content ) ) { <nl> - fclose ( $ f ) ; <nl> - break ; <nl> - } <nl> - <nl> - fwrite ( $ f , " } \ n " ) ; <nl> - fclose ( $ f ) ; <nl> - } <nl> - <nl> - system ( " rm - f $ inputDir / $ file " ) ; <nl> - } <nl> - } <nl> deleted file mode 100644 <nl> index eba6cdb7a0e . . 00000000000 <nl> mmm a / bin / report_mutex . php <nl> ppp / dev / null <nl> <nl> - < ? php <nl> - <nl> - $ server = $ argv [ 1 ] ; <nl> - $ top = $ argv [ 2 ] ; <nl> - $ translate = $ argv [ 3 ] ; <nl> - if ( ! $ top ) $ top = 20 ; <nl> - <nl> - $ ret = shell_exec ( " GET ' http : / / $ server / stats . kvp ? 
agg = * & keys = : mutex . * : ' " ) ; <nl> - $ stats = json_decode ( $ ret ) ; <nl> - if ( ! $ stats ) { <nl> - exit ( " No mutex profile data was found on server \ n " ) ; <nl> - } <nl> - <nl> - foreach ( $ stats as $ name = > $ count ) { <nl> - if ( preg_match ( ' / mutex . ( [ 0 - 9a - f : ] + ) . ( hit | time ) / ' , $ name , $ m ) ) { <nl> - $ stack = $ m [ 1 ] ; <nl> - $ type = $ m [ 2 ] ; <nl> - <nl> - if ( $ type = = ' hit ' ) { <nl> - $ hits [ $ stack ] = $ count ; <nl> - } else { <nl> - $ times [ $ stack ] = $ count ; <nl> - } <nl> - } <nl> - } <nl> - <nl> - arsort ( $ hits ) ; $ hits = array_slice ( $ hits , 0 , $ top ) ; <nl> - arsort ( $ times ) ; $ times = array_slice ( $ times , 0 , $ top ) ; <nl> - <nl> - $ thits = array ( ) ; <nl> - print str_repeat ( ' = ' , 70 ) . " \ n " ; <nl> - foreach ( $ hits as $ stack = > $ count ) { <nl> - print $ count . " x sampling hits : \ n " ; <nl> - print $ translate ? translate_stack ( $ stack ) : $ stack . " \ n " ; <nl> - print str_repeat ( ' - ' , 70 ) . " \ n " ; <nl> - } <nl> - $ ttimes = array ( ) ; <nl> - print str_repeat ( ' = ' , 70 ) . " \ n " ; <nl> - foreach ( $ times as $ stack = > $ count ) { <nl> - print ( int ) ( $ count / 1000000 ) . " seconds : \ n " ; <nl> - print $ translate ? translate_stack ( $ stack ) : $ stack . " \ n " ; <nl> - print str_repeat ( ' - ' , 70 ) . " \ n " ; <nl> - } <nl> - <nl> - function translate_stack ( $ stack ) { <nl> - global $ server ; <nl> - return shell_exec ( " GET http : / / $ server / translate ? stack = $ stack " ) ; <nl> - } <nl> - <nl> deleted file mode 100644 <nl> index 5eb187ef804 . . 00000000000 <nl> mmm a / bin / report_sizes . php <nl> ppp / dev / null <nl> <nl> - < ? php <nl> - <nl> - define ( ' PHP_ONLY ' , 1 ) ; <nl> - define ( ' NON_PHP_ONLY ' , 2 ) ; <nl> - define ( ' ALL_FILES ' , 3 ) ; <nl> - define ( ' USE_DU ' , 4 ) ; <nl> - <nl> - $ arg = 0 ; <nl> - $ dir = ' . ' ; if ( $ argc > + + $ arg ) $ dir = $ argv [ $ arg ] ; <nl> - $ file_type = PHP_ONLY ; if ( $ argc > + + $ arg ) $ file_type = $ argv [ $ arg ] ; <nl> - $ min_percentage = 2 ; if ( $ argc > + + $ arg ) $ min_percentage = $ argv [ $ arg ] ; <nl> - $ ascii = false ; if ( $ argc > + + $ arg ) $ ascii = $ argv [ $ arg ] ; <nl> - <nl> - if ( $ ascii ) { <nl> - define ( ' CHAR_V ' , ' | ' ) ; <nl> - define ( ' CHAR_H ' , ' . ' ) ; <nl> - define ( ' CHAR_T ' , ' L ' ) ; <nl> - define ( ' CHAR_L ' , ' L ' ) ; <nl> - define ( ' LINE_INDENT ' , 4 ) ; <nl> - define ( ' WORD_INDENT ' , 2 ) ; <nl> - } else { <nl> - define ( ' CHAR_V ' , " \ xe2 \ x94 \ x82 " ) ; <nl> - define ( ' CHAR_H ' , " \ xe2 \ x94 \ x80 " ) ; <nl> - define ( ' CHAR_T ' , " \ xe2 \ x94 \ x9c " ) ; <nl> - define ( ' CHAR_L ' , " \ xe2 \ x94 \ x94 " ) ; <nl> - define ( ' LINE_INDENT ' , 2 ) ; <nl> - define ( ' WORD_INDENT ' , 2 ) ; <nl> - } <nl> - <nl> - / / main <nl> - du_dir ( realpath ( $ dir ) ) ; <nl> - print " \ nTotal Size : " . ( int ) ( $ grand_total / 1024 + 0 . 5 ) . " M \ n " ; <nl> - <nl> - function du_dir ( $ dir , $ indent = array ( ) ) { <nl> - global $ min_percentage , $ grand_total ; <nl> - <nl> - if ( ! 
is_dir ( $ dir ) ) return ; <nl> - <nl> - $ total = 0 ; <nl> - $ sizes = get_file_sizes ( $ dir , $ total ) ; <nl> - if ( empty ( $ indent ) ) $ grand_total = $ total ; <nl> - <nl> - arsort ( $ sizes ) ; <nl> - $ min_size = $ grand_total * $ min_percentage / 100 ; <nl> - $ selected = array ( ) ; <nl> - foreach ( $ sizes as $ file = > $ size ) { <nl> - if ( $ size > $ min_size ) { <nl> - $ selected [ $ file ] = $ size ; <nl> - } <nl> - } <nl> - <nl> - $ index = 0 ; <nl> - foreach ( $ selected as $ file = > $ size ) { <nl> - $ last = ( + + $ index = = count ( $ selected ) ) ; <nl> - $ percentage = ( int ) ( $ size / $ grand_total * 100 + 0 . 5 ) ; <nl> - <nl> - $ mb = ( int ) ( $ size / 1024 + 0 . 5 ) ; <nl> - if ( ! empty ( $ indent ) ) { <nl> - $ first = true ; <nl> - foreach ( $ indent as $ vertical ) { <nl> - if ( $ first ) { <nl> - print str_repeat ( ' ' , WORD_INDENT ) ; <nl> - $ first = false ; <nl> - } else { <nl> - print ( $ vertical ? CHAR_V : ' ' ) . <nl> - str_repeat ( ' ' , LINE_INDENT + WORD_INDENT ) ; <nl> - } <nl> - } <nl> - print ( $ last ? CHAR_L : CHAR_T ) . str_repeat ( CHAR_H , LINE_INDENT ) ; <nl> - } <nl> - print " $ file : $ { mb } M ( $ percentage % ) \ n " ; <nl> - <nl> - $ indent [ ] = ! $ last ; <nl> - du_dir ( " $ dir / $ file " , $ indent ) ; <nl> - array_pop ( $ indent ) ; <nl> - } <nl> - } <nl> - <nl> - function get_file_sizes ( $ dir , & $ total ) { <nl> - global $ file_type ; <nl> - <nl> - if ( $ file_type = = USE_DU ) { <nl> - $ lines = array ( ) ; <nl> - exec ( ' du - L - - exclude = " * / . svn * " - - exclude = " * / . git * " - - max - depth = 1 ' . $ dir , <nl> - $ lines ) ; <nl> - <nl> - $ sizes = array ( ) ; <nl> - $ total = 0 ; <nl> - foreach ( $ lines as $ line ) { <nl> - if ( preg_match ( ' / ^ ( [ 0 - 9 ] + ) [ \ t ] + ' . preg_quote ( $ dir , ' / ' ) . ' \ / ( . * ) $ / ' , <nl> - $ line , $ m ) ) { <nl> - $ size = $ m [ 1 ] ; $ file = $ m [ 2 ] ; <nl> - $ sizes [ $ file ] = $ size ; <nl> - $ total + = $ size ; <nl> - } <nl> - } <nl> - return $ sizes ; <nl> - } <nl> - <nl> - $ cmd = ' find - L ' . $ dir . ' - type f ' . <nl> - ' - not - regex " . * / \ . svn / . * " - not - regex " . * / \ . git / . * " ' ; <nl> - if ( $ file_type = = PHP_ONLY ) { <nl> - $ cmd . = ' - regex " . * \ . php " - o - regex " . * \ . phpt " ' ; <nl> - } else if ( $ file_type = = NON_PHP_ONLY ) { <nl> - $ cmd . = ' - not - regex " . * \ . php " - not - regex " . * \ . phpt " ' ; <nl> - } <nl> - $ lines = array ( ) ; <nl> - exec ( $ cmd , $ lines ) ; <nl> - <nl> - $ sizes = array ( ) ; <nl> - $ total = 0 ; <nl> - foreach ( $ lines as $ line ) { <nl> - preg_match ( ' # ' . preg_quote ( $ dir , ' # ' ) . ' / ( [ ^ / ] + ) # ' , $ line , $ m ) ; <nl> - $ file = $ m [ 1 ] ; <nl> - $ size = filesize ( $ line ) / 1024 ; <nl> - if ( isset ( $ sizes [ $ file ] ) ) { <nl> - $ sizes [ $ file ] + = $ size ; <nl> - } else { <nl> - $ sizes [ $ file ] = $ size ; <nl> - } <nl> - $ total + = $ size ; <nl> - } <nl> - return $ sizes ; <nl> - } <nl> deleted file mode 100644 <nl> index 6a311fe4a04 . . 00000000000 <nl> mmm a / bin / schema . 
sql <nl> ppp / dev / null <nl> <nl> - CREATE TABLE ` hphp_run ` ( <nl> - ` id ` int ( 11 ) NOT NULL auto_increment , <nl> - ` branch ` varchar ( 255 ) NOT NULL default ' ' , <nl> - ` revision ` int ( 11 ) NOT NULL default ' 0 ' , <nl> - ` file ` int ( 11 ) NOT NULL default ' 0 ' , <nl> - ` line ` int ( 11 ) NOT NULL default ' 0 ' , <nl> - ` byte ` int ( 11 ) NOT NULL default ' 0 ' , <nl> - ` program ` int ( 11 ) NOT NULL default ' 0 ' , <nl> - ` function ` int ( 11 ) NOT NULL default ' 0 ' , <nl> - ` class ` int ( 11 ) NOT NULL default ' 0 ' , <nl> - ` types ` text NOT NULL , <nl> - ` time ` int ( 11 ) NOT NULL default ' 0 ' , <nl> - ` created ` timestamp NOT NULL default CURRENT_TIMESTAMP on update CURRENT_TIMESTAMP , <nl> - ` committed ` tinyint ( 4 ) NOT NULL default ' 0 ' , <nl> - PRIMARY KEY ( ` id ` ) <nl> - ) ENGINE = InnoDB DEFAULT CHARSET = latin1 ; <nl> - <nl> - CREATE TABLE ` hphp_dep ` ( <nl> - ` id ` int ( 11 ) NOT NULL auto_increment , <nl> - ` run ` int ( 11 ) NOT NULL default ' 0 ' , <nl> - ` program ` varchar ( 255 ) NOT NULL default ' ' , <nl> - ` kind ` varchar ( 255 ) NOT NULL default ' ' , <nl> - ` parent ` varchar ( 255 ) NOT NULL default ' ' , <nl> - ` parent_file ` varchar ( 255 ) NOT NULL default ' ' , <nl> - ` parent_line ` int ( 11 ) NOT NULL default ' 0 ' , <nl> - ` child ` varchar ( 255 ) NOT NULL default ' ' , <nl> - ` child_file ` varchar ( 255 ) NOT NULL default ' ' , <nl> - ` child_line ` int ( 11 ) NOT NULL default ' 0 ' , <nl> - PRIMARY KEY ( ` id ` ) , <nl> - KEY ` program ` ( ` run ` , ` program ` ) , <nl> - KEY ` parent ` ( ` run ` , ` kind ` , ` parent ` , ` child ` ) , <nl> - KEY ` child ` ( ` run ` , ` kind ` , ` child ` , ` parent ` ) <nl> - ) ENGINE = InnoDB DEFAULT CHARSET = latin1 ; <nl> - <nl> - CREATE TABLE ` hphp_err ` ( <nl> - ` id ` int ( 11 ) NOT NULL auto_increment , <nl> - ` run ` int ( 11 ) NOT NULL default ' 0 ' , <nl> - ` program ` varchar ( 255 ) NOT NULL default ' ' , <nl> - ` kind ` varchar ( 255 ) NOT NULL default ' ' , <nl> - ` construct ` bigint ( 20 ) NOT NULL default ' 0 ' , <nl> - ` file1 ` varchar ( 255 ) NOT NULL default ' ' , <nl> - ` line1 ` int ( 11 ) NOT NULL default ' 0 ' , <nl> - ` file2 ` varchar ( 255 ) NOT NULL default ' ' , <nl> - ` line2 ` int ( 11 ) NOT NULL default ' 0 ' , <nl> - ` expected_type ` int ( 11 ) NOT NULL default ' 0 ' , <nl> - ` actual_type ` int ( 11 ) NOT NULL default ' 0 ' , <nl> - ` data ` varchar ( 255 ) NOT NULL default ' ' , <nl> - ` suppressed ` int ( 11 ) NOT NULL , <nl> - PRIMARY KEY ( ` id ` ) , <nl> - KEY ` program ` ( ` run ` , ` program ` ) , <nl> - KEY ` kind ` ( ` run ` , ` kind ` ) <nl> - ) ENGINE = InnoDB DEFAULT CHARSET = latin1 ; <nl> - <nl> deleted file mode 100755 <nl> index 6d8618714d7 . . 00000000000 <nl> mmm a / bin / tcdump . py <nl> ppp / dev / null <nl> <nl> - # ! / usr / bin / env python <nl> - <nl> - # Reads lines of the following form from stdin : <nl> - # TCDump SomeName a3 bd 83 <nl> - # <nl> - # This script then takes those bytes of machine code ( expressed in hex ) and uses <nl> - # gcc + objdump to print human - readable assembly to stdout . Input lines that <nl> - # don ' t start with TCDump are ignored . <nl> - # <nl> - # These are generated from the translator by using SpaceRecorder , set to dump <nl> - # machine code . These lines will show up in hphp . log ; then you can do stuff like <nl> - # this : <nl> - # <nl> - # src $ grep DecRef hphp . log | head - n 1 | . . / bin / tcdump . 
py <nl> - <nl> - import os <nl> - import sys <nl> - import tempfile <nl> - <nl> - <nl> - def process_line ( line , counter , cfile ) : <nl> - # Use the name of the label , with a unique int after it , as the fn name <nl> - name = line [ 0 ] + str ( counter ) <nl> - cfile . write ( " void % s ( ) { " % name ) <nl> - <nl> - # Put a nop before and after the machine code to distinguish it from the <nl> - # function entry / exit stuff gcc puts in <nl> - cfile . write ( ' asm volatile ( " . byte 0x90 , ' ) <nl> - cfile . write ( " , " . join ( [ " 0x " + b for b in line [ 1 : ] ] ) ) <nl> - cfile . write ( ' , 0x90 " ) ; } ' ) <nl> - <nl> - <nl> - def main ( ) : <nl> - # delete = False because we need to use it after closing it <nl> - cfile = tempfile . NamedTemporaryFile ( delete = False ) <nl> - counter = 0 <nl> - <nl> - # Take each relevant line in the input , and convert it into a C function <nl> - for line in sys . stdin : <nl> - fields = line . strip ( ) . split ( ' ' ) <nl> - if fields [ 0 ] = = " TCDump " : <nl> - process_line ( fields [ 1 : ] , counter , cfile ) <nl> - counter + = 1 <nl> - <nl> - cfile . close ( ) <nl> - <nl> - # gcc - c it and then objdump - d it <nl> - objfile = tempfile . NamedTemporaryFile ( ) <nl> - os . system ( " gcc - c - x c - o % s % s " % ( objfile . name , cfile . name ) ) <nl> - os . system ( " objdump - d % s " % objfile . name ) <nl> - <nl> - os . unlink ( cfile . name ) <nl> - <nl> - <nl> - if __name__ = = ' __main__ ' : <nl> - main ( ) <nl> deleted file mode 100755 <nl> index e8b48ee95f1 . . 00000000000 <nl> mmm a / bin / tcspace . pl <nl> ppp / dev / null <nl> <nl> - # ! / usr / bin / perl <nl> - <nl> - # Process the TCSpace instrumentation in an hphp . log to produce <nl> - # human - readable summaries . Usage : <nl> - # $ env TRACE = tcspace : 1 . / hhvm / hhvm - v Eval . Jit = true \ <nl> - # - f my - script . php <nl> - # $ tcspace . pl . / hphp . log <nl> - <nl> - % cnts = { } ; <nl> - % occs = { } ; <nl> - <nl> - while ( < > ) { <nl> - if ( / ^ TCSpace / ) { <nl> - my ( $ unused , $ category , $ num ) = split ( ) ; <nl> - $ cnts { $ category } + = $ num ; <nl> - $ occs { $ category } + = 1 ; <nl> - } <nl> - } <nl> - <nl> - foreach $ value ( sort { int ( $ cnts { $ b } ) < = > int ( $ cnts { $ a } ) } <nl> - keys % cnts ) { <nl> - printf ( " % 24s % 6dB / % 6d \ n " , $ value , $ cnts { $ value } , <nl> - $ occs { $ value } ) ; <nl> - } <nl> - <nl> deleted file mode 100644 <nl> index 755d97b2d5a . . 00000000000 <nl> mmm a / bin / time_build . php <nl> ppp / dev / null <nl> <nl> - < ? php <nl> - <nl> - $ lines = file_get_contents ( $ argv [ 1 ] ) ; <nl> - $ lines = preg_split ( ' / \ n / ' , $ lines ) ; <nl> - <nl> - $ times = array ( ) ; <nl> - foreach ( $ lines as $ line ) { <nl> - if ( preg_match ( ' / ^ ( [ 0 - 9 \ . ] + ) . * ( [ ^ ] + \ . cpp | c ) $ / ' , $ line , $ matches ) ) { <nl> - $ time = $ matches [ 1 ] ; <nl> - $ file = $ matches [ 2 ] ; <nl> - $ times [ $ file ] = $ time ; <nl> - } else if ( preg_match ( ' / ^ ( [ 0 - 9 \ . ] + ) ( g \ + \ + - o | ar - crs ) / ' , $ line , $ matches ) ) { <nl> - $ linktime = $ matches [ 1 ] ; <nl> - } else { <nl> - print " Unknown output : $ line " ; <nl> - } <nl> - } <nl> - <nl> - asort ( $ times ) ; <nl> - foreach ( $ times as $ file = > $ time ) { <nl> - print format_time ( $ time ) . " compiling $ file \ n " ; <nl> - } <nl> - print format_time ( $ linktime ) . " linking \ n " ; <nl> - <nl> - function format_time ( $ time ) { <nl> - return ( int ) ( $ time / 60 ) . " ' " . 
<nl> - ( ( $ time % 60 ) > 9 ? ' ' : ' 0 ' ) . ( $ time % 60 ) . ' " ' ; <nl> - } <nl> deleted file mode 100644 <nl> index bbc9ea82f7b . . 00000000000 <nl> mmm a / bin / valgrind . suppression <nl> ppp / dev / null <nl> <nl> - { <nl> - Socket - 1 <nl> - Memcheck : Param <nl> - write ( buf ) <nl> - fun : __write_nocancel <nl> - fun : _IO_file_write @ @ GLIBC_2 . 2 . 5 <nl> - fun : _IO_do_write @ @ GLIBC_2 . 2 . 5 <nl> - fun : _IO_file_close_it @ @ GLIBC_2 . 2 . 5 <nl> - fun : fclose @ @ GLIBC_2 . 2 . 5 <nl> - fun : RAND_write_file <nl> - fun : _ZN14SSLInitializerC1Ev <nl> - fun : _Z41__static_initialization_and_destruction_0ii <nl> - } <nl> - { <nl> - Socket - 2 <nl> - Memcheck : Param <nl> - socketcall . sendto ( msg ) <nl> - fun : __sendto_nocancel <nl> - fun : __check_pf <nl> - fun : getaddrinfo <nl> - } <nl> - { <nl> - pthread - 1 <nl> - Memcheck : Leak <nl> - fun : calloc <nl> - fun : _dl_allocate_tls <nl> - fun : pthread_create @ @ GLIBC_2 . 2 . 5 <nl> - } <nl> - { <nl> - backtrace - 1 <nl> - Memcheck : Param <nl> - msync ( start ) <nl> - obj : / lib64 / libpthread - 2 . 3 . 5 . so <nl> - fun : access_mem <nl> - } <nl> - <nl> - { <nl> - FBML - 2 <nl> - Memcheck : Leak <nl> - fun : * <nl> - fun : * <nl> - fun : _ZN10nsHTMLTags11AddRefTableEv <nl> - } <nl> - { <nl> - FBML - 3 <nl> - Memcheck : Leak <nl> - fun : * <nl> - fun : * <nl> - fun : * <nl> - fun : _ZN10nsHTMLTags11AddRefTableEv <nl> - } <nl> - { <nl> - FBML - 4 <nl> - Memcheck : Leak <nl> - fun : * <nl> - fun : * <nl> - fun : * <nl> - fun : * <nl> - fun : _ZN10nsHTMLTags11AddRefTableEv <nl> - } <nl> - { <nl> - FBML - 5 <nl> - Memcheck : Leak <nl> - fun : * <nl> - fun : * <nl> - fun : * <nl> - fun : * <nl> - fun : * <nl> - fun : _ZN10nsHTMLTags11AddRefTableEv <nl> - } <nl> - { <nl> - FBML - 6 <nl> - Memcheck : Leak <nl> - fun : * <nl> - fun : * <nl> - fun : * <nl> - fun : * <nl> - fun : * <nl> - fun : * <nl> - fun : _ZN10nsHTMLTags11AddRefTableEv <nl> - } <nl> - { <nl> - FBML - 7 <nl> - Memcheck : Leak <nl> - fun : * <nl> - fun : * <nl> - fun : * <nl> - fun : * <nl> - fun : * <nl> - fun : * <nl> - fun : * <nl> - fun : _ZN10nsHTMLTags11AddRefTableEv <nl> - } <nl> - { <nl> - FBML - 8 <nl> - Memcheck : Leak <nl> - fun : malloc <nl> - fun : PL_NewHashTable <nl> - fun : _ZN11nsHTMLAttrs11AddRefTableEv <nl> - } <nl> - { <nl> - hdf - 1 <nl> - Memcheck : Leak <nl> - fun : calloc <nl> - fun : uListInit <nl> - fun : nerr_init <nl> - fun : hdf_init <nl> - } <nl> - { <nl> - hdf - 2 <nl> - Memcheck : Leak <nl> - fun : realloc <nl> - fun : check_resize <nl> - fun : uListAppend <nl> - fun : nerr_register <nl> - fun : nerr_init <nl> - fun : hdf_init <nl> - } <nl> - { <nl> - SharedMemroyInit - 1 <nl> - Memcheck : Leak <nl> - fun : _Znwm <nl> - fun : _ZN4HPHP19SharedMemoryManager4InitEib <nl> - } <nl> - { <nl> - inet_ntoa - 1 <nl> - Memcheck : Leak <nl> - fun : malloc <nl> - fun : inet_ntoa <nl> - } <nl> - <nl> - { <nl> - pthread - 2 <nl> - Memcheck : Leak <nl> - fun : * <nl> - fun : * <nl> - fun : pthread_once <nl> - } <nl> - { <nl> - boost - 1 <nl> - Memcheck : Leak <nl> - fun : * <nl> - fun : * <nl> - fun : * <nl> - obj : * <nl> - fun : * <nl> - obj : / usr / local / lib / libboost_program_options - gcc40 - mt - 1_35 . so . 1 . 35 . 0 <nl> - } <nl> - { <nl> - mcrypt - 1 <nl> - Memcheck : Overlap <nl> - fun : memcpy <nl> - fun : mcrypt_dlsym <nl> - } <nl> - { <nl> - zlib - 1 <nl> - Memcheck : Cond <nl> - fun : deflate_slow <nl> - fun : deflate <nl> - } <nl> mmm a / src / CMakeLists . txt <nl> ppp b / src / CMakeLists . 
txt <nl> endforeach ( ) <nl> <nl> add_custom_command ( <nl> OUTPUT runtime / base / compiler_id . h <nl> - COMMAND bin / generate_compiler_id . sh <nl> + COMMAND src / tools / generate_compiler_id . sh <nl> DEPENDS $ { CXX_SOURCES } $ { C_SOURCES } <nl> WORKING_DIRECTORY $ { HPHP_HOME } <nl> COMMENT " Generating Compiler ID " <nl> add_custom_command ( <nl> <nl> add_custom_command ( <nl> OUTPUT runtime / vm / repo_schema . h <nl> - COMMAND bin / generate_repo_schema . sh <nl> + COMMAND src / tools / generate_repo_schema . sh <nl> DEPENDS $ { CXX_SOURCES } $ { C_SOURCES } <nl> WORKING_DIRECTORY $ { HPHP_HOME } <nl> COMMENT " Generating Repo Schema ID " <nl> similarity index 100 % <nl> rename from bin / apc_sample_serializer . php <nl> rename to src / doc / apc_sample_serializer . php <nl> similarity index 100 % <nl> rename from bin / mime . hdf <nl> rename to src / doc / mime . hdf <nl> mmm a / src / doc / options . compiled <nl> ppp b / src / doc / options . compiled <nl> more processes , but it has SharedMemorySize as upper limit . <nl> <nl> There is a way to prepare APC items in dry format , serialized in binary files , <nl> and these files can be loaded ( or " primed " ) extremely fast at startup time . <nl> - To prepare these . cpp files , check bin / apc_sample_serializer . php for one way <nl> + To prepare these . cpp files , check apc_sample_serializer . php for one way <nl> of doing it . Once prepared , we can compiled them into . so that can be loaded <nl> through PrimeLibrary option . The loading can be done in parallel with <nl> LoadThread count of threads . Once loading is done , it can write to APC with <nl> similarity index 100 % <nl> rename from bin / ext_injection . php <nl> rename to src / tools / ext_injection . php <nl> similarity index 100 % <nl> rename from bin / generate_compiler_id . sh <nl> rename to src / tools / generate_compiler_id . sh <nl> similarity index 100 % <nl> rename from bin / generate_repo_schema . sh <nl> rename to src / tools / generate_repo_schema . sh <nl> similarity index 96 % <nl> rename from bin / generated_files . sh <nl> rename to src / tools / generated_files . sh <nl> mmm a / bin / generated_files . sh <nl> ppp b / src / tools / generated_files . sh <nl> HPHP = $ HPHP_HOME / src / hphp / hphp <nl> HHVM = $ HPHP_HOME / src / hhvm / hhvm <nl> [ ! - x " $ HHVM " ] & & check_err 1 " $ HHVM is not executable " <nl> <nl> + HPHP_TOOLS = $ HPHP_HOME / src / tools / <nl> + <nl> if [ " $ 1 " = " help " ] ; then <nl> echo " $ 0 gen - Build src / system / gen / * files " <nl> echo " $ 0 hhvm - Build src / system / runtime / ext / * . ext_hhvm . cpp " <nl> fi <nl> if [ " $ 1 " = " injection " - o " $ 1 " = " all " ] ; then <nl> cd $ HPHP_HOME <nl> [ $ VERBOSE - eq 1 ] & & echo " Adding INJECTION macros to extensions " <nl> - $ HHVM $ HPHP_HOME / bin / ext_injection . php <nl> + $ HHVM $ HPHP_TOOLS / ext_injection . php <nl> check_err $ ? " Failed adding injection macros " <nl> fi <nl> <nl> if [ " $ 1 " = " license " - o " $ 1 " = " all " ] ; then <nl> [ $ VERBOSE - eq 1 ] & & echo " Updating license headers " <nl> # TODO : At the moment , license . php fails on PCRE_ERROR_MATCHLIMIT <nl> # Fix that script then change this to detect errors properly <nl> - $ HHVM $ HPHP_HOME / bin / license . php 2 > & 1 | grep - v PCRE_ERROR_MATCHLIMIT <nl> + $ HHVM $ HPHP_TOOLS / license . php 2 > & 1 | grep - v PCRE_ERROR_MATCHLIMIT <nl> # check_err $ ? " Failed updating license headers " <nl> fi <nl> similarity index 90 % <nl> rename from bin / license . 
php <nl> rename to src / tools / license . php <nl> mmm a / bin / license . php <nl> ppp b / src / tools / license . php <nl> <nl> < ? php <nl> <nl> - chdir ( preg_replace ( ' # / bin / license . php $ # ' , ' / src ' , realpath ( __FILE__ ) ) ) ; <nl> + chdir ( preg_replace ( ' # / src / tools / license . php $ # ' , ' / src ' , realpath ( __FILE__ ) ) ) ; <nl> <nl> / / parse all these files <nl> $ inputs = ' find . - regex " . * \ . cpp " - or - regex " . * \ . c " - or - regex " . * \ . h " ' ; <nl> <nl> $ files = array ( ) ; <nl> exec ( $ inputs , $ files ) ; <nl> <nl> - foreach ( $ files as $ file ) { <nl> - / / excluding some files <nl> - foreach ( $ excluded as $ e ) { <nl> - if ( preg_match ( ' / ' . preg_quote ( $ e , ' / ' ) . ' / ' , $ file ) ) { <nl> - continue 2 ; <nl> - } <nl> - } <nl> - <nl> - $ contents = file_get_contents ( $ file ) ; <nl> - if ( $ contents = = = false ) { <nl> - exit ( " unable to read $ file \ n " ) ; <nl> - } <nl> - <nl> + function process_file_contents ( $ file , $ contents ) { <nl> + global $ files_external_party , $ files_zend , $ files_php , $ generated_files , <nl> + $ external_licenses , $ license_zend , $ license_hiphop , $ license_php , <nl> + $ license_external , $ built_licenses , $ generated ; <nl> $ pattern = ' / ^ [ \ n \ s ] * \ / \ * . * ? ( Copyright | PHP license ) . * ? \ * \ / \ n ( \ / \ / ' ; <nl> $ pattern . = $ generated ; <nl> $ pattern . = ' [ ^ \ n ] * \ n ) * / s ' ; <nl> <nl> / / remove existing license <nl> while ( true ) { <nl> - $ replaced = preg_replace ( $ pattern , ' ' , $ contents ) ; <nl> + $ replaced = @ preg_replace ( $ pattern , ' ' , $ contents ) ; <nl> if ( ! $ replaced | | $ replaced = = = $ contents ) { <nl> break ; <nl> } <nl> - file_put_contents ( $ file , $ replaced ) ; <nl> $ contents = $ replaced ; <nl> } <nl> <nl> foreach ( $ files_external_party as $ f = > $ license ) { <nl> if ( preg_match ( ' / ' . preg_quote ( $ f , ' / ' ) . ' / ' , $ file ) ) { <nl> if ( isset ( $ built_licenses [ $ license ] ) ) { <nl> - file_put_contents ( $ file , $ built_licenses [ $ license ] . " \ n " . $ contents ) ; <nl> - continue 2 ; <nl> + return $ built_licenses [ $ license ] . " \ n " . $ contents ; <nl> } <nl> } <nl> } <nl> <nl> / / add zend licese <nl> foreach ( $ files_zend as $ f ) { <nl> if ( preg_match ( ' / ' . preg_quote ( $ f , ' / ' ) . ' / ' , $ file ) ) { <nl> - file_put_contents ( $ file , $ license_zend . " \ n " . $ contents ) ; <nl> - continue 2 ; <nl> + return $ license_zend . " \ n " . $ contents ; <nl> } <nl> } <nl> <nl> / / add php licese <nl> foreach ( $ files_php as $ f ) { <nl> if ( preg_match ( ' / ' . preg_quote ( $ f , ' / ' ) . ' / ' , $ file ) ) { <nl> - file_put_contents ( $ file , $ license_php . " \ n " . $ contents ) ; <nl> - continue 2 ; <nl> + return $ license_php . " \ n " . $ contents ; <nl> } <nl> } <nl> <nl> <nl> } <nl> } <nl> <nl> - / / add hiphop licese <nl> - file_put_contents ( $ file , $ license_hiphop . " \ n " . $ contents ) ; <nl> + / / add hiphop license <nl> + return $ license_hiphop . " \ n " . $ contents ; <nl> + } <nl> + <nl> + foreach ( $ files as $ file ) { <nl> + / / excluding some files <nl> + foreach ( $ excluded as $ e ) { <nl> + if ( preg_match ( ' / ' . preg_quote ( $ e , ' / ' ) . 
' / ' , $ file ) ) { <nl> + continue 2 ; <nl> + } <nl> + } <nl> + <nl> + $ contents = file_get_contents ( $ file ) ; <nl> + if ( $ contents = = = false ) { <nl> + exit ( " unable to read $ file \ n " ) ; <nl> + } <nl> + <nl> + $ new_contents = process_file_contents ( $ file , $ contents ) ; <nl> + if ( $ new_contents ! = = $ contents ) { <nl> + echo " Updating license for $ file \ n " ; <nl> + file_put_contents ( $ file , $ new_contents ) ; <nl> + } <nl> } <nl> | Move / delete various files from hphp / bin , change tools to a link | facebook/hhvm | dc416fafe46a55159c5b9948cded72dd2d570541 | 2013-01-30T20:19:04Z |